From b5459eb987d89c4759c31123a7baa0a0d962c024 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 15 Apr 2018 13:21:52 -0400 Subject: add @sqrt built-in function See #767 --- src/analyze.cpp | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index c73e6b39e3..9092da6e3b 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -5801,9 +5801,11 @@ uint32_t zig_llvm_fn_key_hash(ZigLLVMFnKey x) { case ZigLLVMFnIdClz: return (uint32_t)(x.data.clz.bit_count) * (uint32_t)2428952817; case ZigLLVMFnIdFloor: - return (uint32_t)(x.data.floor_ceil.bit_count) * (uint32_t)1899859168; + return (uint32_t)(x.data.floating.bit_count) * (uint32_t)1899859168; case ZigLLVMFnIdCeil: - return (uint32_t)(x.data.floor_ceil.bit_count) * (uint32_t)1953839089; + return (uint32_t)(x.data.floating.bit_count) * (uint32_t)1953839089; + case ZigLLVMFnIdSqrt: + return (uint32_t)(x.data.floating.bit_count) * (uint32_t)2225366385; case ZigLLVMFnIdOverflowArithmetic: return ((uint32_t)(x.data.overflow_arithmetic.bit_count) * 87135777) + ((uint32_t)(x.data.overflow_arithmetic.add_sub_mul) * 31640542) + @@ -5822,7 +5824,8 @@ bool zig_llvm_fn_key_eql(ZigLLVMFnKey a, ZigLLVMFnKey b) { return a.data.clz.bit_count == b.data.clz.bit_count; case ZigLLVMFnIdFloor: case ZigLLVMFnIdCeil: - return a.data.floor_ceil.bit_count == b.data.floor_ceil.bit_count; + case ZigLLVMFnIdSqrt: + return a.data.floating.bit_count == b.data.floating.bit_count; case ZigLLVMFnIdOverflowArithmetic: return (a.data.overflow_arithmetic.bit_count == b.data.overflow_arithmetic.bit_count) && (a.data.overflow_arithmetic.add_sub_mul == b.data.overflow_arithmetic.add_sub_mul) && -- cgit v1.2.3 From 96ebd8b23b39e2d4019a8019a6774d7c3d20149d Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 16 Apr 2018 22:33:34 -0400 Subject: fix windows not respecting --msvc-lib-dir, --kernel32-lib-dir I believe this was a regression caused by 51a6ff18d454f4cb0faa0f1837df9f0c55a80b43 closes #927 --- src/analyze.cpp | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index 9092da6e3b..ca18208ba9 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -4421,24 +4421,30 @@ void find_libc_lib_path(CodeGen *g) { if (g->zig_target.os == OsWindows) { ZigWindowsSDK *sdk = get_windows_sdk(g); - Buf* vc_lib_dir = buf_alloc(); - if (os_get_win32_vcruntime_path(vc_lib_dir, g->zig_target.arch.arch)) { - zig_panic("Unable to determine vcruntime path."); + if (g->msvc_lib_dir == nullptr) { + Buf* vc_lib_dir = buf_alloc(); + if (os_get_win32_vcruntime_path(vc_lib_dir, g->zig_target.arch.arch)) { + zig_panic("Unable to determine vcruntime path."); + } + g->msvc_lib_dir = vc_lib_dir; } - Buf* ucrt_lib_path = buf_alloc(); - if (os_get_win32_ucrt_lib_path(sdk, ucrt_lib_path, g->zig_target.arch.arch)) { - zig_panic("Unable to determine ucrt path."); + if (g->libc_lib_dir == nullptr) { + Buf* ucrt_lib_path = buf_alloc(); + if (os_get_win32_ucrt_lib_path(sdk, ucrt_lib_path, g->zig_target.arch.arch)) { + zig_panic("Unable to determine ucrt path."); + } + g->libc_lib_dir = ucrt_lib_path; } - Buf* kern_lib_path = buf_alloc(); - if (os_get_win32_kern32_path(sdk, kern_lib_path, g->zig_target.arch.arch)) { - zig_panic("Unable to determine kernel32 path."); + if (g->kernel32_lib_dir == nullptr) { + Buf* kern_lib_path = buf_alloc(); + if (os_get_win32_kern32_path(sdk, kern_lib_path, g->zig_target.arch.arch)) { + 
zig_panic("Unable to determine kernel32 path."); + } + g->kernel32_lib_dir = kern_lib_path; } - g->msvc_lib_dir = vc_lib_dir; - g->libc_lib_dir = ucrt_lib_path; - g->kernel32_lib_dir = kern_lib_path; } else if (g->zig_target.os == OsLinux) { g->libc_lib_dir = get_linux_libc_lib_path("crt1.o"); } else { -- cgit v1.2.3 From 06909ceaab8ecb33d1f41049870797a3ae721610 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 18 Apr 2018 22:21:54 -0400 Subject: support break in suspend blocks * you can label suspend blocks * labeled break supports suspend blocks See #803 --- doc/langref.html.in | 2 +- src/all_types.hpp | 13 ++++++++++ src/analyze.cpp | 9 +++++++ src/analyze.hpp | 1 + src/codegen.cpp | 1 + src/ir.cpp | 60 +++++++++++++++++++++++++++++++++++++++-------- src/parser.cpp | 25 ++++++++++++++++++-- test/cases/coroutines.zig | 18 ++++++++++++++ test/compile_errors.zig | 20 ++++++++++++++++ 9 files changed, 136 insertions(+), 13 deletions(-) (limited to 'src/analyze.cpp') diff --git a/doc/langref.html.in b/doc/langref.html.in index a5d31aada4..034b8c1629 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -5918,7 +5918,7 @@ Defer(body) = ("defer" | "deferror") body IfExpression(body) = "if" "(" Expression ")" body option("else" BlockExpression(body)) -SuspendExpression(body) = "suspend" option(("|" Symbol "|" body)) +SuspendExpression(body) = option(Symbol ":") "suspend" option(("|" Symbol "|" body)) IfErrorExpression(body) = "if" "(" Expression ")" option("|" option("*") Symbol "|") body "else" "|" Symbol "|" BlockExpression(body) diff --git a/src/all_types.hpp b/src/all_types.hpp index 88e0ba27a8..42ce01355c 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -867,6 +867,7 @@ struct AstNodeAwaitExpr { }; struct AstNodeSuspend { + Buf *name; AstNode *block; AstNode *promise_symbol; }; @@ -1757,6 +1758,7 @@ enum ScopeId { ScopeIdVarDecl, ScopeIdCImport, ScopeIdLoop, + ScopeIdSuspend, ScopeIdFnDef, ScopeIdCompTime, ScopeIdCoroPrelude, @@ -1852,6 +1854,17 @@ struct ScopeLoop { ZigList *incoming_blocks; }; +// This scope is created for a suspend block in order to have labeled +// suspend for breaking out of a suspend and for detecting if a suspend +// block is inside a suspend block. +struct ScopeSuspend { + Scope base; + + Buf *name; + IrBasicBlock *resume_block; + bool reported_err; +}; + // This scope is created for a comptime expression. 
// NodeTypeCompTime, NodeTypeSwitchExpr struct ScopeCompTime { diff --git a/src/analyze.cpp b/src/analyze.cpp index ca18208ba9..d142b86326 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -156,6 +156,14 @@ ScopeLoop *create_loop_scope(AstNode *node, Scope *parent) { return scope; } +ScopeSuspend *create_suspend_scope(AstNode *node, Scope *parent) { + assert(node->type == NodeTypeSuspend); + ScopeSuspend *scope = allocate(1); + init_scope(&scope->base, ScopeIdSuspend, node, parent); + scope->name = node->data.suspend.name; + return scope; +} + ScopeFnDef *create_fndef_scope(AstNode *node, Scope *parent, FnTableEntry *fn_entry) { ScopeFnDef *scope = allocate(1); init_scope(&scope->base, ScopeIdFnDef, node, parent); @@ -3616,6 +3624,7 @@ FnTableEntry *scope_get_fn_if_root(Scope *scope) { case ScopeIdVarDecl: case ScopeIdCImport: case ScopeIdLoop: + case ScopeIdSuspend: case ScopeIdCompTime: case ScopeIdCoroPrelude: scope = scope->parent; diff --git a/src/analyze.hpp b/src/analyze.hpp index aa4557666b..aca78f4e25 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -104,6 +104,7 @@ ScopeDeferExpr *create_defer_expr_scope(AstNode *node, Scope *parent); Scope *create_var_scope(AstNode *node, Scope *parent, VariableTableEntry *var); ScopeCImport *create_cimport_scope(AstNode *node, Scope *parent); ScopeLoop *create_loop_scope(AstNode *node, Scope *parent); +ScopeSuspend *create_suspend_scope(AstNode *node, Scope *parent); ScopeFnDef *create_fndef_scope(AstNode *node, Scope *parent, FnTableEntry *fn_entry); ScopeDecls *create_decls_scope(AstNode *node, Scope *parent, TypeTableEntry *container_type, ImportTableEntry *import); Scope *create_comptime_scope(AstNode *node, Scope *parent); diff --git a/src/codegen.cpp b/src/codegen.cpp index a7d373e9d0..5b51d9e755 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -654,6 +654,7 @@ static ZigLLVMDIScope *get_di_scope(CodeGen *g, Scope *scope) { } case ScopeIdDeferExpr: case ScopeIdLoop: + case ScopeIdSuspend: case ScopeIdCompTime: case ScopeIdCoroPrelude: return get_di_scope(g, scope->parent); diff --git a/src/ir.cpp b/src/ir.cpp index 89193a4c27..dcfe3afb48 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -2829,6 +2829,18 @@ static void ir_set_cursor_at_end_and_append_block(IrBuilder *irb, IrBasicBlock * ir_set_cursor_at_end(irb, basic_block); } +static ScopeSuspend *get_scope_suspend(Scope *scope) { + while (scope) { + if (scope->id == ScopeIdSuspend) + return (ScopeSuspend *)scope; + if (scope->id == ScopeIdFnDef) + return nullptr; + + scope = scope->parent; + } + return nullptr; +} + static ScopeDeferExpr *get_scope_defer_expr(Scope *scope) { while (scope) { if (scope->id == ScopeIdDeferExpr) @@ -5665,6 +5677,15 @@ static IrInstruction *ir_gen_return_from_block(IrBuilder *irb, Scope *break_scop return ir_build_br(irb, break_scope, node, dest_block, is_comptime); } +static IrInstruction *ir_gen_break_from_suspend(IrBuilder *irb, Scope *break_scope, AstNode *node, ScopeSuspend *suspend_scope) { + IrInstruction *is_comptime = ir_build_const_bool(irb, break_scope, node, false); + + IrBasicBlock *dest_block = suspend_scope->resume_block; + ir_gen_defers_for_block(irb, break_scope, dest_block->scope, false); + + return ir_build_br(irb, break_scope, node, dest_block, is_comptime); +} + static IrInstruction *ir_gen_break(IrBuilder *irb, Scope *break_scope, AstNode *node) { assert(node->type == NodeTypeBreak); @@ -5704,6 +5725,13 @@ static IrInstruction *ir_gen_break(IrBuilder *irb, Scope *break_scope, AstNode * assert(this_block_scope->end_block != 
nullptr); return ir_gen_return_from_block(irb, break_scope, node, this_block_scope); } + } else if (search_scope->id == ScopeIdSuspend) { + ScopeSuspend *this_suspend_scope = (ScopeSuspend *)search_scope; + if (node->data.break_expr.name != nullptr && + (this_suspend_scope->name != nullptr && buf_eql_buf(node->data.break_expr.name, this_suspend_scope->name))) + { + return ir_gen_break_from_suspend(irb, break_scope, node, this_suspend_scope); + } } search_scope = search_scope->parent; } @@ -6290,14 +6318,26 @@ static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNod ScopeDeferExpr *scope_defer_expr = get_scope_defer_expr(parent_scope); if (scope_defer_expr) { if (!scope_defer_expr->reported_err) { - add_node_error(irb->codegen, node, buf_sprintf("cannot suspend inside defer expression")); + ErrorMsg *msg = add_node_error(irb->codegen, node, buf_sprintf("cannot suspend inside defer expression")); + add_error_note(irb->codegen, msg, scope_defer_expr->base.source_node, buf_sprintf("defer here")); scope_defer_expr->reported_err = true; } return irb->codegen->invalid_instruction; } + ScopeSuspend *existing_suspend_scope = get_scope_suspend(parent_scope); + if (existing_suspend_scope) { + if (!existing_suspend_scope->reported_err) { + ErrorMsg *msg = add_node_error(irb->codegen, node, buf_sprintf("cannot suspend inside suspend block")); + add_error_note(irb->codegen, msg, existing_suspend_scope->base.source_node, buf_sprintf("other suspend block here")); + existing_suspend_scope->reported_err = true; + } + return irb->codegen->invalid_instruction; + } Scope *outer_scope = irb->exec->begin_scope; + IrBasicBlock *cleanup_block = ir_create_basic_block(irb, parent_scope, "SuspendCleanup"); + IrBasicBlock *resume_block = ir_create_basic_block(irb, parent_scope, "SuspendResume"); IrInstruction *suspend_code; IrInstruction *const_bool_false = ir_build_const_bool(irb, parent_scope, node, false); @@ -6316,28 +6356,28 @@ static IrInstruction *ir_gen_suspend(IrBuilder *irb, Scope *parent_scope, AstNod } else { child_scope = parent_scope; } + ScopeSuspend *suspend_scope = create_suspend_scope(node, child_scope); + suspend_scope->resume_block = resume_block; + child_scope = &suspend_scope->base; IrInstruction *save_token = ir_build_coro_save(irb, child_scope, node, irb->exec->coro_handle); ir_gen_node(irb, node->data.suspend.block, child_scope); - suspend_code = ir_build_coro_suspend(irb, parent_scope, node, save_token, const_bool_false); + suspend_code = ir_mark_gen(ir_build_coro_suspend(irb, parent_scope, node, save_token, const_bool_false)); } - IrBasicBlock *cleanup_block = ir_create_basic_block(irb, parent_scope, "SuspendCleanup"); - IrBasicBlock *resume_block = ir_create_basic_block(irb, parent_scope, "SuspendResume"); - IrInstructionSwitchBrCase *cases = allocate(2); - cases[0].value = ir_build_const_u8(irb, parent_scope, node, 0); + cases[0].value = ir_mark_gen(ir_build_const_u8(irb, parent_scope, node, 0)); cases[0].block = resume_block; - cases[1].value = ir_build_const_u8(irb, parent_scope, node, 1); + cases[1].value = ir_mark_gen(ir_build_const_u8(irb, parent_scope, node, 1)); cases[1].block = cleanup_block; - ir_build_switch_br(irb, parent_scope, node, suspend_code, irb->exec->coro_suspend_block, - 2, cases, const_bool_false); + ir_mark_gen(ir_build_switch_br(irb, parent_scope, node, suspend_code, irb->exec->coro_suspend_block, + 2, cases, const_bool_false)); ir_set_cursor_at_end_and_append_block(irb, cleanup_block); ir_gen_defers_for_block(irb, parent_scope, outer_scope, 
true); ir_mark_gen(ir_build_br(irb, parent_scope, node, irb->exec->coro_final_cleanup_block, const_bool_false)); ir_set_cursor_at_end_and_append_block(irb, resume_block); - return ir_build_const_void(irb, parent_scope, node); + return ir_mark_gen(ir_build_const_void(irb, parent_scope, node)); } static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scope, diff --git a/src/parser.cpp b/src/parser.cpp index 2bd94033cc..4b70e904b8 100644 --- a/src/parser.cpp +++ b/src/parser.cpp @@ -648,12 +648,30 @@ static AstNode *ast_parse_asm_expr(ParseContext *pc, size_t *token_index, bool m } /* -SuspendExpression(body) = "suspend" "|" Symbol "|" body +SuspendExpression(body) = option(Symbol ":") "suspend" option(("|" Symbol "|" body)) */ static AstNode *ast_parse_suspend_block(ParseContext *pc, size_t *token_index, bool mandatory) { size_t orig_token_index = *token_index; - Token *suspend_token = &pc->tokens->at(*token_index); + Token *name_token = nullptr; + Token *token = &pc->tokens->at(*token_index); + if (token->id == TokenIdSymbol) { + *token_index += 1; + Token *colon_token = &pc->tokens->at(*token_index); + if (colon_token->id == TokenIdColon) { + *token_index += 1; + name_token = token; + token = &pc->tokens->at(*token_index); + } else if (mandatory) { + ast_expect_token(pc, colon_token, TokenIdColon); + zig_unreachable(); + } else { + *token_index = orig_token_index; + return nullptr; + } + } + + Token *suspend_token = token; if (suspend_token->id == TokenIdKeywordSuspend) { *token_index += 1; } else if (mandatory) { @@ -675,6 +693,9 @@ static AstNode *ast_parse_suspend_block(ParseContext *pc, size_t *token_index, b } AstNode *node = ast_create_node(pc, NodeTypeSuspend, suspend_token); + if (name_token != nullptr) { + node->data.suspend.name = token_buf(name_token); + } node->data.suspend.promise_symbol = ast_parse_symbol(pc, token_index); ast_eat_token(pc, token_index, TokenIdBinOr); node->data.suspend.block = ast_parse_block(pc, token_index, true); diff --git a/test/cases/coroutines.zig b/test/cases/coroutines.zig index 6d28b98c9d..46055d7469 100644 --- a/test/cases/coroutines.zig +++ b/test/cases/coroutines.zig @@ -224,3 +224,21 @@ async fn printTrace(p: promise->error!void) void { } }; } + +test "break from suspend" { + var buf: [500]u8 = undefined; + var a = &std.heap.FixedBufferAllocator.init(buf[0..]).allocator; + var my_result: i32 = 1; + const p = try async testBreakFromSuspend(&my_result); + cancel p; + std.debug.assert(my_result == 2); +} + +async fn testBreakFromSuspend(my_result: &i32) void { + s: suspend |p| { + break :s; + } + *my_result += 1; + suspend; + *my_result += 1; +} diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 926e997c6e..6ac73d18a2 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -1,6 +1,26 @@ const tests = @import("tests.zig"); pub fn addCases(cases: &tests.CompileErrorContext) void { + cases.add("suspend inside suspend block", + \\const std = @import("std"); + \\ + \\export fn entry() void { + \\ var buf: [500]u8 = undefined; + \\ var a = &std.heap.FixedBufferAllocator.init(buf[0..]).allocator; + \\ const p = (async foo()) catch unreachable; + \\ cancel p; + \\} + \\ + \\async fn foo() void { + \\ suspend |p| { + \\ suspend |p1| { + \\ } + \\ } + \\} + , + ".tmp_source.zig:12:9: error: cannot suspend inside suspend block", + ".tmp_source.zig:11:5: note: other suspend block here"); + cases.add("assign inline fn to non-comptime var", \\export fn entry() void { \\ var a = b; -- cgit v1.2.3 From 
1c41f1ca6252faaa7750936c67562f1866a48075 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 22 Apr 2018 20:54:52 -0400 Subject: better error reporting for missing libc on windows closes #931 --- src/analyze.cpp | 9 ++++++--- src/os.cpp | 3 +++ 2 files changed, 9 insertions(+), 3 deletions(-) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index d142b86326..5dd7b0d18c 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -4433,7 +4433,8 @@ void find_libc_lib_path(CodeGen *g) { if (g->msvc_lib_dir == nullptr) { Buf* vc_lib_dir = buf_alloc(); if (os_get_win32_vcruntime_path(vc_lib_dir, g->zig_target.arch.arch)) { - zig_panic("Unable to determine vcruntime path."); + fprintf(stderr, "Unable to determine vcruntime path. --msvc-lib-dir"); + exit(1); } g->msvc_lib_dir = vc_lib_dir; } @@ -4441,7 +4442,8 @@ void find_libc_lib_path(CodeGen *g) { if (g->libc_lib_dir == nullptr) { Buf* ucrt_lib_path = buf_alloc(); if (os_get_win32_ucrt_lib_path(sdk, ucrt_lib_path, g->zig_target.arch.arch)) { - zig_panic("Unable to determine ucrt path."); + fprintf(stderr, "Unable to determine ucrt path. --libc-lib-dir"); + exit(1); } g->libc_lib_dir = ucrt_lib_path; } @@ -4449,7 +4451,8 @@ void find_libc_lib_path(CodeGen *g) { if (g->kernel32_lib_dir == nullptr) { Buf* kern_lib_path = buf_alloc(); if (os_get_win32_kern32_path(sdk, kern_lib_path, g->zig_target.arch.arch)) { - zig_panic("Unable to determine kernel32 path."); + fprintf(stderr, "Unable to determine kernel32 path. --kernel32-lib-dir"); + exit(1); } g->kernel32_lib_dir = kern_lib_path; } diff --git a/src/os.cpp b/src/os.cpp index 97462bd658..d335d8d218 100644 --- a/src/os.cpp +++ b/src/os.cpp @@ -1334,6 +1334,9 @@ com_done:; int os_get_win32_ucrt_lib_path(ZigWindowsSDK *sdk, Buf* output_buf, ZigLLVM_ArchType platform_type) { #if defined(ZIG_OS_WINDOWS) + if (buf_len(sdk->path10) == 0 || buf_len(sdk->version10) == 0) { + return ErrorFileNotFound; + } buf_resize(output_buf, 0); buf_appendf(output_buf, "%s\\Lib\\%s\\ucrt\\", buf_ptr(&sdk->path10), buf_ptr(&sdk->version10)); switch (platform_type) { -- cgit v1.2.3 From 75328e32045d1275c03f1f37058ee0a3b775c632 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 22 Apr 2018 21:47:25 -0400 Subject: exit(1) instead of abort() for file not found --- src/analyze.cpp | 6 ++++-- src/codegen.cpp | 6 ++++-- src/os.cpp | 3 --- 3 files changed, 8 insertions(+), 7 deletions(-) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index 5dd7b0d18c..a598d7676e 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -4306,7 +4306,8 @@ bool handle_is_ptr(TypeTableEntry *type_entry) { static ZigWindowsSDK *get_windows_sdk(CodeGen *g) { if (g->win_sdk == nullptr) { if (os_find_windows_sdk(&g->win_sdk)) { - zig_panic("Unable to determine Windows SDK path."); + fprintf(stderr, "unable to determine windows sdk path\n"); + exit(1); } } assert(g->win_sdk != nullptr); @@ -4408,7 +4409,8 @@ void find_libc_include_path(CodeGen *g) { ZigWindowsSDK *sdk = get_windows_sdk(g); g->libc_include_dir = buf_alloc(); if (os_get_win32_ucrt_include_path(sdk, g->libc_include_dir)) { - zig_panic("Unable to determine libc include path."); + fprintf(stderr, "Unable to determine libc include path. 
--libc-include-dir"); + exit(1); } } else if (g->zig_target.os == OsLinux) { g->libc_include_dir = get_linux_libc_include_path(); diff --git a/src/codegen.cpp b/src/codegen.cpp index 4581c3e2b3..2d8c385f44 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -6638,12 +6638,14 @@ static void gen_root_source(CodeGen *g) { Buf *abs_full_path = buf_alloc(); int err; if ((err = os_path_real(rel_full_path, abs_full_path))) { - zig_panic("unable to open '%s': %s", buf_ptr(rel_full_path), err_str(err)); + fprintf(stderr, "unable to open '%s': %s", buf_ptr(rel_full_path), err_str(err)); + exit(1); } Buf *source_code = buf_alloc(); if ((err = os_fetch_file_path(rel_full_path, source_code, true))) { - zig_panic("unable to open '%s': %s", buf_ptr(rel_full_path), err_str(err)); + fprintf(stderr, "unable to open '%s': %s", buf_ptr(rel_full_path), err_str(err)); + exit(1); } g->root_import = add_source_file(g, g->root_package, abs_full_path, source_code); diff --git a/src/os.cpp b/src/os.cpp index c93e2887b7..97462bd658 100644 --- a/src/os.cpp +++ b/src/os.cpp @@ -1334,9 +1334,6 @@ com_done:; int os_get_win32_ucrt_lib_path(ZigWindowsSDK *sdk, Buf* output_buf, ZigLLVM_ArchType platform_type) { #if defined(ZIG_OS_WINDOWS) - if (buf_len(&sdk->path10) == 0 || buf_len(&sdk->version10) == 0) { - return ErrorFileNotFound; - } buf_resize(output_buf, 0); buf_appendf(output_buf, "%s\\Lib\\%s\\ucrt\\", buf_ptr(&sdk->path10), buf_ptr(&sdk->version10)); switch (platform_type) { -- cgit v1.2.3 From 2fc34eaa581cc31827e978fbd973bf36d2c647e2 Mon Sep 17 00:00:00 2001 From: Jimmi Holst Christensen Date: Sat, 28 Apr 2018 16:27:31 +0200 Subject: Functions with infered error set can now return literals fixes #852 --- src/analyze.cpp | 1 - src/ir.cpp | 36 +++++++++++++++++++----------------- test/cases/error.zig | 39 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 58 insertions(+), 18 deletions(-) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index a598d7676e..11715220c7 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -6131,4 +6131,3 @@ bool type_can_fail(TypeTableEntry *type_entry) { bool fn_type_can_fail(FnTypeId *fn_type_id) { return type_can_fail(fn_type_id->return_type) || fn_type_id->cc == CallingConventionAsync; } - diff --git a/src/ir.cpp b/src/ir.cpp index ec7f41d748..d8156b214e 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -8111,7 +8111,7 @@ static void update_errors_helper(CodeGen *g, ErrorTableEntry ***errors, size_t * *errors = reallocate(*errors, old_errors_count, *errors_count); } -static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_node, IrInstruction **instructions, size_t instruction_count) { +static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_node, TypeTableEntry *expected_type, IrInstruction **instructions, size_t instruction_count) { assert(instruction_count >= 1); IrInstruction *prev_inst = instructions[0]; if (type_is_invalid(prev_inst->value.type)) { @@ -8158,16 +8158,6 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod continue; } - if (prev_type->id == TypeTableEntryIdNullLit) { - prev_inst = cur_inst; - continue; - } - - if (cur_type->id == TypeTableEntryIdNullLit) { - any_are_null = true; - continue; - } - if (prev_type->id == TypeTableEntryIdErrorSet) { assert(err_set_type != nullptr); if (cur_type->id == TypeTableEntryIdErrorSet) { @@ -8427,6 +8417,16 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod } } + if (prev_type->id == 
TypeTableEntryIdNullLit) { + prev_inst = cur_inst; + continue; + } + + if (cur_type->id == TypeTableEntryIdNullLit) { + any_are_null = true; + continue; + } + if (types_match_const_cast_only(ira, prev_type, cur_type, source_node).id == ConstCastResultIdOk) { continue; } @@ -8610,6 +8610,10 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod } else if (err_set_type != nullptr) { if (prev_inst->value.type->id == TypeTableEntryIdErrorSet) { return err_set_type; + } else if (prev_inst->value.type->id == TypeTableEntryIdErrorUnion) { + return get_error_union_type(ira->codegen, err_set_type, prev_inst->value.type->data.error_union.payload_type); + } else if (expected_type != nullptr && expected_type->id == TypeTableEntryIdErrorUnion) { + return get_error_union_type(ira->codegen, err_set_type, expected_type->data.error_union.payload_type); } else { if (prev_inst->value.type->id == TypeTableEntryIdNumLitInt || prev_inst->value.type->id == TypeTableEntryIdNumLitFloat) @@ -8621,8 +8625,6 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod ir_add_error_node(ira, source_node, buf_sprintf("unable to make error union out of null literal")); return ira->codegen->builtin_types.entry_invalid; - } else if (prev_inst->value.type->id == TypeTableEntryIdErrorUnion) { - return get_error_union_type(ira->codegen, err_set_type, prev_inst->value.type->data.error_union.payload_type); } else { return get_error_union_type(ira->codegen, err_set_type, prev_inst->value.type); } @@ -10645,7 +10647,7 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp } IrInstruction *instructions[] = {op1, op2}; - TypeTableEntry *resolved_type = ir_resolve_peer_types(ira, source_node, instructions, 2); + TypeTableEntry *resolved_type = ir_resolve_peer_types(ira, source_node, nullptr, instructions, 2); if (type_is_invalid(resolved_type)) return resolved_type; type_ensure_zero_bits_known(ira->codegen, resolved_type); @@ -11035,7 +11037,7 @@ static TypeTableEntry *ir_analyze_bin_op_math(IrAnalyze *ira, IrInstructionBinOp IrInstruction *op1 = bin_op_instruction->op1->other; IrInstruction *op2 = bin_op_instruction->op2->other; IrInstruction *instructions[] = {op1, op2}; - TypeTableEntry *resolved_type = ir_resolve_peer_types(ira, bin_op_instruction->base.source_node, instructions, 2); + TypeTableEntry *resolved_type = ir_resolve_peer_types(ira, bin_op_instruction->base.source_node, nullptr, instructions, 2); if (type_is_invalid(resolved_type)) return resolved_type; IrBinOp op_id = bin_op_instruction->op_id; @@ -13004,7 +13006,7 @@ static TypeTableEntry *ir_analyze_instruction_phi(IrAnalyze *ira, IrInstructionP return first_value->value.type; } - TypeTableEntry *resolved_type = ir_resolve_peer_types(ira, phi_instruction->base.source_node, + TypeTableEntry *resolved_type = ir_resolve_peer_types(ira, phi_instruction->base.source_node, nullptr, new_incoming_values.items, new_incoming_values.length); if (type_is_invalid(resolved_type)) return resolved_type; @@ -18696,7 +18698,7 @@ TypeTableEntry *ir_analyze(CodeGen *codegen, IrExecutable *old_exec, IrExecutabl } else if (ira->src_implicit_return_type_list.length == 0) { return codegen->builtin_types.entry_unreachable; } else { - return ir_resolve_peer_types(ira, expected_type_source_node, ira->src_implicit_return_type_list.items, + return ir_resolve_peer_types(ira, expected_type_source_node, expected_type, ira->src_implicit_return_type_list.items, ira->src_implicit_return_type_list.length); } } diff --git 
a/test/cases/error.zig b/test/cases/error.zig index c64c835fc4..2a1433df5b 100644 --- a/test/cases/error.zig +++ b/test/cases/error.zig @@ -202,3 +202,42 @@ const Error = error{}; fn foo3(b: usize) Error!usize { return b; } + + +test "error: Infer error set from literals" { + _ = nullLiteral("n") catch |err| handleErrors(err); + _ = floatLiteral("n") catch |err| handleErrors(err); + _ = intLiteral("n") catch |err| handleErrors(err); + _ = comptime nullLiteral("n") catch |err| handleErrors(err); + _ = comptime floatLiteral("n") catch |err| handleErrors(err); + _ = comptime intLiteral("n") catch |err| handleErrors(err); +} + +fn handleErrors(err: var) noreturn { + switch (err) { + error.T => {} + } + + unreachable; +} + +fn nullLiteral(str: []const u8) !?i64 { + if (str[0] == 'n') + return null; + + return error.T; +} + +fn floatLiteral(str: []const u8) !?f64 { + if (str[0] == 'n') + return 1.0; + + return error.T; +} + +fn intLiteral(str: []const u8) !?i64 { + if (str[0] == 'n') + return 1; + + return error.T; +} -- cgit v1.2.3 From 837166319dd1a5df14e5d4bebd62080bb6ebdaa1 Mon Sep 17 00:00:00 2001 From: Jimmi Holst Christensen Date: Sat, 28 Apr 2018 19:02:46 +0200 Subject: Trying to fix osx build failing by setting param_info.type to nullptr --- src/analyze.cpp | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index 11715220c7..29a2fc2560 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -1261,6 +1261,10 @@ void init_fn_type_id(FnTypeId *fn_type_id, AstNode *proto_node, size_t param_cou fn_type_id->param_info = allocate_nonzero(param_count_alloc); fn_type_id->next_param_index = 0; fn_type_id->is_var_args = fn_proto->is_var_args; + + // We set param_info to 0, as param_info[i]->type is checked for null + // when checking if a parameters type has been resolved. + memset(fn_type_id->param_info, 0, sizeof(fn_type_id->param_info[i]) * fn_type_id->param_count); } static bool analyze_const_align(CodeGen *g, Scope *scope, AstNode *node, uint32_t *result) { -- cgit v1.2.3 From d6f033b42dcb49cfe45cb61821f2f451e4004686 Mon Sep 17 00:00:00 2001 From: Jimmi Holst Christensen Date: Sat, 28 Apr 2018 19:09:25 +0200 Subject: Fixed build error --- src/analyze.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index 29a2fc2560..1003cf8edf 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -1264,7 +1264,7 @@ void init_fn_type_id(FnTypeId *fn_type_id, AstNode *proto_node, size_t param_cou // We set param_info to 0, as param_info[i]->type is checked for null // when checking if a parameters type has been resolved. 
- memset(fn_type_id->param_info, 0, sizeof(fn_type_id->param_info[i]) * fn_type_id->param_count); + memset(fn_type_id->param_info, 0, sizeof(fn_type_id->param_info[0]) * fn_type_id->param_count); } static bool analyze_const_align(CodeGen *g, Scope *scope, AstNode *node, uint32_t *result) { -- cgit v1.2.3 From 73bf897b5cc25ee3f1ec9d0ba1483d779de4b7c3 Mon Sep 17 00:00:00 2001 From: Jimmi Holst Christensen Date: Sat, 28 Apr 2018 19:21:23 +0200 Subject: Using allocate instead of allocate_nonzero so we don't have to memset --- src/analyze.cpp | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index 1003cf8edf..1ecfe32f4c 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -1258,13 +1258,9 @@ void init_fn_type_id(FnTypeId *fn_type_id, AstNode *proto_node, size_t param_cou } fn_type_id->param_count = fn_proto->params.length; - fn_type_id->param_info = allocate_nonzero(param_count_alloc); + fn_type_id->param_info = allocate(param_count_alloc); fn_type_id->next_param_index = 0; fn_type_id->is_var_args = fn_proto->is_var_args; - - // We set param_info to 0, as param_info[i]->type is checked for null - // when checking if a parameters type has been resolved. - memset(fn_type_id->param_info, 0, sizeof(fn_type_id->param_info[0]) * fn_type_id->param_count); } static bool analyze_const_align(CodeGen *g, Scope *scope, AstNode *node, uint32_t *result) { -- cgit v1.2.3 From 76ab1d2b6c9eedd861920ae6b6f8ee06aa482159 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 30 Apr 2018 14:20:56 -0400 Subject: support foo.* for ptr deref See #770 --- doc/langref.html.in | 6 ++++-- src/all_types.hpp | 6 ++++++ src/analyze.cpp | 1 + src/ast_render.cpp | 9 +++++++++ src/ir.cpp | 12 ++++++++++-- src/parser.cpp | 30 ++++++++++++++++++++++++------ test/behavior.zig | 1 + test/cases/pointers.zig | 14 ++++++++++++++ 8 files changed, 69 insertions(+), 10 deletions(-) create mode 100644 test/cases/pointers.zig (limited to 'src/analyze.cpp') diff --git a/doc/langref.html.in b/doc/langref.html.in index 16fafdaad9..9fb2ebf9f5 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -5958,10 +5958,12 @@ MultiplyOperator = "||" | "*" | "/" | "%" | "**" | "*%" PrefixOpExpression = PrefixOp TypeExpr | SuffixOpExpression -SuffixOpExpression = ("async" option("<" SuffixOpExpression ">") SuffixOpExpression FnCallExpression) | PrimaryExpression option(FnCallExpression | ArrayAccessExpression | FieldAccessExpression | SliceExpression) +SuffixOpExpression = ("async" option("<" SuffixOpExpression ">") SuffixOpExpression FnCallExpression) | PrimaryExpression option(FnCallExpression | ArrayAccessExpression | FieldAccessExpression | SliceExpression | PtrDerefExpression) FieldAccessExpression = "." Symbol +PtrDerefExpression = ".*" + FnCallExpression = "(" list(Expression, ",") ")" ArrayAccessExpression = "[" Expression "]" @@ -5974,7 +5976,7 @@ ContainerInitBody = list(StructLiteralField, ",") | list(Expression, ",") StructLiteralField = "." Symbol "=" Expression -PrefixOp = "!" | "-" | "~" | "*" | ("&" option("align" "(" Expression option(":" Integer ":" Integer) ")" ) option("const") option("volatile")) | "?" | "??" | "-%" | "try" | "await" +PrefixOp = "!" | "-" | "~" | ("*" option("align" "(" Expression option(":" Integer ":" Integer) ")" ) option("const") option("volatile")) | "?" | "??" 
| "-%" | "try" | "await" PrimaryExpression = Integer | Float | String | CharLiteral | KeywordLiteral | GroupedExpression | BlockExpression(BlockOrExpression) | Symbol | ("@" Symbol FnCallExpression) | ArrayType | FnProto | AsmExpression | ContainerDecl | ("continue" option(":" Symbol)) | ErrorSetDecl | PromiseType diff --git a/src/all_types.hpp b/src/all_types.hpp index d1b2ad61d2..2993589f7b 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -379,6 +379,7 @@ enum NodeType { NodeTypeArrayAccessExpr, NodeTypeSliceExpr, NodeTypeFieldAccessExpr, + NodeTypePtrDeref, NodeTypeUse, NodeTypeBoolLiteral, NodeTypeNullLiteral, @@ -603,6 +604,10 @@ struct AstNodeFieldAccessExpr { Buf *field_name; }; +struct AstNodePtrDerefExpr { + AstNode *target; +}; + enum PrefixOp { PrefixOpInvalid, PrefixOpBoolNot, @@ -911,6 +916,7 @@ struct AstNode { AstNodeCompTime comptime_expr; AstNodeAsmExpr asm_expr; AstNodeFieldAccessExpr field_access_expr; + AstNodePtrDerefExpr ptr_deref_expr; AstNodeContainerDecl container_decl; AstNodeStructField struct_field; AstNodeStringLiteral string_literal; diff --git a/src/analyze.cpp b/src/analyze.cpp index 1ecfe32f4c..99712cbfaf 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -3275,6 +3275,7 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) { case NodeTypeUnreachable: case NodeTypeAsmExpr: case NodeTypeFieldAccessExpr: + case NodeTypePtrDeref: case NodeTypeStructField: case NodeTypeContainerInitExpr: case NodeTypeStructValueField: diff --git a/src/ast_render.cpp b/src/ast_render.cpp index 2c3e1fc873..3e5ef0fcdb 100644 --- a/src/ast_render.cpp +++ b/src/ast_render.cpp @@ -222,6 +222,8 @@ static const char *node_type_str(NodeType node_type) { return "AsmExpr"; case NodeTypeFieldAccessExpr: return "FieldAccessExpr"; + case NodeTypePtrDeref: + return "PtrDerefExpr"; case NodeTypeContainerDecl: return "ContainerDecl"; case NodeTypeStructField: @@ -696,6 +698,13 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) { print_symbol(ar, rhs); break; } + case NodeTypePtrDeref: + { + AstNode *lhs = node->data.ptr_deref_expr.target; + render_node_ungrouped(ar, lhs); + fprintf(ar->f, ".*"); + break; + } case NodeTypeUndefinedLiteral: fprintf(ar->f, "undefined"); break; diff --git a/src/ir.cpp b/src/ir.cpp index 469900bf07..8c7232722e 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -4548,8 +4548,14 @@ static IrInstruction *ir_gen_if_bool_expr(IrBuilder *irb, Scope *scope, AstNode } static IrInstruction *ir_gen_prefix_op_id_lval(IrBuilder *irb, Scope *scope, AstNode *node, IrUnOp op_id, LVal lval) { - assert(node->type == NodeTypePrefixOpExpr); - AstNode *expr_node = node->data.prefix_op_expr.primary_expr; + AstNode *expr_node; + if (node->type == NodeTypePrefixOpExpr) { + expr_node = node->data.prefix_op_expr.primary_expr; + } else if (node->type == NodeTypePtrDeref) { + expr_node = node->data.ptr_deref_expr.target; + } else { + zig_unreachable(); + } IrInstruction *value = ir_gen_node_extra(irb, expr_node, scope, lval); if (value == irb->codegen->invalid_instruction) @@ -6527,6 +6533,8 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop return ir_build_load_ptr(irb, scope, node, ptr_instruction); } + case NodeTypePtrDeref: + return ir_gen_prefix_op_id_lval(irb, scope, node, IrUnOpDereference, lval); case NodeTypeThisLiteral: return ir_lval_wrap(irb, scope, ir_gen_this_literal(irb, scope, node), lval); case NodeTypeBoolLiteral: diff --git a/src/parser.cpp b/src/parser.cpp index 4b70e904b8..c02546a99d 100644 
--- a/src/parser.cpp +++ b/src/parser.cpp @@ -1046,11 +1046,12 @@ static AstNode *ast_parse_fn_proto_partial(ParseContext *pc, size_t *token_index } /* -SuffixOpExpression = ("async" option("<" SuffixOpExpression ">") SuffixOpExpression FnCallExpression) | PrimaryExpression option(FnCallExpression | ArrayAccessExpression | FieldAccessExpression | SliceExpression) +SuffixOpExpression = ("async" option("<" SuffixOpExpression ">") SuffixOpExpression FnCallExpression) | PrimaryExpression option(FnCallExpression | ArrayAccessExpression | FieldAccessExpression | PtrDerefExpression | SliceExpression) FnCallExpression : token(LParen) list(Expression, token(Comma)) token(RParen) ArrayAccessExpression : token(LBracket) Expression token(RBracket) SliceExpression = "[" Expression ".." option(Expression) "]" FieldAccessExpression : token(Dot) token(Symbol) +PtrDerefExpression = ".*" StructLiteralField : token(Dot) token(Symbol) token(Eq) Expression */ static AstNode *ast_parse_suffix_op_expr(ParseContext *pc, size_t *token_index, bool mandatory) { @@ -1131,13 +1132,27 @@ static AstNode *ast_parse_suffix_op_expr(ParseContext *pc, size_t *token_index, } else if (first_token->id == TokenIdDot) { *token_index += 1; - Token *name_token = ast_eat_token(pc, token_index, TokenIdSymbol); + Token *token = &pc->tokens->at(*token_index); + + if (token->id == TokenIdSymbol) { + *token_index += 1; - AstNode *node = ast_create_node(pc, NodeTypeFieldAccessExpr, first_token); - node->data.field_access_expr.struct_expr = primary_expr; - node->data.field_access_expr.field_name = token_buf(name_token); + AstNode *node = ast_create_node(pc, NodeTypeFieldAccessExpr, first_token); + node->data.field_access_expr.struct_expr = primary_expr; + node->data.field_access_expr.field_name = token_buf(token); + + primary_expr = node; + } else if (token->id == TokenIdStar) { + *token_index += 1; + + AstNode *node = ast_create_node(pc, NodeTypePtrDeref, first_token); + node->data.ptr_deref_expr.target = primary_expr; + + primary_expr = node; + } else { + ast_invalid_token_error(pc, token); + } - primary_expr = node; } else { return primary_expr; } @@ -3012,6 +3027,9 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont case NodeTypeFieldAccessExpr: visit_field(&node->data.field_access_expr.struct_expr, visit, context); break; + case NodeTypePtrDeref: + visit_field(&node->data.ptr_deref_expr.target, visit, context); + break; case NodeTypeUse: visit_field(&node->data.use.expr, visit, context); break; diff --git a/test/behavior.zig b/test/behavior.zig index 2c10c6d71b..cb484b39a5 100644 --- a/test/behavior.zig +++ b/test/behavior.zig @@ -33,6 +33,7 @@ comptime { _ = @import("cases/misc.zig"); _ = @import("cases/namespace_depends_on_compile_var/index.zig"); _ = @import("cases/null.zig"); + _ = @import("cases/pointers.zig"); _ = @import("cases/pub_enum/index.zig"); _ = @import("cases/ref_var_in_if_after_if_2nd_switch_prong.zig"); _ = @import("cases/reflection.zig"); diff --git a/test/cases/pointers.zig b/test/cases/pointers.zig new file mode 100644 index 0000000000..87b3d25a74 --- /dev/null +++ b/test/cases/pointers.zig @@ -0,0 +1,14 @@ +const std = @import("std"); +const assert = std.debug.assert; + +test "dereference pointer" { + comptime testDerefPtr(); + testDerefPtr(); +} + +fn testDerefPtr() void { + var x: i32 = 1234; + var y = &x; + y.* += 1; + assert(x == 1235); +} -- cgit v1.2.3 From aa2586de182e5587c924740e80468c4c4d509500 Mon Sep 17 00:00:00 2001 From: Jimmi Holst Christensen Date: Fri, 4 May 2018 
04:27:04 +0200 Subject: Fixed extern enums having the wrong size (#970) Fixed extern enums having the wrong size See #977 --- src/analyze.cpp | 8 +++++++- test/cases/enum.zig | 9 +++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index 1ecfe32f4c..0f2fdf15de 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -2325,8 +2325,14 @@ static void resolve_enum_zero_bits(CodeGen *g, TypeTableEntry *enum_type) { HashMap occupied_tag_values = {}; occupied_tag_values.init(field_count); - TypeTableEntry *tag_int_type = get_smallest_unsigned_int_type(g, field_count - 1); + TypeTableEntry *tag_int_type; + if (enum_type->data.enumeration.layout == ContainerLayoutExtern) { + tag_int_type = get_c_int_type(g, CIntTypeInt); + } else { + tag_int_type = get_smallest_unsigned_int_type(g, field_count - 1); + } + // TODO: Are extern enums allowed to have an init_arg_expr? if (decl_node->data.container_decl.init_arg_expr != nullptr) { TypeTableEntry *wanted_tag_int_type = analyze_type_expr(g, scope, decl_node->data.container_decl.init_arg_expr); if (type_is_invalid(wanted_tag_int_type)) { diff --git a/test/cases/enum.zig b/test/cases/enum.zig index 644c989b04..0a2658eaf7 100644 --- a/test/cases/enum.zig +++ b/test/cases/enum.zig @@ -392,3 +392,12 @@ test "enum with 1 field but explicit tag type should still have the tag type" { const Enum = enum(u8) { B = 2 }; comptime @import("std").debug.assert(@sizeOf(Enum) == @sizeOf(u8)); } + +test "empty extern enum with members" { + const E = extern enum { + A, + B, + C, + }; + assert(@sizeOf(E) == @sizeOf(c_int)); +} -- cgit v1.2.3 From 9b29c872ce1836743b64c37db5272a7d7893f474 Mon Sep 17 00:00:00 2001 From: Jimmi HC Date: Wed, 9 May 2018 09:34:04 +0200 Subject: Added Slice as it's own type info in userland --- src/analyze.cpp | 6 ++-- src/analyze.hpp | 2 +- src/codegen.cpp | 4 +++ src/ir.cpp | 80 ++++++++++++++++++++++++++++-------------------- test/cases/type_info.zig | 25 +++++++++++++-- 5 files changed, 78 insertions(+), 39 deletions(-) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index 0f2fdf15de..590c946f7e 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -5931,8 +5931,8 @@ size_t type_id_len() { return array_length(all_type_ids); } -size_t type_id_index(TypeTableEntryId id) { - switch (id) { +size_t type_id_index(TypeTableEntry *entry) { + switch (entry->id) { case TypeTableEntryIdInvalid: zig_unreachable(); case TypeTableEntryIdMetaType: @@ -5952,6 +5952,8 @@ size_t type_id_index(TypeTableEntryId id) { case TypeTableEntryIdArray: return 7; case TypeTableEntryIdStruct: + if (entry->data.structure.is_slice) + return 25; return 8; case TypeTableEntryIdNumLitFloat: return 9; diff --git a/src/analyze.hpp b/src/analyze.hpp index aca78f4e25..56ca21a93f 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -174,7 +174,7 @@ void update_compile_var(CodeGen *g, Buf *name, ConstExprValue *value); const char *type_id_name(TypeTableEntryId id); TypeTableEntryId type_id_at_index(size_t index); size_t type_id_len(); -size_t type_id_index(TypeTableEntryId id); +size_t type_id_index(TypeTableEntry *entry); TypeTableEntry *get_generic_fn_type(CodeGen *g, FnTypeId *fn_type_id); bool type_is_copyable(CodeGen *g, TypeTableEntry *type_entry); LinkLib *create_link_lib(Buf *name); diff --git a/src/codegen.cpp b/src/codegen.cpp index db69708e9a..4e58f86d4b 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -6345,6 +6345,7 @@ static void 
define_builtin_compile_vars(CodeGen *g) { const TypeTableEntryId id = type_id_at_index(i); buf_appendf(contents, " %s,\n", type_id_name(id)); } + buf_appendf(contents, " Slice,\n"); buf_appendf(contents, "};\n\n"); } { @@ -6357,6 +6358,7 @@ static void define_builtin_compile_vars(CodeGen *g) { " Int: Int,\n" " Float: Float,\n" " Pointer: Pointer,\n" + " Slice: Slice,\n" " Array: Array,\n" " Struct: Struct,\n" " FloatLiteral: void,\n" @@ -6392,6 +6394,8 @@ static void define_builtin_compile_vars(CodeGen *g) { " child: type,\n" " };\n" "\n" + " pub const Slice = Pointer;\n" + "\n" " pub const Array = struct {\n" " len: usize,\n" " child: type,\n" diff --git a/src/ir.cpp b/src/ir.cpp index cdf56f7fee..035e27707a 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -15785,11 +15785,10 @@ static TypeTableEntry *ir_type_info_get_type(IrAnalyze *ira, const char *type_na Buf field_name = BUF_INIT; buf_init_from_str(&field_name, type_name); - auto entry = type_info_scope->decl_table.maybe_get(&field_name); + auto entry = type_info_scope->decl_table.get(&field_name); buf_deinit(&field_name); - assert(entry != nullptr); - TldVar *tld = (TldVar *)entry->value; + TldVar *tld = (TldVar *)entry; assert(tld->base.id == TldIdVar); VariableTableEntry *var = tld->var; @@ -16071,6 +16070,38 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t enum_field_val->data.x_struct.fields = inner_fields; }; + const auto create_ptr_like_type_info = [ira](const char *name, TypeTableEntry *ptr_type_entry) { + ConstExprValue *result = create_const_vals(1); + result->special = ConstValSpecialStatic; + result->type = ir_type_info_get_type(ira, name); + + ConstExprValue *fields = create_const_vals(4); + result->data.x_struct.fields = fields; + + // is_const: bool + ensure_field_index(result->type, "is_const", 0); + fields[0].special = ConstValSpecialStatic; + fields[0].type = ira->codegen->builtin_types.entry_bool; + fields[0].data.x_bool = ptr_type_entry->data.pointer.is_const; + // is_volatile: bool + ensure_field_index(result->type, "is_volatile", 1); + fields[1].special = ConstValSpecialStatic; + fields[1].type = ira->codegen->builtin_types.entry_bool; + fields[1].data.x_bool = ptr_type_entry->data.pointer.is_volatile; + // alignment: u32 + ensure_field_index(result->type, "alignment", 2); + fields[2].special = ConstValSpecialStatic; + fields[2].type = ira->codegen->builtin_types.entry_u32; + bigint_init_unsigned(&fields[2].data.x_bigint, ptr_type_entry->data.pointer.alignment); + // child: type + ensure_field_index(result->type, "child", 3); + fields[3].special = ConstValSpecialStatic; + fields[3].type = ira->codegen->builtin_types.entry_type; + fields[3].data.x_type = ptr_type_entry->data.pointer.child_type; + + return result; + }; + ConstExprValue *result = nullptr; switch (type_entry->id) { @@ -16139,34 +16170,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t } case TypeTableEntryIdPointer: { - result = create_const_vals(1); - result->special = ConstValSpecialStatic; - result->type = ir_type_info_get_type(ira, "Pointer"); - - ConstExprValue *fields = create_const_vals(4); - result->data.x_struct.fields = fields; - - // is_const: bool - ensure_field_index(result->type, "is_const", 0); - fields[0].special = ConstValSpecialStatic; - fields[0].type = ira->codegen->builtin_types.entry_bool; - fields[0].data.x_bool = type_entry->data.pointer.is_const; - // is_volatile: bool - ensure_field_index(result->type, "is_volatile", 1); - fields[1].special = 
ConstValSpecialStatic; - fields[1].type = ira->codegen->builtin_types.entry_bool; - fields[1].data.x_bool = type_entry->data.pointer.is_volatile; - // alignment: u32 - ensure_field_index(result->type, "alignment", 2); - fields[2].special = ConstValSpecialStatic; - fields[2].type = ira->codegen->builtin_types.entry_u32; - bigint_init_unsigned(&fields[2].data.x_bigint, type_entry->data.pointer.alignment); - // child: type - ensure_field_index(result->type, "child", 3); - fields[3].special = ConstValSpecialStatic; - fields[3].type = ira->codegen->builtin_types.entry_type; - fields[3].data.x_type = type_entry->data.pointer.child_type; - + result = create_ptr_like_type_info("Pointer", type_entry); break; } case TypeTableEntryIdArray: @@ -16436,6 +16440,16 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t } case TypeTableEntryIdStruct: { + if (type_entry->data.structure.is_slice) { + Buf ptr_field_name = BUF_INIT; + buf_init_from_str(&ptr_field_name, "ptr"); + TypeTableEntry *ptr_type = type_entry->data.structure.fields_by_name.get(&ptr_field_name)->type_entry; + ensure_complete_type(ira->codegen, ptr_type); + + result = create_ptr_like_type_info("Slice", ptr_type); + break; + } + result = create_const_vals(1); result->special = ConstValSpecialStatic; result->type = ir_type_info_get_type(ira, "Struct"); @@ -16622,7 +16636,7 @@ static TypeTableEntry *ir_analyze_instruction_type_info(IrAnalyze *ira, ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base); out_val->type = result_type; - bigint_init_unsigned(&out_val->data.x_union.tag, type_id_index(type_entry->id)); + bigint_init_unsigned(&out_val->data.x_union.tag, type_id_index(type_entry)); ConstExprValue *payload = ir_make_type_info_value(ira, type_entry); out_val->data.x_union.payload = payload; @@ -16650,7 +16664,7 @@ static TypeTableEntry *ir_analyze_instruction_type_id(IrAnalyze *ira, TypeTableEntry *result_type = var_value->data.x_type; ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base); - bigint_init_unsigned(&out_val->data.x_enum_tag, type_id_index(type_entry->id)); + bigint_init_unsigned(&out_val->data.x_enum_tag, type_id_index(type_entry)); return result_type; } diff --git a/test/cases/type_info.zig b/test/cases/type_info.zig index c9b15157e8..f10703e3ee 100644 --- a/test/cases/type_info.zig +++ b/test/cases/type_info.zig @@ -25,7 +25,7 @@ test "type info: integer, floating point type info" { } } -test "type info: pointer, array and nullable type info" { +test "type info: pointer type info" { comptime { const u32_ptr_info = @typeInfo(&u32); assert(TypeId(u32_ptr_info) == TypeId.Pointer); @@ -33,12 +33,31 @@ test "type info: pointer, array and nullable type info" { assert(u32_ptr_info.Pointer.is_volatile == false); assert(u32_ptr_info.Pointer.alignment == 4); assert(u32_ptr_info.Pointer.child == u32); + } +} + +test "type info: slice type info" { + comptime { + const u32_slice_info = @typeInfo([]u32); + assert(TypeId(u32_slice_info) == TypeId.Slice); + assert(u32_slice_info.Slice.is_const == false); + assert(u32_slice_info.Slice.is_volatile == false); + assert(u32_slice_info.Slice.alignment == 4); + assert(u32_slice_info.Slice.child == u32); + } +} +test "type info: array type info" { + comptime { const arr_info = @typeInfo([42]bool); assert(TypeId(arr_info) == TypeId.Array); assert(arr_info.Array.len == 42); assert(arr_info.Array.child == bool); + } +} +test "type info: nullable type info" { + comptime { const null_info = @typeInfo(?void); assert(TypeId(null_info) == 
TypeId.Nullable); assert(null_info.Nullable.child == void); @@ -100,11 +119,11 @@ test "type info: union info" { assert(TypeId(typeinfo_info) == TypeId.Union); assert(typeinfo_info.Union.layout == TypeInfo.ContainerLayout.Auto); assert(typeinfo_info.Union.tag_type == TypeId); - assert(typeinfo_info.Union.fields.len == 25); + assert(typeinfo_info.Union.fields.len == 26); assert(typeinfo_info.Union.fields[4].enum_field != null); assert((??typeinfo_info.Union.fields[4].enum_field).value == 4); assert(typeinfo_info.Union.fields[4].field_type == @typeOf(@typeInfo(u8).Int)); - assert(typeinfo_info.Union.defs.len == 20); + assert(typeinfo_info.Union.defs.len == 21); const TestNoTagUnion = union { Foo: void, -- cgit v1.2.3 From 43085417bec447ab31f3454e180213f102885cc8 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 24 May 2018 21:27:44 -0400 Subject: update github.com/zig-lang to github.com/ziglang --- README.md | 4 ++-- doc/langref.html.in | 8 ++++---- example/hello_world/hello_libc.zig | 2 +- src/analyze.cpp | 4 ++-- src/codegen.cpp | 6 +++--- src/ir.cpp | 8 ++++---- std/atomic/queue.zig | 2 +- std/event.zig | 8 ++++---- std/fmt/index.zig | 2 +- std/mem.zig | 2 +- std/os/index.zig | 6 +++--- std/os/test.zig | 2 +- std/os/time.zig | 2 +- std/special/compiler_rt/comparetf2.zig | 4 ++-- std/zig/ast.zig | 4 ++-- test/build_examples.zig | 2 +- test/cases/coroutines.zig | 2 +- test/cases/misc.zig | 2 +- test/compare_output.zig | 4 ++-- 19 files changed, 37 insertions(+), 37 deletions(-) (limited to 'src/analyze.cpp') diff --git a/README.md b/README.md index cf4d8179c7..b5bf13f095 100644 --- a/README.md +++ b/README.md @@ -114,7 +114,7 @@ libc. Create demo games using Zig. ## Building -[![Build Status](https://travis-ci.org/zig-lang/zig.svg?branch=master)](https://travis-ci.org/zig-lang/zig) +[![Build Status](https://travis-ci.org/ziglang/zig.svg?branch=master)](https://travis-ci.org/ziglang/zig) [![Build status](https://ci.appveyor.com/api/projects/status/4t80mk2dmucrc38i/branch/master?svg=true)](https://ci.appveyor.com/project/andrewrk/zig-d3l86/branch/master) ### Stage 1: Build Zig from C++ Source Code @@ -161,7 +161,7 @@ bin/zig build --build-file ../build.zig test ##### Windows -See https://github.com/zig-lang/zig/wiki/Building-Zig-on-Windows +See https://github.com/ziglang/zig/wiki/Building-Zig-on-Windows ### Stage 2: Build Self-Hosted Zig from Zig Source Code diff --git a/doc/langref.html.in b/doc/langref.html.in index c3c50b117b..d63c38d0fe 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -96,7 +96,7 @@

      If you search for something specific in this documentation and do not find it,
-      please file an issue or say something on IRC.
+      please file an issue or say something on IRC.

      The code samples in this document are compiled and tested as part of the main test suite of Zig.
@@ -2827,7 +2827,7 @@ test "fn reflection" {

      The number of unique error values across the entire compilation should determine the size of the error set type.
-      However right now it is hard coded to be a u16. See #768.
+      However right now it is hard coded to be a u16. See #768.

      You can implicitly cast an error from a subset to its superset:
@@ -5958,7 +5958,7 @@ pub fn main() void {
{#code_begin|exe#}
{#link_libc#}
const c = @cImport({
-    // See https://github.com/zig-lang/zig/issues/515
+    // See https://github.com/ziglang/zig/issues/515
    @cDefine("_NO_CRT_STDIO_INLINE", "1");
    @cInclude("stdio.h");
});
@@ -6301,7 +6301,7 @@ fn readU32Be() u32 {}

  • Non-Ascii Unicode line endings: U+0085 (NEL), U+2028 (LS), U+2029 (PS).
  • The codepoint U+000a (LF) (which is encoded as the single-byte value 0x0a) is the line terminator character. This character always terminates a line of zig source code (except possbly the last line of the file).

-    For some discussion on the rationale behind these design decisions, see issue #663
+    For some discussion on the rationale behind these design decisions, see issue #663

{#header_close#}
{#header_open|Grammar#}
    Root = many(TopLevelItem) EOF
    diff --git a/example/hello_world/hello_libc.zig b/example/hello_world/hello_libc.zig
    index 60123c6fd8..4a35e47b15 100644
    --- a/example/hello_world/hello_libc.zig
    +++ b/example/hello_world/hello_libc.zig
    @@ -1,5 +1,5 @@
     const c = @cImport({
    -    // See https://github.com/zig-lang/zig/issues/515
    +    // See https://github.com/ziglang/zig/issues/515
         @cDefine("_NO_CRT_STDIO_INLINE", "1");
         @cInclude("stdio.h");
         @cInclude("string.h");
    diff --git a/src/analyze.cpp b/src/analyze.cpp
    index d6137a4286..c59fde8ef6 100644
    --- a/src/analyze.cpp
    +++ b/src/analyze.cpp
    @@ -1007,7 +1007,7 @@ TypeTableEntry *get_fn_type(CodeGen *g, FnTypeId *fn_type_id) {
         if (fn_type_id->return_type != nullptr) {
             ensure_complete_type(g, fn_type_id->return_type);
         } else {
    -        zig_panic("TODO implement inferred return types https://github.com/zig-lang/zig/issues/447");
    +        zig_panic("TODO implement inferred return types https://github.com/ziglang/zig/issues/447");
         }
     
         TypeTableEntry *fn_type = new_type_table_entry(TypeTableEntryIdFn);
    @@ -1556,7 +1556,7 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c
                 return g->builtin_types.entry_invalid;
             }
             add_node_error(g, proto_node,
    -            buf_sprintf("TODO implement inferred return types https://github.com/zig-lang/zig/issues/447"));
    +            buf_sprintf("TODO implement inferred return types https://github.com/ziglang/zig/issues/447"));
             return g->builtin_types.entry_invalid;
             //return get_generic_fn_type(g, &fn_type_id);
         }
    diff --git a/src/codegen.cpp b/src/codegen.cpp
    index f1e102392a..69542b3e67 100644
    --- a/src/codegen.cpp
    +++ b/src/codegen.cpp
    @@ -582,7 +582,7 @@ static LLVMValueRef fn_llvm_value(CodeGen *g, FnTableEntry *fn_table_entry) {
                 addLLVMArgAttr(fn_table_entry->llvm_value, (unsigned)gen_index, "nonnull");
             }
             // Note: byval is disabled on windows due to an LLVM bug:
    -        // https://github.com/zig-lang/zig/issues/536
    +        // https://github.com/ziglang/zig/issues/536
             if (is_byval && g->zig_target.os != OsWindows) {
                 addLLVMArgAttr(fn_table_entry->llvm_value, (unsigned)gen_index, "byval");
             }
    @@ -3067,7 +3067,7 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr
         for (size_t param_i = 0; param_i < fn_type_id->param_count; param_i += 1) {
             FnGenParamInfo *gen_info = &fn_type->data.fn.gen_param_info[param_i];
             // Note: byval is disabled on windows due to an LLVM bug:
    -        // https://github.com/zig-lang/zig/issues/536
    +        // https://github.com/ziglang/zig/issues/536
             if (gen_info->is_byval && g->zig_target.os != OsWindows) {
                 addLLVMCallsiteAttr(result, (unsigned)gen_info->gen_index, "byval");
             }
    @@ -6730,7 +6730,7 @@ static void init(CodeGen *g) {
         const char *target_specific_features;
         if (g->is_native_target) {
             // LLVM creates invalid binaries on Windows sometimes.
    -        // See https://github.com/zig-lang/zig/issues/508
    +        // See https://github.com/ziglang/zig/issues/508
             // As a workaround we do not use target native features on Windows.
             if (g->zig_target.os == OsWindows) {
                 target_specific_cpu_args = "";
    diff --git a/src/ir.cpp b/src/ir.cpp
    index e2cbba48a7..440063d58d 100644
    --- a/src/ir.cpp
    +++ b/src/ir.cpp
    @@ -12130,7 +12130,7 @@ static bool ir_analyze_fn_call_generic_arg(IrAnalyze *ira, AstNode *fn_proto_nod
                 casted_arg->value.type->id == TypeTableEntryIdNumLitFloat)
         {
             ir_add_error(ira, casted_arg,
    -            buf_sprintf("compiler bug: integer and float literals in var args function must be casted. https://github.com/zig-lang/zig/issues/557"));
    +            buf_sprintf("compiler bug: integer and float literals in var args function must be casted. https://github.com/ziglang/zig/issues/557"));
             return false;
         }
     
    @@ -12331,7 +12331,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal
     
             if (fn_proto_node->data.fn_proto.is_var_args) {
                 ir_add_error(ira, &call_instruction->base,
    -                    buf_sprintf("compiler bug: unable to call var args function at compile time. https://github.com/zig-lang/zig/issues/313"));
    +                    buf_sprintf("compiler bug: unable to call var args function at compile time. https://github.com/ziglang/zig/issues/313"));
                 return ira->codegen->builtin_types.entry_invalid;
             }
     
    @@ -12424,7 +12424,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal
             }
             if (call_instruction->is_async && fn_type_id->is_var_args) {
                 ir_add_error(ira, call_instruction->fn_ref,
    -                buf_sprintf("compiler bug: TODO: implement var args async functions. https://github.com/zig-lang/zig/issues/557"));
    +                buf_sprintf("compiler bug: TODO: implement var args async functions. https://github.com/ziglang/zig/issues/557"));
                 return ira->codegen->builtin_types.entry_invalid;
             }
     
    @@ -12507,7 +12507,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal
                         VariableTableEntry *arg_var = get_fn_var_by_index(parent_fn_entry, arg_tuple_i);
                         if (arg_var == nullptr) {
                             ir_add_error(ira, arg,
    -                            buf_sprintf("compiler bug: var args can't handle void. https://github.com/zig-lang/zig/issues/557"));
    +                            buf_sprintf("compiler bug: var args can't handle void. https://github.com/ziglang/zig/issues/557"));
                             return ira->codegen->builtin_types.entry_invalid;
                         }
                         IrInstruction *arg_var_ptr_inst = ir_get_var_ptr(ira, arg, arg_var, true, false);
    diff --git a/std/atomic/queue.zig b/std/atomic/queue.zig
    index 288a2b3b48..35180da8d1 100644
    --- a/std/atomic/queue.zig
    +++ b/std/atomic/queue.zig
    @@ -16,7 +16,7 @@ pub fn Queue(comptime T: type) type {
                 data: T,
             };
     
    -        // TODO: well defined copy elision: https://github.com/zig-lang/zig/issues/287
    +        // TODO: well defined copy elision: https://github.com/ziglang/zig/issues/287
             pub fn init(self: &Self) void {
                 self.root.next = null;
                 self.head = &self.root;
    diff --git a/std/event.zig b/std/event.zig
    index b2e7e3ae38..558bd2a188 100644
    --- a/std/event.zig
    +++ b/std/event.zig
    @@ -148,7 +148,7 @@ pub const Loop = struct {
     };
     
     pub async fn connect(loop: &Loop, _address: &const std.net.Address) !std.os.File {
    -    var address = _address.*; // TODO https://github.com/zig-lang/zig/issues/733
    +    var address = _address.*; // TODO https://github.com/ziglang/zig/issues/733
     
         const sockfd = try std.os.posixSocket(posix.AF_INET, posix.SOCK_STREAM | posix.SOCK_CLOEXEC | posix.SOCK_NONBLOCK, posix.PROTO_tcp);
         errdefer std.os.close(sockfd);
    @@ -172,7 +172,7 @@ test "listen on a port, send bytes, receive bytes" {
     
             async<&mem.Allocator> fn handler(tcp_server: &TcpServer, _addr: &const std.net.Address, _socket: &const std.os.File) void {
                 const self = @fieldParentPtr(Self, "tcp_server", tcp_server);
    -            var socket = _socket.*; // TODO https://github.com/zig-lang/zig/issues/733
    +            var socket = _socket.*; // TODO https://github.com/ziglang/zig/issues/733
                 defer socket.close();
                 const next_handler = async errorableHandler(self, _addr, socket) catch |err| switch (err) {
                     error.OutOfMemory => @panic("unable to handle connection: out of memory"),
    @@ -186,8 +186,8 @@ test "listen on a port, send bytes, receive bytes" {
             }
     
             async fn errorableHandler(self: &Self, _addr: &const std.net.Address, _socket: &const std.os.File) !void {
    -            const addr = _addr.*; // TODO https://github.com/zig-lang/zig/issues/733
    -            var socket = _socket.*; // TODO https://github.com/zig-lang/zig/issues/733
    +            const addr = _addr.*; // TODO https://github.com/ziglang/zig/issues/733
    +            var socket = _socket.*; // TODO https://github.com/ziglang/zig/issues/733
     
                 var adapter = std.io.FileOutStream.init(&socket);
                 var stream = &adapter.stream;
    diff --git a/std/fmt/index.zig b/std/fmt/index.zig
    index 0af772b7dc..624751822a 100644
    --- a/std/fmt/index.zig
    +++ b/std/fmt/index.zig
    @@ -824,7 +824,7 @@ test "fmt.format" {
         try testFmt("file size: 63MiB\n", "file size: {Bi}\n", usize(63 * 1024 * 1024));
         try testFmt("file size: 66.06MB\n", "file size: {B2}\n", usize(63 * 1024 * 1024));
         {
    -        // Dummy field because of https://github.com/zig-lang/zig/issues/557.
    +        // Dummy field because of https://github.com/ziglang/zig/issues/557.
             const Struct = struct {
                 unused: u8,
             };
    diff --git a/std/mem.zig b/std/mem.zig
    index 3ca87b35d3..617c1de2f5 100644
    --- a/std/mem.zig
    +++ b/std/mem.zig
    @@ -702,7 +702,7 @@ test "std.mem.rotate" {
         }));
     }
     
    -// TODO: When https://github.com/zig-lang/zig/issues/649 is solved these can be done by
    +// TODO: When https://github.com/ziglang/zig/issues/649 is solved these can be done by
     // endian-casting the pointer and then dereferencing
     
     pub fn endianSwapIfLe(comptime T: type, x: T) T {
    diff --git a/std/os/index.zig b/std/os/index.zig
    index 7d19cd82c6..01e2092e1c 100644
    --- a/std/os/index.zig
    +++ b/std/os/index.zig
    @@ -239,7 +239,7 @@ pub fn close(handle: FileHandle) void {
     /// Calls POSIX read, and keeps trying if it gets interrupted.
     pub fn posixRead(fd: i32, buf: []u8) !void {
         // Linux can return EINVAL when read amount is > 0x7ffff000
    -    // See https://github.com/zig-lang/zig/pull/743#issuecomment-363158274
    +    // See https://github.com/ziglang/zig/pull/743#issuecomment-363158274
         const max_buf_len = 0x7ffff000;
     
         var index: usize = 0;
    @@ -281,7 +281,7 @@ pub const PosixWriteError = error{
     /// Calls POSIX write, and keeps trying if it gets interrupted.
     pub fn posixWrite(fd: i32, bytes: []const u8) !void {
         // Linux can return EINVAL when write amount is > 0x7ffff000
    -    // See https://github.com/zig-lang/zig/pull/743#issuecomment-363165856
    +    // See https://github.com/ziglang/zig/pull/743#issuecomment-363165856
         const max_bytes_len = 0x7ffff000;
     
         var index: usize = 0;
    @@ -2513,7 +2513,7 @@ pub const SpawnThreadError = error{
     /// caller must call wait on the returned thread
     pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!&Thread {
         // TODO compile-time call graph analysis to determine stack upper bound
    -    // https://github.com/zig-lang/zig/issues/157
    +    // https://github.com/ziglang/zig/issues/157
         const default_stack_size = 8 * 1024 * 1024;
     
         const Context = @typeOf(context);
    diff --git a/std/os/test.zig b/std/os/test.zig
    index 56d6e8b309..4dfe76224a 100644
    --- a/std/os/test.zig
    +++ b/std/os/test.zig
    @@ -12,7 +12,7 @@ const AtomicOrder = builtin.AtomicOrder;
     test "makePath, put some files in it, deleteTree" {
         if (builtin.os == builtin.Os.windows) {
             // TODO implement os.Dir for windows
    -        // https://github.com/zig-lang/zig/issues/709
    +        // https://github.com/ziglang/zig/issues/709
             return;
         }
         try os.makePath(a, "os_test_tmp/b/c");
    diff --git a/std/os/time.zig b/std/os/time.zig
    index 4fd2c4e924..3af150ab6a 100644
    --- a/std/os/time.zig
    +++ b/std/os/time.zig
    @@ -135,7 +135,7 @@ pub const Timer = struct {
         
         //At some point we may change our minds on RAW, but for now we're
         //  sticking with posix standard MONOTONIC. For more information, see: 
    -    //  https://github.com/zig-lang/zig/pull/933
    +    //  https://github.com/ziglang/zig/pull/933
         //
         //const monotonic_clock_id = switch(builtin.os) {
         //    Os.linux => linux.CLOCK_MONOTONIC_RAW,
    diff --git a/std/special/compiler_rt/comparetf2.zig b/std/special/compiler_rt/comparetf2.zig
    index c189e5803b..760c3689c0 100644
    --- a/std/special/compiler_rt/comparetf2.zig
    +++ b/std/special/compiler_rt/comparetf2.zig
    @@ -1,4 +1,4 @@
    -// TODO https://github.com/zig-lang/zig/issues/305
    +// TODO https://github.com/ziglang/zig/issues/305
     // and then make the return types of some of these functions the enum instead of c_int
     const LE_LESS = c_int(-1);
     const LE_EQUAL = c_int(0);
    @@ -59,7 +59,7 @@ pub extern fn __letf2(a: f128, b: f128) c_int {
         ;
     }
     
    -// TODO https://github.com/zig-lang/zig/issues/305
    +// TODO https://github.com/ziglang/zig/issues/305
     // and then make the return types of some of these functions the enum instead of c_int
     const GE_LESS = c_int(-1);
     const GE_EQUAL = c_int(0);
    diff --git a/std/zig/ast.zig b/std/zig/ast.zig
    index c1552b0220..1f15046a79 100644
    --- a/std/zig/ast.zig
    +++ b/std/zig/ast.zig
    @@ -113,7 +113,7 @@ pub const Error = union(enum) {
     
         pub fn render(self: &Error, tokens: &Tree.TokenList, stream: var) !void {
             switch (self.*) {
    -            // TODO https://github.com/zig-lang/zig/issues/683
    +            // TODO https://github.com/ziglang/zig/issues/683
                 @TagType(Error).InvalidToken => |*x| return x.render(tokens, stream),
                 @TagType(Error).ExpectedVarDeclOrFn => |*x| return x.render(tokens, stream),
                 @TagType(Error).ExpectedAggregateKw => |*x| return x.render(tokens, stream),
    @@ -137,7 +137,7 @@ pub const Error = union(enum) {
     
         pub fn loc(self: &Error) TokenIndex {
             switch (self.*) {
    -            // TODO https://github.com/zig-lang/zig/issues/683
    +            // TODO https://github.com/ziglang/zig/issues/683
                 @TagType(Error).InvalidToken => |x| return x.token,
                 @TagType(Error).ExpectedVarDeclOrFn => |x| return x.token,
                 @TagType(Error).ExpectedAggregateKw => |x| return x.token,
    diff --git a/test/build_examples.zig b/test/build_examples.zig
    index a3b44b9136..7a4c0f35d9 100644
    --- a/test/build_examples.zig
    +++ b/test/build_examples.zig
    @@ -9,7 +9,7 @@ pub fn addCases(cases: &tests.BuildExamplesContext) void {
         cases.add("example/guess_number/main.zig");
         if (!is_windows) {
             // TODO get this test passing on windows
    -        // See https://github.com/zig-lang/zig/issues/538
    +        // See https://github.com/ziglang/zig/issues/538
             cases.addBuildFile("example/shared_library/build.zig");
             cases.addBuildFile("example/mix_o_files/build.zig");
         }
    diff --git a/test/cases/coroutines.zig b/test/cases/coroutines.zig
    index 4aa97861ac..e983947a4c 100644
    --- a/test/cases/coroutines.zig
    +++ b/test/cases/coroutines.zig
    @@ -204,7 +204,7 @@ test "error return trace across suspend points - async return" {
         cancel p2;
     }
     
    -// TODO https://github.com/zig-lang/zig/issues/760
    +// TODO https://github.com/ziglang/zig/issues/760
     fn nonFailing() (promise->error!void) {
         return async suspendThenFail() catch unreachable;
     }
    diff --git a/test/cases/misc.zig b/test/cases/misc.zig
    index 66487a4946..deeeca8c3a 100644
    --- a/test/cases/misc.zig
    +++ b/test/cases/misc.zig
    @@ -543,7 +543,7 @@ test "@typeName" {
         comptime {
             assert(mem.eql(u8, @typeName(i64), "i64"));
             assert(mem.eql(u8, @typeName(&usize), "&usize"));
    -        // https://github.com/zig-lang/zig/issues/675
    +        // https://github.com/ziglang/zig/issues/675
             assert(mem.eql(u8, @typeName(TypeFromFn(u8)), "TypeFromFn(u8)"));
             assert(mem.eql(u8, @typeName(Struct), "Struct"));
             assert(mem.eql(u8, @typeName(Union), "Union"));
    diff --git a/test/compare_output.zig b/test/compare_output.zig
    index b01e87d4eb..905ffd37a9 100644
    --- a/test/compare_output.zig
    +++ b/test/compare_output.zig
    @@ -131,7 +131,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
             \\const is_windows = builtin.os == builtin.Os.windows;
             \\const c = @cImport({
             \\    if (is_windows) {
    -        \\        // See https://github.com/zig-lang/zig/issues/515
    +        \\        // See https://github.com/ziglang/zig/issues/515
             \\        @cDefine("_NO_CRT_STDIO_INLINE", "1");
             \\        @cInclude("io.h");
             \\        @cInclude("fcntl.h");
    @@ -316,7 +316,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void {
             \\const is_windows = builtin.os == builtin.Os.windows;
             \\const c = @cImport({
             \\    if (is_windows) {
    -        \\        // See https://github.com/zig-lang/zig/issues/515
    +        \\        // See https://github.com/ziglang/zig/issues/515
             \\        @cDefine("_NO_CRT_STDIO_INLINE", "1");
             \\        @cInclude("io.h");
             \\        @cInclude("fcntl.h");
    -- 
    cgit v1.2.3
    
    
    From 1b3aaacba260e4c8d89ac98ab856ff9b3c77dac4 Mon Sep 17 00:00:00 2001
    From: Jimmi HC 
    Date: Wed, 30 May 2018 10:34:20 +0200
     Subject: Removed copy-pasted resolve_inferred_error_set
     
     Both ir.cpp and analyze.cpp have a function resolve_inferred_error_set, which is
     a nearly exact copy-paste. This commit removes the one in ir.cpp and exposes the
     one in analyze.cpp. This also allows us to make analyze_fn_body local to
     analyze.cpp, as it is not used anywhere in ir.cpp after this change.
    
    ---
     src/analyze.cpp |  7 ++++---
     src/analyze.hpp |  2 +-
     src/ir.cpp      | 62 +++++++++++++++++++--------------------------------------
     3 files changed, 25 insertions(+), 46 deletions(-)
    
    (limited to 'src/analyze.cpp')
    
    diff --git a/src/analyze.cpp b/src/analyze.cpp
    index c59fde8ef6..b00e18a9a1 100644
    --- a/src/analyze.cpp
    +++ b/src/analyze.cpp
    @@ -25,6 +25,7 @@ static void resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type);
     static void resolve_struct_zero_bits(CodeGen *g, TypeTableEntry *struct_type);
     static void resolve_enum_zero_bits(CodeGen *g, TypeTableEntry *enum_type);
     static void resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type);
    +static void analyze_fn_body(CodeGen *g, FnTableEntry *fn_table_entry);
     
     ErrorMsg *add_node_error(CodeGen *g, AstNode *node, Buf *msg) {
         if (node->owner->c_import_node != nullptr) {
    @@ -3880,7 +3881,7 @@ static void define_local_param_variables(CodeGen *g, FnTableEntry *fn_table_entr
         }
     }
     
    -static bool analyze_resolve_inferred_error_set(CodeGen *g, TypeTableEntry *err_set_type, AstNode *source_node) {
    +bool resolve_inferred_error_set(CodeGen *g, TypeTableEntry *err_set_type, AstNode *source_node) {
         FnTableEntry *infer_fn = err_set_type->data.error_set.infer_fn;
         if (infer_fn != nullptr) {
             if (infer_fn->anal_state == FnAnalStateInvalid) {
    @@ -3932,7 +3933,7 @@ void analyze_fn_ir(CodeGen *g, FnTableEntry *fn_table_entry, AstNode *return_typ
                 }
     
                 if (inferred_err_set_type->data.error_set.infer_fn != nullptr) {
    -                if (!analyze_resolve_inferred_error_set(g, inferred_err_set_type, return_type_node)) {
    +                if (!resolve_inferred_error_set(g, inferred_err_set_type, return_type_node)) {
                         fn_table_entry->anal_state = FnAnalStateInvalid;
                         return;
                     }
    @@ -3962,7 +3963,7 @@ void analyze_fn_ir(CodeGen *g, FnTableEntry *fn_table_entry, AstNode *return_typ
         fn_table_entry->anal_state = FnAnalStateComplete;
     }
     
    -void analyze_fn_body(CodeGen *g, FnTableEntry *fn_table_entry) {
    +static void analyze_fn_body(CodeGen *g, FnTableEntry *fn_table_entry) {
         assert(fn_table_entry->anal_state != FnAnalStateProbing);
         if (fn_table_entry->anal_state != FnAnalStateReady)
             return;
    diff --git a/src/analyze.hpp b/src/analyze.hpp
    index 56ca21a93f..d538f042ce 100644
    --- a/src/analyze.hpp
    +++ b/src/analyze.hpp
    @@ -191,7 +191,7 @@ void add_fn_export(CodeGen *g, FnTableEntry *fn_table_entry, Buf *symbol_name, G
     
     ConstExprValue *get_builtin_value(CodeGen *codegen, const char *name);
     TypeTableEntry *get_ptr_to_stack_trace_type(CodeGen *g);
    -void analyze_fn_body(CodeGen *g, FnTableEntry *fn_table_entry);
    +bool resolve_inferred_error_set(CodeGen *g, TypeTableEntry *err_set_type, AstNode *source_node);
     
     TypeTableEntry *get_auto_err_set_type(CodeGen *g, FnTableEntry *fn_entry);
     
    diff --git a/src/ir.cpp b/src/ir.cpp
    index 5d182fe9b0..8d32a81e25 100644
    --- a/src/ir.cpp
    +++ b/src/ir.cpp
    @@ -7633,38 +7633,16 @@ static bool slice_is_const(TypeTableEntry *type) {
         return type->data.structure.fields[slice_ptr_index].type_entry->data.pointer.is_const;
     }
     
    -static bool resolve_inferred_error_set(IrAnalyze *ira, TypeTableEntry *err_set_type, AstNode *source_node) {
    -    assert(err_set_type->id == TypeTableEntryIdErrorSet);
    -    FnTableEntry *infer_fn = err_set_type->data.error_set.infer_fn;
    -    if (infer_fn != nullptr) {
    -        if (infer_fn->anal_state == FnAnalStateInvalid) {
    -            return false;
    -        } else if (infer_fn->anal_state == FnAnalStateReady) {
    -            analyze_fn_body(ira->codegen, infer_fn);
    -            if (err_set_type->data.error_set.infer_fn != nullptr) {
    -                assert(ira->codegen->errors.length != 0);
    -                return false;
    -            }
    -        } else {
    -            ir_add_error_node(ira, source_node,
    -                buf_sprintf("cannot resolve inferred error set '%s': function '%s' not fully analyzed yet",
    -                    buf_ptr(&err_set_type->name), buf_ptr(&err_set_type->data.error_set.infer_fn->symbol_name)));
    -            return false;
    -        }
    -    }
    -    return true;
    -}
    -
     static TypeTableEntry *get_error_set_intersection(IrAnalyze *ira, TypeTableEntry *set1, TypeTableEntry *set2,
             AstNode *source_node)
     {
         assert(set1->id == TypeTableEntryIdErrorSet);
         assert(set2->id == TypeTableEntryIdErrorSet);
     
    -    if (!resolve_inferred_error_set(ira, set1, source_node)) {
    +    if (!resolve_inferred_error_set(ira->codegen, set1, source_node)) {
             return ira->codegen->builtin_types.entry_invalid;
         }
    -    if (!resolve_inferred_error_set(ira, set2, source_node)) {
    +    if (!resolve_inferred_error_set(ira->codegen, set2, source_node)) {
             return ira->codegen->builtin_types.entry_invalid;
         }
         if (type_is_global_error_set(set1)) {
    @@ -7803,7 +7781,7 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry
                 return result;
             }
     
    -        if (!resolve_inferred_error_set(ira, contained_set, source_node)) {
    +        if (!resolve_inferred_error_set(ira->codegen, contained_set, source_node)) {
                 result.id = ConstCastResultIdUnresolvedInferredErrSet;
                 return result;
             }
    @@ -8192,7 +8170,7 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
                 err_set_type = ira->codegen->builtin_types.entry_global_error_set;
             } else {
                 err_set_type = prev_inst->value.type;
    -            if (!resolve_inferred_error_set(ira, err_set_type, prev_inst->source_node)) {
    +            if (!resolve_inferred_error_set(ira->codegen, err_set_type, prev_inst->source_node)) {
                     return ira->codegen->builtin_types.entry_invalid;
                 }
                 update_errors_helper(ira->codegen, &errors, &errors_count);
    @@ -8231,7 +8209,7 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
                     if (type_is_global_error_set(err_set_type)) {
                         continue;
                     }
    -                if (!resolve_inferred_error_set(ira, cur_type, cur_inst->source_node)) {
    +                if (!resolve_inferred_error_set(ira->codegen, cur_type, cur_inst->source_node)) {
                         return ira->codegen->builtin_types.entry_invalid;
                     }
                     if (type_is_global_error_set(cur_type)) {
    @@ -8297,7 +8275,7 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
                         continue;
                     }
                     TypeTableEntry *cur_err_set_type = cur_type->data.error_union.err_set_type;
    -                if (!resolve_inferred_error_set(ira, cur_err_set_type, cur_inst->source_node)) {
    +                if (!resolve_inferred_error_set(ira->codegen, cur_err_set_type, cur_inst->source_node)) {
                         return ira->codegen->builtin_types.entry_invalid;
                     }
                     if (type_is_global_error_set(cur_err_set_type)) {
    @@ -8360,7 +8338,7 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
                 if (err_set_type != nullptr && type_is_global_error_set(err_set_type)) {
                     continue;
                 }
    -            if (!resolve_inferred_error_set(ira, cur_type, cur_inst->source_node)) {
    +            if (!resolve_inferred_error_set(ira->codegen, cur_type, cur_inst->source_node)) {
                     return ira->codegen->builtin_types.entry_invalid;
                 }
     
    @@ -8417,11 +8395,11 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
                     TypeTableEntry *prev_err_set_type = (err_set_type == nullptr) ? prev_type->data.error_union.err_set_type : err_set_type;
                     TypeTableEntry *cur_err_set_type = cur_type->data.error_union.err_set_type;
     
    -                if (!resolve_inferred_error_set(ira, prev_err_set_type, cur_inst->source_node)) {
    +                if (!resolve_inferred_error_set(ira->codegen, prev_err_set_type, cur_inst->source_node)) {
                         return ira->codegen->builtin_types.entry_invalid;
                     }
     
    -                if (!resolve_inferred_error_set(ira, cur_err_set_type, cur_inst->source_node)) {
    +                if (!resolve_inferred_error_set(ira->codegen, cur_err_set_type, cur_inst->source_node)) {
                         return ira->codegen->builtin_types.entry_invalid;
                     }
     
    @@ -8531,7 +8509,7 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
             {
                 if (err_set_type != nullptr) {
                     TypeTableEntry *cur_err_set_type = cur_type->data.error_union.err_set_type;
    -                if (!resolve_inferred_error_set(ira, cur_err_set_type, cur_inst->source_node)) {
    +                if (!resolve_inferred_error_set(ira->codegen, cur_err_set_type, cur_inst->source_node)) {
                         return ira->codegen->builtin_types.entry_invalid;
                     }
                     if (type_is_global_error_set(cur_err_set_type) || type_is_global_error_set(err_set_type)) {
    @@ -9213,7 +9191,7 @@ static IrInstruction *ir_analyze_err_set_cast(IrAnalyze *ira, IrInstruction *sou
             if (!val)
                 return ira->codegen->invalid_instruction;
     
    -        if (!resolve_inferred_error_set(ira, wanted_type, source_instr->source_node)) {
    +        if (!resolve_inferred_error_set(ira->codegen, wanted_type, source_instr->source_node)) {
                 return ira->codegen->invalid_instruction;
             }
             if (!type_is_global_error_set(wanted_type)) {
    @@ -9654,7 +9632,7 @@ static IrInstruction *ir_analyze_int_to_err(IrAnalyze *ira, IrInstruction *sourc
             IrInstruction *result = ir_create_const(&ira->new_irb, source_instr->scope,
                     source_instr->source_node, wanted_type);
     
    -        if (!resolve_inferred_error_set(ira, wanted_type, source_instr->source_node)) {
    +        if (!resolve_inferred_error_set(ira->codegen, wanted_type, source_instr->source_node)) {
                 return ira->codegen->invalid_instruction;
             }
     
    @@ -9752,7 +9730,7 @@ static IrInstruction *ir_analyze_err_to_int(IrAnalyze *ira, IrInstruction *sourc
             zig_unreachable();
         }
         if (!type_is_global_error_set(err_set_type)) {
    -        if (!resolve_inferred_error_set(ira, err_set_type, source_instr->source_node)) {
    +        if (!resolve_inferred_error_set(ira->codegen, err_set_type, source_instr->source_node)) {
                 return ira->codegen->invalid_instruction;
             }
             if (err_set_type->data.error_set.err_count == 0) {
    @@ -10647,7 +10625,7 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp
                 return ira->codegen->builtin_types.entry_invalid;
             }
     
    -        if (!resolve_inferred_error_set(ira, intersect_type, source_node)) {
    +        if (!resolve_inferred_error_set(ira->codegen, intersect_type, source_node)) {
                 return ira->codegen->builtin_types.entry_invalid;
             }
     
    @@ -11503,11 +11481,11 @@ static TypeTableEntry *ir_analyze_merge_error_sets(IrAnalyze *ira, IrInstruction
             return ira->codegen->builtin_types.entry_type;
         }
     
    -    if (!resolve_inferred_error_set(ira, op1_type, instruction->op1->other->source_node)) {
    +    if (!resolve_inferred_error_set(ira->codegen, op1_type, instruction->op1->other->source_node)) {
             return ira->codegen->builtin_types.entry_invalid;
         }
     
    -    if (!resolve_inferred_error_set(ira, op2_type, instruction->op2->other->source_node)) {
    +    if (!resolve_inferred_error_set(ira->codegen, op2_type, instruction->op2->other->source_node)) {
             return ira->codegen->builtin_types.entry_invalid;
         }
     
    @@ -13851,7 +13829,7 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru
                     }
                     err_set_type = err_entry->set_with_only_this_in_it;
                 } else {
    -                if (!resolve_inferred_error_set(ira, child_type, field_ptr_instruction->base.source_node)) {
    +                if (!resolve_inferred_error_set(ira->codegen, child_type, field_ptr_instruction->base.source_node)) {
                         return ira->codegen->builtin_types.entry_invalid;
                     }
                     err_entry = find_err_table_entry(child_type, field_name);
    @@ -17559,7 +17537,7 @@ static TypeTableEntry *ir_analyze_instruction_member_count(IrAnalyze *ira, IrIns
         } else if (container_type->id == TypeTableEntryIdUnion) {
             result = container_type->data.unionation.src_field_count;
         } else if (container_type->id == TypeTableEntryIdErrorSet) {
    -        if (!resolve_inferred_error_set(ira, container_type, instruction->base.source_node)) {
    +        if (!resolve_inferred_error_set(ira->codegen, container_type, instruction->base.source_node)) {
                 return ira->codegen->builtin_types.entry_invalid;
             }
             if (type_is_global_error_set(container_type)) {
    @@ -17863,7 +17841,7 @@ static TypeTableEntry *ir_analyze_instruction_test_err(IrAnalyze *ira, IrInstruc
             }
     
             TypeTableEntry *err_set_type = type_entry->data.error_union.err_set_type;
    -        if (!resolve_inferred_error_set(ira, err_set_type, instruction->base.source_node)) {
    +        if (!resolve_inferred_error_set(ira->codegen, err_set_type, instruction->base.source_node)) {
                 return ira->codegen->builtin_types.entry_invalid;
             }
             if (!type_is_global_error_set(err_set_type) &&
    @@ -18131,7 +18109,7 @@ static TypeTableEntry *ir_analyze_instruction_check_switch_prongs(IrAnalyze *ira
                 }
             }
         } else if (switch_type->id == TypeTableEntryIdErrorSet) {
    -        if (!resolve_inferred_error_set(ira, switch_type, target_value->source_node)) {
    +        if (!resolve_inferred_error_set(ira->codegen, switch_type, target_value->source_node)) {
                 return ira->codegen->builtin_types.entry_invalid;
             }
     
    -- 
    cgit v1.2.3
    
    
    From fcbb7426faac5e693ef195defe2d8d2a2eddadb1 Mon Sep 17 00:00:00 2001
    From: Andrew Kelley 
    Date: Thu, 31 May 2018 10:56:59 -0400
    Subject: use * for pointer type instead of &
    
    See #770
    
    To help automatically translate code, see the
    zig-fmt-pointer-reform-2 branch.
    
     This will convert all & into *. Due to the syntax
     ambiguity (which is why we are making this change),
     even address-of & will turn into *, so you'll have
     to manually fix these instances. You will be guaranteed
     to get compile errors for them: expected 'type', found 'foo'
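     
     Roughly, the user-facing change looks like the sketch below (illustrative only,
     not code from this patch; the function name is made up):
     
         // Old syntax: `&T` was both the pointer type and the address-of operator:
         //     fn bump(x: &i32) void { x.* += 1; }
         // New syntax: pointer types are written `*T`; `&` remains address-of only.
         fn bump(x: *i32) void {
             x.* += 1;
         }
         
         test "address-of still uses &" {
             var v: i32 = 1;
             bump(&v); // an automatic rewrite may also have turned this `&` into `*`;
                       // such sites fail with "expected 'type', found ..." and need a manual fix
         }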
    ---
     build.zig                                      |  14 +-
     doc/docgen.zig                                 |  22 +-
     doc/langref.html.in                            | 212 ++++-----
     example/cat/main.zig                           |   2 +-
     example/hello_world/hello_libc.zig             |   2 +-
     example/mix_o_files/base64.zig                 |   2 +-
     example/mix_o_files/build.zig                  |   2 +-
     example/shared_library/build.zig               |   2 +-
     src-self-hosted/arg.zig                        |  12 +-
     src-self-hosted/errmsg.zig                     |  14 +-
     src-self-hosted/introspect.zig                 |   6 +-
     src-self-hosted/ir.zig                         |   2 +-
     src-self-hosted/main.zig                       |  36 +-
     src-self-hosted/module.zig                     |  30 +-
     src-self-hosted/scope.zig                      |   2 +-
     src-self-hosted/target.zig                     |  10 +-
     src/all_types.hpp                              |  31 +-
     src/analyze.cpp                                |   8 +-
     src/ast_render.cpp                             |  31 +-
     src/codegen.cpp                                |   2 +-
     src/ir.cpp                                     |  89 ++--
     src/ir_print.cpp                               |   6 +-
     src/parser.cpp                                 |  41 +-
     src/translate_c.cpp                            |  33 +-
     std/array_list.zig                             |  46 +-
     std/atomic/queue.zig                           |  32 +-
     std/atomic/stack.zig                           |  36 +-
     std/base64.zig                                 |  12 +-
     std/buf_map.zig                                |  18 +-
     std/buf_set.zig                                |  18 +-
     std/buffer.zig                                 |  40 +-
     std/build.zig                                  | 278 ++++++------
     std/c/darwin.zig                               |   8 +-
     std/c/index.zig                                |  72 ++--
     std/c/linux.zig                                |   4 +-
     std/c/windows.zig                              |   2 +-
     std/crypto/blake2.zig                          |  16 +-
     std/crypto/md5.zig                             |   8 +-
     std/crypto/sha1.zig                            |   8 +-
     std/crypto/sha2.zig                            |  16 +-
     std/crypto/sha3.zig                            |   6 +-
     std/crypto/throughput_test.zig                 |   4 +-
     std/cstr.zig                                   |  26 +-
     std/debug/failing_allocator.zig                |  10 +-
     std/debug/index.zig                            | 106 ++---
     std/elf.zig                                    |  18 +-
     std/event.zig                                  |  34 +-
     std/fmt/errol/index.zig                        |  14 +-
     std/fmt/index.zig                              |   8 +-
     std/hash/adler.zig                             |   4 +-
     std/hash/crc.zig                               |   8 +-
     std/hash/fnv.zig                               |   4 +-
     std/hash/siphash.zig                           |   8 +-
     std/hash_map.zig                               |  36 +-
     std/heap.zig                                   |  82 ++--
     std/io.zig                                     |  80 ++--
     std/json.zig                                   |  36 +-
     std/linked_list.zig                            |  32 +-
     std/macho.zig                                  |  16 +-
     std/math/complex/atan.zig                      |   4 +-
     std/math/complex/cosh.zig                      |   4 +-
     std/math/complex/exp.zig                       |   4 +-
     std/math/complex/index.zig                     |  14 +-
     std/math/complex/ldexp.zig                     |   8 +-
     std/math/complex/pow.zig                       |   2 +-
     std/math/complex/sinh.zig                      |   4 +-
     std/math/complex/sqrt.zig                      |   4 +-
     std/math/complex/tanh.zig                      |   4 +-
     std/math/hypot.zig                             |   2 +-
     std/math/index.zig                             |   4 +-
     std/mem.zig                                    |  48 +--
     std/net.zig                                    |   8 +-
     std/os/child_process.zig                       |  62 +--
     std/os/darwin.zig                              |  64 +--
     std/os/file.zig                                |  32 +-
     std/os/get_user_id.zig                         |   8 +-
     std/os/index.zig                               | 164 +++----
     std/os/linux/index.zig                         | 174 ++++----
     std/os/linux/vdso.zig                          |  36 +-
     std/os/linux/x86_64.zig                        |   8 +-
     std/os/path.zig                                |  22 +-
     std/os/test.zig                                |   2 +-
     std/os/time.zig                                |   6 +-
     std/os/windows/index.zig                       |  96 ++---
     std/os/windows/util.zig                        |  12 +-
     std/os/zen.zig                                 |  20 +-
     std/rand/index.zig                             |  46 +-
     std/rand/ziggurat.zig                          |  10 +-
     std/segmented_list.zig                         |  54 +--
     std/sort.zig                                   |  54 +--
     std/special/bootstrap.zig                      |  20 +-
     std/special/build_file_template.zig            |   4 +-
     std/special/build_runner.zig                   |   6 +-
     std/special/builtin.zig                        |   8 +-
     std/special/compiler_rt/index.zig              |   4 +-
     std/special/compiler_rt/udivmod.zig            |  18 +-
     std/special/compiler_rt/udivmoddi4.zig         |   2 +-
     std/special/compiler_rt/udivmodti4.zig         |   4 +-
     std/special/compiler_rt/udivti3.zig            |   2 +-
     std/special/compiler_rt/umodti3.zig            |   2 +-
     std/special/panic.zig                          |   2 +-
     std/unicode.zig                                |   6 +-
     std/zig/ast.zig                                | 570 ++++++++++++-------------
     std/zig/bench.zig                              |   6 +-
     std/zig/parse.zig                              | 156 +++----
     std/zig/parser_test.zig                        |   2 +-
     std/zig/render.zig                             |  56 +--
     std/zig/tokenizer.zig                          |   8 +-
     test/assemble_and_link.zig                     |   2 +-
     test/build_examples.zig                        |   2 +-
     test/cases/align.zig                           |  56 +--
     test/cases/atomics.zig                         |  12 +-
     test/cases/bugs/655.zig                        |   4 +-
     test/cases/bugs/828.zig                        |   6 +-
     test/cases/bugs/920.zig                        |   6 +-
     test/cases/cast.zig                            |  42 +-
     test/cases/const_slice_child.zig               |   6 +-
     test/cases/coroutines.zig                      |   6 +-
     test/cases/enum.zig                            |  10 +-
     test/cases/enum_with_members.zig               |   2 +-
     test/cases/eval.zig                            |  12 +-
     test/cases/field_parent_ptr.zig                |   4 +-
     test/cases/fn_in_struct_in_comptime.zig        |   6 +-
     test/cases/generics.zig                        |   8 +-
     test/cases/incomplete_struct_param_tld.zig     |   4 +-
     test/cases/math.zig                            |  18 +-
     test/cases/misc.zig                            |  48 +--
     test/cases/null.zig                            |   2 +-
     test/cases/reflection.zig                      |   2 +-
     test/cases/slice.zig                           |   2 +-
     test/cases/struct.zig                          |  28 +-
     test/cases/struct_contains_null_ptr_itself.zig |   4 +-
     test/cases/switch.zig                          |   2 +-
     test/cases/this.zig                            |   2 +-
     test/cases/type_info.zig                       |  16 +-
     test/cases/undefined.zig                       |   4 +-
     test/cases/union.zig                           |  16 +-
     test/compare_output.zig                        |  20 +-
     test/compile_errors.zig                        | 122 +++---
     test/gen_h.zig                                 |   2 +-
     test/runtime_safety.zig                        |   2 +-
     test/standalone/brace_expansion/build.zig      |   2 +-
     test/standalone/brace_expansion/main.zig       |   8 +-
     test/standalone/issue_339/build.zig            |   2 +-
     test/standalone/issue_339/test.zig             |   2 +-
     test/standalone/issue_794/build.zig            |   2 +-
     test/standalone/pkg_import/build.zig           |   2 +-
     test/standalone/use_alias/build.zig            |   2 +-
     test/tests.zig                                 | 136 +++---
     test/translate_c.zig                           |  58 +--
     150 files changed, 2162 insertions(+), 2143 deletions(-)
    
    (limited to 'src/analyze.cpp')
    
    diff --git a/build.zig b/build.zig
    index a4e3dbcdfa..109a799ac9 100644
    --- a/build.zig
    +++ b/build.zig
    @@ -10,7 +10,7 @@ const ArrayList = std.ArrayList;
     const Buffer = std.Buffer;
     const io = std.io;
     
    -pub fn build(b: &Builder) !void {
    +pub fn build(b: *Builder) !void {
         const mode = b.standardReleaseOptions();
     
         var docgen_exe = b.addExecutable("docgen", "doc/docgen.zig");
    @@ -132,7 +132,7 @@ pub fn build(b: &Builder) !void {
         test_step.dependOn(tests.addGenHTests(b, test_filter));
     }
     
    -fn dependOnLib(lib_exe_obj: &std.build.LibExeObjStep, dep: &const LibraryDep) void {
    +fn dependOnLib(lib_exe_obj: *std.build.LibExeObjStep, dep: *const LibraryDep) void {
         for (dep.libdirs.toSliceConst()) |lib_dir| {
             lib_exe_obj.addLibPath(lib_dir);
         }
    @@ -147,7 +147,7 @@ fn dependOnLib(lib_exe_obj: &std.build.LibExeObjStep, dep: &const LibraryDep) vo
         }
     }
     
    -fn addCppLib(b: &Builder, lib_exe_obj: &std.build.LibExeObjStep, cmake_binary_dir: []const u8, lib_name: []const u8) void {
    +fn addCppLib(b: *Builder, lib_exe_obj: *std.build.LibExeObjStep, cmake_binary_dir: []const u8, lib_name: []const u8) void {
         const lib_prefix = if (lib_exe_obj.target.isWindows()) "" else "lib";
         lib_exe_obj.addObjectFile(os.path.join(b.allocator, cmake_binary_dir, "zig_cpp", b.fmt("{}{}{}", lib_prefix, lib_name, lib_exe_obj.target.libFileExt())) catch unreachable);
     }
    @@ -159,7 +159,7 @@ const LibraryDep = struct {
         includes: ArrayList([]const u8),
     };
     
    -fn findLLVM(b: &Builder, llvm_config_exe: []const u8) !LibraryDep {
    +fn findLLVM(b: *Builder, llvm_config_exe: []const u8) !LibraryDep {
         const libs_output = try b.exec([][]const u8{
             llvm_config_exe,
             "--libs",
    @@ -217,7 +217,7 @@ fn findLLVM(b: &Builder, llvm_config_exe: []const u8) !LibraryDep {
         return result;
     }
     
    -pub fn installStdLib(b: &Builder, stdlib_files: []const u8) void {
    +pub fn installStdLib(b: *Builder, stdlib_files: []const u8) void {
         var it = mem.split(stdlib_files, ";");
         while (it.next()) |stdlib_file| {
             const src_path = os.path.join(b.allocator, "std", stdlib_file) catch unreachable;
    @@ -226,7 +226,7 @@ pub fn installStdLib(b: &Builder, stdlib_files: []const u8) void {
         }
     }
     
    -pub fn installCHeaders(b: &Builder, c_header_files: []const u8) void {
    +pub fn installCHeaders(b: *Builder, c_header_files: []const u8) void {
         var it = mem.split(c_header_files, ";");
         while (it.next()) |c_header_file| {
             const src_path = os.path.join(b.allocator, "c_headers", c_header_file) catch unreachable;
    @@ -235,7 +235,7 @@ pub fn installCHeaders(b: &Builder, c_header_files: []const u8) void {
         }
     }
     
    -fn nextValue(index: &usize, build_info: []const u8) []const u8 {
    +fn nextValue(index: *usize, build_info: []const u8) []const u8 {
         const start = index.*;
         while (true) : (index.* += 1) {
             switch (build_info[index.*]) {
    diff --git a/doc/docgen.zig b/doc/docgen.zig
    index 7dc444f127..fed4bb8eba 100644
    --- a/doc/docgen.zig
    +++ b/doc/docgen.zig
    @@ -104,7 +104,7 @@ const Tokenizer = struct {
             };
         }
     
    -    fn next(self: &Tokenizer) Token {
    +    fn next(self: *Tokenizer) Token {
             var result = Token{
                 .id = Token.Id.Eof,
                 .start = self.index,
    @@ -196,7 +196,7 @@ const Tokenizer = struct {
             line_end: usize,
         };
     
    -    fn getTokenLocation(self: &Tokenizer, token: &const Token) Location {
    +    fn getTokenLocation(self: *Tokenizer, token: *const Token) Location {
             var loc = Location{
                 .line = 0,
                 .column = 0,
    @@ -221,7 +221,7 @@ const Tokenizer = struct {
         }
     };
     
    -fn parseError(tokenizer: &Tokenizer, token: &const Token, comptime fmt: []const u8, args: ...) error {
    +fn parseError(tokenizer: *Tokenizer, token: *const Token, comptime fmt: []const u8, args: ...) error {
         const loc = tokenizer.getTokenLocation(token);
         warn("{}:{}:{}: error: " ++ fmt ++ "\n", tokenizer.source_file_name, loc.line + 1, loc.column + 1, args);
         if (loc.line_start <= loc.line_end) {
    @@ -244,13 +244,13 @@ fn parseError(tokenizer: &Tokenizer, token: &const Token, comptime fmt: []const
         return error.ParseError;
     }
     
    -fn assertToken(tokenizer: &Tokenizer, token: &const Token, id: Token.Id) !void {
    +fn assertToken(tokenizer: *Tokenizer, token: *const Token, id: Token.Id) !void {
         if (token.id != id) {
             return parseError(tokenizer, token, "expected {}, found {}", @tagName(id), @tagName(token.id));
         }
     }
     
    -fn eatToken(tokenizer: &Tokenizer, id: Token.Id) !Token {
    +fn eatToken(tokenizer: *Tokenizer, id: Token.Id) !Token {
         const token = tokenizer.next();
         try assertToken(tokenizer, token, id);
         return token;
    @@ -317,7 +317,7 @@ const Action = enum {
         Close,
     };
     
    -fn genToc(allocator: &mem.Allocator, tokenizer: &Tokenizer) !Toc {
    +fn genToc(allocator: *mem.Allocator, tokenizer: *Tokenizer) !Toc {
         var urls = std.HashMap([]const u8, Token, mem.hash_slice_u8, mem.eql_slice_u8).init(allocator);
         errdefer urls.deinit();
     
    @@ -546,7 +546,7 @@ fn genToc(allocator: &mem.Allocator, tokenizer: &Tokenizer) !Toc {
         };
     }
     
    -fn urlize(allocator: &mem.Allocator, input: []const u8) ![]u8 {
    +fn urlize(allocator: *mem.Allocator, input: []const u8) ![]u8 {
         var buf = try std.Buffer.initSize(allocator, 0);
         defer buf.deinit();
     
    @@ -566,7 +566,7 @@ fn urlize(allocator: &mem.Allocator, input: []const u8) ![]u8 {
         return buf.toOwnedSlice();
     }
     
    -fn escapeHtml(allocator: &mem.Allocator, input: []const u8) ![]u8 {
    +fn escapeHtml(allocator: *mem.Allocator, input: []const u8) ![]u8 {
         var buf = try std.Buffer.initSize(allocator, 0);
         defer buf.deinit();
     
    @@ -608,7 +608,7 @@ test "term color" {
         assert(mem.eql(u8, result, "AgreenB"));
     }
     
    -fn termColor(allocator: &mem.Allocator, input: []const u8) ![]u8 {
    +fn termColor(allocator: *mem.Allocator, input: []const u8) ![]u8 {
         var buf = try std.Buffer.initSize(allocator, 0);
         defer buf.deinit();
     
    @@ -688,7 +688,7 @@ fn termColor(allocator: &mem.Allocator, input: []const u8) ![]u8 {
         return buf.toOwnedSlice();
     }
     
    -fn genHtml(allocator: &mem.Allocator, tokenizer: &Tokenizer, toc: &Toc, out: var, zig_exe: []const u8) !void {
    +fn genHtml(allocator: *mem.Allocator, tokenizer: *Tokenizer, toc: *Toc, out: var, zig_exe: []const u8) !void {
         var code_progress_index: usize = 0;
         for (toc.nodes) |node| {
             switch (node) {
    @@ -1036,7 +1036,7 @@ fn genHtml(allocator: &mem.Allocator, tokenizer: &Tokenizer, toc: &Toc, out: var
         }
     }
     
    -fn exec(allocator: &mem.Allocator, args: []const []const u8) !os.ChildProcess.ExecResult {
    +fn exec(allocator: *mem.Allocator, args: []const []const u8) !os.ChildProcess.ExecResult {
         const result = try os.ChildProcess.exec(allocator, args, null, null, max_doc_file_size);
         switch (result.term) {
             os.ChildProcess.Term.Exited => |exit_code| {
    diff --git a/doc/langref.html.in b/doc/langref.html.in
    index d63c38d0fe..3bd1124e00 100644
    --- a/doc/langref.html.in
    +++ b/doc/langref.html.in
    @@ -458,7 +458,7 @@ test "string literals" {
     
         // A C string literal is a null terminated pointer.
         const null_terminated_bytes = c"hello";
    -    assert(@typeOf(null_terminated_bytes) == &const u8);
    +    assert(@typeOf(null_terminated_bytes) == *const u8);
         assert(null_terminated_bytes[5] == 0);
     }
           {#code_end#}
    @@ -547,7 +547,7 @@ const c_string_literal =
     ;
           {#code_end#}
           

     -        In this example the variable c_string_literal has type &const char and
     +        In this example the variable c_string_literal has type *const char and
              has a terminating null byte.

      {#see_also|@embedFile#}
     @@ -1403,12 +1403,12 @@ test "address of syntax" {
          assert(x_ptr.* == 1234);
      
          // When you get the address of a const variable, you get a const pointer.
     -    assert(@typeOf(x_ptr) == &const i32);
     +    assert(@typeOf(x_ptr) == *const i32);
      
          // If you want to mutate the value, you'd need an address of a mutable variable:
          var y: i32 = 5678;
          const y_ptr = &y;
     -    assert(@typeOf(y_ptr) == &i32);
     +    assert(@typeOf(y_ptr) == *i32);
          y_ptr.* += 1;
          assert(y_ptr.* == 5679);
      }
     @@ -1455,7 +1455,7 @@ comptime {
      
      test "@ptrToInt and @intToPtr" {
          // To convert an integer address into a pointer, use @intToPtr:
     -    const ptr = @intToPtr(&i32, 0xdeadbeef);
     +    const ptr = @intToPtr(*i32, 0xdeadbeef);
      
          // To convert a pointer to an integer, use @ptrToInt:
          const addr = @ptrToInt(ptr);
     @@ -1467,7 +1467,7 @@ test "@ptrToInt and @intToPtr" {
      comptime {
          // Zig is able to do this at compile-time, as long as
          // ptr is never dereferenced.
     -    const ptr = @intToPtr(&i32, 0xdeadbeef);
     +    const ptr = @intToPtr(*i32, 0xdeadbeef);
          const addr = @ptrToInt(ptr);
          assert(@typeOf(addr) == usize);
          assert(addr == 0xdeadbeef);
     @@ -1477,17 +1477,17 @@ test "volatile" {
          // In Zig, loads and stores are assumed to not have side effects.
          // If a given load or store should have side effects, such as
          // Memory Mapped Input/Output (MMIO), use `volatile`:
     -    const mmio_ptr = @intToPtr(&volatile u8, 0x12345678);
     +    const mmio_ptr = @intToPtr(*volatile u8, 0x12345678);
      
          // Now loads and stores with mmio_ptr are guaranteed to all happen
          // and in the same order as in source code.
     -    assert(@typeOf(mmio_ptr) == &volatile u8);
     +    assert(@typeOf(mmio_ptr) == *volatile u8);
      }
      
      test "nullable pointers" {
          // Pointers cannot be null. If you want a null pointer, use the nullable
          // prefix `?` to make the pointer type nullable.
     -    var ptr: ?&i32 = null;
     +    var ptr: ?*i32 = null;
      
          var x: i32 = 1;
          ptr = &x;
     @@ -1496,7 +1496,7 @@ test "nullable pointers" {
      
          // Nullable pointers are the same size as normal pointers, because pointer
          // value 0 is used as the null value.
     -    assert(@sizeOf(?&i32) == @sizeOf(&i32));
     +    assert(@sizeOf(?*i32) == @sizeOf(*i32));
      }
      
      test "pointer casting" {
     @@ -1504,7 +1504,7 @@ test "pointer casting" {
          // operation that Zig cannot protect you against. Use @ptrCast only when other
          // conversions are not possible.
          const bytes align(@alignOf(u32)) = []u8{0x12, 0x12, 0x12, 0x12};
     -    const u32_ptr = @ptrCast(&const u32, &bytes[0]);
     +    const u32_ptr = @ptrCast(*const u32, &bytes[0]);
          assert(u32_ptr.* == 0x12121212);
      
          // Even this example is contrived - there are better ways to do the above than
     @@ -1518,7 +1518,7 @@ test "pointer casting" {
      
      test "pointer child type" {
          // pointer types have a `child` field which tells you the type they point to.
     -    assert((&u32).Child == u32);
     +    assert((*u32).Child == u32);
      }
      {#code_end#}
      {#header_open|Alignment#}
     @@ -1543,15 +1543,15 @@ const builtin = @import("builtin");
      
      test "variable alignment" {
          var x: i32 = 1234;
          const align_of_i32 = @alignOf(@typeOf(x));
     -    assert(@typeOf(&x) == &i32);
     -    assert(&i32 == &align(align_of_i32) i32);
     +    assert(@typeOf(&x) == *i32);
     +    assert(*i32 == *align(align_of_i32) i32);
          if (builtin.arch == builtin.Arch.x86_64) {
     -        assert((&i32).alignment == 4);
     +        assert((*i32).alignment == 4);
          }
      }
      {#code_end#}
      
     -        In the same way that a &i32 can be implicitly cast to a
     -        &const i32, a pointer with a larger alignment can be implicitly
     +        In the same way that a *i32 can be implicitly cast to a
     +        *const i32, a pointer with a larger alignment can be implicitly
              cast to a pointer with a smaller alignment, but not vice versa.

     @@ -1565,7 +1565,7 @@ var foo: u8 align(4) = 100;
      
      test "global variable alignment" {
          assert(@typeOf(&foo).alignment == 4);
     -    assert(@typeOf(&foo) == &align(4) u8);
     +    assert(@typeOf(&foo) == *align(4) u8);
          const slice = (&foo)[0..1];
          assert(@typeOf(slice) == []align(4) u8);
      }
     @@ -1610,7 +1610,7 @@ fn foo(bytes: []u8) u32 {
       u8 can alias any memory.

    As an example, this code produces undefined behavior:

     -      @ptrCast(&u32, f32(12.34)).*
     +      @ptrCast(*u32, f32(12.34)).*

    Instead, use {#link|@bitCast#}:

    @bitCast(u32, f32(12.34))

    As an added benefit, the @bitcast version works at compile-time.

     @@ -1736,7 +1736,7 @@ const Vec3 = struct {
              };
          }
      
     -    pub fn dot(self: &const Vec3, other: &const Vec3) f32 {
     +    pub fn dot(self: *const Vec3, other: *const Vec3) f32 {
              return self.x * other.x + self.y * other.y + self.z * other.z;
          }
      };
     @@ -1768,7 +1768,7 @@ test "struct namespaced variable" {
      
      // struct field order is determined by the compiler for optimal performance.
      // however, you can still calculate a struct base pointer given a field pointer:
     -fn setYBasedOnX(x: &f32, y: f32) void {
     +fn setYBasedOnX(x: *f32, y: f32) void {
          const point = @fieldParentPtr(Point, "x", x);
          point.y = y;
      }
     @@ -1786,13 +1786,13 @@ test "field parent pointer" {
      fn LinkedList(comptime T: type) type {
          return struct {
              pub const Node = struct {
     -            prev: ?&Node,
     -            next: ?&Node,
     +            prev: ?*Node,
     +            next: ?*Node,
                  data: T,
              };
      
     -        first: ?&Node,
     -        last: ?&Node,
     +        first: ?*Node,
     +        last: ?*Node,
              len: usize,
          };
      }
     @@ -2039,7 +2039,7 @@ const Variant = union(enum) {
          Int: i32,
          Bool: bool,
      
     -    fn truthy(self: &const Variant) bool {
     +    fn truthy(self: *const Variant) bool {
              return switch (self.*) {
                  Variant.Int => |x_int| x_int != 0,
                  Variant.Bool => |x_bool| x_bool,
     @@ -2786,7 +2786,7 @@ test "pass aggregate type by value to function" {
      }
      {#code_end#}
      
     -        Instead, one must use &const. Zig allows implicitly casting something
     +        Instead, one must use *const. Zig allows implicitly casting something
              to a const pointer to it:
      
      {#code_begin|test#}
     @@ -2794,7 +2794,7 @@ const Foo = struct {
          x: i32,
      };
      
     -fn bar(foo: &const Foo) void {}
     +fn bar(foo: *const Foo) void {}
      
      test "implicitly cast to const pointer" {
          bar(Foo {.x = 12,});
     @@ -3208,16 +3208,16 @@ struct Foo *do_a_thing(void) {

    Zig code

      {#code_begin|syntax#}
      // malloc prototype included for reference
     -extern fn malloc(size: size_t) ?&u8;
     +extern fn malloc(size: size_t) ?*u8;
      
     -fn doAThing() ?&Foo {
     +fn doAThing() ?*Foo {
          const ptr = malloc(1234) ?? return null;
          // ...
      }
      {#code_end#}

       Here, Zig is at least as convenient, if not more, than C. And, the type of "ptr"
     - is &u8 not ?&u8. The ?? operator
     + is *u8 not ?*u8. The ?? operator
       unwrapped the nullable type and therefore ptr is guaranteed to be non-null
       everywhere it is used in the function.

     @@ -3237,7 +3237,7 @@ fn doAThing() ?&Foo {
       In Zig you can accomplish the same thing:

      {#code_begin|syntax#}
     -fn doAThing(nullable_foo: ?&Foo) void {
     +fn doAThing(nullable_foo: ?*Foo) void {
          // do some stuff
      
          if (nullable_foo) |foo| {
     @@ -3713,7 +3713,7 @@ fn List(comptime T: type) type {

      {#code_begin|syntax#}
      const Node = struct {
     -    next: &Node,
     +    next: *Node,
          name: []u8,
      };
      {#code_end#}
     @@ -3745,7 +3745,7 @@ pub fn main() void {
      {#code_begin|syntax#}
      /// Calls print and then flushes the buffer.
     -pub fn printf(self: &OutStream, comptime format: []const u8, args: ...) error!void {
     +pub fn printf(self: *OutStream, comptime format: []const u8, args: ...) error!void {
          const State = enum {
              Start,
              OpenBrace,
     @@ -3817,7 +3817,7 @@ pub fn printf(self: &OutStream, comptime format: []const u8, args: ...) error!vo
       and emits a function that actually looks like this:

    {#code_begin|syntax#} -pub fn printf(self: &OutStream, arg0: i32, arg1: []const u8) !void { +pub fn printf(self: *OutStream, arg0: i32, arg1: []const u8) !void { try self.write("here is a string: '"); try self.printValue(arg0); try self.write("' here is a number: "); @@ -3831,7 +3831,7 @@ pub fn printf(self: &OutStream, arg0: i32, arg1: []const u8) !void { on the type:

    {#code_begin|syntax#} -pub fn printValue(self: &OutStream, value: var) !void { +pub fn printValue(self: *OutStream, value: var) !void { const T = @typeOf(value); if (@isInteger(T)) { return self.printInt(T, value); @@ -3911,7 +3911,7 @@ pub fn main() void { at compile time.

    {#header_open|@addWithOverflow#} -
    @addWithOverflow(comptime T: type, a: T, b: T, result: &T) -> bool
    +
    @addWithOverflow(comptime T: type, a: T, b: T, result: *T) bool

    Performs result.* = a + b. If overflow or underflow occurs, stores the overflowed bits in result and returns true. @@ -3919,7 +3919,7 @@ pub fn main() void {
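A minimal usage sketch (editorial, with made-up values):

{#code_begin|test#}
const assert = @import("std").debug.assert;

test "addWithOverflow" {
    var byte: u8 = undefined;
    // 200 + 100 does not fit in a u8, so overflow is reported.
    assert(@addWithOverflow(u8, 200, 100, &byte));
    // 20 + 30 fits; the sum is stored and false is returned.
    assert(!@addWithOverflow(u8, 20, 30, &byte));
    assert(byte == 50);
}
{#code_end#}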

    {#header_close#} {#header_open|@ArgType#} -
    @ArgType(comptime T: type, comptime n: usize) -> type
    +
    @ArgType(comptime T: type, comptime n: usize) type

    This builtin function takes a function type and returns the type of the parameter at index n.
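A short editorial sketch of how this might be used; the example function is made up:

{#code_begin|test#}
const assert = @import("std").debug.assert;

fn example(a: i32, b: f64) bool {
    return a > 0 and b > 0.0;
}

comptime {
    assert(@ArgType(@typeOf(example), 0) == i32);
    assert(@ArgType(@typeOf(example), 1) == f64);
}
{#code_end#}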

    @@ -3931,7 +3931,7 @@ pub fn main() void {

    {#header_close#} {#header_open|@atomicLoad#} -
    @atomicLoad(comptime T: type, ptr: &const T, comptime ordering: builtin.AtomicOrder) -> T
    +
    @atomicLoad(comptime T: type, ptr: *const T, comptime ordering: builtin.AtomicOrder) T

    This builtin function atomically dereferences a pointer and returns the value.
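An editorial sketch of a single-threaded use, assuming builtin.AtomicOrder.SeqCst as the ordering:

{#code_begin|test#}
const builtin = @import("builtin");
const assert = @import("std").debug.assert;

test "atomicLoad" {
    var x: u32 = 1234;
    const loaded = @atomicLoad(u32, &x, builtin.AtomicOrder.SeqCst);
    assert(loaded == 1234);
}
{#code_end#}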

    @@ -3950,7 +3950,7 @@ pub fn main() void {

    {#header_close#} {#header_open|@atomicRmw#} -
    @atomicRmw(comptime T: type, ptr: &T, comptime op: builtin.AtomicRmwOp, operand: T, comptime ordering: builtin.AtomicOrder) -> T
    +
    @atomicRmw(comptime T: type, ptr: *T, comptime op: builtin.AtomicRmwOp, operand: T, comptime ordering: builtin.AtomicOrder) T

    This builtin function atomically modifies memory and then returns the previous value.
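An editorial sketch using an add operation, assuming builtin.AtomicRmwOp.Add and SeqCst ordering:

{#code_begin|test#}
const builtin = @import("builtin");
const assert = @import("std").debug.assert;

test "atomicRmw" {
    var x: u32 = 1;
    // Atomically add 2 and get back the value stored before the add.
    const prev = @atomicRmw(u32, &x, builtin.AtomicRmwOp.Add, 2, builtin.AtomicOrder.SeqCst);
    assert(prev == 1);
    assert(x == 3);
}
{#code_end#}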

    @@ -3969,7 +3969,7 @@ pub fn main() void {

    {#header_close#} {#header_open|@bitCast#} -
    @bitCast(comptime DestType: type, value: var) -> DestType
    +
    @bitCast(comptime DestType: type, value: var) DestType

    Converts a value of one type to another type.

    @@ -4002,9 +4002,9 @@ pub fn main() void { {#header_close#} {#header_open|@alignCast#} -
    @alignCast(comptime alignment: u29, ptr: var) -> var
    +
    @alignCast(comptime alignment: u29, ptr: var) var

    - ptr can be &T, fn(), ?&T, + ptr can be *T, fn(), ?*T, ?fn(), or []T. It returns the same type as ptr except with the alignment adjusted to the new value.

    @@ -4013,7 +4013,7 @@ pub fn main() void { {#header_close#} {#header_open|@alignOf#} -
    @alignOf(comptime T: type) -> (number literal)
    +
    @alignOf(comptime T: type) (number literal)

    This function returns the number of bytes that this type should be aligned to for the current target to match the C ABI. When the child type of a pointer has @@ -4021,7 +4021,7 @@ pub fn main() void {

    const assert = @import("std").debug.assert;
     comptime {
    -    assert(&u32 == &align(@alignOf(u32)) u32);
    +    assert(*u32 == *align(@alignOf(u32)) u32);
     }

    The result is a target-specific compile time constant. It is guaranteed to be @@ -4049,7 +4049,7 @@ comptime { {#see_also|Import from C Header File|@cInclude|@cImport|@cUndef|void#} {#header_close#} {#header_open|@cImport#} -

    @cImport(expression) -> (namespace)
    +
    @cImport(expression) (namespace)

    This function parses C code and imports the functions, types, variables, and compatible macro definitions into the result namespace. @@ -4095,13 +4095,13 @@ comptime { {#see_also|Import from C Header File|@cImport|@cDefine|@cInclude#} {#header_close#} {#header_open|@canImplicitCast#} -

    @canImplicitCast(comptime T: type, value) -> bool
    +
    @canImplicitCast(comptime T: type, value) bool

Returns whether a value can be implicitly cast to a given type.

    {#header_close#} {#header_open|@clz#} -
    @clz(x: T) -> U
    +
    @clz(x: T) U

    This function counts the number of leading zeroes in x which is an integer type T. @@ -4116,13 +4116,13 @@ comptime { {#header_close#} {#header_open|@cmpxchgStrong#} -

    @cmpxchgStrong(comptime T: type, ptr: &T, expected_value: T, new_value: T, success_order: AtomicOrder, fail_order: AtomicOrder) -> ?T
    +
    @cmpxchgStrong(comptime T: type, ptr: *T, expected_value: T, new_value: T, success_order: AtomicOrder, fail_order: AtomicOrder) ?T

    This function performs a strong atomic compare exchange operation. It's the equivalent of this code, except atomic:

    {#code_begin|syntax#} -fn cmpxchgStrongButNotAtomic(comptime T: type, ptr: &T, expected_value: T, new_value: T) ?T { +fn cmpxchgStrongButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_value: T) ?T { const old_value = ptr.*; if (old_value == expected_value) { ptr.* = new_value; @@ -4143,13 +4143,13 @@ fn cmpxchgStrongButNotAtomic(comptime T: type, ptr: &T, expected_value: T, new_v {#see_also|Compile Variables|cmpxchgWeak#} {#header_close#} {#header_open|@cmpxchgWeak#} -
    @cmpxchgWeak(comptime T: type, ptr: &T, expected_value: T, new_value: T, success_order: AtomicOrder, fail_order: AtomicOrder) -> ?T
    +
    @cmpxchgWeak(comptime T: type, ptr: *T, expected_value: T, new_value: T, success_order: AtomicOrder, fail_order: AtomicOrder) ?T

    This function performs a weak atomic compare exchange operation. It's the equivalent of this code, except atomic:

    {#code_begin|syntax#} -fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: &T, expected_value: T, new_value: T) ?T { +fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_value: T) ?T { const old_value = ptr.*; if (old_value == expected_value and usuallyTrueButSometimesFalse()) { ptr.* = new_value; @@ -4237,7 +4237,7 @@ test "main" { {#code_end#} {#header_close#} {#header_open|@ctz#} -
    @ctz(x: T) -> U
    +
    @ctz(x: T) U

    This function counts the number of trailing zeroes in x which is an integer type T. @@ -4251,7 +4251,7 @@ test "main" {
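A small editorial sketch exercising both @ctz and the @clz builtin described above:

{#code_begin|test#}
const assert = @import("std").debug.assert;

test "ctz and clz" {
    const x: u8 = 0b00010000;
    // Four zero bits below the set bit, three zero bits above it.
    assert(@ctz(x) == 4);
    assert(@clz(x) == 3);
}
{#code_end#}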

    {#header_close#} {#header_open|@divExact#} -
    @divExact(numerator: T, denominator: T) -> T
    +
    @divExact(numerator: T, denominator: T) T

    Exact division. Caller guarantees denominator != 0 and @divTrunc(numerator, denominator) * denominator == numerator. @@ -4264,7 +4264,7 @@ test "main" { {#see_also|@divTrunc|@divFloor#} {#header_close#} {#header_open|@divFloor#} -

    @divFloor(numerator: T, denominator: T) -> T
    +
    @divFloor(numerator: T, denominator: T) T

    Floored division. Rounds toward negative infinity. For unsigned integers it is the same as numerator / denominator. Caller guarantees denominator != 0 and @@ -4278,7 +4278,7 @@ test "main" { {#see_also|@divTrunc|@divExact#} {#header_close#} {#header_open|@divTrunc#} -

    @divTrunc(numerator: T, denominator: T) -> T
    +
    @divTrunc(numerator: T, denominator: T) T

    Truncated division. Rounds toward zero. For unsigned integers it is the same as numerator / denominator. Caller guarantees denominator != 0 and @@ -4292,7 +4292,7 @@ test "main" { {#see_also|@divFloor|@divExact#} {#header_close#} {#header_open|@embedFile#} -

    @embedFile(comptime path: []const u8) -> [X]u8
    +
    @embedFile(comptime path: []const u8) [X]u8

    This function returns a compile time constant fixed-size array with length equal to the byte count of the file given by path. The contents of the array @@ -4304,19 +4304,19 @@ test "main" { {#see_also|@import#} {#header_close#} {#header_open|@export#} -

    @export(comptime name: []const u8, target: var, linkage: builtin.GlobalLinkage) -> []const u8
    +
    @export(comptime name: []const u8, target: var, linkage: builtin.GlobalLinkage) []const u8

    Creates a symbol in the output object file.

    {#header_close#} {#header_open|@tagName#} -
    @tagName(value: var) -> []const u8
    +
    @tagName(value: var) []const u8

    Converts an enum value or union value to a slice of bytes representing the name.
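An editorial sketch with a made-up enum:

{#code_begin|test#}
const assert = @import("std").debug.assert;
const mem = @import("std").mem;

const Color = enum {
    Red,
    Green,
    Blue,
};

test "tagName" {
    assert(mem.eql(u8, @tagName(Color.Green), "Green"));
}
{#code_end#}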

    {#header_close#} {#header_open|@TagType#} -
    @TagType(T: type) -> type
    +
    @TagType(T: type) type

    For an enum, returns the integer type that is used to store the enumeration value.
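An editorial sketch, assuming the explicit enum tag-type syntax enum(u2) is available:

{#code_begin|test#}
const assert = @import("std").debug.assert;

const Small = enum(u2) {
    One,
    Two,
    Three,
};

comptime {
    assert(@TagType(Small) == u2);
}
{#code_end#}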

    @@ -4325,7 +4325,7 @@ test "main" {

    {#header_close#} {#header_open|@errorName#} -
    @errorName(err: error) -> []u8
    +
    @errorName(err: error) []u8

    This function returns the string representation of an error. If an error declaration is: @@ -4341,7 +4341,7 @@ test "main" {

    {#header_close#} {#header_open|@errorReturnTrace#} -
    @errorReturnTrace() -> ?&builtin.StackTrace
    +
    @errorReturnTrace() ?*builtin.StackTrace

    If the binary is built with error return tracing, and this function is invoked in a function that calls a function with an error or error union return type, returns a @@ -4360,7 +4360,7 @@ test "main" { {#header_close#} {#header_open|@fieldParentPtr#}

    @fieldParentPtr(comptime ParentType: type, comptime field_name: []const u8,
    -    field_ptr: &T) -> &ParentType
    + field_ptr: *T) *ParentType

    Given a pointer to a field, returns the base pointer of a struct.

    @@ -4380,7 +4380,7 @@ test "main" {

    {#header_close#} {#header_open|@import#} -
    @import(comptime path: []u8) -> (namespace)
    +
    @import(comptime path: []u8) (namespace)

    This function finds a zig file corresponding to path and imports all the public top level declarations into the resulting namespace. @@ -4400,7 +4400,7 @@ test "main" { {#see_also|Compile Variables|@embedFile#} {#header_close#} {#header_open|@inlineCall#} -

    @inlineCall(function: X, args: ...) -> Y
    +
    @inlineCall(function: X, args: ...) Y

    This calls a function, in the same way that invoking an expression with parentheses does:

    @@ -4420,19 +4420,19 @@ fn add(a: i32, b: i32) i32 { return a + b; } {#see_also|@noInlineCall#} {#header_close#} {#header_open|@intToPtr#} -
    @intToPtr(comptime DestType: type, int: usize) -> DestType
    +
    @intToPtr(comptime DestType: type, int: usize) DestType

    Converts an integer to a pointer. To convert the other way, use {#link|@ptrToInt#}.
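An editorial round-trip sketch; the address is arbitrary and the pointer is never dereferenced:

{#code_begin|test#}
const assert = @import("std").debug.assert;

test "intToPtr and ptrToInt round trip" {
    const ptr = @intToPtr(*u32, 0x1000);
    assert(@ptrToInt(ptr) == 0x1000);
}
{#code_end#}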

    {#header_close#} {#header_open|@IntType#} -
    @IntType(comptime is_signed: bool, comptime bit_count: u8) -> type
    +
    @IntType(comptime is_signed: bool, comptime bit_count: u8) type

This function returns an integer type with the given signedness and bit count.
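For example (editorial sketch):

{#code_begin|test#}
const assert = @import("std").debug.assert;

comptime {
    assert(@IntType(true, 8) == i8);
    assert(@IntType(false, 32) == u32);
}
{#code_end#}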

    {#header_close#} {#header_open|@maxValue#} -
    @maxValue(comptime T: type) -> (number literal)
    +
    @maxValue(comptime T: type) (number literal)

    This function returns the maximum value of the integer type T.
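An editorial sketch, also exercising @minValue, which is documented further below:

{#code_begin|test#}
const assert = @import("std").debug.assert;

comptime {
    assert(@maxValue(u8) == 255);
    assert(@maxValue(i8) == 127);
    assert(@minValue(i8) == -128);
}
{#code_end#}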

    @@ -4441,7 +4441,7 @@ fn add(a: i32, b: i32) i32 { return a + b; }

    {#header_close#} {#header_open|@memberCount#} -
    @memberCount(comptime T: type) -> (number literal)
    +
    @memberCount(comptime T: type) (number literal)

    This function returns the number of members in a struct, enum, or union type.
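An editorial sketch with a made-up enum:

{#code_begin|test#}
const assert = @import("std").debug.assert;

const Direction = enum {
    North,
    South,
    East,
    West,
};

comptime {
    assert(@memberCount(Direction) == 4);
}
{#code_end#}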

    @@ -4453,7 +4453,7 @@ fn add(a: i32, b: i32) i32 { return a + b; }

    {#header_close#} {#header_open|@memberName#} -
    @memberName(comptime T: type, comptime index: usize) -> [N]u8
    +
    @memberName(comptime T: type, comptime index: usize) [N]u8

    Returns the field name of a struct, union, or enum.

    The result is a compile time constant. @@ -4463,15 +4463,15 @@ fn add(a: i32, b: i32) i32 { return a + b; }

    {#header_close#} {#header_open|@field#} -
    @field(lhs: var, comptime field_name: []const u8) -> (field)
    +
    @field(lhs: var, comptime field_name: []const u8) (field)

Performs field access equivalent to lhs.field_name, where field_name is known at compile time.

    {#header_close#} {#header_open|@memberType#} -
    @memberType(comptime T: type, comptime index: usize) -> type
    +
    @memberType(comptime T: type, comptime index: usize) type

    Returns the field type of a struct or union.

    {#header_close#} {#header_open|@memcpy#} -
    @memcpy(noalias dest: &u8, noalias source: &const u8, byte_count: usize)
    +
    @memcpy(noalias dest: *u8, noalias source: *const u8, byte_count: usize)

    This function copies bytes from one region of memory to another. dest and source are both pointers and must not overlap. @@ -4489,7 +4489,7 @@ fn add(a: i32, b: i32) i32 { return a + b; } mem.copy(u8, dest[0...byte_count], source[0...byte_count]); {#header_close#} {#header_open|@memset#} -

    @memset(dest: &u8, c: u8, byte_count: usize)
    +
    @memset(dest: *u8, c: u8, byte_count: usize)

    This function sets a region of memory to c. dest is a pointer.

    @@ -4506,7 +4506,7 @@ mem.copy(u8, dest[0...byte_count], source[0...byte_count]); mem.set(u8, dest, c); {#header_close#} {#header_open|@minValue#} -
    @minValue(comptime T: type) -> (number literal)
    +
    @minValue(comptime T: type) (number literal)

    This function returns the minimum value of the integer type T.

    @@ -4515,7 +4515,7 @@ mem.set(u8, dest, c);

    {#header_close#} {#header_open|@mod#} -
    @mod(numerator: T, denominator: T) -> T
    +
    @mod(numerator: T, denominator: T) T

    Modulus division. For unsigned integers this is the same as numerator % denominator. Caller guarantees denominator > 0. @@ -4528,7 +4528,7 @@ mem.set(u8, dest, c); {#see_also|@rem#} {#header_close#} {#header_open|@mulWithOverflow#} -

    @mulWithOverflow(comptime T: type, a: T, b: T, result: &T) -> bool
    +
    @mulWithOverflow(comptime T: type, a: T, b: T, result: *T) bool

    Performs result.* = a * b. If overflow or underflow occurs, stores the overflowed bits in result and returns true. @@ -4536,7 +4536,7 @@ mem.set(u8, dest, c);

    {#header_close#} {#header_open|@newStackCall#} -
    @newStackCall(new_stack: []u8, function: var, args: ...) -> var
    +
    @newStackCall(new_stack: []u8, function: var, args: ...) var

    This calls a function, in the same way that invoking an expression with parentheses does. However, instead of using the same stack as the caller, the function uses the stack provided in the new_stack @@ -4572,7 +4572,7 @@ fn targetFunction(x: i32) usize { {#code_end#} {#header_close#} {#header_open|@noInlineCall#} -

    @noInlineCall(function: var, args: ...) -> var
    +
    @noInlineCall(function: var, args: ...) var

    This calls a function, in the same way that invoking an expression with parentheses does:

    @@ -4594,13 +4594,13 @@ fn add(a: i32, b: i32) i32 { {#see_also|@inlineCall#} {#header_close#} {#header_open|@offsetOf#} -
    @offsetOf(comptime T: type, comptime field_name: [] const u8) -> (number literal)
    +
    @offsetOf(comptime T: type, comptime field_name: [] const u8) (number literal)

    This function returns the byte offset of a field relative to its containing struct.
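An editorial sketch using an extern struct so the layout is the predictable C ABI layout:

{#code_begin|test#}
const assert = @import("std").debug.assert;

const Header = extern struct {
    tag: u8,
    len: u32,
};

comptime {
    assert(@offsetOf(Header, "tag") == 0);
    // With C ABI layout, len is padded out to the 4-byte alignment of u32.
    assert(@offsetOf(Header, "len") == 4);
}
{#code_end#}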

    {#header_close#} {#header_open|@OpaqueType#} -
    @OpaqueType() -> type
    +
    @OpaqueType() type

    Creates a new type with an unknown size and alignment.

    @@ -4608,12 +4608,12 @@ fn add(a: i32, b: i32) i32 { This is typically used for type safety when interacting with C code that does not expose struct details. Example:

    - {#code_begin|test_err|expected type '&Derp', found '&Wat'#} + {#code_begin|test_err|expected type '*Derp', found '*Wat'#} const Derp = @OpaqueType(); const Wat = @OpaqueType(); -extern fn bar(d: &Derp) void; -export fn foo(w: &Wat) void { +extern fn bar(d: *Derp) void; +export fn foo(w: *Wat) void { bar(w); } @@ -4623,7 +4623,7 @@ test "call foo" { {#code_end#} {#header_close#} {#header_open|@panic#} -
    @panic(message: []const u8) -> noreturn
    +
    @panic(message: []const u8) noreturn

    Invokes the panic handler function. By default the panic handler function calls the public panic function exposed in the root source file, or @@ -4639,19 +4639,19 @@ test "call foo" { {#see_also|Root Source File#} {#header_close#} {#header_open|@ptrCast#} -

    @ptrCast(comptime DestType: type, value: var) -> DestType
    +
    @ptrCast(comptime DestType: type, value: var) DestType

    Converts a pointer of one type to a pointer of another type.

    {#header_close#} {#header_open|@ptrToInt#} -
    @ptrToInt(value: var) -> usize
    +
    @ptrToInt(value: var) usize

    Converts value to a usize which is the address of the pointer. value can be one of these types:

      -
    • &T
    • -
    • ?&T
    • +
    • *T
    • +
    • ?*T
    • fn()
    • ?fn()
    @@ -4659,7 +4659,7 @@ test "call foo" { {#header_close#} {#header_open|@rem#} -
    @rem(numerator: T, denominator: T) -> T
    +
    @rem(numerator: T, denominator: T) T

    Remainder division. For unsigned integers this is the same as numerator % denominator. Caller guarantees denominator > 0. @@ -4776,13 +4776,13 @@ pub const FloatMode = enum { {#see_also|Compile Variables#} {#header_close#} {#header_open|@setGlobalSection#} -

    @setGlobalSection(global_variable_name, comptime section_name: []const u8) -> bool
    +
    @setGlobalSection(global_variable_name, comptime section_name: []const u8) bool

    Puts the global variable in the specified section.

    {#header_close#} {#header_open|@shlExact#} -
    @shlExact(value: T, shift_amt: Log2T) -> T
    +
    @shlExact(value: T, shift_amt: Log2T) T

    Performs the left shift operation (<<). Caller guarantees that the shift will not shift any 1 bits out. @@ -4794,7 +4794,7 @@ pub const FloatMode = enum { {#see_also|@shrExact|@shlWithOverflow#} {#header_close#} {#header_open|@shlWithOverflow#} -

    @shlWithOverflow(comptime T: type, a: T, shift_amt: Log2T, result: &T) -> bool
    +
    @shlWithOverflow(comptime T: type, a: T, shift_amt: Log2T, result: *T) bool

    Performs result.* = a << b. If overflow or underflow occurs, stores the overflowed bits in result and returns true. @@ -4807,7 +4807,7 @@ pub const FloatMode = enum { {#see_also|@shlExact|@shrExact#} {#header_close#} {#header_open|@shrExact#} -

    @shrExact(value: T, shift_amt: Log2T) -> T
    +
    @shrExact(value: T, shift_amt: Log2T) T

    Performs the right shift operation (>>). Caller guarantees that the shift will not shift any 1 bits out. @@ -4819,7 +4819,7 @@ pub const FloatMode = enum { {#see_also|@shlExact|@shlWithOverflow#} {#header_close#} {#header_open|@sizeOf#} -

    @sizeOf(comptime T: type) -> (number literal)
    +
    @sizeOf(comptime T: type) (number literal)

    This function returns the number of bytes it takes to store T in memory.
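For example (editorial sketch):

{#code_begin|test#}
const assert = @import("std").debug.assert;

comptime {
    assert(@sizeOf(u32) == 4);
    assert(@sizeOf([10]u8) == 10);
}
{#code_end#}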

    @@ -4828,7 +4828,7 @@ pub const FloatMode = enum {

    {#header_close#} {#header_open|@sqrt#} -
    @sqrt(comptime T: type, value: T) -> T
    +
    @sqrt(comptime T: type, value: T) T

Returns the square root of a floating point number. Uses a dedicated hardware instruction when available. Currently only f32 and f64 are supported at runtime; runtime support for f128 is TODO. @@ -4838,7 +4838,7 @@ pub const FloatMode = enum {
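An editorial sketch of a runtime call with an exact result:

{#code_begin|test#}
const assert = @import("std").debug.assert;

test "sqrt" {
    var x: f32 = 16.0;
    assert(@sqrt(f32, x) == 4.0);
}
{#code_end#}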

    {#header_close#} {#header_open|@subWithOverflow#} -
    @subWithOverflow(comptime T: type, a: T, b: T, result: &T) -> bool
    +
    @subWithOverflow(comptime T: type, a: T, b: T, result: *T) bool

    Performs result.* = a - b. If overflow or underflow occurs, stores the overflowed bits in result and returns true. @@ -4846,7 +4846,7 @@ pub const FloatMode = enum {

    {#header_close#} {#header_open|@truncate#} -
    @truncate(comptime T: type, integer) -> T
    +
    @truncate(comptime T: type, integer) T

    This function truncates bits from an integer type, resulting in a smaller integer type. @@ -4870,7 +4870,7 @@ const b: u8 = @truncate(u8, a); {#header_close#} {#header_open|@typeId#} -

    @typeId(comptime T: type) -> @import("builtin").TypeId
    +
    @typeId(comptime T: type) @import("builtin").TypeId

    Returns which kind of type something is. Possible values:

    @@ -4904,7 +4904,7 @@ pub const TypeId = enum { {#code_end#} {#header_close#} {#header_open|@typeInfo#} -
    @typeInfo(comptime T: type) -> @import("builtin").TypeInfo
    +
    @typeInfo(comptime T: type) @import("builtin").TypeInfo

    Returns information on the type. Returns a value of the following union:

    @@ -5080,14 +5080,14 @@ pub const TypeInfo = union(TypeId) { {#code_end#} {#header_close#} {#header_open|@typeName#} -
    @typeName(T: type) -> []u8
    +
    @typeName(T: type) []u8

    This function returns the string representation of a type.
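For example (editorial sketch):

{#code_begin|test#}
const assert = @import("std").debug.assert;
const mem = @import("std").mem;

comptime {
    assert(mem.eql(u8, @typeName(u8), "u8"));
}
{#code_end#}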

    {#header_close#} {#header_open|@typeOf#} -
    @typeOf(expression) -> type
    +
    @typeOf(expression) type

    This function returns a compile-time constant, which is the type of the expression passed as an argument. The expression is evaluated. @@ -5937,7 +5937,7 @@ pub const __zig_test_fn_slice = {}; // overwritten later {#header_open|C String Literals#} {#code_begin|exe#} {#link_libc#} -extern fn puts(&const u8) void; +extern fn puts(*const u8) void; pub fn main() void { puts(c"this has a null terminator"); @@ -5996,8 +5996,8 @@ const c = @cImport({ {#code_begin|syntax#} const base64 = @import("std").base64; -export fn decode_base_64(dest_ptr: &u8, dest_len: usize, - source_ptr: &const u8, source_len: usize) usize +export fn decode_base_64(dest_ptr: *u8, dest_len: usize, + source_ptr: *const u8, source_len: usize) usize { const src = source_ptr[0..source_len]; const dest = dest_ptr[0..dest_len]; @@ -6028,7 +6028,7 @@ int main(int argc, char **argv) { {#code_begin|syntax#} const Builder = @import("std").build.Builder; -pub fn build(b: &Builder) void { +pub fn build(b: *Builder) void { const obj = b.addObject("base64", "base64.zig"); const exe = b.addCExecutable("test"); diff --git a/example/cat/main.zig b/example/cat/main.zig index de0d323bed..1b34cb22eb 100644 --- a/example/cat/main.zig +++ b/example/cat/main.zig @@ -41,7 +41,7 @@ fn usage(exe: []const u8) !void { return error.Invalid; } -fn cat_file(stdout: &os.File, file: &os.File) !void { +fn cat_file(stdout: *os.File, file: *os.File) !void { var buf: [1024 * 4]u8 = undefined; while (true) { diff --git a/example/hello_world/hello_libc.zig b/example/hello_world/hello_libc.zig index 1df8f04ce4..f64beda40f 100644 --- a/example/hello_world/hello_libc.zig +++ b/example/hello_world/hello_libc.zig @@ -7,7 +7,7 @@ const c = @cImport({ const msg = c"Hello, world!\n"; -export fn main(argc: c_int, argv: &&u8) c_int { +export fn main(argc: c_int, argv: **u8) c_int { if (c.printf(msg) != c_int(c.strlen(msg))) return -1; return 0; diff --git a/example/mix_o_files/base64.zig b/example/mix_o_files/base64.zig index e682a97055..35b090825b 100644 --- a/example/mix_o_files/base64.zig +++ b/example/mix_o_files/base64.zig @@ -1,6 +1,6 @@ const base64 = @import("std").base64; -export fn decode_base_64(dest_ptr: &u8, dest_len: usize, source_ptr: &const u8, source_len: usize) usize { +export fn decode_base_64(dest_ptr: *u8, dest_len: usize, source_ptr: *const u8, source_len: usize) usize { const src = source_ptr[0..source_len]; const dest = dest_ptr[0..dest_len]; const base64_decoder = base64.standard_decoder_unsafe; diff --git a/example/mix_o_files/build.zig b/example/mix_o_files/build.zig index e5d2e6a446..a4e7fbbf8f 100644 --- a/example/mix_o_files/build.zig +++ b/example/mix_o_files/build.zig @@ -1,6 +1,6 @@ const Builder = @import("std").build.Builder; -pub fn build(b: &Builder) void { +pub fn build(b: *Builder) void { const obj = b.addObject("base64", "base64.zig"); const exe = b.addCExecutable("test"); diff --git a/example/shared_library/build.zig b/example/shared_library/build.zig index 30c714c6c6..05648cf9eb 100644 --- a/example/shared_library/build.zig +++ b/example/shared_library/build.zig @@ -1,6 +1,6 @@ const Builder = @import("std").build.Builder; -pub fn build(b: &Builder) void { +pub fn build(b: *Builder) void { const lib = b.addSharedLibrary("mathtest", "mathtest.zig", b.version(1, 0, 0)); const exe = b.addCExecutable("test"); diff --git a/src-self-hosted/arg.zig b/src-self-hosted/arg.zig index fa2166e3a5..df2c04ef1f 100644 --- a/src-self-hosted/arg.zig +++ b/src-self-hosted/arg.zig @@ -30,7 +30,7 @@ fn argInAllowedSet(maybe_set: ?[]const []const 
u8, arg: []const u8) bool { } // Modifies the current argument index during iteration -fn readFlagArguments(allocator: &Allocator, args: []const []const u8, required: usize, allowed_set: ?[]const []const u8, index: &usize) !FlagArg { +fn readFlagArguments(allocator: *Allocator, args: []const []const u8, required: usize, allowed_set: ?[]const []const u8, index: *usize) !FlagArg { switch (required) { 0 => return FlagArg{ .None = undefined }, // TODO: Required to force non-tag but value? 1 => { @@ -79,7 +79,7 @@ pub const Args = struct { flags: HashMapFlags, positionals: ArrayList([]const u8), - pub fn parse(allocator: &Allocator, comptime spec: []const Flag, args: []const []const u8) !Args { + pub fn parse(allocator: *Allocator, comptime spec: []const Flag, args: []const []const u8) !Args { var parsed = Args{ .flags = HashMapFlags.init(allocator), .positionals = ArrayList([]const u8).init(allocator), @@ -143,18 +143,18 @@ pub const Args = struct { return parsed; } - pub fn deinit(self: &Args) void { + pub fn deinit(self: *Args) void { self.flags.deinit(); self.positionals.deinit(); } // e.g. --help - pub fn present(self: &Args, name: []const u8) bool { + pub fn present(self: *Args, name: []const u8) bool { return self.flags.contains(name); } // e.g. --name value - pub fn single(self: &Args, name: []const u8) ?[]const u8 { + pub fn single(self: *Args, name: []const u8) ?[]const u8 { if (self.flags.get(name)) |entry| { switch (entry.value) { FlagArg.Single => |inner| { @@ -168,7 +168,7 @@ pub const Args = struct { } // e.g. --names value1 value2 value3 - pub fn many(self: &Args, name: []const u8) ?[]const []const u8 { + pub fn many(self: *Args, name: []const u8) ?[]const []const u8 { if (self.flags.get(name)) |entry| { switch (entry.value) { FlagArg.Many => |inner| { diff --git a/src-self-hosted/errmsg.zig b/src-self-hosted/errmsg.zig index 9905b8e3a6..32d2450aac 100644 --- a/src-self-hosted/errmsg.zig +++ b/src-self-hosted/errmsg.zig @@ -16,18 +16,18 @@ pub const Msg = struct { text: []u8, first_token: TokenIndex, last_token: TokenIndex, - tree: &ast.Tree, + tree: *ast.Tree, }; /// `path` must outlive the returned Msg /// `tree` must outlive the returned Msg /// Caller owns returned Msg and must free with `allocator` pub fn createFromParseError( - allocator: &mem.Allocator, - parse_error: &const ast.Error, - tree: &ast.Tree, + allocator: *mem.Allocator, + parse_error: *const ast.Error, + tree: *ast.Tree, path: []const u8, -) !&Msg { +) !*Msg { const loc_token = parse_error.loc(); var text_buf = try std.Buffer.initSize(allocator, 0); defer text_buf.deinit(); @@ -47,7 +47,7 @@ pub fn createFromParseError( return msg; } -pub fn printToStream(stream: var, msg: &const Msg, color_on: bool) !void { +pub fn printToStream(stream: var, msg: *const Msg, color_on: bool) !void { const first_token = msg.tree.tokens.at(msg.first_token); const last_token = msg.tree.tokens.at(msg.last_token); const start_loc = msg.tree.tokenLocationPtr(0, first_token); @@ -76,7 +76,7 @@ pub fn printToStream(stream: var, msg: &const Msg, color_on: bool) !void { try stream.write("\n"); } -pub fn printToFile(file: &os.File, msg: &const Msg, color: Color) !void { +pub fn printToFile(file: *os.File, msg: *const Msg, color: Color) !void { const color_on = switch (color) { Color.Auto => file.isTty(), Color.On => true, diff --git a/src-self-hosted/introspect.zig b/src-self-hosted/introspect.zig index adab00286b..56b56c0c78 100644 --- a/src-self-hosted/introspect.zig +++ b/src-self-hosted/introspect.zig @@ -7,7 +7,7 @@ const os = 
std.os; const warn = std.debug.warn; /// Caller must free result -pub fn testZigInstallPrefix(allocator: &mem.Allocator, test_path: []const u8) ![]u8 { +pub fn testZigInstallPrefix(allocator: *mem.Allocator, test_path: []const u8) ![]u8 { const test_zig_dir = try os.path.join(allocator, test_path, "lib", "zig"); errdefer allocator.free(test_zig_dir); @@ -21,7 +21,7 @@ pub fn testZigInstallPrefix(allocator: &mem.Allocator, test_path: []const u8) ![ } /// Caller must free result -pub fn findZigLibDir(allocator: &mem.Allocator) ![]u8 { +pub fn findZigLibDir(allocator: *mem.Allocator) ![]u8 { const self_exe_path = try os.selfExeDirPath(allocator); defer allocator.free(self_exe_path); @@ -42,7 +42,7 @@ pub fn findZigLibDir(allocator: &mem.Allocator) ![]u8 { return error.FileNotFound; } -pub fn resolveZigLibDir(allocator: &mem.Allocator) ![]u8 { +pub fn resolveZigLibDir(allocator: *mem.Allocator) ![]u8 { return findZigLibDir(allocator) catch |err| { warn( \\Unable to find zig lib directory: {}. diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig index c4550b5179..3334d9511b 100644 --- a/src-self-hosted/ir.zig +++ b/src-self-hosted/ir.zig @@ -2,7 +2,7 @@ const Scope = @import("scope.zig").Scope; pub const Instruction = struct { id: Id, - scope: &Scope, + scope: *Scope, pub const Id = enum { Br, diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig index 71838503b7..80b1c3889a 100644 --- a/src-self-hosted/main.zig +++ b/src-self-hosted/main.zig @@ -18,8 +18,8 @@ const Target = @import("target.zig").Target; const errmsg = @import("errmsg.zig"); var stderr_file: os.File = undefined; -var stderr: &io.OutStream(io.FileOutStream.Error) = undefined; -var stdout: &io.OutStream(io.FileOutStream.Error) = undefined; +var stderr: *io.OutStream(io.FileOutStream.Error) = undefined; +var stdout: *io.OutStream(io.FileOutStream.Error) = undefined; const usage = \\usage: zig [command] [options] @@ -43,7 +43,7 @@ const usage = const Command = struct { name: []const u8, - exec: fn (&Allocator, []const []const u8) error!void, + exec: fn (*Allocator, []const []const u8) error!void, }; pub fn main() !void { @@ -191,7 +191,7 @@ const missing_build_file = \\ ; -fn cmdBuild(allocator: &Allocator, args: []const []const u8) !void { +fn cmdBuild(allocator: *Allocator, args: []const []const u8) !void { var flags = try Args.parse(allocator, args_build_spec, args); defer flags.deinit(); @@ -426,7 +426,7 @@ const args_build_generic = []Flag{ Flag.Arg1("--ver-patch"), }; -fn buildOutputType(allocator: &Allocator, args: []const []const u8, out_type: Module.Kind) !void { +fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Module.Kind) !void { var flags = try Args.parse(allocator, args_build_generic, args); defer flags.deinit(); @@ -661,19 +661,19 @@ fn buildOutputType(allocator: &Allocator, args: []const []const u8, out_type: Mo try stderr.print("building {}: {}\n", @tagName(out_type), in_file); } -fn cmdBuildExe(allocator: &Allocator, args: []const []const u8) !void { +fn cmdBuildExe(allocator: *Allocator, args: []const []const u8) !void { try buildOutputType(allocator, args, Module.Kind.Exe); } // cmd:build-lib /////////////////////////////////////////////////////////////////////////////////// -fn cmdBuildLib(allocator: &Allocator, args: []const []const u8) !void { +fn cmdBuildLib(allocator: *Allocator, args: []const []const u8) !void { try buildOutputType(allocator, args, Module.Kind.Lib); } // cmd:build-obj 
/////////////////////////////////////////////////////////////////////////////////// -fn cmdBuildObj(allocator: &Allocator, args: []const []const u8) !void { +fn cmdBuildObj(allocator: *Allocator, args: []const []const u8) !void { try buildOutputType(allocator, args, Module.Kind.Obj); } @@ -700,7 +700,7 @@ const args_fmt_spec = []Flag{ }), }; -fn cmdFmt(allocator: &Allocator, args: []const []const u8) !void { +fn cmdFmt(allocator: *Allocator, args: []const []const u8) !void { var flags = try Args.parse(allocator, args_fmt_spec, args); defer flags.deinit(); @@ -768,7 +768,7 @@ fn cmdFmt(allocator: &Allocator, args: []const []const u8) !void { // cmd:targets ///////////////////////////////////////////////////////////////////////////////////// -fn cmdTargets(allocator: &Allocator, args: []const []const u8) !void { +fn cmdTargets(allocator: *Allocator, args: []const []const u8) !void { try stdout.write("Architectures:\n"); { comptime var i: usize = 0; @@ -810,7 +810,7 @@ fn cmdTargets(allocator: &Allocator, args: []const []const u8) !void { // cmd:version ///////////////////////////////////////////////////////////////////////////////////// -fn cmdVersion(allocator: &Allocator, args: []const []const u8) !void { +fn cmdVersion(allocator: *Allocator, args: []const []const u8) !void { try stdout.print("{}\n", std.cstr.toSliceConst(c.ZIG_VERSION_STRING)); } @@ -827,7 +827,7 @@ const usage_test = const args_test_spec = []Flag{Flag.Bool("--help")}; -fn cmdTest(allocator: &Allocator, args: []const []const u8) !void { +fn cmdTest(allocator: *Allocator, args: []const []const u8) !void { var flags = try Args.parse(allocator, args_build_spec, args); defer flags.deinit(); @@ -862,7 +862,7 @@ const usage_run = const args_run_spec = []Flag{Flag.Bool("--help")}; -fn cmdRun(allocator: &Allocator, args: []const []const u8) !void { +fn cmdRun(allocator: *Allocator, args: []const []const u8) !void { var compile_args = args; var runtime_args: []const []const u8 = []const []const u8{}; @@ -912,7 +912,7 @@ const args_translate_c_spec = []Flag{ Flag.Arg1("--output"), }; -fn cmdTranslateC(allocator: &Allocator, args: []const []const u8) !void { +fn cmdTranslateC(allocator: *Allocator, args: []const []const u8) !void { var flags = try Args.parse(allocator, args_translate_c_spec, args); defer flags.deinit(); @@ -958,7 +958,7 @@ fn cmdTranslateC(allocator: &Allocator, args: []const []const u8) !void { // cmd:help //////////////////////////////////////////////////////////////////////////////////////// -fn cmdHelp(allocator: &Allocator, args: []const []const u8) !void { +fn cmdHelp(allocator: *Allocator, args: []const []const u8) !void { try stderr.write(usage); } @@ -981,7 +981,7 @@ const info_zen = \\ ; -fn cmdZen(allocator: &Allocator, args: []const []const u8) !void { +fn cmdZen(allocator: *Allocator, args: []const []const u8) !void { try stdout.write(info_zen); } @@ -996,7 +996,7 @@ const usage_internal = \\ ; -fn cmdInternal(allocator: &Allocator, args: []const []const u8) !void { +fn cmdInternal(allocator: *Allocator, args: []const []const u8) !void { if (args.len == 0) { try stderr.write(usage_internal); os.exit(1); @@ -1018,7 +1018,7 @@ fn cmdInternal(allocator: &Allocator, args: []const []const u8) !void { try stderr.write(usage_internal); } -fn cmdInternalBuildInfo(allocator: &Allocator, args: []const []const u8) !void { +fn cmdInternalBuildInfo(allocator: *Allocator, args: []const []const u8) !void { try stdout.print( \\ZIG_CMAKE_BINARY_DIR {} \\ZIG_CXX_COMPILER {} diff --git a/src-self-hosted/module.zig 
b/src-self-hosted/module.zig index 61834eab66..a7ddf3f9e9 100644 --- a/src-self-hosted/module.zig +++ b/src-self-hosted/module.zig @@ -13,7 +13,7 @@ const ArrayList = std.ArrayList; const errmsg = @import("errmsg.zig"); pub const Module = struct { - allocator: &mem.Allocator, + allocator: *mem.Allocator, name: Buffer, root_src_path: ?[]const u8, module: llvm.ModuleRef, @@ -53,8 +53,8 @@ pub const Module = struct { windows_subsystem_windows: bool, windows_subsystem_console: bool, - link_libs_list: ArrayList(&LinkLib), - libc_link_lib: ?&LinkLib, + link_libs_list: ArrayList(*LinkLib), + libc_link_lib: ?*LinkLib, err_color: errmsg.Color, @@ -106,19 +106,19 @@ pub const Module = struct { pub const CliPkg = struct { name: []const u8, path: []const u8, - children: ArrayList(&CliPkg), - parent: ?&CliPkg, + children: ArrayList(*CliPkg), + parent: ?*CliPkg, - pub fn init(allocator: &mem.Allocator, name: []const u8, path: []const u8, parent: ?&CliPkg) !&CliPkg { + pub fn init(allocator: *mem.Allocator, name: []const u8, path: []const u8, parent: ?*CliPkg) !*CliPkg { var pkg = try allocator.create(CliPkg); pkg.name = name; pkg.path = path; - pkg.children = ArrayList(&CliPkg).init(allocator); + pkg.children = ArrayList(*CliPkg).init(allocator); pkg.parent = parent; return pkg; } - pub fn deinit(self: &CliPkg) void { + pub fn deinit(self: *CliPkg) void { for (self.children.toSliceConst()) |child| { child.deinit(); } @@ -126,7 +126,7 @@ pub const Module = struct { } }; - pub fn create(allocator: &mem.Allocator, name: []const u8, root_src_path: ?[]const u8, target: &const Target, kind: Kind, build_mode: builtin.Mode, zig_lib_dir: []const u8, cache_dir: []const u8) !&Module { + pub fn create(allocator: *mem.Allocator, name: []const u8, root_src_path: ?[]const u8, target: *const Target, kind: Kind, build_mode: builtin.Mode, zig_lib_dir: []const u8, cache_dir: []const u8) !*Module { var name_buffer = try Buffer.init(allocator, name); errdefer name_buffer.deinit(); @@ -188,7 +188,7 @@ pub const Module = struct { .link_objects = [][]const u8{}, .windows_subsystem_windows = false, .windows_subsystem_console = false, - .link_libs_list = ArrayList(&LinkLib).init(allocator), + .link_libs_list = ArrayList(*LinkLib).init(allocator), .libc_link_lib = null, .err_color = errmsg.Color.Auto, .darwin_frameworks = [][]const u8{}, @@ -200,11 +200,11 @@ pub const Module = struct { return module_ptr; } - fn dump(self: &Module) void { + fn dump(self: *Module) void { c.LLVMDumpModule(self.module); } - pub fn destroy(self: &Module) void { + pub fn destroy(self: *Module) void { c.LLVMDisposeBuilder(self.builder); c.LLVMDisposeModule(self.module); c.LLVMContextDispose(self.context); @@ -213,7 +213,7 @@ pub const Module = struct { self.allocator.destroy(self); } - pub fn build(self: &Module) !void { + pub fn build(self: *Module) !void { if (self.llvm_argv.len != 0) { var c_compatible_args = try std.cstr.NullTerminated2DArray.fromSlices(self.allocator, [][]const []const u8{ [][]const u8{"zig (LLVM option parsing)"}, @@ -259,12 +259,12 @@ pub const Module = struct { self.dump(); } - pub fn link(self: &Module, out_file: ?[]const u8) !void { + pub fn link(self: *Module, out_file: ?[]const u8) !void { warn("TODO link"); return error.Todo; } - pub fn addLinkLib(self: &Module, name: []const u8, provided_explicitly: bool) !&LinkLib { + pub fn addLinkLib(self: *Module, name: []const u8, provided_explicitly: bool) !*LinkLib { const is_libc = mem.eql(u8, name, "c"); if (is_libc) { diff --git a/src-self-hosted/scope.zig 
b/src-self-hosted/scope.zig index 05e586daae..b73dcb4ed3 100644 --- a/src-self-hosted/scope.zig +++ b/src-self-hosted/scope.zig @@ -1,6 +1,6 @@ pub const Scope = struct { id: Id, - parent: &Scope, + parent: *Scope, pub const Id = enum { Decls, diff --git a/src-self-hosted/target.zig b/src-self-hosted/target.zig index 7983a3ddec..724d99ea23 100644 --- a/src-self-hosted/target.zig +++ b/src-self-hosted/target.zig @@ -11,7 +11,7 @@ pub const Target = union(enum) { Native, Cross: CrossTarget, - pub fn oFileExt(self: &const Target) []const u8 { + pub fn oFileExt(self: *const Target) []const u8 { const environ = switch (self.*) { Target.Native => builtin.environ, Target.Cross => |t| t.environ, @@ -22,28 +22,28 @@ pub const Target = union(enum) { }; } - pub fn exeFileExt(self: &const Target) []const u8 { + pub fn exeFileExt(self: *const Target) []const u8 { return switch (self.getOs()) { builtin.Os.windows => ".exe", else => "", }; } - pub fn getOs(self: &const Target) builtin.Os { + pub fn getOs(self: *const Target) builtin.Os { return switch (self.*) { Target.Native => builtin.os, Target.Cross => |t| t.os, }; } - pub fn isDarwin(self: &const Target) bool { + pub fn isDarwin(self: *const Target) bool { return switch (self.getOs()) { builtin.Os.ios, builtin.Os.macosx => true, else => false, }; } - pub fn isWindows(self: &const Target) bool { + pub fn isWindows(self: *const Target) bool { return switch (self.getOs()) { builtin.Os.windows => true, else => false, diff --git a/src/all_types.hpp b/src/all_types.hpp index 9c156fb58b..b9199c2757 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -374,7 +374,7 @@ enum NodeType { NodeTypeCharLiteral, NodeTypeSymbol, NodeTypePrefixOpExpr, - NodeTypeAddrOfExpr, + NodeTypePointerType, NodeTypeFnCallExpr, NodeTypeArrayAccessExpr, NodeTypeSliceExpr, @@ -616,6 +616,7 @@ enum PrefixOp { PrefixOpNegationWrap, PrefixOpMaybe, PrefixOpUnwrapMaybe, + PrefixOpAddrOf, }; struct AstNodePrefixOpExpr { @@ -623,7 +624,7 @@ struct AstNodePrefixOpExpr { AstNode *primary_expr; }; -struct AstNodeAddrOfExpr { +struct AstNodePointerType { AstNode *align_expr; BigInt *bit_offset_start; BigInt *bit_offset_end; @@ -899,7 +900,7 @@ struct AstNode { AstNodeBinOpExpr bin_op_expr; AstNodeCatchExpr unwrap_err_expr; AstNodePrefixOpExpr prefix_op_expr; - AstNodeAddrOfExpr addr_of_expr; + AstNodePointerType pointer_type; AstNodeFnCallExpr fn_call_expr; AstNodeArrayAccessExpr array_access_expr; AstNodeSliceExpr slice_expr; @@ -2053,7 +2054,7 @@ enum IrInstructionId { IrInstructionIdTypeInfo, IrInstructionIdTypeId, IrInstructionIdSetEvalBranchQuota, - IrInstructionIdPtrTypeOf, + IrInstructionIdPtrType, IrInstructionIdAlignCast, IrInstructionIdOpaqueType, IrInstructionIdSetAlignStack, @@ -2412,6 +2413,17 @@ struct IrInstructionArrayType { IrInstruction *child_type; }; +struct IrInstructionPtrType { + IrInstruction base; + + IrInstruction *align_value; + IrInstruction *child_type; + uint32_t bit_offset_start; + uint32_t bit_offset_end; + bool is_const; + bool is_volatile; +}; + struct IrInstructionPromiseType { IrInstruction base; @@ -2891,17 +2903,6 @@ struct IrInstructionSetEvalBranchQuota { IrInstruction *new_quota; }; -struct IrInstructionPtrTypeOf { - IrInstruction base; - - IrInstruction *align_value; - IrInstruction *child_type; - uint32_t bit_offset_start; - uint32_t bit_offset_end; - bool is_const; - bool is_volatile; -}; - struct IrInstructionAlignCast { IrInstruction base; diff --git a/src/analyze.cpp b/src/analyze.cpp index b00e18a9a1..a5011035c5 100644 --- a/src/analyze.cpp 
+++ b/src/analyze.cpp @@ -418,12 +418,12 @@ TypeTableEntry *get_pointer_to_type_extra(CodeGen *g, TypeTableEntry *child_type const char *volatile_str = is_volatile ? "volatile " : ""; buf_resize(&entry->name, 0); if (unaligned_bit_count == 0 && byte_alignment == abi_alignment) { - buf_appendf(&entry->name, "&%s%s%s", const_str, volatile_str, buf_ptr(&child_type->name)); + buf_appendf(&entry->name, "*%s%s%s", const_str, volatile_str, buf_ptr(&child_type->name)); } else if (unaligned_bit_count == 0) { - buf_appendf(&entry->name, "&align(%" PRIu32 ") %s%s%s", byte_alignment, + buf_appendf(&entry->name, "*align(%" PRIu32 ") %s%s%s", byte_alignment, const_str, volatile_str, buf_ptr(&child_type->name)); } else { - buf_appendf(&entry->name, "&align(%" PRIu32 ":%" PRIu32 ":%" PRIu32 ") %s%s%s", byte_alignment, + buf_appendf(&entry->name, "*align(%" PRIu32 ":%" PRIu32 ":%" PRIu32 ") %s%s%s", byte_alignment, bit_offset, bit_offset + unaligned_bit_count, const_str, volatile_str, buf_ptr(&child_type->name)); } @@ -3270,7 +3270,7 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) { case NodeTypeThisLiteral: case NodeTypeSymbol: case NodeTypePrefixOpExpr: - case NodeTypeAddrOfExpr: + case NodeTypePointerType: case NodeTypeIfBoolExpr: case NodeTypeWhileExpr: case NodeTypeForExpr: diff --git a/src/ast_render.cpp b/src/ast_render.cpp index 5a1e81b36d..f356f406b0 100644 --- a/src/ast_render.cpp +++ b/src/ast_render.cpp @@ -68,6 +68,7 @@ static const char *prefix_op_str(PrefixOp prefix_op) { case PrefixOpBinNot: return "~"; case PrefixOpMaybe: return "?"; case PrefixOpUnwrapMaybe: return "??"; + case PrefixOpAddrOf: return "&"; } zig_unreachable(); } @@ -185,8 +186,6 @@ static const char *node_type_str(NodeType node_type) { return "Symbol"; case NodeTypePrefixOpExpr: return "PrefixOpExpr"; - case NodeTypeAddrOfExpr: - return "AddrOfExpr"; case NodeTypeUse: return "Use"; case NodeTypeBoolLiteral: @@ -251,6 +250,8 @@ static const char *node_type_str(NodeType node_type) { return "Suspend"; case NodeTypePromiseType: return "PromiseType"; + case NodeTypePointerType: + return "PointerType"; } zig_unreachable(); } @@ -616,41 +617,41 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) { fprintf(ar->f, "%s", prefix_op_str(op)); AstNode *child_node = node->data.prefix_op_expr.primary_expr; - bool new_grouped = child_node->type == NodeTypePrefixOpExpr || child_node->type == NodeTypeAddrOfExpr; + bool new_grouped = child_node->type == NodeTypePrefixOpExpr || child_node->type == NodeTypePointerType; render_node_extra(ar, child_node, new_grouped); if (!grouped) fprintf(ar->f, ")"); break; } - case NodeTypeAddrOfExpr: + case NodeTypePointerType: { if (!grouped) fprintf(ar->f, "("); - fprintf(ar->f, "&"); - if (node->data.addr_of_expr.align_expr != nullptr) { + fprintf(ar->f, "*"); + if (node->data.pointer_type.align_expr != nullptr) { fprintf(ar->f, "align("); - render_node_grouped(ar, node->data.addr_of_expr.align_expr); - if (node->data.addr_of_expr.bit_offset_start != nullptr) { - assert(node->data.addr_of_expr.bit_offset_end != nullptr); + render_node_grouped(ar, node->data.pointer_type.align_expr); + if (node->data.pointer_type.bit_offset_start != nullptr) { + assert(node->data.pointer_type.bit_offset_end != nullptr); Buf offset_start_buf = BUF_INIT; buf_resize(&offset_start_buf, 0); - bigint_append_buf(&offset_start_buf, node->data.addr_of_expr.bit_offset_start, 10); + bigint_append_buf(&offset_start_buf, node->data.pointer_type.bit_offset_start, 10); Buf offset_end_buf = 
BUF_INIT; buf_resize(&offset_end_buf, 0); - bigint_append_buf(&offset_end_buf, node->data.addr_of_expr.bit_offset_end, 10); + bigint_append_buf(&offset_end_buf, node->data.pointer_type.bit_offset_end, 10); fprintf(ar->f, ":%s:%s ", buf_ptr(&offset_start_buf), buf_ptr(&offset_end_buf)); } fprintf(ar->f, ") "); } - if (node->data.addr_of_expr.is_const) { + if (node->data.pointer_type.is_const) { fprintf(ar->f, "const "); } - if (node->data.addr_of_expr.is_volatile) { + if (node->data.pointer_type.is_volatile) { fprintf(ar->f, "volatile "); } - render_node_ungrouped(ar, node->data.addr_of_expr.op_expr); + render_node_ungrouped(ar, node->data.pointer_type.op_expr); if (!grouped) fprintf(ar->f, ")"); break; } @@ -669,7 +670,7 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) { fprintf(ar->f, " "); } AstNode *fn_ref_node = node->data.fn_call_expr.fn_ref_expr; - bool grouped = (fn_ref_node->type != NodeTypePrefixOpExpr && fn_ref_node->type != NodeTypeAddrOfExpr); + bool grouped = (fn_ref_node->type != NodeTypePrefixOpExpr && fn_ref_node->type != NodeTypePointerType); render_node_extra(ar, fn_ref_node, grouped); fprintf(ar->f, "("); for (size_t i = 0; i < node->data.fn_call_expr.params.length; i += 1) { diff --git a/src/codegen.cpp b/src/codegen.cpp index 69542b3e67..d07d427729 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -4600,7 +4600,7 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, case IrInstructionIdTypeInfo: case IrInstructionIdTypeId: case IrInstructionIdSetEvalBranchQuota: - case IrInstructionIdPtrTypeOf: + case IrInstructionIdPtrType: case IrInstructionIdOpaqueType: case IrInstructionIdSetAlignStack: case IrInstructionIdArgType: diff --git a/src/ir.cpp b/src/ir.cpp index 6e944a8976..b1fac9f485 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -41,10 +41,6 @@ struct IrAnalyze { static const LVal LVAL_NONE = { false, false, false }; static const LVal LVAL_PTR = { true, false, false }; -static LVal make_lval_addr(bool is_const, bool is_volatile) { - return { true, is_const, is_volatile }; -} - enum ConstCastResultId { ConstCastResultIdOk, ConstCastResultIdErrSet, @@ -629,8 +625,8 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionSetEvalBranchQuo return IrInstructionIdSetEvalBranchQuota; } -static constexpr IrInstructionId ir_instruction_id(IrInstructionPtrTypeOf *) { - return IrInstructionIdPtrTypeOf; +static constexpr IrInstructionId ir_instruction_id(IrInstructionPtrType *) { + return IrInstructionIdPtrType; } static constexpr IrInstructionId ir_instruction_id(IrInstructionAlignCast *) { @@ -1196,11 +1192,11 @@ static IrInstruction *ir_build_br_from(IrBuilder *irb, IrInstruction *old_instru return new_instruction; } -static IrInstruction *ir_build_ptr_type_of(IrBuilder *irb, Scope *scope, AstNode *source_node, +static IrInstruction *ir_build_ptr_type(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *child_type, bool is_const, bool is_volatile, IrInstruction *align_value, uint32_t bit_offset_start, uint32_t bit_offset_end) { - IrInstructionPtrTypeOf *ptr_type_of_instruction = ir_build_instruction(irb, scope, source_node); + IrInstructionPtrType *ptr_type_of_instruction = ir_build_instruction(irb, scope, source_node); ptr_type_of_instruction->align_value = align_value; ptr_type_of_instruction->child_type = child_type; ptr_type_of_instruction->is_const = is_const; @@ -4609,14 +4605,8 @@ static IrInstruction *ir_gen_if_bool_expr(IrBuilder *irb, Scope *scope, AstNode } static IrInstruction 
*ir_gen_prefix_op_id_lval(IrBuilder *irb, Scope *scope, AstNode *node, IrUnOp op_id, LVal lval) { - AstNode *expr_node; - if (node->type == NodeTypePrefixOpExpr) { - expr_node = node->data.prefix_op_expr.primary_expr; - } else if (node->type == NodeTypePtrDeref) { - expr_node = node->data.ptr_deref_expr.target; - } else { - zig_unreachable(); - } + assert(node->type == NodeTypePrefixOpExpr); + AstNode *expr_node = node->data.prefix_op_expr.primary_expr; IrInstruction *value = ir_gen_node_extra(irb, expr_node, scope, lval); if (value == irb->codegen->invalid_instruction) @@ -4640,16 +4630,12 @@ static IrInstruction *ir_lval_wrap(IrBuilder *irb, Scope *scope, IrInstruction * return ir_build_ref(irb, scope, value->source_node, value, lval.is_const, lval.is_volatile); } -static IrInstruction *ir_gen_address_of(IrBuilder *irb, Scope *scope, AstNode *node) { - assert(node->type == NodeTypeAddrOfExpr); - bool is_const = node->data.addr_of_expr.is_const; - bool is_volatile = node->data.addr_of_expr.is_volatile; - AstNode *expr_node = node->data.addr_of_expr.op_expr; - AstNode *align_expr = node->data.addr_of_expr.align_expr; - - if (align_expr == nullptr && !is_const && !is_volatile) { - return ir_gen_node_extra(irb, expr_node, scope, make_lval_addr(is_const, is_volatile)); - } +static IrInstruction *ir_gen_pointer_type(IrBuilder *irb, Scope *scope, AstNode *node) { + assert(node->type == NodeTypePointerType); + bool is_const = node->data.pointer_type.is_const; + bool is_volatile = node->data.pointer_type.is_volatile; + AstNode *expr_node = node->data.pointer_type.op_expr; + AstNode *align_expr = node->data.pointer_type.align_expr; IrInstruction *align_value; if (align_expr != nullptr) { @@ -4665,27 +4651,27 @@ static IrInstruction *ir_gen_address_of(IrBuilder *irb, Scope *scope, AstNode *n return child_type; uint32_t bit_offset_start = 0; - if (node->data.addr_of_expr.bit_offset_start != nullptr) { - if (!bigint_fits_in_bits(node->data.addr_of_expr.bit_offset_start, 32, false)) { + if (node->data.pointer_type.bit_offset_start != nullptr) { + if (!bigint_fits_in_bits(node->data.pointer_type.bit_offset_start, 32, false)) { Buf *val_buf = buf_alloc(); - bigint_append_buf(val_buf, node->data.addr_of_expr.bit_offset_start, 10); + bigint_append_buf(val_buf, node->data.pointer_type.bit_offset_start, 10); exec_add_error_node(irb->codegen, irb->exec, node, buf_sprintf("value %s too large for u32 bit offset", buf_ptr(val_buf))); return irb->codegen->invalid_instruction; } - bit_offset_start = bigint_as_unsigned(node->data.addr_of_expr.bit_offset_start); + bit_offset_start = bigint_as_unsigned(node->data.pointer_type.bit_offset_start); } uint32_t bit_offset_end = 0; - if (node->data.addr_of_expr.bit_offset_end != nullptr) { - if (!bigint_fits_in_bits(node->data.addr_of_expr.bit_offset_end, 32, false)) { + if (node->data.pointer_type.bit_offset_end != nullptr) { + if (!bigint_fits_in_bits(node->data.pointer_type.bit_offset_end, 32, false)) { Buf *val_buf = buf_alloc(); - bigint_append_buf(val_buf, node->data.addr_of_expr.bit_offset_end, 10); + bigint_append_buf(val_buf, node->data.pointer_type.bit_offset_end, 10); exec_add_error_node(irb->codegen, irb->exec, node, buf_sprintf("value %s too large for u32 bit offset", buf_ptr(val_buf))); return irb->codegen->invalid_instruction; } - bit_offset_end = bigint_as_unsigned(node->data.addr_of_expr.bit_offset_end); + bit_offset_end = bigint_as_unsigned(node->data.pointer_type.bit_offset_end); } if ((bit_offset_start != 0 || bit_offset_end != 0) && bit_offset_start >= 
bit_offset_end) { @@ -4694,7 +4680,7 @@ static IrInstruction *ir_gen_address_of(IrBuilder *irb, Scope *scope, AstNode *n return irb->codegen->invalid_instruction; } - return ir_build_ptr_type_of(irb, scope, node, child_type, is_const, is_volatile, + return ir_build_ptr_type(irb, scope, node, child_type, is_const, is_volatile, align_value, bit_offset_start, bit_offset_end); } @@ -4761,6 +4747,10 @@ static IrInstruction *ir_gen_prefix_op_expr(IrBuilder *irb, Scope *scope, AstNod return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpMaybe), lval); case PrefixOpUnwrapMaybe: return ir_gen_maybe_assert_ok(irb, scope, node, lval); + case PrefixOpAddrOf: { + AstNode *expr_node = node->data.prefix_op_expr.primary_expr; + return ir_lval_wrap(irb, scope, ir_gen_node_extra(irb, expr_node, scope, LVAL_PTR), lval); + } } zig_unreachable(); } @@ -6568,8 +6558,6 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop return ir_lval_wrap(irb, scope, ir_gen_if_bool_expr(irb, scope, node), lval); case NodeTypePrefixOpExpr: return ir_gen_prefix_op_expr(irb, scope, node, lval); - case NodeTypeAddrOfExpr: - return ir_lval_wrap(irb, scope, ir_gen_address_of(irb, scope, node), lval); case NodeTypeContainerInitExpr: return ir_lval_wrap(irb, scope, ir_gen_container_init_expr(irb, scope, node), lval); case NodeTypeVariableDeclaration: @@ -6592,14 +6580,23 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop return ir_build_load_ptr(irb, scope, node, ptr_instruction); } - case NodeTypePtrDeref: - return ir_gen_prefix_op_id_lval(irb, scope, node, IrUnOpDereference, lval); + case NodeTypePtrDeref: { + assert(node->type == NodeTypePtrDeref); + AstNode *expr_node = node->data.ptr_deref_expr.target; + IrInstruction *value = ir_gen_node_extra(irb, expr_node, scope, lval); + if (value == irb->codegen->invalid_instruction) + return value; + + return ir_build_un_op(irb, scope, node, IrUnOpDereference, value); + } case NodeTypeThisLiteral: return ir_lval_wrap(irb, scope, ir_gen_this_literal(irb, scope, node), lval); case NodeTypeBoolLiteral: return ir_lval_wrap(irb, scope, ir_gen_bool_literal(irb, scope, node), lval); case NodeTypeArrayType: return ir_lval_wrap(irb, scope, ir_gen_array_type(irb, scope, node), lval); + case NodeTypePointerType: + return ir_lval_wrap(irb, scope, ir_gen_pointer_type(irb, scope, node), lval); case NodeTypePromiseType: return ir_lval_wrap(irb, scope, ir_gen_promise_type(irb, scope, node), lval); case NodeTypeStringLiteral: @@ -8961,6 +8958,7 @@ static IrInstruction *ir_get_const_ptr(IrAnalyze *ira, IrInstruction *instructio ConstExprValue *pointee, TypeTableEntry *pointee_type, ConstPtrMut ptr_mut, bool ptr_is_const, bool ptr_is_volatile, uint32_t ptr_align) { + // TODO remove this special case for types if (pointee_type->id == TypeTableEntryIdMetaType) { TypeTableEntry *type_entry = pointee->data.x_type; if (type_entry->id == TypeTableEntryIdUnreachable) { @@ -18778,11 +18776,16 @@ static TypeTableEntry *ir_analyze_instruction_ptr_to_int(IrAnalyze *ira, IrInstr return usize; } -static TypeTableEntry *ir_analyze_instruction_ptr_type_of(IrAnalyze *ira, IrInstructionPtrTypeOf *instruction) { +static TypeTableEntry *ir_analyze_instruction_ptr_type(IrAnalyze *ira, IrInstructionPtrType *instruction) { TypeTableEntry *child_type = ir_resolve_type(ira, instruction->child_type->other); if (type_is_invalid(child_type)) return ira->codegen->builtin_types.entry_invalid; + if (child_type->id == TypeTableEntryIdUnreachable) { + 
ir_add_error(ira, &instruction->base, buf_sprintf("pointer to noreturn not allowed")); + return ira->codegen->builtin_types.entry_invalid; + } + uint32_t align_bytes; if (instruction->align_value != nullptr) { if (!ir_resolve_align(ira, instruction->align_value->other, &align_bytes)) @@ -19606,8 +19609,8 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi return ir_analyze_instruction_type_id(ira, (IrInstructionTypeId *)instruction); case IrInstructionIdSetEvalBranchQuota: return ir_analyze_instruction_set_eval_branch_quota(ira, (IrInstructionSetEvalBranchQuota *)instruction); - case IrInstructionIdPtrTypeOf: - return ir_analyze_instruction_ptr_type_of(ira, (IrInstructionPtrTypeOf *)instruction); + case IrInstructionIdPtrType: + return ir_analyze_instruction_ptr_type(ira, (IrInstructionPtrType *)instruction); case IrInstructionIdAlignCast: return ir_analyze_instruction_align_cast(ira, (IrInstructionAlignCast *)instruction); case IrInstructionIdOpaqueType: @@ -19783,7 +19786,7 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdCheckStatementIsVoid: case IrInstructionIdPanic: case IrInstructionIdSetEvalBranchQuota: - case IrInstructionIdPtrTypeOf: + case IrInstructionIdPtrType: case IrInstructionIdSetAlignStack: case IrInstructionIdExport: case IrInstructionIdCancel: diff --git a/src/ir_print.cpp b/src/ir_print.cpp index 9678120f1d..3c177a8bbf 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -921,7 +921,7 @@ static void ir_print_can_implicit_cast(IrPrint *irp, IrInstructionCanImplicitCas fprintf(irp->f, ")"); } -static void ir_print_ptr_type_of(IrPrint *irp, IrInstructionPtrTypeOf *instruction) { +static void ir_print_ptr_type(IrPrint *irp, IrInstructionPtrType *instruction) { fprintf(irp->f, "&"); if (instruction->align_value != nullptr) { fprintf(irp->f, "align("); @@ -1527,8 +1527,8 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdCanImplicitCast: ir_print_can_implicit_cast(irp, (IrInstructionCanImplicitCast *)instruction); break; - case IrInstructionIdPtrTypeOf: - ir_print_ptr_type_of(irp, (IrInstructionPtrTypeOf *)instruction); + case IrInstructionIdPtrType: + ir_print_ptr_type(irp, (IrInstructionPtrType *)instruction); break; case IrInstructionIdDeclRef: ir_print_decl_ref(irp, (IrInstructionDeclRef *)instruction); diff --git a/src/parser.cpp b/src/parser.cpp index 4763d3b987..ef390a3a2e 100644 --- a/src/parser.cpp +++ b/src/parser.cpp @@ -1167,20 +1167,19 @@ static PrefixOp tok_to_prefix_op(Token *token) { case TokenIdTilde: return PrefixOpBinNot; case TokenIdMaybe: return PrefixOpMaybe; case TokenIdDoubleQuestion: return PrefixOpUnwrapMaybe; + case TokenIdAmpersand: return PrefixOpAddrOf; default: return PrefixOpInvalid; } } -static AstNode *ast_parse_addr_of(ParseContext *pc, size_t *token_index) { - Token *ampersand_tok = ast_eat_token(pc, token_index, TokenIdAmpersand); - - AstNode *node = ast_create_node(pc, NodeTypeAddrOfExpr, ampersand_tok); +static AstNode *ast_parse_pointer_type(ParseContext *pc, size_t *token_index, Token *star_tok) { + AstNode *node = ast_create_node(pc, NodeTypePointerType, star_tok); Token *token = &pc->tokens->at(*token_index); if (token->id == TokenIdKeywordAlign) { *token_index += 1; ast_eat_token(pc, token_index, TokenIdLParen); - node->data.addr_of_expr.align_expr = ast_parse_expression(pc, token_index, true); + node->data.pointer_type.align_expr = ast_parse_expression(pc, token_index, true); token = &pc->tokens->at(*token_index); if 
(token->id == TokenIdColon) { @@ -1189,24 +1188,24 @@ static AstNode *ast_parse_addr_of(ParseContext *pc, size_t *token_index) { ast_eat_token(pc, token_index, TokenIdColon); Token *bit_offset_end_tok = ast_eat_token(pc, token_index, TokenIdIntLiteral); - node->data.addr_of_expr.bit_offset_start = token_bigint(bit_offset_start_tok); - node->data.addr_of_expr.bit_offset_end = token_bigint(bit_offset_end_tok); + node->data.pointer_type.bit_offset_start = token_bigint(bit_offset_start_tok); + node->data.pointer_type.bit_offset_end = token_bigint(bit_offset_end_tok); } ast_eat_token(pc, token_index, TokenIdRParen); token = &pc->tokens->at(*token_index); } if (token->id == TokenIdKeywordConst) { *token_index += 1; - node->data.addr_of_expr.is_const = true; + node->data.pointer_type.is_const = true; token = &pc->tokens->at(*token_index); } if (token->id == TokenIdKeywordVolatile) { *token_index += 1; - node->data.addr_of_expr.is_volatile = true; + node->data.pointer_type.is_volatile = true; } - node->data.addr_of_expr.op_expr = ast_parse_prefix_op_expr(pc, token_index, true); + node->data.pointer_type.op_expr = ast_parse_prefix_op_expr(pc, token_index, true); return node; } @@ -1216,8 +1215,17 @@ PrefixOp = "!" | "-" | "~" | ("*" option("align" "(" Expression option(":" Integ */ static AstNode *ast_parse_prefix_op_expr(ParseContext *pc, size_t *token_index, bool mandatory) { Token *token = &pc->tokens->at(*token_index); - if (token->id == TokenIdAmpersand) { - return ast_parse_addr_of(pc, token_index); + if (token->id == TokenIdStar) { + *token_index += 1; + return ast_parse_pointer_type(pc, token_index, token); + } + if (token->id == TokenIdStarStar) { + *token_index += 1; + AstNode *child_node = ast_parse_pointer_type(pc, token_index, token); + child_node->column += 1; + AstNode *parent_node = ast_create_node(pc, NodeTypePointerType, token); + parent_node->data.pointer_type.op_expr = child_node; + return parent_node; } if (token->id == TokenIdKeywordTry) { return ast_parse_try_expr(pc, token_index); @@ -1234,13 +1242,12 @@ static AstNode *ast_parse_prefix_op_expr(ParseContext *pc, size_t *token_index, AstNode *node = ast_create_node(pc, NodeTypePrefixOpExpr, token); - AstNode *parent_node = node; AstNode *prefix_op_expr = ast_parse_error_set_expr(pc, token_index, true); node->data.prefix_op_expr.primary_expr = prefix_op_expr; node->data.prefix_op_expr.prefix_op = prefix_op; - return parent_node; + return node; } @@ -3121,9 +3128,9 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont case NodeTypeErrorType: // none break; - case NodeTypeAddrOfExpr: - visit_field(&node->data.addr_of_expr.align_expr, visit, context); - visit_field(&node->data.addr_of_expr.op_expr, visit, context); + case NodeTypePointerType: + visit_field(&node->data.pointer_type.align_expr, visit, context); + visit_field(&node->data.pointer_type.op_expr, visit, context); break; case NodeTypeErrorSetDecl: visit_node_list(&node->data.err_set_decl.decls, visit, context); diff --git a/src/translate_c.cpp b/src/translate_c.cpp index 50ff073008..db541d34f3 100644 --- a/src/translate_c.cpp +++ b/src/translate_c.cpp @@ -276,11 +276,18 @@ static AstNode *maybe_suppress_result(Context *c, ResultUsed result_used, AstNod node); } -static AstNode *trans_create_node_addr_of(Context *c, bool is_const, bool is_volatile, AstNode *child_node) { - AstNode *node = trans_create_node(c, NodeTypeAddrOfExpr); - node->data.addr_of_expr.is_const = is_const; - node->data.addr_of_expr.is_volatile = is_volatile; - 
node->data.addr_of_expr.op_expr = child_node; +static AstNode *trans_create_node_ptr_type(Context *c, bool is_const, bool is_volatile, AstNode *child_node) { + AstNode *node = trans_create_node(c, NodeTypePointerType); + node->data.pointer_type.is_const = is_const; + node->data.pointer_type.is_volatile = is_volatile; + node->data.pointer_type.op_expr = child_node; + return node; +} + +static AstNode *trans_create_node_addr_of(Context *c, AstNode *child_node) { + AstNode *node = trans_create_node(c, NodeTypePrefixOpExpr); + node->data.prefix_op_expr.prefix_op = PrefixOpAddrOf; + node->data.prefix_op_expr.primary_expr = child_node; return node; } @@ -848,7 +855,7 @@ static AstNode *trans_type(Context *c, const Type *ty, const SourceLocation &sou return trans_create_node_prefix_op(c, PrefixOpMaybe, child_node); } - AstNode *pointer_node = trans_create_node_addr_of(c, child_qt.isConstQualified(), + AstNode *pointer_node = trans_create_node_ptr_type(c, child_qt.isConstQualified(), child_qt.isVolatileQualified(), child_node); return trans_create_node_prefix_op(c, PrefixOpMaybe, pointer_node); } @@ -1033,7 +1040,7 @@ static AstNode *trans_type(Context *c, const Type *ty, const SourceLocation &sou emit_warning(c, source_loc, "unresolved array element type"); return nullptr; } - AstNode *pointer_node = trans_create_node_addr_of(c, child_qt.isConstQualified(), + AstNode *pointer_node = trans_create_node_ptr_type(c, child_qt.isConstQualified(), child_qt.isVolatileQualified(), child_type_node); return pointer_node; } @@ -1402,7 +1409,7 @@ static AstNode *trans_create_compound_assign_shift(Context *c, ResultUsed result // const _ref = &lhs; AstNode *lhs = trans_expr(c, ResultUsedYes, &child_scope->base, stmt->getLHS(), TransLValue); if (lhs == nullptr) return nullptr; - AstNode *addr_of_lhs = trans_create_node_addr_of(c, false, false, lhs); + AstNode *addr_of_lhs = trans_create_node_addr_of(c, lhs); // TODO: avoid name collisions with generated variable names Buf* tmp_var_name = buf_create_from_str("_ref"); AstNode *tmp_var_decl = trans_create_node_var_decl_local(c, true, tmp_var_name, nullptr, addr_of_lhs); @@ -1476,7 +1483,7 @@ static AstNode *trans_create_compound_assign(Context *c, ResultUsed result_used, // const _ref = &lhs; AstNode *lhs = trans_expr(c, ResultUsedYes, &child_scope->base, stmt->getLHS(), TransLValue); if (lhs == nullptr) return nullptr; - AstNode *addr_of_lhs = trans_create_node_addr_of(c, false, false, lhs); + AstNode *addr_of_lhs = trans_create_node_addr_of(c, lhs); // TODO: avoid name collisions with generated variable names Buf* tmp_var_name = buf_create_from_str("_ref"); AstNode *tmp_var_decl = trans_create_node_var_decl_local(c, true, tmp_var_name, nullptr, addr_of_lhs); @@ -1813,7 +1820,7 @@ static AstNode *trans_create_post_crement(Context *c, ResultUsed result_used, Tr // const _ref = &expr; AstNode *expr = trans_expr(c, ResultUsedYes, &child_scope->base, op_expr, TransLValue); if (expr == nullptr) return nullptr; - AstNode *addr_of_expr = trans_create_node_addr_of(c, false, false, expr); + AstNode *addr_of_expr = trans_create_node_addr_of(c, expr); // TODO: avoid name collisions with generated variable names Buf* ref_var_name = buf_create_from_str("_ref"); AstNode *ref_var_decl = trans_create_node_var_decl_local(c, true, ref_var_name, nullptr, addr_of_expr); @@ -1868,7 +1875,7 @@ static AstNode *trans_create_pre_crement(Context *c, ResultUsed result_used, Tra // const _ref = &expr; AstNode *expr = trans_expr(c, ResultUsedYes, &child_scope->base, op_expr, TransLValue); if 
(expr == nullptr) return nullptr; - AstNode *addr_of_expr = trans_create_node_addr_of(c, false, false, expr); + AstNode *addr_of_expr = trans_create_node_addr_of(c, expr); // TODO: avoid name collisions with generated variable names Buf* ref_var_name = buf_create_from_str("_ref"); AstNode *ref_var_decl = trans_create_node_var_decl_local(c, true, ref_var_name, nullptr, addr_of_expr); @@ -1917,7 +1924,7 @@ static AstNode *trans_unary_operator(Context *c, ResultUsed result_used, TransSc AstNode *value_node = trans_expr(c, result_used, scope, stmt->getSubExpr(), TransLValue); if (value_node == nullptr) return value_node; - return trans_create_node_addr_of(c, false, false, value_node); + return trans_create_node_addr_of(c, value_node); } case UO_Deref: { @@ -4441,7 +4448,7 @@ static AstNode *parse_ctok_suffix_op_expr(Context *c, CTokenize *ctok, size_t *t } else if (first_tok->id == CTokIdAsterisk) { *tok_i += 1; - node = trans_create_node_addr_of(c, false, false, node); + node = trans_create_node_ptr_type(c, false, false, node); } else { return node; } diff --git a/std/array_list.zig b/std/array_list.zig index b315194c33..07a1db6451 100644 --- a/std/array_list.zig +++ b/std/array_list.zig @@ -17,10 +17,10 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type { /// you uninitialized memory. items: []align(A) T, len: usize, - allocator: &Allocator, + allocator: *Allocator, /// Deinitialize with `deinit` or use `toOwnedSlice`. - pub fn init(allocator: &Allocator) Self { + pub fn init(allocator: *Allocator) Self { return Self{ .items = []align(A) T{}, .len = 0, @@ -28,30 +28,30 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type { }; } - pub fn deinit(l: &const Self) void { + pub fn deinit(l: *const Self) void { l.allocator.free(l.items); } - pub fn toSlice(l: &const Self) []align(A) T { + pub fn toSlice(l: *const Self) []align(A) T { return l.items[0..l.len]; } - pub fn toSliceConst(l: &const Self) []align(A) const T { + pub fn toSliceConst(l: *const Self) []align(A) const T { return l.items[0..l.len]; } - pub fn at(l: &const Self, n: usize) T { + pub fn at(l: *const Self, n: usize) T { return l.toSliceConst()[n]; } - pub fn count(self: &const Self) usize { + pub fn count(self: *const Self) usize { return self.len; } /// ArrayList takes ownership of the passed in slice. The slice must have been /// allocated with `allocator`. /// Deinitialize with `deinit` or use `toOwnedSlice`. - pub fn fromOwnedSlice(allocator: &Allocator, slice: []align(A) T) Self { + pub fn fromOwnedSlice(allocator: *Allocator, slice: []align(A) T) Self { return Self{ .items = slice, .len = slice.len, @@ -60,14 +60,14 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type { } /// The caller owns the returned memory. ArrayList becomes empty. 
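For reference, a minimal sketch, not part of the patch, of how the converted ArrayList API reads under the new *T pointer syntax. Only init, append, deinit, and toOwnedSlice come from the hunks in this file; the helper function itself is hypothetical.

const std = @import("std");
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;

// Hypothetical helper: collect the first n squares.
// The caller owns the returned slice and frees it with `allocator`.
fn firstSquares(allocator: *Allocator, n: usize) ![]i32 {
    var list = ArrayList(i32).init(allocator); // *Allocator rather than &Allocator
    errdefer list.deinit();

    var i: usize = 0;
    while (i < n) : (i += 1) {
        try list.append(@intCast(i32, i * i));
    }
    // Hands ownership of the backing slice to the caller; the list becomes empty.
    return list.toOwnedSlice();
}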
- pub fn toOwnedSlice(self: &Self) []align(A) T { + pub fn toOwnedSlice(self: *Self) []align(A) T { const allocator = self.allocator; const result = allocator.alignedShrink(T, A, self.items, self.len); self.* = init(allocator); return result; } - pub fn insert(l: &Self, n: usize, item: &const T) !void { + pub fn insert(l: *Self, n: usize, item: *const T) !void { try l.ensureCapacity(l.len + 1); l.len += 1; @@ -75,7 +75,7 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type { l.items[n] = item.*; } - pub fn insertSlice(l: &Self, n: usize, items: []align(A) const T) !void { + pub fn insertSlice(l: *Self, n: usize, items: []align(A) const T) !void { try l.ensureCapacity(l.len + items.len); l.len += items.len; @@ -83,28 +83,28 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type { mem.copy(T, l.items[n .. n + items.len], items); } - pub fn append(l: &Self, item: &const T) !void { + pub fn append(l: *Self, item: *const T) !void { const new_item_ptr = try l.addOne(); new_item_ptr.* = item.*; } - pub fn appendSlice(l: &Self, items: []align(A) const T) !void { + pub fn appendSlice(l: *Self, items: []align(A) const T) !void { try l.ensureCapacity(l.len + items.len); mem.copy(T, l.items[l.len..], items); l.len += items.len; } - pub fn resize(l: &Self, new_len: usize) !void { + pub fn resize(l: *Self, new_len: usize) !void { try l.ensureCapacity(new_len); l.len = new_len; } - pub fn shrink(l: &Self, new_len: usize) void { + pub fn shrink(l: *Self, new_len: usize) void { assert(new_len <= l.len); l.len = new_len; } - pub fn ensureCapacity(l: &Self, new_capacity: usize) !void { + pub fn ensureCapacity(l: *Self, new_capacity: usize) !void { var better_capacity = l.items.len; if (better_capacity >= new_capacity) return; while (true) { @@ -114,7 +114,7 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type { l.items = try l.allocator.alignedRealloc(T, A, l.items, better_capacity); } - pub fn addOne(l: &Self) !&T { + pub fn addOne(l: *Self) !*T { const new_length = l.len + 1; try l.ensureCapacity(new_length); const result = &l.items[l.len]; @@ -122,34 +122,34 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type { return result; } - pub fn pop(self: &Self) T { + pub fn pop(self: *Self) T { self.len -= 1; return self.items[self.len]; } - pub fn popOrNull(self: &Self) ?T { + pub fn popOrNull(self: *Self) ?T { if (self.len == 0) return null; return self.pop(); } pub const Iterator = struct { - list: &const Self, + list: *const Self, // how many items have we returned count: usize, - pub fn next(it: &Iterator) ?T { + pub fn next(it: *Iterator) ?T { if (it.count >= it.list.len) return null; const val = it.list.at(it.count); it.count += 1; return val; } - pub fn reset(it: &Iterator) void { + pub fn reset(it: *Iterator) void { it.count = 0; } }; - pub fn iterator(self: &const Self) Iterator { + pub fn iterator(self: *const Self) Iterator { return Iterator{ .list = self, .count = 0, diff --git a/std/atomic/queue.zig b/std/atomic/queue.zig index 35180da8d1..142c958173 100644 --- a/std/atomic/queue.zig +++ b/std/atomic/queue.zig @@ -5,36 +5,36 @@ const AtomicRmwOp = builtin.AtomicRmwOp; /// Many reader, many writer, non-allocating, thread-safe, lock-free pub fn Queue(comptime T: type) type { return struct { - head: &Node, - tail: &Node, + head: *Node, + tail: *Node, root: Node, pub const Self = this; pub const Node = struct { - next: ?&Node, + next: ?*Node, data: T, }; // TODO: well defined copy elision: https://github.com/ziglang/zig/issues/287 - pub fn 
init(self: &Self) void { + pub fn init(self: *Self) void { self.root.next = null; self.head = &self.root; self.tail = &self.root; } - pub fn put(self: &Self, node: &Node) void { + pub fn put(self: *Self, node: *Node) void { node.next = null; - const tail = @atomicRmw(&Node, &self.tail, AtomicRmwOp.Xchg, node, AtomicOrder.SeqCst); - _ = @atomicRmw(?&Node, &tail.next, AtomicRmwOp.Xchg, node, AtomicOrder.SeqCst); + const tail = @atomicRmw(*Node, &self.tail, AtomicRmwOp.Xchg, node, AtomicOrder.SeqCst); + _ = @atomicRmw(?*Node, &tail.next, AtomicRmwOp.Xchg, node, AtomicOrder.SeqCst); } - pub fn get(self: &Self) ?&Node { - var head = @atomicLoad(&Node, &self.head, AtomicOrder.SeqCst); + pub fn get(self: *Self) ?*Node { + var head = @atomicLoad(*Node, &self.head, AtomicOrder.SeqCst); while (true) { const node = head.next ?? return null; - head = @cmpxchgWeak(&Node, &self.head, head, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? return node; + head = @cmpxchgWeak(*Node, &self.head, head, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? return node; } } }; @@ -42,8 +42,8 @@ pub fn Queue(comptime T: type) type { const std = @import("std"); const Context = struct { - allocator: &std.mem.Allocator, - queue: &Queue(i32), + allocator: *std.mem.Allocator, + queue: *Queue(i32), put_sum: isize, get_sum: isize, get_count: usize, @@ -79,11 +79,11 @@ test "std.atomic.queue" { .get_count = 0, }; - var putters: [put_thread_count]&std.os.Thread = undefined; + var putters: [put_thread_count]*std.os.Thread = undefined; for (putters) |*t| { t.* = try std.os.spawnThread(&context, startPuts); } - var getters: [put_thread_count]&std.os.Thread = undefined; + var getters: [put_thread_count]*std.os.Thread = undefined; for (getters) |*t| { t.* = try std.os.spawnThread(&context, startGets); } @@ -98,7 +98,7 @@ test "std.atomic.queue" { std.debug.assert(context.get_count == puts_per_thread * put_thread_count); } -fn startPuts(ctx: &Context) u8 { +fn startPuts(ctx: *Context) u8 { var put_count: usize = puts_per_thread; var r = std.rand.DefaultPrng.init(0xdeadbeef); while (put_count != 0) : (put_count -= 1) { @@ -112,7 +112,7 @@ fn startPuts(ctx: &Context) u8 { return 0; } -fn startGets(ctx: &Context) u8 { +fn startGets(ctx: *Context) u8 { while (true) { while (ctx.queue.get()) |node| { std.os.time.sleep(0, 1); // let the os scheduler be our fuzz diff --git a/std/atomic/stack.zig b/std/atomic/stack.zig index 400a1a3c4f..15611188d2 100644 --- a/std/atomic/stack.zig +++ b/std/atomic/stack.zig @@ -4,12 +4,12 @@ const AtomicOrder = builtin.AtomicOrder; /// Many reader, many writer, non-allocating, thread-safe, lock-free pub fn Stack(comptime T: type) type { return struct { - root: ?&Node, + root: ?*Node, pub const Self = this; pub const Node = struct { - next: ?&Node, + next: ?*Node, data: T, }; @@ -19,36 +19,36 @@ pub fn Stack(comptime T: type) type { /// push operation, but only if you are the first item in the stack. if you did not succeed in /// being the first item in the stack, returns the other item that was there. 
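As a usage illustration, not part of the patch, here is a single-threaded sketch of the lock-free Stack with its converted ?*Node fields. The std.atomic.Stack import path is an assumption; push, pop, and isEmpty are the functions defined in this file.

const std = @import("std");
const S = std.atomic.Stack(i32); // assumed export path for this file's Stack

test "single-threaded push and pop with *Node" {
    var stack = S{ .root = null };

    var node = S.Node{
        .next = null, // push() overwrites this before linking the node in
        .data = 42,
    };

    stack.push(&node);
    const popped = stack.pop() ?? unreachable;
    std.debug.assert(popped.data == 42);
    std.debug.assert(stack.isEmpty());
}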
- pub fn pushFirst(self: &Self, node: &Node) ?&Node { + pub fn pushFirst(self: *Self, node: *Node) ?*Node { node.next = null; - return @cmpxchgStrong(?&Node, &self.root, null, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst); + return @cmpxchgStrong(?*Node, &self.root, null, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst); } - pub fn push(self: &Self, node: &Node) void { - var root = @atomicLoad(?&Node, &self.root, AtomicOrder.SeqCst); + pub fn push(self: *Self, node: *Node) void { + var root = @atomicLoad(?*Node, &self.root, AtomicOrder.SeqCst); while (true) { node.next = root; - root = @cmpxchgWeak(?&Node, &self.root, root, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? break; + root = @cmpxchgWeak(?*Node, &self.root, root, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? break; } } - pub fn pop(self: &Self) ?&Node { - var root = @atomicLoad(?&Node, &self.root, AtomicOrder.SeqCst); + pub fn pop(self: *Self) ?*Node { + var root = @atomicLoad(?*Node, &self.root, AtomicOrder.SeqCst); while (true) { - root = @cmpxchgWeak(?&Node, &self.root, root, (root ?? return null).next, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? return root; + root = @cmpxchgWeak(?*Node, &self.root, root, (root ?? return null).next, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? return root; } } - pub fn isEmpty(self: &Self) bool { - return @atomicLoad(?&Node, &self.root, AtomicOrder.SeqCst) == null; + pub fn isEmpty(self: *Self) bool { + return @atomicLoad(?*Node, &self.root, AtomicOrder.SeqCst) == null; } }; } const std = @import("std"); const Context = struct { - allocator: &std.mem.Allocator, - stack: &Stack(i32), + allocator: *std.mem.Allocator, + stack: *Stack(i32), put_sum: isize, get_sum: isize, get_count: usize, @@ -82,11 +82,11 @@ test "std.atomic.stack" { .get_count = 0, }; - var putters: [put_thread_count]&std.os.Thread = undefined; + var putters: [put_thread_count]*std.os.Thread = undefined; for (putters) |*t| { t.* = try std.os.spawnThread(&context, startPuts); } - var getters: [put_thread_count]&std.os.Thread = undefined; + var getters: [put_thread_count]*std.os.Thread = undefined; for (getters) |*t| { t.* = try std.os.spawnThread(&context, startGets); } @@ -101,7 +101,7 @@ test "std.atomic.stack" { std.debug.assert(context.get_count == puts_per_thread * put_thread_count); } -fn startPuts(ctx: &Context) u8 { +fn startPuts(ctx: *Context) u8 { var put_count: usize = puts_per_thread; var r = std.rand.DefaultPrng.init(0xdeadbeef); while (put_count != 0) : (put_count -= 1) { @@ -115,7 +115,7 @@ fn startPuts(ctx: &Context) u8 { return 0; } -fn startGets(ctx: &Context) u8 { +fn startGets(ctx: *Context) u8 { while (true) { while (ctx.stack.pop()) |node| { std.os.time.sleep(0, 1); // let the os scheduler be our fuzz diff --git a/std/base64.zig b/std/base64.zig index 204628a405..d27bcbd201 100644 --- a/std/base64.zig +++ b/std/base64.zig @@ -32,7 +32,7 @@ pub const Base64Encoder = struct { } /// dest.len must be what you get from ::calcSize. - pub fn encode(encoder: &const Base64Encoder, dest: []u8, source: []const u8) void { + pub fn encode(encoder: *const Base64Encoder, dest: []u8, source: []const u8) void { assert(dest.len == Base64Encoder.calcSize(source.len)); var i: usize = 0; @@ -107,7 +107,7 @@ pub const Base64Decoder = struct { } /// If the encoded buffer is detected to be invalid, returns error.InvalidPadding. 
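For orientation, a sketch, not taken from the patch, of allocating and encoding with the *const self convention shown above. encode and Base64Encoder.calcSize are the functions from this hunk; the standard_encoder instance name and the std.base64 import path are assumptions.

const std = @import("std");
const base64 = std.base64; // assumed import path

// Hypothetical helper: encode `source` into a freshly allocated buffer.
fn encodeAlloc(allocator: *std.mem.Allocator, source: []const u8) ![]u8 {
    const dest = try allocator.alloc(u8, base64.Base64Encoder.calcSize(source.len));
    // `standard_encoder` is assumed to be a pre-built Base64Encoder in std.base64.
    base64.standard_encoder.encode(dest, source);
    return dest;
}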
- pub fn calcSize(decoder: &const Base64Decoder, source: []const u8) !usize { + pub fn calcSize(decoder: *const Base64Decoder, source: []const u8) !usize { if (source.len % 4 != 0) return error.InvalidPadding; return calcDecodedSizeExactUnsafe(source, decoder.pad_char); } @@ -115,7 +115,7 @@ pub const Base64Decoder = struct { /// dest.len must be what you get from ::calcSize. /// invalid characters result in error.InvalidCharacter. /// invalid padding results in error.InvalidPadding. - pub fn decode(decoder: &const Base64Decoder, dest: []u8, source: []const u8) !void { + pub fn decode(decoder: *const Base64Decoder, dest: []u8, source: []const u8) !void { assert(dest.len == (decoder.calcSize(source) catch unreachable)); assert(source.len % 4 == 0); @@ -181,7 +181,7 @@ pub const Base64DecoderWithIgnore = struct { /// Invalid padding results in error.InvalidPadding. /// Decoding more data than can fit in dest results in error.OutputTooSmall. See also ::calcSizeUpperBound. /// Returns the number of bytes writen to dest. - pub fn decode(decoder_with_ignore: &const Base64DecoderWithIgnore, dest: []u8, source: []const u8) !usize { + pub fn decode(decoder_with_ignore: *const Base64DecoderWithIgnore, dest: []u8, source: []const u8) !usize { const decoder = &decoder_with_ignore.decoder; var src_cursor: usize = 0; @@ -290,13 +290,13 @@ pub const Base64DecoderUnsafe = struct { } /// The source buffer must be valid. - pub fn calcSize(decoder: &const Base64DecoderUnsafe, source: []const u8) usize { + pub fn calcSize(decoder: *const Base64DecoderUnsafe, source: []const u8) usize { return calcDecodedSizeExactUnsafe(source, decoder.pad_char); } /// dest.len must be what you get from ::calcDecodedSizeExactUnsafe. /// invalid characters or padding will result in undefined values. - pub fn decode(decoder: &const Base64DecoderUnsafe, dest: []u8, source: []const u8) void { + pub fn decode(decoder: *const Base64DecoderUnsafe, dest: []u8, source: []const u8) void { assert(dest.len == decoder.calcSize(source)); var src_index: usize = 0; diff --git a/std/buf_map.zig b/std/buf_map.zig index 930fc36a78..22d821ae7b 100644 --- a/std/buf_map.zig +++ b/std/buf_map.zig @@ -11,12 +11,12 @@ pub const BufMap = struct { const BufMapHashMap = HashMap([]const u8, []const u8, mem.hash_slice_u8, mem.eql_slice_u8); - pub fn init(allocator: &Allocator) BufMap { + pub fn init(allocator: *Allocator) BufMap { var self = BufMap{ .hash_map = BufMapHashMap.init(allocator) }; return self; } - pub fn deinit(self: &const BufMap) void { + pub fn deinit(self: *const BufMap) void { var it = self.hash_map.iterator(); while (true) { const entry = it.next() ?? break; @@ -27,7 +27,7 @@ pub const BufMap = struct { self.hash_map.deinit(); } - pub fn set(self: &BufMap, key: []const u8, value: []const u8) !void { + pub fn set(self: *BufMap, key: []const u8, value: []const u8) !void { self.delete(key); const key_copy = try self.copy(key); errdefer self.free(key_copy); @@ -36,30 +36,30 @@ pub const BufMap = struct { _ = try self.hash_map.put(key_copy, value_copy); } - pub fn get(self: &const BufMap, key: []const u8) ?[]const u8 { + pub fn get(self: *const BufMap, key: []const u8) ?[]const u8 { const entry = self.hash_map.get(key) ?? return null; return entry.value; } - pub fn delete(self: &BufMap, key: []const u8) void { + pub fn delete(self: *BufMap, key: []const u8) void { const entry = self.hash_map.remove(key) ?? 
return; self.free(entry.key); self.free(entry.value); } - pub fn count(self: &const BufMap) usize { + pub fn count(self: *const BufMap) usize { return self.hash_map.count(); } - pub fn iterator(self: &const BufMap) BufMapHashMap.Iterator { + pub fn iterator(self: *const BufMap) BufMapHashMap.Iterator { return self.hash_map.iterator(); } - fn free(self: &const BufMap, value: []const u8) void { + fn free(self: *const BufMap, value: []const u8) void { self.hash_map.allocator.free(value); } - fn copy(self: &const BufMap, value: []const u8) ![]const u8 { + fn copy(self: *const BufMap, value: []const u8) ![]const u8 { return mem.dupe(self.hash_map.allocator, u8, value); } }; diff --git a/std/buf_set.zig b/std/buf_set.zig index c5a80e16fb..03a050ed8b 100644 --- a/std/buf_set.zig +++ b/std/buf_set.zig @@ -9,12 +9,12 @@ pub const BufSet = struct { const BufSetHashMap = HashMap([]const u8, void, mem.hash_slice_u8, mem.eql_slice_u8); - pub fn init(a: &Allocator) BufSet { + pub fn init(a: *Allocator) BufSet { var self = BufSet{ .hash_map = BufSetHashMap.init(a) }; return self; } - pub fn deinit(self: &const BufSet) void { + pub fn deinit(self: *const BufSet) void { var it = self.hash_map.iterator(); while (true) { const entry = it.next() ?? break; @@ -24,7 +24,7 @@ pub const BufSet = struct { self.hash_map.deinit(); } - pub fn put(self: &BufSet, key: []const u8) !void { + pub fn put(self: *BufSet, key: []const u8) !void { if (self.hash_map.get(key) == null) { const key_copy = try self.copy(key); errdefer self.free(key_copy); @@ -32,28 +32,28 @@ pub const BufSet = struct { } } - pub fn delete(self: &BufSet, key: []const u8) void { + pub fn delete(self: *BufSet, key: []const u8) void { const entry = self.hash_map.remove(key) ?? return; self.free(entry.key); } - pub fn count(self: &const BufSet) usize { + pub fn count(self: *const BufSet) usize { return self.hash_map.count(); } - pub fn iterator(self: &const BufSet) BufSetHashMap.Iterator { + pub fn iterator(self: *const BufSet) BufSetHashMap.Iterator { return self.hash_map.iterator(); } - pub fn allocator(self: &const BufSet) &Allocator { + pub fn allocator(self: *const BufSet) *Allocator { return self.hash_map.allocator; } - fn free(self: &const BufSet, value: []const u8) void { + fn free(self: *const BufSet, value: []const u8) void { self.hash_map.allocator.free(value); } - fn copy(self: &const BufSet, value: []const u8) ![]const u8 { + fn copy(self: *const BufSet, value: []const u8) ![]const u8 { const result = try self.hash_map.allocator.alloc(u8, value.len); mem.copy(u8, result, value); return result; diff --git a/std/buffer.zig b/std/buffer.zig index 90d63719e3..305746e183 100644 --- a/std/buffer.zig +++ b/std/buffer.zig @@ -12,14 +12,14 @@ pub const Buffer = struct { list: ArrayList(u8), /// Must deinitialize with deinit. - pub fn init(allocator: &Allocator, m: []const u8) !Buffer { + pub fn init(allocator: *Allocator, m: []const u8) !Buffer { var self = try initSize(allocator, m.len); mem.copy(u8, self.list.items, m); return self; } /// Must deinitialize with deinit. 
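A small sketch, not part of the patch, of the converted Buffer API with *Allocator and *Buffer parameters. init, append, and deinit come from the surrounding hunks; the helper and the std.Buffer import path are assumptions.

const std = @import("std");
const Buffer = std.Buffer; // assumed export path for this file's Buffer

// Hypothetical helper: build "hello, <name>". The caller owns the result
// and must deinitialize it with deinit().
fn greeting(allocator: *std.mem.Allocator, name: []const u8) !Buffer {
    var buf = try Buffer.init(allocator, "hello, ");
    errdefer buf.deinit();
    try buf.append(name);
    return buf;
}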
- pub fn initSize(allocator: &Allocator, size: usize) !Buffer { + pub fn initSize(allocator: *Allocator, size: usize) !Buffer { var self = initNull(allocator); try self.resize(size); return self; @@ -30,19 +30,19 @@ pub const Buffer = struct { /// * ::replaceContents /// * ::replaceContentsBuffer /// * ::resize - pub fn initNull(allocator: &Allocator) Buffer { + pub fn initNull(allocator: *Allocator) Buffer { return Buffer{ .list = ArrayList(u8).init(allocator) }; } /// Must deinitialize with deinit. - pub fn initFromBuffer(buffer: &const Buffer) !Buffer { + pub fn initFromBuffer(buffer: *const Buffer) !Buffer { return Buffer.init(buffer.list.allocator, buffer.toSliceConst()); } /// Buffer takes ownership of the passed in slice. The slice must have been /// allocated with `allocator`. /// Must deinitialize with deinit. - pub fn fromOwnedSlice(allocator: &Allocator, slice: []u8) Buffer { + pub fn fromOwnedSlice(allocator: *Allocator, slice: []u8) Buffer { var self = Buffer{ .list = ArrayList(u8).fromOwnedSlice(allocator, slice) }; self.list.append(0); return self; @@ -50,79 +50,79 @@ pub const Buffer = struct { /// The caller owns the returned memory. The Buffer becomes null and /// is safe to `deinit`. - pub fn toOwnedSlice(self: &Buffer) []u8 { + pub fn toOwnedSlice(self: *Buffer) []u8 { const allocator = self.list.allocator; const result = allocator.shrink(u8, self.list.items, self.len()); self.* = initNull(allocator); return result; } - pub fn deinit(self: &Buffer) void { + pub fn deinit(self: *Buffer) void { self.list.deinit(); } - pub fn toSlice(self: &const Buffer) []u8 { + pub fn toSlice(self: *const Buffer) []u8 { return self.list.toSlice()[0..self.len()]; } - pub fn toSliceConst(self: &const Buffer) []const u8 { + pub fn toSliceConst(self: *const Buffer) []const u8 { return self.list.toSliceConst()[0..self.len()]; } - pub fn shrink(self: &Buffer, new_len: usize) void { + pub fn shrink(self: *Buffer, new_len: usize) void { assert(new_len <= self.len()); self.list.shrink(new_len + 1); self.list.items[self.len()] = 0; } - pub fn resize(self: &Buffer, new_len: usize) !void { + pub fn resize(self: *Buffer, new_len: usize) !void { try self.list.resize(new_len + 1); self.list.items[self.len()] = 0; } - pub fn isNull(self: &const Buffer) bool { + pub fn isNull(self: *const Buffer) bool { return self.list.len == 0; } - pub fn len(self: &const Buffer) usize { + pub fn len(self: *const Buffer) usize { return self.list.len - 1; } - pub fn append(self: &Buffer, m: []const u8) !void { + pub fn append(self: *Buffer, m: []const u8) !void { const old_len = self.len(); try self.resize(old_len + m.len); mem.copy(u8, self.list.toSlice()[old_len..], m); } - pub fn appendByte(self: &Buffer, byte: u8) !void { + pub fn appendByte(self: *Buffer, byte: u8) !void { const old_len = self.len(); try self.resize(old_len + 1); self.list.toSlice()[old_len] = byte; } - pub fn eql(self: &const Buffer, m: []const u8) bool { + pub fn eql(self: *const Buffer, m: []const u8) bool { return mem.eql(u8, self.toSliceConst(), m); } - pub fn startsWith(self: &const Buffer, m: []const u8) bool { + pub fn startsWith(self: *const Buffer, m: []const u8) bool { if (self.len() < m.len) return false; return mem.eql(u8, self.list.items[0..m.len], m); } - pub fn endsWith(self: &const Buffer, m: []const u8) bool { + pub fn endsWith(self: *const Buffer, m: []const u8) bool { const l = self.len(); if (l < m.len) return false; const start = l - m.len; return mem.eql(u8, self.list.items[start..l], m); } - pub fn replaceContents(self: 
&const Buffer, m: []const u8) !void { + pub fn replaceContents(self: *const Buffer, m: []const u8) !void { try self.resize(m.len); mem.copy(u8, self.list.toSlice(), m); } /// For passing to C functions. - pub fn ptr(self: &const Buffer) &u8 { + pub fn ptr(self: *const Buffer) *u8 { return self.list.items.ptr; } }; diff --git a/std/build.zig b/std/build.zig index 9a6e17f728..fed02e0815 100644 --- a/std/build.zig +++ b/std/build.zig @@ -20,7 +20,7 @@ pub const Builder = struct { install_tls: TopLevelStep, have_uninstall_step: bool, have_install_step: bool, - allocator: &Allocator, + allocator: *Allocator, lib_paths: ArrayList([]const u8), include_paths: ArrayList([]const u8), rpaths: ArrayList([]const u8), @@ -36,9 +36,9 @@ pub const Builder = struct { verbose_cimport: bool, invalid_user_input: bool, zig_exe: []const u8, - default_step: &Step, + default_step: *Step, env_map: BufMap, - top_level_steps: ArrayList(&TopLevelStep), + top_level_steps: ArrayList(*TopLevelStep), prefix: []const u8, search_prefixes: ArrayList([]const u8), lib_dir: []const u8, @@ -82,7 +82,7 @@ pub const Builder = struct { description: []const u8, }; - pub fn init(allocator: &Allocator, zig_exe: []const u8, build_root: []const u8, cache_root: []const u8) Builder { + pub fn init(allocator: *Allocator, zig_exe: []const u8, build_root: []const u8, cache_root: []const u8) Builder { var self = Builder{ .zig_exe = zig_exe, .build_root = build_root, @@ -102,7 +102,7 @@ pub const Builder = struct { .user_input_options = UserInputOptionsMap.init(allocator), .available_options_map = AvailableOptionsMap.init(allocator), .available_options_list = ArrayList(AvailableOption).init(allocator), - .top_level_steps = ArrayList(&TopLevelStep).init(allocator), + .top_level_steps = ArrayList(*TopLevelStep).init(allocator), .default_step = undefined, .env_map = os.getEnvMap(allocator) catch unreachable, .prefix = undefined, @@ -127,7 +127,7 @@ pub const Builder = struct { return self; } - pub fn deinit(self: &Builder) void { + pub fn deinit(self: *Builder) void { self.lib_paths.deinit(); self.include_paths.deinit(); self.rpaths.deinit(); @@ -135,81 +135,81 @@ pub const Builder = struct { self.top_level_steps.deinit(); } - pub fn setInstallPrefix(self: &Builder, maybe_prefix: ?[]const u8) void { + pub fn setInstallPrefix(self: *Builder, maybe_prefix: ?[]const u8) void { self.prefix = maybe_prefix ?? 
"/usr/local"; // TODO better default self.lib_dir = os.path.join(self.allocator, self.prefix, "lib") catch unreachable; self.exe_dir = os.path.join(self.allocator, self.prefix, "bin") catch unreachable; } - pub fn addExecutable(self: &Builder, name: []const u8, root_src: ?[]const u8) &LibExeObjStep { + pub fn addExecutable(self: *Builder, name: []const u8, root_src: ?[]const u8) *LibExeObjStep { return LibExeObjStep.createExecutable(self, name, root_src); } - pub fn addObject(self: &Builder, name: []const u8, root_src: []const u8) &LibExeObjStep { + pub fn addObject(self: *Builder, name: []const u8, root_src: []const u8) *LibExeObjStep { return LibExeObjStep.createObject(self, name, root_src); } - pub fn addSharedLibrary(self: &Builder, name: []const u8, root_src: ?[]const u8, ver: &const Version) &LibExeObjStep { + pub fn addSharedLibrary(self: *Builder, name: []const u8, root_src: ?[]const u8, ver: *const Version) *LibExeObjStep { return LibExeObjStep.createSharedLibrary(self, name, root_src, ver); } - pub fn addStaticLibrary(self: &Builder, name: []const u8, root_src: ?[]const u8) &LibExeObjStep { + pub fn addStaticLibrary(self: *Builder, name: []const u8, root_src: ?[]const u8) *LibExeObjStep { return LibExeObjStep.createStaticLibrary(self, name, root_src); } - pub fn addTest(self: &Builder, root_src: []const u8) &TestStep { + pub fn addTest(self: *Builder, root_src: []const u8) *TestStep { const test_step = self.allocator.create(TestStep) catch unreachable; test_step.* = TestStep.init(self, root_src); return test_step; } - pub fn addAssemble(self: &Builder, name: []const u8, src: []const u8) &LibExeObjStep { + pub fn addAssemble(self: *Builder, name: []const u8, src: []const u8) *LibExeObjStep { const obj_step = LibExeObjStep.createObject(self, name, null); obj_step.addAssemblyFile(src); return obj_step; } - pub fn addCStaticLibrary(self: &Builder, name: []const u8) &LibExeObjStep { + pub fn addCStaticLibrary(self: *Builder, name: []const u8) *LibExeObjStep { return LibExeObjStep.createCStaticLibrary(self, name); } - pub fn addCSharedLibrary(self: &Builder, name: []const u8, ver: &const Version) &LibExeObjStep { + pub fn addCSharedLibrary(self: *Builder, name: []const u8, ver: *const Version) *LibExeObjStep { return LibExeObjStep.createCSharedLibrary(self, name, ver); } - pub fn addCExecutable(self: &Builder, name: []const u8) &LibExeObjStep { + pub fn addCExecutable(self: *Builder, name: []const u8) *LibExeObjStep { return LibExeObjStep.createCExecutable(self, name); } - pub fn addCObject(self: &Builder, name: []const u8, src: []const u8) &LibExeObjStep { + pub fn addCObject(self: *Builder, name: []const u8, src: []const u8) *LibExeObjStep { return LibExeObjStep.createCObject(self, name, src); } /// ::argv is copied. - pub fn addCommand(self: &Builder, cwd: ?[]const u8, env_map: &const BufMap, argv: []const []const u8) &CommandStep { + pub fn addCommand(self: *Builder, cwd: ?[]const u8, env_map: *const BufMap, argv: []const []const u8) *CommandStep { return CommandStep.create(self, cwd, env_map, argv); } - pub fn addWriteFile(self: &Builder, file_path: []const u8, data: []const u8) &WriteFileStep { + pub fn addWriteFile(self: *Builder, file_path: []const u8, data: []const u8) *WriteFileStep { const write_file_step = self.allocator.create(WriteFileStep) catch unreachable; write_file_step.* = WriteFileStep.init(self, file_path, data); return write_file_step; } - pub fn addLog(self: &Builder, comptime format: []const u8, args: ...) 
&LogStep { + pub fn addLog(self: *Builder, comptime format: []const u8, args: ...) *LogStep { const data = self.fmt(format, args); const log_step = self.allocator.create(LogStep) catch unreachable; log_step.* = LogStep.init(self, data); return log_step; } - pub fn addRemoveDirTree(self: &Builder, dir_path: []const u8) &RemoveDirStep { + pub fn addRemoveDirTree(self: *Builder, dir_path: []const u8) *RemoveDirStep { const remove_dir_step = self.allocator.create(RemoveDirStep) catch unreachable; remove_dir_step.* = RemoveDirStep.init(self, dir_path); return remove_dir_step; } - pub fn version(self: &const Builder, major: u32, minor: u32, patch: u32) Version { + pub fn version(self: *const Builder, major: u32, minor: u32, patch: u32) Version { return Version{ .major = major, .minor = minor, @@ -217,20 +217,20 @@ pub const Builder = struct { }; } - pub fn addCIncludePath(self: &Builder, path: []const u8) void { + pub fn addCIncludePath(self: *Builder, path: []const u8) void { self.include_paths.append(path) catch unreachable; } - pub fn addRPath(self: &Builder, path: []const u8) void { + pub fn addRPath(self: *Builder, path: []const u8) void { self.rpaths.append(path) catch unreachable; } - pub fn addLibPath(self: &Builder, path: []const u8) void { + pub fn addLibPath(self: *Builder, path: []const u8) void { self.lib_paths.append(path) catch unreachable; } - pub fn make(self: &Builder, step_names: []const []const u8) !void { - var wanted_steps = ArrayList(&Step).init(self.allocator); + pub fn make(self: *Builder, step_names: []const []const u8) !void { + var wanted_steps = ArrayList(*Step).init(self.allocator); defer wanted_steps.deinit(); if (step_names.len == 0) { @@ -247,7 +247,7 @@ pub const Builder = struct { } } - pub fn getInstallStep(self: &Builder) &Step { + pub fn getInstallStep(self: *Builder) *Step { if (self.have_install_step) return &self.install_tls.step; self.top_level_steps.append(&self.install_tls) catch unreachable; @@ -255,7 +255,7 @@ pub const Builder = struct { return &self.install_tls.step; } - pub fn getUninstallStep(self: &Builder) &Step { + pub fn getUninstallStep(self: *Builder) *Step { if (self.have_uninstall_step) return &self.uninstall_tls.step; self.top_level_steps.append(&self.uninstall_tls) catch unreachable; @@ -263,7 +263,7 @@ pub const Builder = struct { return &self.uninstall_tls.step; } - fn makeUninstall(uninstall_step: &Step) error!void { + fn makeUninstall(uninstall_step: *Step) error!void { const uninstall_tls = @fieldParentPtr(TopLevelStep, "step", uninstall_step); const self = @fieldParentPtr(Builder, "uninstall_tls", uninstall_tls); @@ -277,7 +277,7 @@ pub const Builder = struct { // TODO remove empty directories } - fn makeOneStep(self: &Builder, s: &Step) error!void { + fn makeOneStep(self: *Builder, s: *Step) error!void { if (s.loop_flag) { warn("Dependency loop detected:\n {}\n", s.name); return error.DependencyLoopDetected; @@ -298,7 +298,7 @@ pub const Builder = struct { try s.make(); } - fn getTopLevelStepByName(self: &Builder, name: []const u8) !&Step { + fn getTopLevelStepByName(self: *Builder, name: []const u8) !*Step { for (self.top_level_steps.toSliceConst()) |top_level_step| { if (mem.eql(u8, top_level_step.step.name, name)) { return &top_level_step.step; @@ -308,7 +308,7 @@ pub const Builder = struct { return error.InvalidStepName; } - fn processNixOSEnvVars(self: &Builder) void { + fn processNixOSEnvVars(self: *Builder) void { if (os.getEnvVarOwned(self.allocator, "NIX_CFLAGS_COMPILE")) |nix_cflags_compile| { var it = 
mem.split(nix_cflags_compile, " "); while (true) { @@ -350,7 +350,7 @@ pub const Builder = struct { } } - pub fn option(self: &Builder, comptime T: type, name: []const u8, description: []const u8) ?T { + pub fn option(self: *Builder, comptime T: type, name: []const u8, description: []const u8) ?T { const type_id = comptime typeToEnum(T); const available_option = AvailableOption{ .name = name, @@ -403,7 +403,7 @@ pub const Builder = struct { } } - pub fn step(self: &Builder, name: []const u8, description: []const u8) &Step { + pub fn step(self: *Builder, name: []const u8, description: []const u8) *Step { const step_info = self.allocator.create(TopLevelStep) catch unreachable; step_info.* = TopLevelStep{ .step = Step.initNoOp(name, self.allocator), @@ -413,7 +413,7 @@ pub const Builder = struct { return &step_info.step; } - pub fn standardReleaseOptions(self: &Builder) builtin.Mode { + pub fn standardReleaseOptions(self: *Builder) builtin.Mode { if (self.release_mode) |mode| return mode; const release_safe = self.option(bool, "release-safe", "optimizations on and safety on") ?? false; @@ -429,7 +429,7 @@ pub const Builder = struct { return mode; } - pub fn addUserInputOption(self: &Builder, name: []const u8, value: []const u8) bool { + pub fn addUserInputOption(self: *Builder, name: []const u8, value: []const u8) bool { if (self.user_input_options.put(name, UserInputOption{ .name = name, .value = UserValue{ .Scalar = value }, @@ -466,7 +466,7 @@ pub const Builder = struct { return false; } - pub fn addUserInputFlag(self: &Builder, name: []const u8) bool { + pub fn addUserInputFlag(self: *Builder, name: []const u8) bool { if (self.user_input_options.put(name, UserInputOption{ .name = name, .value = UserValue{ .Flag = {} }, @@ -500,7 +500,7 @@ pub const Builder = struct { }; } - fn markInvalidUserInput(self: &Builder) void { + fn markInvalidUserInput(self: *Builder) void { self.invalid_user_input = true; } @@ -514,7 +514,7 @@ pub const Builder = struct { }; } - pub fn validateUserInputDidItFail(self: &Builder) bool { + pub fn validateUserInputDidItFail(self: *Builder) bool { // make sure all args are used var it = self.user_input_options.iterator(); while (true) { @@ -528,7 +528,7 @@ pub const Builder = struct { return self.invalid_user_input; } - fn spawnChild(self: &Builder, argv: []const []const u8) !void { + fn spawnChild(self: *Builder, argv: []const []const u8) !void { return self.spawnChildEnvMap(null, &self.env_map, argv); } @@ -540,7 +540,7 @@ pub const Builder = struct { warn("\n"); } - fn spawnChildEnvMap(self: &Builder, cwd: ?[]const u8, env_map: &const BufMap, argv: []const []const u8) !void { + fn spawnChildEnvMap(self: *Builder, cwd: ?[]const u8, env_map: *const BufMap, argv: []const []const u8) !void { if (self.verbose) { printCmd(cwd, argv); } @@ -573,28 +573,28 @@ pub const Builder = struct { } } - pub fn makePath(self: &Builder, path: []const u8) !void { + pub fn makePath(self: *Builder, path: []const u8) !void { os.makePath(self.allocator, self.pathFromRoot(path)) catch |err| { warn("Unable to create path {}: {}\n", path, @errorName(err)); return err; }; } - pub fn installArtifact(self: &Builder, artifact: &LibExeObjStep) void { + pub fn installArtifact(self: *Builder, artifact: *LibExeObjStep) void { self.getInstallStep().dependOn(&self.addInstallArtifact(artifact).step); } - pub fn addInstallArtifact(self: &Builder, artifact: &LibExeObjStep) &InstallArtifactStep { + pub fn addInstallArtifact(self: *Builder, artifact: *LibExeObjStep) *InstallArtifactStep { return 
InstallArtifactStep.create(self, artifact); } ///::dest_rel_path is relative to prefix path or it can be an absolute path - pub fn installFile(self: &Builder, src_path: []const u8, dest_rel_path: []const u8) void { + pub fn installFile(self: *Builder, src_path: []const u8, dest_rel_path: []const u8) void { self.getInstallStep().dependOn(&self.addInstallFile(src_path, dest_rel_path).step); } ///::dest_rel_path is relative to prefix path or it can be an absolute path - pub fn addInstallFile(self: &Builder, src_path: []const u8, dest_rel_path: []const u8) &InstallFileStep { + pub fn addInstallFile(self: *Builder, src_path: []const u8, dest_rel_path: []const u8) *InstallFileStep { const full_dest_path = os.path.resolve(self.allocator, self.prefix, dest_rel_path) catch unreachable; self.pushInstalledFile(full_dest_path); @@ -603,16 +603,16 @@ pub const Builder = struct { return install_step; } - pub fn pushInstalledFile(self: &Builder, full_path: []const u8) void { + pub fn pushInstalledFile(self: *Builder, full_path: []const u8) void { _ = self.getUninstallStep(); self.installed_files.append(full_path) catch unreachable; } - fn copyFile(self: &Builder, source_path: []const u8, dest_path: []const u8) !void { + fn copyFile(self: *Builder, source_path: []const u8, dest_path: []const u8) !void { return self.copyFileMode(source_path, dest_path, os.default_file_mode); } - fn copyFileMode(self: &Builder, source_path: []const u8, dest_path: []const u8, mode: os.FileMode) !void { + fn copyFileMode(self: *Builder, source_path: []const u8, dest_path: []const u8, mode: os.FileMode) !void { if (self.verbose) { warn("cp {} {}\n", source_path, dest_path); } @@ -629,15 +629,15 @@ pub const Builder = struct { }; } - fn pathFromRoot(self: &Builder, rel_path: []const u8) []u8 { + fn pathFromRoot(self: *Builder, rel_path: []const u8) []u8 { return os.path.resolve(self.allocator, self.build_root, rel_path) catch unreachable; } - pub fn fmt(self: &Builder, comptime format: []const u8, args: ...) []u8 { + pub fn fmt(self: *Builder, comptime format: []const u8, args: ...) 
[]u8 { return fmt_lib.allocPrint(self.allocator, format, args) catch unreachable; } - fn getCCExe(self: &Builder) []const u8 { + fn getCCExe(self: *Builder) []const u8 { if (builtin.environ == builtin.Environ.msvc) { return "cl.exe"; } else { @@ -645,7 +645,7 @@ pub const Builder = struct { } } - pub fn findProgram(self: &Builder, names: []const []const u8, paths: []const []const u8) ![]const u8 { + pub fn findProgram(self: *Builder, names: []const []const u8, paths: []const []const u8) ![]const u8 { // TODO report error for ambiguous situations const exe_extension = (Target{ .Native = {} }).exeFileExt(); for (self.search_prefixes.toSliceConst()) |search_prefix| { @@ -693,7 +693,7 @@ pub const Builder = struct { return error.FileNotFound; } - pub fn exec(self: &Builder, argv: []const []const u8) ![]u8 { + pub fn exec(self: *Builder, argv: []const []const u8) ![]u8 { const max_output_size = 100 * 1024; const result = try os.ChildProcess.exec(self.allocator, argv, null, null, max_output_size); switch (result.term) { @@ -715,7 +715,7 @@ pub const Builder = struct { } } - pub fn addSearchPrefix(self: &Builder, search_prefix: []const u8) void { + pub fn addSearchPrefix(self: *Builder, search_prefix: []const u8) void { self.search_prefixes.append(search_prefix) catch unreachable; } }; @@ -736,7 +736,7 @@ pub const Target = union(enum) { Native: void, Cross: CrossTarget, - pub fn oFileExt(self: &const Target) []const u8 { + pub fn oFileExt(self: *const Target) []const u8 { const environ = switch (self.*) { Target.Native => builtin.environ, Target.Cross => |t| t.environ, @@ -747,49 +747,49 @@ pub const Target = union(enum) { }; } - pub fn exeFileExt(self: &const Target) []const u8 { + pub fn exeFileExt(self: *const Target) []const u8 { return switch (self.getOs()) { builtin.Os.windows => ".exe", else => "", }; } - pub fn libFileExt(self: &const Target) []const u8 { + pub fn libFileExt(self: *const Target) []const u8 { return switch (self.getOs()) { builtin.Os.windows => ".lib", else => ".a", }; } - pub fn getOs(self: &const Target) builtin.Os { + pub fn getOs(self: *const Target) builtin.Os { return switch (self.*) { Target.Native => builtin.os, Target.Cross => |t| t.os, }; } - pub fn isDarwin(self: &const Target) bool { + pub fn isDarwin(self: *const Target) bool { return switch (self.getOs()) { builtin.Os.ios, builtin.Os.macosx => true, else => false, }; } - pub fn isWindows(self: &const Target) bool { + pub fn isWindows(self: *const Target) bool { return switch (self.getOs()) { builtin.Os.windows => true, else => false, }; } - pub fn wantSharedLibSymLinks(self: &const Target) bool { + pub fn wantSharedLibSymLinks(self: *const Target) bool { return !self.isWindows(); } }; pub const LibExeObjStep = struct { step: Step, - builder: &Builder, + builder: *Builder, name: []const u8, target: Target, link_libs: BufSet, @@ -836,56 +836,56 @@ pub const LibExeObjStep = struct { Obj, }; - pub fn createSharedLibrary(builder: &Builder, name: []const u8, root_src: ?[]const u8, ver: &const Version) &LibExeObjStep { + pub fn createSharedLibrary(builder: *Builder, name: []const u8, root_src: ?[]const u8, ver: *const Version) *LibExeObjStep { const self = builder.allocator.create(LibExeObjStep) catch unreachable; self.* = initExtraArgs(builder, name, root_src, Kind.Lib, false, ver); return self; } - pub fn createCSharedLibrary(builder: &Builder, name: []const u8, version: &const Version) &LibExeObjStep { + pub fn createCSharedLibrary(builder: *Builder, name: []const u8, version: *const Version) *LibExeObjStep { 
const self = builder.allocator.create(LibExeObjStep) catch unreachable; self.* = initC(builder, name, Kind.Lib, version, false); return self; } - pub fn createStaticLibrary(builder: &Builder, name: []const u8, root_src: ?[]const u8) &LibExeObjStep { + pub fn createStaticLibrary(builder: *Builder, name: []const u8, root_src: ?[]const u8) *LibExeObjStep { const self = builder.allocator.create(LibExeObjStep) catch unreachable; self.* = initExtraArgs(builder, name, root_src, Kind.Lib, true, builder.version(0, 0, 0)); return self; } - pub fn createCStaticLibrary(builder: &Builder, name: []const u8) &LibExeObjStep { + pub fn createCStaticLibrary(builder: *Builder, name: []const u8) *LibExeObjStep { const self = builder.allocator.create(LibExeObjStep) catch unreachable; self.* = initC(builder, name, Kind.Lib, builder.version(0, 0, 0), true); return self; } - pub fn createObject(builder: &Builder, name: []const u8, root_src: []const u8) &LibExeObjStep { + pub fn createObject(builder: *Builder, name: []const u8, root_src: []const u8) *LibExeObjStep { const self = builder.allocator.create(LibExeObjStep) catch unreachable; self.* = initExtraArgs(builder, name, root_src, Kind.Obj, false, builder.version(0, 0, 0)); return self; } - pub fn createCObject(builder: &Builder, name: []const u8, src: []const u8) &LibExeObjStep { + pub fn createCObject(builder: *Builder, name: []const u8, src: []const u8) *LibExeObjStep { const self = builder.allocator.create(LibExeObjStep) catch unreachable; self.* = initC(builder, name, Kind.Obj, builder.version(0, 0, 0), false); self.object_src = src; return self; } - pub fn createExecutable(builder: &Builder, name: []const u8, root_src: ?[]const u8) &LibExeObjStep { + pub fn createExecutable(builder: *Builder, name: []const u8, root_src: ?[]const u8) *LibExeObjStep { const self = builder.allocator.create(LibExeObjStep) catch unreachable; self.* = initExtraArgs(builder, name, root_src, Kind.Exe, false, builder.version(0, 0, 0)); return self; } - pub fn createCExecutable(builder: &Builder, name: []const u8) &LibExeObjStep { + pub fn createCExecutable(builder: *Builder, name: []const u8) *LibExeObjStep { const self = builder.allocator.create(LibExeObjStep) catch unreachable; self.* = initC(builder, name, Kind.Exe, builder.version(0, 0, 0), false); return self; } - fn initExtraArgs(builder: &Builder, name: []const u8, root_src: ?[]const u8, kind: Kind, static: bool, ver: &const Version) LibExeObjStep { + fn initExtraArgs(builder: *Builder, name: []const u8, root_src: ?[]const u8, kind: Kind, static: bool, ver: *const Version) LibExeObjStep { var self = LibExeObjStep{ .strip = false, .builder = builder, @@ -924,7 +924,7 @@ pub const LibExeObjStep = struct { return self; } - fn initC(builder: &Builder, name: []const u8, kind: Kind, version: &const Version, static: bool) LibExeObjStep { + fn initC(builder: *Builder, name: []const u8, kind: Kind, version: *const Version, static: bool) LibExeObjStep { var self = LibExeObjStep{ .builder = builder, .name = name, @@ -964,7 +964,7 @@ pub const LibExeObjStep = struct { return self; } - fn computeOutFileNames(self: &LibExeObjStep) void { + fn computeOutFileNames(self: *LibExeObjStep) void { switch (self.kind) { Kind.Obj => { self.out_filename = self.builder.fmt("{}{}", self.name, self.target.oFileExt()); @@ -996,7 +996,7 @@ pub const LibExeObjStep = struct { } } - pub fn setTarget(self: &LibExeObjStep, target_arch: builtin.Arch, target_os: builtin.Os, target_environ: builtin.Environ) void { + pub fn setTarget(self: *LibExeObjStep, 
target_arch: builtin.Arch, target_os: builtin.Os, target_environ: builtin.Environ) void { self.target = Target{ .Cross = CrossTarget{ .arch = target_arch, @@ -1008,16 +1008,16 @@ pub const LibExeObjStep = struct { } // TODO respect this in the C args - pub fn setLinkerScriptPath(self: &LibExeObjStep, path: []const u8) void { + pub fn setLinkerScriptPath(self: *LibExeObjStep, path: []const u8) void { self.linker_script = path; } - pub fn linkFramework(self: &LibExeObjStep, framework_name: []const u8) void { + pub fn linkFramework(self: *LibExeObjStep, framework_name: []const u8) void { assert(self.target.isDarwin()); self.frameworks.put(framework_name) catch unreachable; } - pub fn linkLibrary(self: &LibExeObjStep, lib: &LibExeObjStep) void { + pub fn linkLibrary(self: *LibExeObjStep, lib: *LibExeObjStep) void { assert(self.kind != Kind.Obj); assert(lib.kind == Kind.Lib); @@ -1038,26 +1038,26 @@ pub const LibExeObjStep = struct { } } - pub fn linkSystemLibrary(self: &LibExeObjStep, name: []const u8) void { + pub fn linkSystemLibrary(self: *LibExeObjStep, name: []const u8) void { assert(self.kind != Kind.Obj); self.link_libs.put(name) catch unreachable; } - pub fn addSourceFile(self: &LibExeObjStep, file: []const u8) void { + pub fn addSourceFile(self: *LibExeObjStep, file: []const u8) void { assert(self.kind != Kind.Obj); assert(!self.is_zig); self.source_files.append(file) catch unreachable; } - pub fn setVerboseLink(self: &LibExeObjStep, value: bool) void { + pub fn setVerboseLink(self: *LibExeObjStep, value: bool) void { self.verbose_link = value; } - pub fn setBuildMode(self: &LibExeObjStep, mode: builtin.Mode) void { + pub fn setBuildMode(self: *LibExeObjStep, mode: builtin.Mode) void { self.build_mode = mode; } - pub fn setOutputPath(self: &LibExeObjStep, file_path: []const u8) void { + pub fn setOutputPath(self: *LibExeObjStep, file_path: []const u8) void { self.output_path = file_path; // catch a common mistake @@ -1066,11 +1066,11 @@ pub const LibExeObjStep = struct { } } - pub fn getOutputPath(self: &LibExeObjStep) []const u8 { + pub fn getOutputPath(self: *LibExeObjStep) []const u8 { return if (self.output_path) |output_path| output_path else os.path.join(self.builder.allocator, self.builder.cache_root, self.out_filename) catch unreachable; } - pub fn setOutputHPath(self: &LibExeObjStep, file_path: []const u8) void { + pub fn setOutputHPath(self: *LibExeObjStep, file_path: []const u8) void { self.output_h_path = file_path; // catch a common mistake @@ -1079,21 +1079,21 @@ pub const LibExeObjStep = struct { } } - pub fn getOutputHPath(self: &LibExeObjStep) []const u8 { + pub fn getOutputHPath(self: *LibExeObjStep) []const u8 { return if (self.output_h_path) |output_h_path| output_h_path else os.path.join(self.builder.allocator, self.builder.cache_root, self.out_h_filename) catch unreachable; } - pub fn addAssemblyFile(self: &LibExeObjStep, path: []const u8) void { + pub fn addAssemblyFile(self: *LibExeObjStep, path: []const u8) void { self.assembly_files.append(path) catch unreachable; } - pub fn addObjectFile(self: &LibExeObjStep, path: []const u8) void { + pub fn addObjectFile(self: *LibExeObjStep, path: []const u8) void { assert(self.kind != Kind.Obj); self.object_files.append(path) catch unreachable; } - pub fn addObject(self: &LibExeObjStep, obj: &LibExeObjStep) void { + pub fn addObject(self: *LibExeObjStep, obj: *LibExeObjStep) void { assert(obj.kind == Kind.Obj); assert(self.kind != Kind.Obj); @@ -1110,15 +1110,15 @@ pub const LibExeObjStep = struct { 
self.include_dirs.append(self.builder.cache_root) catch unreachable; } - pub fn addIncludeDir(self: &LibExeObjStep, path: []const u8) void { + pub fn addIncludeDir(self: *LibExeObjStep, path: []const u8) void { self.include_dirs.append(path) catch unreachable; } - pub fn addLibPath(self: &LibExeObjStep, path: []const u8) void { + pub fn addLibPath(self: *LibExeObjStep, path: []const u8) void { self.lib_paths.append(path) catch unreachable; } - pub fn addPackagePath(self: &LibExeObjStep, name: []const u8, pkg_index_path: []const u8) void { + pub fn addPackagePath(self: *LibExeObjStep, name: []const u8, pkg_index_path: []const u8) void { assert(self.is_zig); self.packages.append(Pkg{ @@ -1127,23 +1127,23 @@ pub const LibExeObjStep = struct { }) catch unreachable; } - pub fn addCompileFlags(self: &LibExeObjStep, flags: []const []const u8) void { + pub fn addCompileFlags(self: *LibExeObjStep, flags: []const []const u8) void { for (flags) |flag| { self.cflags.append(flag) catch unreachable; } } - pub fn setNoStdLib(self: &LibExeObjStep, disable: bool) void { + pub fn setNoStdLib(self: *LibExeObjStep, disable: bool) void { assert(!self.is_zig); self.disable_libc = disable; } - fn make(step: &Step) !void { + fn make(step: *Step) !void { const self = @fieldParentPtr(LibExeObjStep, "step", step); return if (self.is_zig) self.makeZig() else self.makeC(); } - fn makeZig(self: &LibExeObjStep) !void { + fn makeZig(self: *LibExeObjStep) !void { const builder = self.builder; assert(self.is_zig); @@ -1309,7 +1309,7 @@ pub const LibExeObjStep = struct { } } - fn appendCompileFlags(self: &LibExeObjStep, args: &ArrayList([]const u8)) void { + fn appendCompileFlags(self: *LibExeObjStep, args: *ArrayList([]const u8)) void { if (!self.strip) { args.append("-g") catch unreachable; } @@ -1354,7 +1354,7 @@ pub const LibExeObjStep = struct { } } - fn makeC(self: &LibExeObjStep) !void { + fn makeC(self: *LibExeObjStep) !void { const builder = self.builder; const cc = builder.getCCExe(); @@ -1580,7 +1580,7 @@ pub const LibExeObjStep = struct { pub const TestStep = struct { step: Step, - builder: &Builder, + builder: *Builder, root_src: []const u8, build_mode: builtin.Mode, verbose: bool, @@ -1591,7 +1591,7 @@ pub const TestStep = struct { exec_cmd_args: ?[]const ?[]const u8, include_dirs: ArrayList([]const u8), - pub fn init(builder: &Builder, root_src: []const u8) TestStep { + pub fn init(builder: *Builder, root_src: []const u8) TestStep { const step_name = builder.fmt("test {}", root_src); return TestStep{ .step = Step.init(step_name, builder.allocator, make), @@ -1608,31 +1608,31 @@ pub const TestStep = struct { }; } - pub fn setVerbose(self: &TestStep, value: bool) void { + pub fn setVerbose(self: *TestStep, value: bool) void { self.verbose = value; } - pub fn addIncludeDir(self: &TestStep, path: []const u8) void { + pub fn addIncludeDir(self: *TestStep, path: []const u8) void { self.include_dirs.append(path) catch unreachable; } - pub fn setBuildMode(self: &TestStep, mode: builtin.Mode) void { + pub fn setBuildMode(self: *TestStep, mode: builtin.Mode) void { self.build_mode = mode; } - pub fn linkSystemLibrary(self: &TestStep, name: []const u8) void { + pub fn linkSystemLibrary(self: *TestStep, name: []const u8) void { self.link_libs.put(name) catch unreachable; } - pub fn setNamePrefix(self: &TestStep, text: []const u8) void { + pub fn setNamePrefix(self: *TestStep, text: []const u8) void { self.name_prefix = text; } - pub fn setFilter(self: &TestStep, text: ?[]const u8) void { + pub fn setFilter(self: 
*TestStep, text: ?[]const u8) void { self.filter = text; } - pub fn setTarget(self: &TestStep, target_arch: builtin.Arch, target_os: builtin.Os, target_environ: builtin.Environ) void { + pub fn setTarget(self: *TestStep, target_arch: builtin.Arch, target_os: builtin.Os, target_environ: builtin.Environ) void { self.target = Target{ .Cross = CrossTarget{ .arch = target_arch, @@ -1642,11 +1642,11 @@ pub const TestStep = struct { }; } - pub fn setExecCmd(self: &TestStep, args: []const ?[]const u8) void { + pub fn setExecCmd(self: *TestStep, args: []const ?[]const u8) void { self.exec_cmd_args = args; } - fn make(step: &Step) !void { + fn make(step: *Step) !void { const self = @fieldParentPtr(TestStep, "step", step); const builder = self.builder; @@ -1739,13 +1739,13 @@ pub const TestStep = struct { pub const CommandStep = struct { step: Step, - builder: &Builder, + builder: *Builder, argv: [][]const u8, cwd: ?[]const u8, - env_map: &const BufMap, + env_map: *const BufMap, /// ::argv is copied. - pub fn create(builder: &Builder, cwd: ?[]const u8, env_map: &const BufMap, argv: []const []const u8) &CommandStep { + pub fn create(builder: *Builder, cwd: ?[]const u8, env_map: *const BufMap, argv: []const []const u8) *CommandStep { const self = builder.allocator.create(CommandStep) catch unreachable; self.* = CommandStep{ .builder = builder, @@ -1759,7 +1759,7 @@ pub const CommandStep = struct { return self; } - fn make(step: &Step) !void { + fn make(step: *Step) !void { const self = @fieldParentPtr(CommandStep, "step", step); const cwd = if (self.cwd) |cwd| self.builder.pathFromRoot(cwd) else self.builder.build_root; @@ -1769,13 +1769,13 @@ pub const CommandStep = struct { const InstallArtifactStep = struct { step: Step, - builder: &Builder, - artifact: &LibExeObjStep, + builder: *Builder, + artifact: *LibExeObjStep, dest_file: []const u8, const Self = this; - pub fn create(builder: &Builder, artifact: &LibExeObjStep) &Self { + pub fn create(builder: *Builder, artifact: *LibExeObjStep) *Self { const self = builder.allocator.create(Self) catch unreachable; const dest_dir = switch (artifact.kind) { LibExeObjStep.Kind.Obj => unreachable, @@ -1797,7 +1797,7 @@ const InstallArtifactStep = struct { return self; } - fn make(step: &Step) !void { + fn make(step: *Step) !void { const self = @fieldParentPtr(Self, "step", step); const builder = self.builder; @@ -1818,11 +1818,11 @@ const InstallArtifactStep = struct { pub const InstallFileStep = struct { step: Step, - builder: &Builder, + builder: *Builder, src_path: []const u8, dest_path: []const u8, - pub fn init(builder: &Builder, src_path: []const u8, dest_path: []const u8) InstallFileStep { + pub fn init(builder: *Builder, src_path: []const u8, dest_path: []const u8) InstallFileStep { return InstallFileStep{ .builder = builder, .step = Step.init(builder.fmt("install {}", src_path), builder.allocator, make), @@ -1831,7 +1831,7 @@ pub const InstallFileStep = struct { }; } - fn make(step: &Step) !void { + fn make(step: *Step) !void { const self = @fieldParentPtr(InstallFileStep, "step", step); try self.builder.copyFile(self.src_path, self.dest_path); } @@ -1839,11 +1839,11 @@ pub const InstallFileStep = struct { pub const WriteFileStep = struct { step: Step, - builder: &Builder, + builder: *Builder, file_path: []const u8, data: []const u8, - pub fn init(builder: &Builder, file_path: []const u8, data: []const u8) WriteFileStep { + pub fn init(builder: *Builder, file_path: []const u8, data: []const u8) WriteFileStep { return WriteFileStep{ .builder = builder, 
.step = Step.init(builder.fmt("writefile {}", file_path), builder.allocator, make), @@ -1852,7 +1852,7 @@ pub const WriteFileStep = struct { }; } - fn make(step: &Step) !void { + fn make(step: *Step) !void { const self = @fieldParentPtr(WriteFileStep, "step", step); const full_path = self.builder.pathFromRoot(self.file_path); const full_path_dir = os.path.dirname(full_path); @@ -1869,10 +1869,10 @@ pub const WriteFileStep = struct { pub const LogStep = struct { step: Step, - builder: &Builder, + builder: *Builder, data: []const u8, - pub fn init(builder: &Builder, data: []const u8) LogStep { + pub fn init(builder: *Builder, data: []const u8) LogStep { return LogStep{ .builder = builder, .step = Step.init(builder.fmt("log {}", data), builder.allocator, make), @@ -1880,7 +1880,7 @@ pub const LogStep = struct { }; } - fn make(step: &Step) error!void { + fn make(step: *Step) error!void { const self = @fieldParentPtr(LogStep, "step", step); warn("{}", self.data); } @@ -1888,10 +1888,10 @@ pub const LogStep = struct { pub const RemoveDirStep = struct { step: Step, - builder: &Builder, + builder: *Builder, dir_path: []const u8, - pub fn init(builder: &Builder, dir_path: []const u8) RemoveDirStep { + pub fn init(builder: *Builder, dir_path: []const u8) RemoveDirStep { return RemoveDirStep{ .builder = builder, .step = Step.init(builder.fmt("RemoveDir {}", dir_path), builder.allocator, make), @@ -1899,7 +1899,7 @@ pub const RemoveDirStep = struct { }; } - fn make(step: &Step) !void { + fn make(step: *Step) !void { const self = @fieldParentPtr(RemoveDirStep, "step", step); const full_path = self.builder.pathFromRoot(self.dir_path); @@ -1912,39 +1912,39 @@ pub const RemoveDirStep = struct { pub const Step = struct { name: []const u8, - makeFn: fn (self: &Step) error!void, - dependencies: ArrayList(&Step), + makeFn: fn (self: *Step) error!void, + dependencies: ArrayList(*Step), loop_flag: bool, done_flag: bool, - pub fn init(name: []const u8, allocator: &Allocator, makeFn: fn (&Step) error!void) Step { + pub fn init(name: []const u8, allocator: *Allocator, makeFn: fn (*Step) error!void) Step { return Step{ .name = name, .makeFn = makeFn, - .dependencies = ArrayList(&Step).init(allocator), + .dependencies = ArrayList(*Step).init(allocator), .loop_flag = false, .done_flag = false, }; } - pub fn initNoOp(name: []const u8, allocator: &Allocator) Step { + pub fn initNoOp(name: []const u8, allocator: *Allocator) Step { return init(name, allocator, makeNoOp); } - pub fn make(self: &Step) !void { + pub fn make(self: *Step) !void { if (self.done_flag) return; try self.makeFn(self); self.done_flag = true; } - pub fn dependOn(self: &Step, other: &Step) void { + pub fn dependOn(self: *Step, other: *Step) void { self.dependencies.append(other) catch unreachable; } - fn makeNoOp(self: &Step) error!void {} + fn makeNoOp(self: *Step) error!void {} }; -fn doAtomicSymLinks(allocator: &Allocator, output_path: []const u8, filename_major_only: []const u8, filename_name_only: []const u8) !void { +fn doAtomicSymLinks(allocator: *Allocator, output_path: []const u8, filename_major_only: []const u8, filename_name_only: []const u8) !void { const out_dir = os.path.dirname(output_path); const out_basename = os.path.basename(output_path); // sym link for libfoo.so.1 to libfoo.so.1.2.3 diff --git a/std/c/darwin.zig b/std/c/darwin.zig index 6a33c994bf..69395e6b27 100644 --- a/std/c/darwin.zig +++ b/std/c/darwin.zig @@ -1,10 +1,10 @@ -extern "c" fn __error() &c_int; -pub extern "c" fn _NSGetExecutablePath(buf: &u8, bufsize: &u32) 
c_int; +extern "c" fn __error() *c_int; +pub extern "c" fn _NSGetExecutablePath(buf: *u8, bufsize: *u32) c_int; -pub extern "c" fn __getdirentries64(fd: c_int, buf_ptr: &u8, buf_len: usize, basep: &i64) usize; +pub extern "c" fn __getdirentries64(fd: c_int, buf_ptr: *u8, buf_len: usize, basep: *i64) usize; pub extern "c" fn mach_absolute_time() u64; -pub extern "c" fn mach_timebase_info(tinfo: ?&mach_timebase_info_data) void; +pub extern "c" fn mach_timebase_info(tinfo: ?*mach_timebase_info_data) void; pub use @import("../os/darwin_errno.zig"); diff --git a/std/c/index.zig b/std/c/index.zig index f9704f4738..114b79cdae 100644 --- a/std/c/index.zig +++ b/std/c/index.zig @@ -13,49 +13,49 @@ pub extern "c" fn abort() noreturn; pub extern "c" fn exit(code: c_int) noreturn; pub extern "c" fn isatty(fd: c_int) c_int; pub extern "c" fn close(fd: c_int) c_int; -pub extern "c" fn fstat(fd: c_int, buf: &Stat) c_int; -pub extern "c" fn @"fstat$INODE64"(fd: c_int, buf: &Stat) c_int; +pub extern "c" fn fstat(fd: c_int, buf: *Stat) c_int; +pub extern "c" fn @"fstat$INODE64"(fd: c_int, buf: *Stat) c_int; pub extern "c" fn lseek(fd: c_int, offset: isize, whence: c_int) isize; -pub extern "c" fn open(path: &const u8, oflag: c_int, ...) c_int; +pub extern "c" fn open(path: *const u8, oflag: c_int, ...) c_int; pub extern "c" fn raise(sig: c_int) c_int; -pub extern "c" fn read(fd: c_int, buf: &c_void, nbyte: usize) isize; -pub extern "c" fn stat(noalias path: &const u8, noalias buf: &Stat) c_int; -pub extern "c" fn write(fd: c_int, buf: &const c_void, nbyte: usize) isize; -pub extern "c" fn mmap(addr: ?&c_void, len: usize, prot: c_int, flags: c_int, fd: c_int, offset: isize) ?&c_void; -pub extern "c" fn munmap(addr: &c_void, len: usize) c_int; -pub extern "c" fn unlink(path: &const u8) c_int; -pub extern "c" fn getcwd(buf: &u8, size: usize) ?&u8; -pub extern "c" fn waitpid(pid: c_int, stat_loc: &c_int, options: c_int) c_int; +pub extern "c" fn read(fd: c_int, buf: *c_void, nbyte: usize) isize; +pub extern "c" fn stat(noalias path: *const u8, noalias buf: *Stat) c_int; +pub extern "c" fn write(fd: c_int, buf: *const c_void, nbyte: usize) isize; +pub extern "c" fn mmap(addr: ?*c_void, len: usize, prot: c_int, flags: c_int, fd: c_int, offset: isize) ?*c_void; +pub extern "c" fn munmap(addr: *c_void, len: usize) c_int; +pub extern "c" fn unlink(path: *const u8) c_int; +pub extern "c" fn getcwd(buf: *u8, size: usize) ?*u8; +pub extern "c" fn waitpid(pid: c_int, stat_loc: *c_int, options: c_int) c_int; pub extern "c" fn fork() c_int; -pub extern "c" fn access(path: &const u8, mode: c_uint) c_int; -pub extern "c" fn pipe(fds: &c_int) c_int; -pub extern "c" fn mkdir(path: &const u8, mode: c_uint) c_int; -pub extern "c" fn symlink(existing: &const u8, new: &const u8) c_int; -pub extern "c" fn rename(old: &const u8, new: &const u8) c_int; -pub extern "c" fn chdir(path: &const u8) c_int; -pub extern "c" fn execve(path: &const u8, argv: &const ?&const u8, envp: &const ?&const u8) c_int; +pub extern "c" fn access(path: *const u8, mode: c_uint) c_int; +pub extern "c" fn pipe(fds: *c_int) c_int; +pub extern "c" fn mkdir(path: *const u8, mode: c_uint) c_int; +pub extern "c" fn symlink(existing: *const u8, new: *const u8) c_int; +pub extern "c" fn rename(old: *const u8, new: *const u8) c_int; +pub extern "c" fn chdir(path: *const u8) c_int; +pub extern "c" fn execve(path: *const u8, argv: *const ?*const u8, envp: *const ?*const u8) c_int; pub extern "c" fn dup(fd: c_int) c_int; pub extern "c" fn dup2(old_fd: c_int, new_fd: 
c_int) c_int; -pub extern "c" fn readlink(noalias path: &const u8, noalias buf: &u8, bufsize: usize) isize; -pub extern "c" fn realpath(noalias file_name: &const u8, noalias resolved_name: &u8) ?&u8; -pub extern "c" fn sigprocmask(how: c_int, noalias set: &const sigset_t, noalias oset: ?&sigset_t) c_int; -pub extern "c" fn gettimeofday(tv: ?&timeval, tz: ?&timezone) c_int; -pub extern "c" fn sigaction(sig: c_int, noalias act: &const Sigaction, noalias oact: ?&Sigaction) c_int; -pub extern "c" fn nanosleep(rqtp: &const timespec, rmtp: ?&timespec) c_int; +pub extern "c" fn readlink(noalias path: *const u8, noalias buf: *u8, bufsize: usize) isize; +pub extern "c" fn realpath(noalias file_name: *const u8, noalias resolved_name: *u8) ?*u8; +pub extern "c" fn sigprocmask(how: c_int, noalias set: *const sigset_t, noalias oset: ?*sigset_t) c_int; +pub extern "c" fn gettimeofday(tv: ?*timeval, tz: ?*timezone) c_int; +pub extern "c" fn sigaction(sig: c_int, noalias act: *const Sigaction, noalias oact: ?*Sigaction) c_int; +pub extern "c" fn nanosleep(rqtp: *const timespec, rmtp: ?*timespec) c_int; pub extern "c" fn setreuid(ruid: c_uint, euid: c_uint) c_int; pub extern "c" fn setregid(rgid: c_uint, egid: c_uint) c_int; -pub extern "c" fn rmdir(path: &const u8) c_int; +pub extern "c" fn rmdir(path: *const u8) c_int; -pub extern "c" fn aligned_alloc(alignment: usize, size: usize) ?&c_void; -pub extern "c" fn malloc(usize) ?&c_void; -pub extern "c" fn realloc(&c_void, usize) ?&c_void; -pub extern "c" fn free(&c_void) void; -pub extern "c" fn posix_memalign(memptr: &&c_void, alignment: usize, size: usize) c_int; +pub extern "c" fn aligned_alloc(alignment: usize, size: usize) ?*c_void; +pub extern "c" fn malloc(usize) ?*c_void; +pub extern "c" fn realloc(*c_void, usize) ?*c_void; +pub extern "c" fn free(*c_void) void; +pub extern "c" fn posix_memalign(memptr: **c_void, alignment: usize, size: usize) c_int; -pub extern "pthread" fn pthread_create(noalias newthread: &pthread_t, noalias attr: ?&const pthread_attr_t, start_routine: extern fn (?&c_void) ?&c_void, noalias arg: ?&c_void) c_int; -pub extern "pthread" fn pthread_attr_init(attr: &pthread_attr_t) c_int; -pub extern "pthread" fn pthread_attr_setstack(attr: &pthread_attr_t, stackaddr: &c_void, stacksize: usize) c_int; -pub extern "pthread" fn pthread_attr_destroy(attr: &pthread_attr_t) c_int; -pub extern "pthread" fn pthread_join(thread: pthread_t, arg_return: ?&?&c_void) c_int; +pub extern "pthread" fn pthread_create(noalias newthread: *pthread_t, noalias attr: ?*const pthread_attr_t, start_routine: extern fn (?*c_void) ?*c_void, noalias arg: ?*c_void) c_int; +pub extern "pthread" fn pthread_attr_init(attr: *pthread_attr_t) c_int; +pub extern "pthread" fn pthread_attr_setstack(attr: *pthread_attr_t, stackaddr: *c_void, stacksize: usize) c_int; +pub extern "pthread" fn pthread_attr_destroy(attr: *pthread_attr_t) c_int; +pub extern "pthread" fn pthread_join(thread: pthread_t, arg_return: ?*?*c_void) c_int; -pub const pthread_t = &@OpaqueType(); +pub const pthread_t = *@OpaqueType(); diff --git a/std/c/linux.zig b/std/c/linux.zig index 7810fec130..0ab043533e 100644 --- a/std/c/linux.zig +++ b/std/c/linux.zig @@ -1,7 +1,7 @@ pub use @import("../os/linux/errno.zig"); -pub extern "c" fn getrandom(buf_ptr: &u8, buf_len: usize, flags: c_uint) c_int; -extern "c" fn __errno_location() &c_int; +pub extern "c" fn getrandom(buf_ptr: *u8, buf_len: usize, flags: c_uint) c_int; +extern "c" fn __errno_location() *c_int; pub const _errno = __errno_location; pub const
pthread_attr_t = extern struct { diff --git a/std/c/windows.zig b/std/c/windows.zig index 6e8b17eda8..35ca217131 100644 --- a/std/c/windows.zig +++ b/std/c/windows.zig @@ -1 +1 @@ -pub extern "c" fn _errno() &c_int; +pub extern "c" fn _errno() *c_int; diff --git a/std/crypto/blake2.zig b/std/crypto/blake2.zig index bf3193b5d9..f0a9766c00 100644 --- a/std/crypto/blake2.zig +++ b/std/crypto/blake2.zig @@ -75,7 +75,7 @@ fn Blake2s(comptime out_len: usize) type { return s; } - pub fn reset(d: &Self) void { + pub fn reset(d: *Self) void { mem.copy(u32, d.h[0..], iv[0..]); // No key plus default parameters @@ -90,7 +90,7 @@ fn Blake2s(comptime out_len: usize) type { d.final(out); } - pub fn update(d: &Self, b: []const u8) void { + pub fn update(d: *Self, b: []const u8) void { var off: usize = 0; // Partial buffer exists from previous update. Copy into buffer then hash. @@ -113,7 +113,7 @@ fn Blake2s(comptime out_len: usize) type { d.buf_len += u8(b[off..].len); } - pub fn final(d: &Self, out: []u8) void { + pub fn final(d: *Self, out: []u8) void { debug.assert(out.len >= out_len / 8); mem.set(u8, d.buf[d.buf_len..], 0); @@ -127,7 +127,7 @@ fn Blake2s(comptime out_len: usize) type { } } - fn round(d: &Self, b: []const u8, last: bool) void { + fn round(d: *Self, b: []const u8, last: bool) void { debug.assert(b.len == 64); var m: [16]u32 = undefined; @@ -310,7 +310,7 @@ fn Blake2b(comptime out_len: usize) type { return s; } - pub fn reset(d: &Self) void { + pub fn reset(d: *Self) void { mem.copy(u64, d.h[0..], iv[0..]); // No key plus default parameters @@ -325,7 +325,7 @@ fn Blake2b(comptime out_len: usize) type { d.final(out); } - pub fn update(d: &Self, b: []const u8) void { + pub fn update(d: *Self, b: []const u8) void { var off: usize = 0; // Partial buffer exists from previous update. Copy into buffer then hash. @@ -348,7 +348,7 @@ fn Blake2b(comptime out_len: usize) type { d.buf_len += u8(b[off..].len); } - pub fn final(d: &Self, out: []u8) void { + pub fn final(d: *Self, out: []u8) void { mem.set(u8, d.buf[d.buf_len..], 0); d.t += d.buf_len; d.round(d.buf[0..], true); @@ -360,7 +360,7 @@ fn Blake2b(comptime out_len: usize) type { } } - fn round(d: &Self, b: []const u8, last: bool) void { + fn round(d: *Self, b: []const u8, last: bool) void { debug.assert(b.len == 128); var m: [16]u64 = undefined; diff --git a/std/crypto/md5.zig b/std/crypto/md5.zig index 3d05597273..c0d1732d37 100644 --- a/std/crypto/md5.zig +++ b/std/crypto/md5.zig @@ -44,7 +44,7 @@ pub const Md5 = struct { return d; } - pub fn reset(d: &Self) void { + pub fn reset(d: *Self) void { d.s[0] = 0x67452301; d.s[1] = 0xEFCDAB89; d.s[2] = 0x98BADCFE; @@ -59,7 +59,7 @@ pub const Md5 = struct { d.final(out); } - pub fn update(d: &Self, b: []const u8) void { + pub fn update(d: *Self, b: []const u8) void { var off: usize = 0; // Partial buffer exists from previous update. Copy into buffer then hash. @@ -84,7 +84,7 @@ pub const Md5 = struct { d.total_len +%= b.len; } - pub fn final(d: &Self, out: []u8) void { + pub fn final(d: *Self, out: []u8) void { debug.assert(out.len >= 16); // The buffer here will never be completely full. 
@@ -116,7 +116,7 @@ pub const Md5 = struct { } } - fn round(d: &Self, b: []const u8) void { + fn round(d: *Self, b: []const u8) void { debug.assert(b.len == 64); var s: [16]u32 = undefined; diff --git a/std/crypto/sha1.zig b/std/crypto/sha1.zig index e9d8e3e132..9e46fc9239 100644 --- a/std/crypto/sha1.zig +++ b/std/crypto/sha1.zig @@ -43,7 +43,7 @@ pub const Sha1 = struct { return d; } - pub fn reset(d: &Self) void { + pub fn reset(d: *Self) void { d.s[0] = 0x67452301; d.s[1] = 0xEFCDAB89; d.s[2] = 0x98BADCFE; @@ -59,7 +59,7 @@ pub const Sha1 = struct { d.final(out); } - pub fn update(d: &Self, b: []const u8) void { + pub fn update(d: *Self, b: []const u8) void { var off: usize = 0; // Partial buffer exists from previous update. Copy into buffer then hash. @@ -83,7 +83,7 @@ pub const Sha1 = struct { d.total_len += b.len; } - pub fn final(d: &Self, out: []u8) void { + pub fn final(d: *Self, out: []u8) void { debug.assert(out.len >= 20); // The buffer here will never be completely full. @@ -115,7 +115,7 @@ pub const Sha1 = struct { } } - fn round(d: &Self, b: []const u8) void { + fn round(d: *Self, b: []const u8) void { debug.assert(b.len == 64); var s: [16]u32 = undefined; diff --git a/std/crypto/sha2.zig b/std/crypto/sha2.zig index aedc820f44..d1375d73e8 100644 --- a/std/crypto/sha2.zig +++ b/std/crypto/sha2.zig @@ -93,7 +93,7 @@ fn Sha2_32(comptime params: Sha2Params32) type { return d; } - pub fn reset(d: &Self) void { + pub fn reset(d: *Self) void { d.s[0] = params.iv0; d.s[1] = params.iv1; d.s[2] = params.iv2; @@ -112,7 +112,7 @@ fn Sha2_32(comptime params: Sha2Params32) type { d.final(out); } - pub fn update(d: &Self, b: []const u8) void { + pub fn update(d: *Self, b: []const u8) void { var off: usize = 0; // Partial buffer exists from previous update. Copy into buffer then hash. @@ -136,7 +136,7 @@ fn Sha2_32(comptime params: Sha2Params32) type { d.total_len += b.len; } - pub fn final(d: &Self, out: []u8) void { + pub fn final(d: *Self, out: []u8) void { debug.assert(out.len >= params.out_len / 8); // The buffer here will never be completely full. @@ -171,7 +171,7 @@ fn Sha2_32(comptime params: Sha2Params32) type { } } - fn round(d: &Self, b: []const u8) void { + fn round(d: *Self, b: []const u8) void { debug.assert(b.len == 64); var s: [64]u32 = undefined; @@ -434,7 +434,7 @@ fn Sha2_64(comptime params: Sha2Params64) type { return d; } - pub fn reset(d: &Self) void { + pub fn reset(d: *Self) void { d.s[0] = params.iv0; d.s[1] = params.iv1; d.s[2] = params.iv2; @@ -453,7 +453,7 @@ fn Sha2_64(comptime params: Sha2Params64) type { d.final(out); } - pub fn update(d: &Self, b: []const u8) void { + pub fn update(d: *Self, b: []const u8) void { var off: usize = 0; // Partial buffer exists from previous update. Copy into buffer then hash. @@ -477,7 +477,7 @@ fn Sha2_64(comptime params: Sha2Params64) type { d.total_len += b.len; } - pub fn final(d: &Self, out: []u8) void { + pub fn final(d: *Self, out: []u8) void { debug.assert(out.len >= params.out_len / 8); // The buffer here will never be completely full. 
@@ -512,7 +512,7 @@ fn Sha2_64(comptime params: Sha2Params64) type { } } - fn round(d: &Self, b: []const u8) void { + fn round(d: *Self, b: []const u8) void { debug.assert(b.len == 128); var s: [80]u64 = undefined; diff --git a/std/crypto/sha3.zig b/std/crypto/sha3.zig index 75bec57a87..ae02d7a482 100644 --- a/std/crypto/sha3.zig +++ b/std/crypto/sha3.zig @@ -26,7 +26,7 @@ fn Keccak(comptime bits: usize, comptime delim: u8) type { return d; } - pub fn reset(d: &Self) void { + pub fn reset(d: *Self) void { mem.set(u8, d.s[0..], 0); d.offset = 0; d.rate = 200 - (bits / 4); @@ -38,7 +38,7 @@ fn Keccak(comptime bits: usize, comptime delim: u8) type { d.final(out); } - pub fn update(d: &Self, b: []const u8) void { + pub fn update(d: *Self, b: []const u8) void { var ip: usize = 0; var len = b.len; var rate = d.rate - d.offset; @@ -63,7 +63,7 @@ fn Keccak(comptime bits: usize, comptime delim: u8) type { d.offset = offset + len; } - pub fn final(d: &Self, out: []u8) void { + pub fn final(d: *Self, out: []u8) void { // padding d.s[d.offset] ^= delim; d.s[d.rate - 1] ^= 0x80; diff --git a/std/crypto/throughput_test.zig b/std/crypto/throughput_test.zig index c5c4f9fe10..0ad6845d1a 100644 --- a/std/crypto/throughput_test.zig +++ b/std/crypto/throughput_test.zig @@ -15,8 +15,8 @@ const BytesToHash = 1024 * MiB; pub fn main() !void { var stdout_file = try std.io.getStdOut(); - var stdout_out_stream = std.io.FileOutStream.init(&stdout_file); - const stdout = &stdout_out_stream.stream; + var stdout_out_stream = std.io.FileOutStream.init(&stdout_file); + const stdout = &stdout_out_stream.stream; var block: [HashFunction.block_size]u8 = undefined; std.mem.set(u8, block[0..], 0); diff --git a/std/cstr.zig b/std/cstr.zig index c9f3026064..dfbfb8047f 100644 --- a/std/cstr.zig +++ b/std/cstr.zig @@ -9,13 +9,13 @@ pub const line_sep = switch (builtin.os) { else => "\n", }; -pub fn len(ptr: &const u8) usize { +pub fn len(ptr: *const u8) usize { var count: usize = 0; while (ptr[count] != 0) : (count += 1) {} return count; } -pub fn cmp(a: &const u8, b: &const u8) i8 { +pub fn cmp(a: *const u8, b: *const u8) i8 { var index: usize = 0; while (a[index] == b[index] and a[index] != 0) : (index += 1) {} if (a[index] > b[index]) { @@ -27,11 +27,11 @@ pub fn cmp(a: &const u8, b: &const u8) i8 { } } -pub fn toSliceConst(str: &const u8) []const u8 { +pub fn toSliceConst(str: *const u8) []const u8 { return str[0..len(str)]; } -pub fn toSlice(str: &u8) []u8 { +pub fn toSlice(str: *u8) []u8 { return str[0..len(str)]; } @@ -47,7 +47,7 @@ fn testCStrFnsImpl() void { /// Returns a mutable slice with 1 more byte of length which is a null byte. /// Caller owns the returned memory.
-pub fn addNullByte(allocator: &mem.Allocator, slice: []const u8) ![]u8 { +pub fn addNullByte(allocator: *mem.Allocator, slice: []const u8) ![]u8 { const result = try allocator.alloc(u8, slice.len + 1); mem.copy(u8, result, slice); result[slice.len] = 0; @@ -55,13 +55,13 @@ pub fn addNullByte(allocator: &mem.Allocator, slice: []const u8) ![]u8 { } pub const NullTerminated2DArray = struct { - allocator: &mem.Allocator, + allocator: *mem.Allocator, byte_count: usize, - ptr: ?&?&u8, + ptr: ?*?*u8, /// Takes N lists of strings, concatenates the lists together, and adds a null terminator /// Caller must deinit result - pub fn fromSlices(allocator: &mem.Allocator, slices: []const []const []const u8) !NullTerminated2DArray { + pub fn fromSlices(allocator: *mem.Allocator, slices: []const []const []const u8) !NullTerminated2DArray { var new_len: usize = 1; // 1 for the list null var byte_count: usize = 0; for (slices) |slice| { @@ -75,11 +75,11 @@ pub const NullTerminated2DArray = struct { const index_size = @sizeOf(usize) * new_len; // size of the ptrs byte_count += index_size; - const buf = try allocator.alignedAlloc(u8, @alignOf(?&u8), byte_count); + const buf = try allocator.alignedAlloc(u8, @alignOf(?*u8), byte_count); errdefer allocator.free(buf); var write_index = index_size; - const index_buf = ([]?&u8)(buf); + const index_buf = ([]?*u8)(buf); var i: usize = 0; for (slices) |slice| { @@ -97,12 +97,12 @@ pub const NullTerminated2DArray = struct { return NullTerminated2DArray{ .allocator = allocator, .byte_count = byte_count, - .ptr = @ptrCast(?&?&u8, buf.ptr), + .ptr = @ptrCast(?*?*u8, buf.ptr), }; } - pub fn deinit(self: &NullTerminated2DArray) void { - const buf = @ptrCast(&u8, self.ptr); + pub fn deinit(self: *NullTerminated2DArray) void { + const buf = @ptrCast(*u8, self.ptr); self.allocator.free(buf[0..self.byte_count]); } }; diff --git a/std/debug/failing_allocator.zig b/std/debug/failing_allocator.zig index 6b5edff5bf..e16dd21db4 100644 --- a/std/debug/failing_allocator.zig +++ b/std/debug/failing_allocator.zig @@ -7,12 +7,12 @@ pub const FailingAllocator = struct { allocator: mem.Allocator, index: usize, fail_index: usize, - internal_allocator: &mem.Allocator, + internal_allocator: *mem.Allocator, allocated_bytes: usize, freed_bytes: usize, deallocations: usize, - pub fn init(allocator: &mem.Allocator, fail_index: usize) FailingAllocator { + pub fn init(allocator: *mem.Allocator, fail_index: usize) FailingAllocator { return FailingAllocator{ .internal_allocator = allocator, .fail_index = fail_index, @@ -28,7 +28,7 @@ pub const FailingAllocator = struct { }; } - fn alloc(allocator: &mem.Allocator, n: usize, alignment: u29) ![]u8 { + fn alloc(allocator: *mem.Allocator, n: usize, alignment: u29) ![]u8 { const self = @fieldParentPtr(FailingAllocator, "allocator", allocator); if (self.index == self.fail_index) { return error.OutOfMemory; @@ -39,7 +39,7 @@ pub const FailingAllocator = struct { return result; } - fn realloc(allocator: &mem.Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 { + fn realloc(allocator: *mem.Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 { const self = @fieldParentPtr(FailingAllocator, "allocator", allocator); if (new_size <= old_mem.len) { self.freed_bytes += old_mem.len - new_size; @@ -55,7 +55,7 @@ pub const FailingAllocator = struct { return result; } - fn free(allocator: &mem.Allocator, bytes: []u8) void { + fn free(allocator: *mem.Allocator, bytes: []u8) void { const self = @fieldParentPtr(FailingAllocator, "allocator", 
allocator); self.freed_bytes += bytes.len; self.deallocations += 1; diff --git a/std/debug/index.zig b/std/debug/index.zig index 92e565b391..00d9bef121 100644 --- a/std/debug/index.zig +++ b/std/debug/index.zig @@ -16,12 +16,12 @@ pub const FailingAllocator = @import("failing_allocator.zig").FailingAllocator; /// TODO atomic/multithread support var stderr_file: os.File = undefined; var stderr_file_out_stream: io.FileOutStream = undefined; -var stderr_stream: ?&io.OutStream(io.FileOutStream.Error) = null; +var stderr_stream: ?*io.OutStream(io.FileOutStream.Error) = null; pub fn warn(comptime fmt: []const u8, args: ...) void { const stderr = getStderrStream() catch return; stderr.print(fmt, args) catch return; } -fn getStderrStream() !&io.OutStream(io.FileOutStream.Error) { +fn getStderrStream() !*io.OutStream(io.FileOutStream.Error) { if (stderr_stream) |st| { return st; } else { @@ -33,8 +33,8 @@ fn getStderrStream() !&io.OutStream(io.FileOutStream.Error) { } } -var self_debug_info: ?&ElfStackTrace = null; -pub fn getSelfDebugInfo() !&ElfStackTrace { +var self_debug_info: ?*ElfStackTrace = null; +pub fn getSelfDebugInfo() !*ElfStackTrace { if (self_debug_info) |info| { return info; } else { @@ -58,7 +58,7 @@ pub fn dumpCurrentStackTrace(start_addr: ?usize) void { } /// Tries to print a stack trace to stderr, unbuffered, and ignores any error returned. -pub fn dumpStackTrace(stack_trace: &const builtin.StackTrace) void { +pub fn dumpStackTrace(stack_trace: *const builtin.StackTrace) void { const stderr = getStderrStream() catch return; const debug_info = getSelfDebugInfo() catch |err| { stderr.print("Unable to dump stack trace: Unable to open debug info: {}\n", @errorName(err)) catch return; @@ -104,7 +104,7 @@ pub fn panic(comptime format: []const u8, args: ...) noreturn { var panicking: u8 = 0; // TODO make this a bool -pub fn panicExtra(trace: ?&const builtin.StackTrace, first_trace_addr: ?usize, comptime format: []const u8, args: ...) noreturn { +pub fn panicExtra(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, comptime format: []const u8, args: ...) 
noreturn { @setCold(true); if (@atomicRmw(u8, &panicking, builtin.AtomicRmwOp.Xchg, 1, builtin.AtomicOrder.SeqCst) == 1) { @@ -130,7 +130,7 @@ const WHITE = "\x1b[37;1m"; const DIM = "\x1b[2m"; const RESET = "\x1b[0m"; -pub fn writeStackTrace(stack_trace: &const builtin.StackTrace, out_stream: var, allocator: &mem.Allocator, debug_info: &ElfStackTrace, tty_color: bool) !void { +pub fn writeStackTrace(stack_trace: *const builtin.StackTrace, out_stream: var, allocator: *mem.Allocator, debug_info: *ElfStackTrace, tty_color: bool) !void { var frame_index: usize = undefined; var frames_left: usize = undefined; if (stack_trace.index < stack_trace.instruction_addresses.len) { @@ -150,7 +150,7 @@ pub fn writeStackTrace(stack_trace: &const builtin.StackTrace, out_stream: var, } } -pub fn writeCurrentStackTrace(out_stream: var, allocator: &mem.Allocator, debug_info: &ElfStackTrace, tty_color: bool, start_addr: ?usize) !void { +pub fn writeCurrentStackTrace(out_stream: var, allocator: *mem.Allocator, debug_info: *ElfStackTrace, tty_color: bool, start_addr: ?usize) !void { const AddressState = union(enum) { NotLookingForStartAddress, LookingForStartAddress: usize, @@ -166,8 +166,8 @@ pub fn writeCurrentStackTrace(out_stream: var, allocator: &mem.Allocator, debug_ } var fp = @ptrToInt(@frameAddress()); - while (fp != 0) : (fp = @intToPtr(&const usize, fp).*) { - const return_address = @intToPtr(&const usize, fp + @sizeOf(usize)).*; + while (fp != 0) : (fp = @intToPtr(*const usize, fp).*) { + const return_address = @intToPtr(*const usize, fp + @sizeOf(usize)).*; switch (addr_state) { AddressState.NotLookingForStartAddress => {}, @@ -183,7 +183,7 @@ pub fn writeCurrentStackTrace(out_stream: var, allocator: &mem.Allocator, debug_ } } -fn printSourceAtAddress(debug_info: &ElfStackTrace, out_stream: var, address: usize) !void { +fn printSourceAtAddress(debug_info: *ElfStackTrace, out_stream: var, address: usize) !void { const ptr_hex = "0x{x}"; switch (builtin.os) { @@ -236,7 +236,7 @@ fn printSourceAtAddress(debug_info: &ElfStackTrace, out_stream: var, address: us } } -pub fn openSelfDebugInfo(allocator: &mem.Allocator) !&ElfStackTrace { +pub fn openSelfDebugInfo(allocator: *mem.Allocator) !*ElfStackTrace { switch (builtin.object_format) { builtin.ObjectFormat.elf => { const st = try allocator.create(ElfStackTrace); @@ -289,7 +289,7 @@ pub fn openSelfDebugInfo(allocator: &mem.Allocator) !&ElfStackTrace { } } -fn printLineFromFile(allocator: &mem.Allocator, out_stream: var, line_info: &const LineInfo) !void { +fn printLineFromFile(allocator: *mem.Allocator, out_stream: var, line_info: *const LineInfo) !void { var f = try os.File.openRead(allocator, line_info.file_name); defer f.close(); // TODO fstat and make sure that the file has the correct size @@ -325,32 +325,32 @@ pub const ElfStackTrace = switch (builtin.os) { builtin.Os.macosx => struct { symbol_table: macho.SymbolTable, - pub fn close(self: &ElfStackTrace) void { + pub fn close(self: *ElfStackTrace) void { self.symbol_table.deinit(); } }, else => struct { self_exe_file: os.File, elf: elf.Elf, - debug_info: &elf.SectionHeader, - debug_abbrev: &elf.SectionHeader, - debug_str: &elf.SectionHeader, - debug_line: &elf.SectionHeader, - debug_ranges: ?&elf.SectionHeader, + debug_info: *elf.SectionHeader, + debug_abbrev: *elf.SectionHeader, + debug_str: *elf.SectionHeader, + debug_line: *elf.SectionHeader, + debug_ranges: ?*elf.SectionHeader, abbrev_table_list: ArrayList(AbbrevTableHeader), compile_unit_list: ArrayList(CompileUnit), - pub fn allocator(self: 
&const ElfStackTrace) &mem.Allocator { + pub fn allocator(self: *const ElfStackTrace) *mem.Allocator { return self.abbrev_table_list.allocator; } - pub fn readString(self: &ElfStackTrace) ![]u8 { + pub fn readString(self: *ElfStackTrace) ![]u8 { var in_file_stream = io.FileInStream.init(&self.self_exe_file); const in_stream = &in_file_stream.stream; return readStringRaw(self.allocator(), in_stream); } - pub fn close(self: &ElfStackTrace) void { + pub fn close(self: *ElfStackTrace) void { self.self_exe_file.close(); self.elf.close(); } @@ -365,7 +365,7 @@ const PcRange = struct { const CompileUnit = struct { version: u16, is_64: bool, - die: &Die, + die: *Die, index: usize, pc_range: ?PcRange, }; @@ -408,7 +408,7 @@ const Constant = struct { payload: []u8, signed: bool, - fn asUnsignedLe(self: &const Constant) !u64 { + fn asUnsignedLe(self: *const Constant) !u64 { if (self.payload.len > @sizeOf(u64)) return error.InvalidDebugInfo; if (self.signed) return error.InvalidDebugInfo; return mem.readInt(self.payload, u64, builtin.Endian.Little); @@ -425,14 +425,14 @@ const Die = struct { value: FormValue, }; - fn getAttr(self: &const Die, id: u64) ?&const FormValue { + fn getAttr(self: *const Die, id: u64) ?*const FormValue { for (self.attrs.toSliceConst()) |*attr| { if (attr.id == id) return &attr.value; } return null; } - fn getAttrAddr(self: &const Die, id: u64) !u64 { + fn getAttrAddr(self: *const Die, id: u64) !u64 { const form_value = self.getAttr(id) ?? return error.MissingDebugInfo; return switch (form_value.*) { FormValue.Address => |value| value, @@ -440,7 +440,7 @@ const Die = struct { }; } - fn getAttrSecOffset(self: &const Die, id: u64) !u64 { + fn getAttrSecOffset(self: *const Die, id: u64) !u64 { const form_value = self.getAttr(id) ?? return error.MissingDebugInfo; return switch (form_value.*) { FormValue.Const => |value| value.asUnsignedLe(), @@ -449,7 +449,7 @@ const Die = struct { }; } - fn getAttrUnsignedLe(self: &const Die, id: u64) !u64 { + fn getAttrUnsignedLe(self: *const Die, id: u64) !u64 { const form_value = self.getAttr(id) ?? return error.MissingDebugInfo; return switch (form_value.*) { FormValue.Const => |value| value.asUnsignedLe(), @@ -457,7 +457,7 @@ const Die = struct { }; } - fn getAttrString(self: &const Die, st: &ElfStackTrace, id: u64) ![]u8 { + fn getAttrString(self: *const Die, st: *ElfStackTrace, id: u64) ![]u8 { const form_value = self.getAttr(id) ?? 
return error.MissingDebugInfo; return switch (form_value.*) { FormValue.String => |value| value, @@ -478,9 +478,9 @@ const LineInfo = struct { line: usize, column: usize, file_name: []u8, - allocator: &mem.Allocator, + allocator: *mem.Allocator, - fn deinit(self: &const LineInfo) void { + fn deinit(self: *const LineInfo) void { self.allocator.free(self.file_name); } }; @@ -496,7 +496,7 @@ const LineNumberProgram = struct { target_address: usize, include_dirs: []const []const u8, - file_entries: &ArrayList(FileEntry), + file_entries: *ArrayList(FileEntry), prev_address: usize, prev_file: usize, @@ -506,7 +506,7 @@ const LineNumberProgram = struct { prev_basic_block: bool, prev_end_sequence: bool, - pub fn init(is_stmt: bool, include_dirs: []const []const u8, file_entries: &ArrayList(FileEntry), target_address: usize) LineNumberProgram { + pub fn init(is_stmt: bool, include_dirs: []const []const u8, file_entries: *ArrayList(FileEntry), target_address: usize) LineNumberProgram { return LineNumberProgram{ .address = 0, .file = 1, @@ -528,7 +528,7 @@ const LineNumberProgram = struct { }; } - pub fn checkLineMatch(self: &LineNumberProgram) !?LineInfo { + pub fn checkLineMatch(self: *LineNumberProgram) !?LineInfo { if (self.target_address >= self.prev_address and self.target_address < self.address) { const file_entry = if (self.prev_file == 0) { return error.MissingDebugInfo; @@ -562,7 +562,7 @@ const LineNumberProgram = struct { } }; -fn readStringRaw(allocator: &mem.Allocator, in_stream: var) ![]u8 { +fn readStringRaw(allocator: *mem.Allocator, in_stream: var) ![]u8 { var buf = ArrayList(u8).init(allocator); while (true) { const byte = try in_stream.readByte(); @@ -572,30 +572,30 @@ fn readStringRaw(allocator: &mem.Allocator, in_stream: var) ![]u8 { return buf.toSlice(); } -fn getString(st: &ElfStackTrace, offset: u64) ![]u8 { +fn getString(st: *ElfStackTrace, offset: u64) ![]u8 { const pos = st.debug_str.offset + offset; try st.self_exe_file.seekTo(pos); return st.readString(); } -fn readAllocBytes(allocator: &mem.Allocator, in_stream: var, size: usize) ![]u8 { +fn readAllocBytes(allocator: *mem.Allocator, in_stream: var, size: usize) ![]u8 { const buf = try allocator.alloc(u8, size); errdefer allocator.free(buf); if ((try in_stream.read(buf)) < size) return error.EndOfFile; return buf; } -fn parseFormValueBlockLen(allocator: &mem.Allocator, in_stream: var, size: usize) !FormValue { +fn parseFormValueBlockLen(allocator: *mem.Allocator, in_stream: var, size: usize) !FormValue { const buf = try readAllocBytes(allocator, in_stream, size); return FormValue{ .Block = buf }; } -fn parseFormValueBlock(allocator: &mem.Allocator, in_stream: var, size: usize) !FormValue { +fn parseFormValueBlock(allocator: *mem.Allocator, in_stream: var, size: usize) !FormValue { const block_len = try in_stream.readVarInt(builtin.Endian.Little, usize, size); return parseFormValueBlockLen(allocator, in_stream, block_len); } -fn parseFormValueConstant(allocator: &mem.Allocator, in_stream: var, signed: bool, size: usize) !FormValue { +fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: var, signed: bool, size: usize) !FormValue { return FormValue{ .Const = Constant{ .signed = signed, @@ -612,12 +612,12 @@ fn parseFormValueTargetAddrSize(in_stream: var) !u64 { return if (@sizeOf(usize) == 4) u64(try in_stream.readIntLe(u32)) else if (@sizeOf(usize) == 8) try in_stream.readIntLe(u64) else unreachable; } -fn parseFormValueRefLen(allocator: &mem.Allocator, in_stream: var, size: usize) !FormValue { +fn 
parseFormValueRefLen(allocator: *mem.Allocator, in_stream: var, size: usize) !FormValue { const buf = try readAllocBytes(allocator, in_stream, size); return FormValue{ .Ref = buf }; } -fn parseFormValueRef(allocator: &mem.Allocator, in_stream: var, comptime T: type) !FormValue { +fn parseFormValueRef(allocator: *mem.Allocator, in_stream: var, comptime T: type) !FormValue { const block_len = try in_stream.readIntLe(T); return parseFormValueRefLen(allocator, in_stream, block_len); } @@ -632,7 +632,7 @@ const ParseFormValueError = error{ OutOfMemory, }; -fn parseFormValue(allocator: &mem.Allocator, in_stream: var, form_id: u64, is_64: bool) ParseFormValueError!FormValue { +fn parseFormValue(allocator: *mem.Allocator, in_stream: var, form_id: u64, is_64: bool) ParseFormValueError!FormValue { return switch (form_id) { DW.FORM_addr => FormValue{ .Address = try parseFormValueTargetAddrSize(in_stream) }, DW.FORM_block1 => parseFormValueBlock(allocator, in_stream, 1), @@ -682,7 +682,7 @@ fn parseFormValue(allocator: &mem.Allocator, in_stream: var, form_id: u64, is_64 }; } -fn parseAbbrevTable(st: &ElfStackTrace) !AbbrevTable { +fn parseAbbrevTable(st: *ElfStackTrace) !AbbrevTable { const in_file = &st.self_exe_file; var in_file_stream = io.FileInStream.init(in_file); const in_stream = &in_file_stream.stream; @@ -712,7 +712,7 @@ fn parseAbbrevTable(st: &ElfStackTrace) !AbbrevTable { /// Gets an already existing AbbrevTable given the abbrev_offset, or if not found, /// seeks in the stream and parses it. -fn getAbbrevTable(st: &ElfStackTrace, abbrev_offset: u64) !&const AbbrevTable { +fn getAbbrevTable(st: *ElfStackTrace, abbrev_offset: u64) !*const AbbrevTable { for (st.abbrev_table_list.toSlice()) |*header| { if (header.offset == abbrev_offset) { return &header.table; @@ -726,14 +726,14 @@ fn getAbbrevTable(st: &ElfStackTrace, abbrev_offset: u64) !&const AbbrevTable { return &st.abbrev_table_list.items[st.abbrev_table_list.len - 1].table; } -fn getAbbrevTableEntry(abbrev_table: &const AbbrevTable, abbrev_code: u64) ?&const AbbrevTableEntry { +fn getAbbrevTableEntry(abbrev_table: *const AbbrevTable, abbrev_code: u64) ?*const AbbrevTableEntry { for (abbrev_table.toSliceConst()) |*table_entry| { if (table_entry.abbrev_code == abbrev_code) return table_entry; } return null; } -fn parseDie(st: &ElfStackTrace, abbrev_table: &const AbbrevTable, is_64: bool) !Die { +fn parseDie(st: *ElfStackTrace, abbrev_table: *const AbbrevTable, is_64: bool) !Die { const in_file = &st.self_exe_file; var in_file_stream = io.FileInStream.init(in_file); const in_stream = &in_file_stream.stream; @@ -755,7 +755,7 @@ fn parseDie(st: &ElfStackTrace, abbrev_table: &const AbbrevTable, is_64: bool) ! 
return result; } -fn getLineNumberInfo(st: &ElfStackTrace, compile_unit: &const CompileUnit, target_address: usize) !LineInfo { +fn getLineNumberInfo(st: *ElfStackTrace, compile_unit: *const CompileUnit, target_address: usize) !LineInfo { const compile_unit_cwd = try compile_unit.die.getAttrString(st, DW.AT_comp_dir); const in_file = &st.self_exe_file; @@ -934,7 +934,7 @@ fn getLineNumberInfo(st: &ElfStackTrace, compile_unit: &const CompileUnit, targe return error.MissingDebugInfo; } -fn scanAllCompileUnits(st: &ElfStackTrace) !void { +fn scanAllCompileUnits(st: *ElfStackTrace) !void { const debug_info_end = st.debug_info.offset + st.debug_info.size; var this_unit_offset = st.debug_info.offset; var cu_index: usize = 0; @@ -1005,7 +1005,7 @@ fn scanAllCompileUnits(st: &ElfStackTrace) !void { } } -fn findCompileUnit(st: &ElfStackTrace, target_address: u64) !&const CompileUnit { +fn findCompileUnit(st: *ElfStackTrace, target_address: u64) !*const CompileUnit { var in_file_stream = io.FileInStream.init(&st.self_exe_file); const in_stream = &in_file_stream.stream; for (st.compile_unit_list.toSlice()) |*compile_unit| { @@ -1039,7 +1039,7 @@ fn findCompileUnit(st: &ElfStackTrace, target_address: u64) !&const CompileUnit return error.MissingDebugInfo; } -fn readInitialLength(comptime E: type, in_stream: &io.InStream(E), is_64: &bool) !u64 { +fn readInitialLength(comptime E: type, in_stream: *io.InStream(E), is_64: *bool) !u64 { const first_32_bits = try in_stream.readIntLe(u32); is_64.* = (first_32_bits == 0xffffffff); if (is_64.*) { @@ -1096,10 +1096,10 @@ var global_fixed_allocator = std.heap.FixedBufferAllocator.init(global_allocator var global_allocator_mem: [100 * 1024]u8 = undefined; // TODO make thread safe -var debug_info_allocator: ?&mem.Allocator = null; +var debug_info_allocator: ?*mem.Allocator = null; var debug_info_direct_allocator: std.heap.DirectAllocator = undefined; var debug_info_arena_allocator: std.heap.ArenaAllocator = undefined; -fn getDebugInfoAllocator() &mem.Allocator { +fn getDebugInfoAllocator() *mem.Allocator { if (debug_info_allocator) |a| return a; debug_info_direct_allocator = std.heap.DirectAllocator.init(); diff --git a/std/elf.zig b/std/elf.zig index 29b9473f98..50e97ab271 100644 --- a/std/elf.zig +++ b/std/elf.zig @@ -338,7 +338,7 @@ pub const SectionHeader = struct { }; pub const Elf = struct { - in_file: &os.File, + in_file: *os.File, auto_close_stream: bool, is_64: bool, endian: builtin.Endian, @@ -348,20 +348,20 @@ pub const Elf = struct { program_header_offset: u64, section_header_offset: u64, string_section_index: u64, - string_section: &SectionHeader, + string_section: *SectionHeader, section_headers: []SectionHeader, - allocator: &mem.Allocator, + allocator: *mem.Allocator, prealloc_file: os.File, /// Call close when done. - pub fn openPath(elf: &Elf, allocator: &mem.Allocator, path: []const u8) !void { + pub fn openPath(elf: *Elf, allocator: *mem.Allocator, path: []const u8) !void { try elf.prealloc_file.open(path); - try elf.openFile(allocator, &elf.prealloc_file); + try elf.openFile(allocator, &elf.prealloc_file); elf.auto_close_stream = true; } /// Call close when done.
- pub fn openFile(elf: &Elf, allocator: &mem.Allocator, file: &os.File) !void { + pub fn openFile(elf: *Elf, allocator: *mem.Allocator, file: *os.File) !void { elf.allocator = allocator; elf.in_file = file; elf.auto_close_stream = false; @@ -503,13 +503,13 @@ pub const Elf = struct { } } - pub fn close(elf: &Elf) void { + pub fn close(elf: *Elf) void { elf.allocator.free(elf.section_headers); if (elf.auto_close_stream) elf.in_file.close(); } - pub fn findSection(elf: &Elf, name: []const u8) !?&SectionHeader { + pub fn findSection(elf: *Elf, name: []const u8) !?*SectionHeader { var file_stream = io.FileInStream.init(elf.in_file); const in = &file_stream.stream; @@ -533,7 +533,7 @@ pub const Elf = struct { return null; } - pub fn seekToSection(elf: &Elf, elf_section: &SectionHeader) !void { + pub fn seekToSection(elf: *Elf, elf_section: *SectionHeader) !void { try elf.in_file.seekTo(elf_section.offset); } }; diff --git a/std/event.zig b/std/event.zig index 4604eb8d02..89ab816bb6 100644 --- a/std/event.zig +++ b/std/event.zig @@ -6,9 +6,9 @@ const mem = std.mem; const posix = std.os.posix; pub const TcpServer = struct { - handleRequestFn: async<&mem.Allocator> fn (&TcpServer, &const std.net.Address, &const std.os.File) void, + handleRequestFn: async<*mem.Allocator> fn (*TcpServer, *const std.net.Address, *const std.os.File) void, - loop: &Loop, + loop: *Loop, sockfd: i32, accept_coro: ?promise, listen_address: std.net.Address, @@ -17,7 +17,7 @@ pub const TcpServer = struct { const PromiseNode = std.LinkedList(promise).Node; - pub fn init(loop: &Loop) !TcpServer { + pub fn init(loop: *Loop) !TcpServer { const sockfd = try std.os.posixSocket(posix.AF_INET, posix.SOCK_STREAM | posix.SOCK_CLOEXEC | posix.SOCK_NONBLOCK, posix.PROTO_tcp); errdefer std.os.close(sockfd); @@ -32,7 +32,7 @@ pub const TcpServer = struct { }; } - pub fn listen(self: &TcpServer, address: &const std.net.Address, handleRequestFn: async<&mem.Allocator> fn (&TcpServer, &const std.net.Address, &const std.os.File) void) !void { + pub fn listen(self: *TcpServer, address: *const std.net.Address, handleRequestFn: async<*mem.Allocator> fn (*TcpServer, *const std.net.Address, *const std.os.File) void) !void { self.handleRequestFn = handleRequestFn; try std.os.posixBind(self.sockfd, &address.os_addr); @@ -46,13 +46,13 @@ pub const TcpServer = struct { errdefer self.loop.removeFd(self.sockfd); } - pub fn deinit(self: &TcpServer) void { + pub fn deinit(self: *TcpServer) void { self.loop.removeFd(self.sockfd); if (self.accept_coro) |accept_coro| cancel accept_coro; std.os.close(self.sockfd); } - pub async fn handler(self: &TcpServer) void { + pub async fn handler(self: *TcpServer) void { while (true) { var accepted_addr: std.net.Address = undefined; if (std.os.posixAccept(self.sockfd, &accepted_addr.os_addr, posix.SOCK_NONBLOCK | posix.SOCK_CLOEXEC)) |accepted_fd| { @@ -92,11 +92,11 @@ pub const TcpServer = struct { }; pub const Loop = struct { - allocator: &mem.Allocator, + allocator: *mem.Allocator, epollfd: i32, keep_running: bool, - fn init(allocator: &mem.Allocator) !Loop { + fn init(allocator: *mem.Allocator) !Loop { const epollfd = try std.os.linuxEpollCreate(std.os.linux.EPOLL_CLOEXEC); return Loop{ .keep_running = true, @@ -105,7 +105,7 @@ pub const Loop = struct { }; } - pub fn addFd(self: &Loop, fd: i32, prom: promise) !void { + pub fn addFd(self: *Loop, fd: i32, prom: promise) !void { var ev = std.os.linux.epoll_event{ .events = std.os.linux.EPOLLIN | std.os.linux.EPOLLOUT | std.os.linux.EPOLLET, .data = 
std.os.linux.epoll_data{ .ptr = @ptrToInt(prom) }, @@ -113,23 +113,23 @@ pub const Loop = struct { try std.os.linuxEpollCtl(self.epollfd, std.os.linux.EPOLL_CTL_ADD, fd, &ev); } - pub fn removeFd(self: &Loop, fd: i32) void { + pub fn removeFd(self: *Loop, fd: i32) void { std.os.linuxEpollCtl(self.epollfd, std.os.linux.EPOLL_CTL_DEL, fd, undefined) catch {}; } - async fn waitFd(self: &Loop, fd: i32) !void { + async fn waitFd(self: *Loop, fd: i32) !void { defer self.removeFd(fd); suspend |p| { try self.addFd(fd, p); } } - pub fn stop(self: &Loop) void { + pub fn stop(self: *Loop) void { // TODO make atomic self.keep_running = false; // TODO activate an fd in the epoll set } - pub fn run(self: &Loop) void { + pub fn run(self: *Loop) void { while (self.keep_running) { var events: [16]std.os.linux.epoll_event = undefined; const count = std.os.linuxEpollWait(self.epollfd, events[0..], -1); @@ -141,7 +141,7 @@ pub const Loop = struct { } }; -pub async fn connect(loop: &Loop, _address: &const std.net.Address) !std.os.File { +pub async fn connect(loop: *Loop, _address: *const std.net.Address) !std.os.File { var address = _address.*; // TODO https://github.com/ziglang/zig/issues/733 const sockfd = try std.os.posixSocket(posix.AF_INET, posix.SOCK_STREAM | posix.SOCK_CLOEXEC | posix.SOCK_NONBLOCK, posix.PROTO_tcp); @@ -163,7 +163,7 @@ test "listen on a port, send bytes, receive bytes" { tcp_server: TcpServer, const Self = this; - async<&mem.Allocator> fn handler(tcp_server: &TcpServer, _addr: &const std.net.Address, _socket: &const std.os.File) void { + async<*mem.Allocator> fn handler(tcp_server: *TcpServer, _addr: *const std.net.Address, _socket: *const std.os.File) void { const self = @fieldParentPtr(Self, "tcp_server", tcp_server); var socket = _socket.*; // TODO https://github.com/ziglang/zig/issues/733 defer socket.close(); @@ -177,7 +177,7 @@ test "listen on a port, send bytes, receive bytes" { cancel p; } } - async fn errorableHandler(self: &Self, _addr: &const std.net.Address, _socket: &const std.os.File) !void { + async fn errorableHandler(self: *Self, _addr: *const std.net.Address, _socket: *const std.os.File) !void { const addr = _addr.*; // TODO https://github.com/ziglang/zig/issues/733 var socket = _socket.*; // TODO https://github.com/ziglang/zig/issues/733 @@ -199,7 +199,7 @@ test "listen on a port, send bytes, receive bytes" { defer cancel p; loop.run(); } -async fn doAsyncTest(loop: &Loop, address: &const std.net.Address) void { +async fn doAsyncTest(loop: *Loop, address: *const std.net.Address) void { errdefer @panic("test failure"); var socket_file = try await try async event.connect(loop, address); diff --git a/std/fmt/errol/index.zig b/std/fmt/errol/index.zig index 65e8d448a8..933958ac18 100644 --- a/std/fmt/errol/index.zig +++ b/std/fmt/errol/index.zig @@ -21,7 +21,7 @@ pub const RoundMode = enum { /// Round a FloatDecimal as returned by errol3 to the specified fractional precision. /// All digits after the specified precision should be considered invalid. -pub fn roundToPrecision(float_decimal: &FloatDecimal, precision: usize, mode: RoundMode) void { +pub fn roundToPrecision(float_decimal: *FloatDecimal, precision: usize, mode: RoundMode) void { // The round digit refers to the index which we should look at to determine // whether we need to round to match the specified precision. 
var round_digit: usize = 0; @@ -59,7 +59,7 @@ pub fn roundToPrecision(float_decimal: &FloatDecimal, precision: usize, mode: Ro float_decimal.exp += 1; // Re-size the buffer to use the reserved leading byte. - const one_before = @intToPtr(&u8, @ptrToInt(&float_decimal.digits[0]) - 1); + const one_before = @intToPtr(*u8, @ptrToInt(&float_decimal.digits[0]) - 1); float_decimal.digits = one_before[0 .. float_decimal.digits.len + 1]; float_decimal.digits[0] = '1'; return; @@ -217,7 +217,7 @@ fn tableLowerBound(k: u64) usize { /// @in: The HP number. /// @val: The double. /// &returns: The HP number. -fn hpProd(in: &const HP, val: f64) HP { +fn hpProd(in: *const HP, val: f64) HP { var hi: f64 = undefined; var lo: f64 = undefined; split(in.val, &hi, &lo); @@ -239,7 +239,7 @@ fn hpProd(in: &const HP, val: f64) HP { /// @val: The double. /// @hi: The high bits. /// @lo: The low bits. -fn split(val: f64, hi: &f64, lo: &f64) void { +fn split(val: f64, hi: *f64, lo: *f64) void { hi.* = gethi(val); lo.* = val - hi.*; } @@ -252,7 +252,7 @@ fn gethi(in: f64) f64 { /// Normalize the number by factoring in the error. /// @hp: The float pair. -fn hpNormalize(hp: &HP) void { +fn hpNormalize(hp: *HP) void { // Required to avoid segfaults causing buffer overrun during errol3 digit output termination. @setFloatMode(this, @import("builtin").FloatMode.Strict); @@ -264,7 +264,7 @@ fn hpNormalize(hp: &HP) void { /// Divide the high-precision number by ten. /// @hp: The high-precision number -fn hpDiv10(hp: &HP) void { +fn hpDiv10(hp: *HP) void { var val = hp.val; hp.val /= 10.0; @@ -280,7 +280,7 @@ fn hpDiv10(hp: &HP) void { /// Multiply the high-precision number by ten. /// @hp: The high-precision number -fn hpMul10(hp: &HP) void { +fn hpMul10(hp: *HP) void { const val = hp.val; hp.val *= 10.0; diff --git a/std/fmt/index.zig b/std/fmt/index.zig index 0ffbc59895..b522d9d37d 100644 --- a/std/fmt/index.zig +++ b/std/fmt/index.zig @@ -679,7 +679,7 @@ const FormatIntBuf = struct { out_buf: []u8, index: usize, }; -fn formatIntCallback(context: &FormatIntBuf, bytes: []const u8) (error{}!void) { +fn formatIntCallback(context: *FormatIntBuf, bytes: []const u8) (error{}!void) { mem.copy(u8, context.out_buf[context.index..], bytes); context.index += bytes.len; } @@ -751,7 +751,7 @@ const BufPrintContext = struct { remaining: []u8, }; -fn bufPrintWrite(context: &BufPrintContext, bytes: []const u8) !void { +fn bufPrintWrite(context: *BufPrintContext, bytes: []const u8) !void { if (context.remaining.len < bytes.len) return error.BufferTooSmall; mem.copy(u8, context.remaining, bytes); context.remaining = context.remaining[bytes.len..]; @@ -763,14 +763,14 @@ pub fn bufPrint(buf: []u8, comptime fmt: []const u8, args: ...) ![]u8 { return buf[0 .. buf.len - context.remaining.len]; } -pub fn allocPrint(allocator: &mem.Allocator, comptime fmt: []const u8, args: ...) ![]u8 { +pub fn allocPrint(allocator: *mem.Allocator, comptime fmt: []const u8, args: ...) ![]u8 { var size: usize = 0; format(&size, error{}, countSize, fmt, args) catch |err| switch (err) {}; const buf = try allocator.alloc(u8, size); return bufPrint(buf, fmt, args); } -fn countSize(size: &usize, bytes: []const u8) (error{}!void) { +fn countSize(size: *usize, bytes: []const u8) (error{}!void) { size.* += bytes.len; } diff --git a/std/hash/adler.zig b/std/hash/adler.zig index 12dab1457c..9c5966f89b 100644 --- a/std/hash/adler.zig +++ b/std/hash/adler.zig @@ -18,7 +18,7 @@ pub const Adler32 = struct { // This fast variant is taken from zlib. 
It reduces the required modulos and unrolls longer // buffer inputs and should be much quicker. - pub fn update(self: &Adler32, input: []const u8) void { + pub fn update(self: *Adler32, input: []const u8) void { var s1 = self.adler & 0xffff; var s2 = (self.adler >> 16) & 0xffff; @@ -77,7 +77,7 @@ pub const Adler32 = struct { self.adler = s1 | (s2 << 16); } - pub fn final(self: &Adler32) u32 { + pub fn final(self: *Adler32) u32 { return self.adler; } diff --git a/std/hash/crc.zig b/std/hash/crc.zig index 45bcb70e8b..ec831cdc2e 100644 --- a/std/hash/crc.zig +++ b/std/hash/crc.zig @@ -58,7 +58,7 @@ pub fn Crc32WithPoly(comptime poly: u32) type { return Self{ .crc = 0xffffffff }; } - pub fn update(self: &Self, input: []const u8) void { + pub fn update(self: *Self, input: []const u8) void { var i: usize = 0; while (i + 8 <= input.len) : (i += 8) { const p = input[i .. i + 8]; @@ -86,7 +86,7 @@ pub fn Crc32WithPoly(comptime poly: u32) type { } } - pub fn final(self: &Self) u32 { + pub fn final(self: *Self) u32 { return ~self.crc; } @@ -143,14 +143,14 @@ pub fn Crc32SmallWithPoly(comptime poly: u32) type { return Self{ .crc = 0xffffffff }; } - pub fn update(self: &Self, input: []const u8) void { + pub fn update(self: *Self, input: []const u8) void { for (input) |b| { self.crc = lookup_table[@truncate(u4, self.crc ^ (b >> 0))] ^ (self.crc >> 4); self.crc = lookup_table[@truncate(u4, self.crc ^ (b >> 4))] ^ (self.crc >> 4); } } - pub fn final(self: &Self) u32 { + pub fn final(self: *Self) u32 { return ~self.crc; } diff --git a/std/hash/fnv.zig b/std/hash/fnv.zig index c2439e0ebc..447c996772 100644 --- a/std/hash/fnv.zig +++ b/std/hash/fnv.zig @@ -21,14 +21,14 @@ fn Fnv1a(comptime T: type, comptime prime: T, comptime offset: T) type { return Self{ .value = offset }; } - pub fn update(self: &Self, input: []const u8) void { + pub fn update(self: *Self, input: []const u8) void { for (input) |b| { self.value ^= b; self.value *%= prime; } } - pub fn final(self: &Self) T { + pub fn final(self: *Self) T { return self.value; } diff --git a/std/hash/siphash.zig b/std/hash/siphash.zig index 750e23d4c8..8a90308a46 100644 --- a/std/hash/siphash.zig +++ b/std/hash/siphash.zig @@ -63,7 +63,7 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize) return d; } - pub fn update(d: &Self, b: []const u8) void { + pub fn update(d: *Self, b: []const u8) void { var off: usize = 0; // Partial from previous. 
@@ -85,7 +85,7 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize) d.msg_len +%= @truncate(u8, b.len); } - pub fn final(d: &Self) T { + pub fn final(d: *Self) T { // Padding mem.set(u8, d.buf[d.buf_len..], 0); d.buf[7] = d.msg_len; @@ -118,7 +118,7 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize) return (u128(b2) << 64) | b1; } - fn round(d: &Self, b: []const u8) void { + fn round(d: *Self, b: []const u8) void { debug.assert(b.len == 8); const m = mem.readInt(b[0..], u64, Endian.Little); @@ -132,7 +132,7 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize) d.v0 ^= m; } - fn sipRound(d: &Self) void { + fn sipRound(d: *Self) void { d.v0 +%= d.v1; d.v1 = math.rotl(u64, d.v1, u64(13)); d.v1 ^= d.v0; diff --git a/std/hash_map.zig b/std/hash_map.zig index f51b9c66ba..a323cdc197 100644 --- a/std/hash_map.zig +++ b/std/hash_map.zig @@ -14,7 +14,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3 entries: []Entry, size: usize, max_distance_from_start_index: usize, - allocator: &Allocator, + allocator: *Allocator, // this is used to detect bugs where a hashtable is edited while an iterator is running. modification_count: debug_u32, @@ -28,7 +28,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3 }; pub const Iterator = struct { - hm: &const Self, + hm: *const Self, // how many items have we returned count: usize, // iterator through the entry array @@ -36,7 +36,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3 // used to detect concurrent modification initial_modification_count: debug_u32, - pub fn next(it: &Iterator) ?&Entry { + pub fn next(it: *Iterator) ?*Entry { if (want_modification_safety) { assert(it.initial_modification_count == it.hm.modification_count); // concurrent modification } @@ -53,7 +53,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3 } // Reset the iterator to the initial index - pub fn reset(it: &Iterator) void { + pub fn reset(it: *Iterator) void { it.count = 0; it.index = 0; // Resetting the modification count too @@ -61,7 +61,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3 } }; - pub fn init(allocator: &Allocator) Self { + pub fn init(allocator: *Allocator) Self { return Self{ .entries = []Entry{}, .allocator = allocator, @@ -71,11 +71,11 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3 }; } - pub fn deinit(hm: &const Self) void { + pub fn deinit(hm: *const Self) void { hm.allocator.free(hm.entries); } - pub fn clear(hm: &Self) void { + pub fn clear(hm: *Self) void { for (hm.entries) |*entry| { entry.used = false; } @@ -84,12 +84,12 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3 hm.incrementModificationCount(); } - pub fn count(hm: &const Self) usize { + pub fn count(hm: *const Self) usize { return hm.size; } /// Returns the value that was already there. 
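The HashMap hunks here and just below only respell the self and value pointers; call sites keep the same shape. A usage sketch, assuming the generic's trailing parameters are the usual hash: fn(K) u32 and eql: fn(K, K) bool pair (the hunk header truncates them) and using std.debug.global_allocator as a stand-in *Allocator (an assumption; any allocator works):

    const std = @import("std");
    const assert = std.debug.assert;

    fn hashI32(x: i32) u32 {
        return @bitCast(u32, x); // toy hash, good enough for a demo
    }

    fn eqlI32(a: i32, b: i32) bool {
        return a == b;
    }

    test "HashMap put/get round trip" {
        var map = std.HashMap(i32, i32, hashI32, eqlI32).init(std.debug.global_allocator);
        defer map.deinit();

        _ = try map.put(1, 11); // returns the previously stored value, if any
        _ = try map.put(2, 22);

        assert((??map.get(1)).value == 11);
        assert(map.contains(2));
        assert(map.count() == 2);
    }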
- pub fn put(hm: &Self, key: K, value: &const V) !?V { + pub fn put(hm: *Self, key: K, value: *const V) !?V { if (hm.entries.len == 0) { try hm.initCapacity(16); } @@ -111,18 +111,18 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3 return hm.internalPut(key, value); } - pub fn get(hm: &const Self, key: K) ?&Entry { + pub fn get(hm: *const Self, key: K) ?*Entry { if (hm.entries.len == 0) { return null; } return hm.internalGet(key); } - pub fn contains(hm: &const Self, key: K) bool { + pub fn contains(hm: *const Self, key: K) bool { return hm.get(key) != null; } - pub fn remove(hm: &Self, key: K) ?&Entry { + pub fn remove(hm: *Self, key: K) ?*Entry { if (hm.entries.len == 0) return null; hm.incrementModificationCount(); const start_index = hm.keyToIndex(key); @@ -154,7 +154,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3 return null; } - pub fn iterator(hm: &const Self) Iterator { + pub fn iterator(hm: *const Self) Iterator { return Iterator{ .hm = hm, .count = 0, @@ -163,7 +163,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3 }; } - fn initCapacity(hm: &Self, capacity: usize) !void { + fn initCapacity(hm: *Self, capacity: usize) !void { hm.entries = try hm.allocator.alloc(Entry, capacity); hm.size = 0; hm.max_distance_from_start_index = 0; @@ -172,14 +172,14 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3 } } - fn incrementModificationCount(hm: &Self) void { + fn incrementModificationCount(hm: *Self) void { if (want_modification_safety) { hm.modification_count +%= 1; } } /// Returns the value that was already there. - fn internalPut(hm: &Self, orig_key: K, orig_value: &const V) ?V { + fn internalPut(hm: *Self, orig_key: K, orig_value: *const V) ?V { var key = orig_key; var value = orig_value.*; const start_index = hm.keyToIndex(key); @@ -231,7 +231,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3 unreachable; // put into a full map } - fn internalGet(hm: &const Self, key: K) ?&Entry { + fn internalGet(hm: *const Self, key: K) ?*Entry { const start_index = hm.keyToIndex(key); { var roll_over: usize = 0; @@ -246,7 +246,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3 return null; } - fn keyToIndex(hm: &const Self, key: K) usize { + fn keyToIndex(hm: *const Self, key: K) usize { return usize(hash(key)) % hm.entries.len; } }; diff --git a/std/heap.zig b/std/heap.zig index 8d4938a7c3..81d6f25282 100644 --- a/std/heap.zig +++ b/std/heap.zig @@ -16,15 +16,15 @@ var c_allocator_state = Allocator{ .freeFn = cFree, }; -fn cAlloc(self: &Allocator, n: usize, alignment: u29) ![]u8 { +fn cAlloc(self: *Allocator, n: usize, alignment: u29) ![]u8 { assert(alignment <= @alignOf(c_longdouble)); - return if (c.malloc(n)) |buf| @ptrCast(&u8, buf)[0..n] else error.OutOfMemory; + return if (c.malloc(n)) |buf| @ptrCast(*u8, buf)[0..n] else error.OutOfMemory; } -fn cRealloc(self: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 { - const old_ptr = @ptrCast(&c_void, old_mem.ptr); +fn cRealloc(self: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 { + const old_ptr = @ptrCast(*c_void, old_mem.ptr); if (c.realloc(old_ptr, new_size)) |buf| { - return @ptrCast(&u8, buf)[0..new_size]; + return @ptrCast(*u8, buf)[0..new_size]; } else if (new_size <= old_mem.len) { return old_mem[0..new_size]; } else { @@ -32,8 +32,8 @@ fn cRealloc(self: &Allocator, old_mem: []u8, new_size: 
usize, alignment: u29) ![ } } -fn cFree(self: &Allocator, old_mem: []u8) void { - const old_ptr = @ptrCast(&c_void, old_mem.ptr); +fn cFree(self: *Allocator, old_mem: []u8) void { + const old_ptr = @ptrCast(*c_void, old_mem.ptr); c.free(old_ptr); } @@ -55,7 +55,7 @@ pub const DirectAllocator = struct { }; } - pub fn deinit(self: &DirectAllocator) void { + pub fn deinit(self: *DirectAllocator) void { switch (builtin.os) { Os.windows => if (self.heap_handle) |heap_handle| { _ = os.windows.HeapDestroy(heap_handle); @@ -64,7 +64,7 @@ pub const DirectAllocator = struct { } } - fn alloc(allocator: &Allocator, n: usize, alignment: u29) ![]u8 { + fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 { const self = @fieldParentPtr(DirectAllocator, "allocator", allocator); switch (builtin.os) { @@ -74,7 +74,7 @@ pub const DirectAllocator = struct { const addr = p.mmap(null, alloc_size, p.PROT_READ | p.PROT_WRITE, p.MAP_PRIVATE | p.MAP_ANONYMOUS, -1, 0); if (addr == p.MAP_FAILED) return error.OutOfMemory; - if (alloc_size == n) return @intToPtr(&u8, addr)[0..n]; + if (alloc_size == n) return @intToPtr(*u8, addr)[0..n]; var aligned_addr = addr & ~usize(alignment - 1); aligned_addr += alignment; @@ -93,7 +93,7 @@ pub const DirectAllocator = struct { //It is impossible that there is an unoccupied page at the top of our // mmap. - return @intToPtr(&u8, aligned_addr)[0..n]; + return @intToPtr(*u8, aligned_addr)[0..n]; }, Os.windows => { const amt = n + alignment + @sizeOf(usize); @@ -108,14 +108,14 @@ pub const DirectAllocator = struct { const march_forward_bytes = if (rem == 0) 0 else (alignment - rem); const adjusted_addr = root_addr + march_forward_bytes; const record_addr = adjusted_addr + n; - @intToPtr(&align(1) usize, record_addr).* = root_addr; - return @intToPtr(&u8, adjusted_addr)[0..n]; + @intToPtr(*align(1) usize, record_addr).* = root_addr; + return @intToPtr(*u8, adjusted_addr)[0..n]; }, else => @compileError("Unsupported OS"), } } - fn realloc(allocator: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 { + fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 { const self = @fieldParentPtr(DirectAllocator, "allocator", allocator); switch (builtin.os) { @@ -139,13 +139,13 @@ pub const DirectAllocator = struct { Os.windows => { const old_adjusted_addr = @ptrToInt(old_mem.ptr); const old_record_addr = old_adjusted_addr + old_mem.len; - const root_addr = @intToPtr(&align(1) usize, old_record_addr).*; + const root_addr = @intToPtr(*align(1) usize, old_record_addr).*; const old_ptr = @intToPtr(os.windows.LPVOID, root_addr); const amt = new_size + alignment + @sizeOf(usize); const new_ptr = os.windows.HeapReAlloc(??self.heap_handle, 0, old_ptr, amt) ?? 
blk: { if (new_size > old_mem.len) return error.OutOfMemory; const new_record_addr = old_record_addr - new_size + old_mem.len; - @intToPtr(&align(1) usize, new_record_addr).* = root_addr; + @intToPtr(*align(1) usize, new_record_addr).* = root_addr; return old_mem[0..new_size]; }; const offset = old_adjusted_addr - root_addr; @@ -153,14 +153,14 @@ pub const DirectAllocator = struct { const new_adjusted_addr = new_root_addr + offset; assert(new_adjusted_addr % alignment == 0); const new_record_addr = new_adjusted_addr + new_size; - @intToPtr(&align(1) usize, new_record_addr).* = new_root_addr; - return @intToPtr(&u8, new_adjusted_addr)[0..new_size]; + @intToPtr(*align(1) usize, new_record_addr).* = new_root_addr; + return @intToPtr(*u8, new_adjusted_addr)[0..new_size]; }, else => @compileError("Unsupported OS"), } } - fn free(allocator: &Allocator, bytes: []u8) void { + fn free(allocator: *Allocator, bytes: []u8) void { const self = @fieldParentPtr(DirectAllocator, "allocator", allocator); switch (builtin.os) { @@ -169,7 +169,7 @@ pub const DirectAllocator = struct { }, Os.windows => { const record_addr = @ptrToInt(bytes.ptr) + bytes.len; - const root_addr = @intToPtr(&align(1) usize, record_addr).*; + const root_addr = @intToPtr(*align(1) usize, record_addr).*; const ptr = @intToPtr(os.windows.LPVOID, root_addr); _ = os.windows.HeapFree(??self.heap_handle, 0, ptr); }, @@ -183,13 +183,13 @@ pub const DirectAllocator = struct { pub const ArenaAllocator = struct { pub allocator: Allocator, - child_allocator: &Allocator, + child_allocator: *Allocator, buffer_list: std.LinkedList([]u8), end_index: usize, const BufNode = std.LinkedList([]u8).Node; - pub fn init(child_allocator: &Allocator) ArenaAllocator { + pub fn init(child_allocator: *Allocator) ArenaAllocator { return ArenaAllocator{ .allocator = Allocator{ .allocFn = alloc, @@ -202,7 +202,7 @@ pub const ArenaAllocator = struct { }; } - pub fn deinit(self: &ArenaAllocator) void { + pub fn deinit(self: *ArenaAllocator) void { var it = self.buffer_list.first; while (it) |node| { // this has to occur before the free because the free frees node @@ -212,7 +212,7 @@ pub const ArenaAllocator = struct { } } - fn createNode(self: &ArenaAllocator, prev_len: usize, minimum_size: usize) !&BufNode { + fn createNode(self: *ArenaAllocator, prev_len: usize, minimum_size: usize) !*BufNode { const actual_min_size = minimum_size + @sizeOf(BufNode); var len = prev_len; while (true) { @@ -233,7 +233,7 @@ pub const ArenaAllocator = struct { return buf_node; } - fn alloc(allocator: &Allocator, n: usize, alignment: u29) ![]u8 { + fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 { const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator); var cur_node = if (self.buffer_list.last) |last_node| last_node else try self.createNode(0, n + alignment); @@ -254,7 +254,7 @@ pub const ArenaAllocator = struct { } } - fn realloc(allocator: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 { + fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 { if (new_size <= old_mem.len) { return old_mem[0..new_size]; } else { @@ -264,7 +264,7 @@ pub const ArenaAllocator = struct { } } - fn free(allocator: &Allocator, bytes: []u8) void {} + fn free(allocator: *Allocator, bytes: []u8) void {} }; pub const FixedBufferAllocator = struct { @@ -284,7 +284,7 @@ pub const FixedBufferAllocator = struct { }; } - fn alloc(allocator: &Allocator, n: usize, alignment: u29) ![]u8 { + fn alloc(allocator: *Allocator, n: usize, 
alignment: u29) ![]u8 { const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator); const addr = @ptrToInt(self.buffer.ptr) + self.end_index; const rem = @rem(addr, alignment); @@ -300,7 +300,7 @@ pub const FixedBufferAllocator = struct { return result; } - fn realloc(allocator: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 { + fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 { if (new_size <= old_mem.len) { return old_mem[0..new_size]; } else { @@ -310,7 +310,7 @@ pub const FixedBufferAllocator = struct { } } - fn free(allocator: &Allocator, bytes: []u8) void {} + fn free(allocator: *Allocator, bytes: []u8) void {} }; /// lock free @@ -331,7 +331,7 @@ pub const ThreadSafeFixedBufferAllocator = struct { }; } - fn alloc(allocator: &Allocator, n: usize, alignment: u29) ![]u8 { + fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 { const self = @fieldParentPtr(ThreadSafeFixedBufferAllocator, "allocator", allocator); var end_index = @atomicLoad(usize, &self.end_index, builtin.AtomicOrder.SeqCst); while (true) { @@ -343,11 +343,11 @@ pub const ThreadSafeFixedBufferAllocator = struct { if (new_end_index > self.buffer.len) { return error.OutOfMemory; } - end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, builtin.AtomicOrder.SeqCst, builtin.AtomicOrder.SeqCst) ?? return self.buffer[adjusted_index..new_end_index]; + end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, builtin.AtomicOrder.SeqCst, builtin.AtomicOrder.SeqCst,) ?? return self.buffer[adjusted_index..new_end_index]; } } - fn realloc(allocator: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 { + fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 { if (new_size <= old_mem.len) { return old_mem[0..new_size]; } else { @@ -357,7 +357,7 @@ pub const ThreadSafeFixedBufferAllocator = struct { } } - fn free(allocator: &Allocator, bytes: []u8) void {} + fn free(allocator: *Allocator, bytes: []u8) void {} }; test "c_allocator" { @@ -403,8 +403,8 @@ test "ThreadSafeFixedBufferAllocator" { try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator); } -fn testAllocator(allocator: &mem.Allocator) !void { - var slice = try allocator.alloc(&i32, 100); +fn testAllocator(allocator: *mem.Allocator) !void { + var slice = try allocator.alloc(*i32, 100); for (slice) |*item, i| { item.* = try allocator.create(i32); @@ -415,15 +415,15 @@ fn testAllocator(allocator: &mem.Allocator) !void { allocator.destroy(item); } - slice = try allocator.realloc(&i32, slice, 20000); - slice = try allocator.realloc(&i32, slice, 50); - slice = try allocator.realloc(&i32, slice, 25); - slice = try allocator.realloc(&i32, slice, 10); + slice = try allocator.realloc(*i32, slice, 20000); + slice = try allocator.realloc(*i32, slice, 50); + slice = try allocator.realloc(*i32, slice, 25); + slice = try allocator.realloc(*i32, slice, 10); allocator.free(slice); } -fn testAllocatorLargeAlignment(allocator: &mem.Allocator) mem.Allocator.Error!void { +fn testAllocatorLargeAlignment(allocator: *mem.Allocator) mem.Allocator.Error!void { //Maybe a platform's page_size is actually the same as or // very near usize? 
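The heap.zig hunks above leave the allocator vtable intact and only respell the self pointers, so composing allocators still looks the same at the call site. A sketch of an arena sitting on top of the page allocator, assuming DirectAllocator.init() takes no arguments (only its deinit is visible in this diff):

    const std = @import("std");
    const assert = std.debug.assert;

    test "ArenaAllocator over DirectAllocator" {
        var direct = std.heap.DirectAllocator.init(); // assumed zero-argument constructor
        defer direct.deinit();

        var arena = std.heap.ArenaAllocator.init(&direct.allocator); // child_allocator: *Allocator
        defer arena.deinit(); // frees every buffer the arena grabbed from its child

        const bytes = try arena.allocator.alloc(u8, 100);
        assert(bytes.len == 100);
        // No per-allocation free is needed: ArenaAllocator.free is a no-op by design.
    }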
if (os.page_size << 2 > @maxValue(usize)) return; diff --git a/std/io.zig b/std/io.zig index 39d319159e..e20a284e4e 100644 --- a/std/io.zig +++ b/std/io.zig @@ -34,20 +34,20 @@ pub fn getStdIn() GetStdIoErrs!File { /// Implementation of InStream trait for File pub const FileInStream = struct { - file: &File, + file: *File, stream: Stream, pub const Error = @typeOf(File.read).ReturnType.ErrorSet; pub const Stream = InStream(Error); - pub fn init(file: &File) FileInStream { + pub fn init(file: *File) FileInStream { return FileInStream{ .file = file, .stream = Stream{ .readFn = readFn }, }; } - fn readFn(in_stream: &Stream, buffer: []u8) Error!usize { + fn readFn(in_stream: *Stream, buffer: []u8) Error!usize { const self = @fieldParentPtr(FileInStream, "stream", in_stream); return self.file.read(buffer); } @@ -55,20 +55,20 @@ pub const FileInStream = struct { /// Implementation of OutStream trait for File pub const FileOutStream = struct { - file: &File, + file: *File, stream: Stream, pub const Error = File.WriteError; pub const Stream = OutStream(Error); - pub fn init(file: &File) FileOutStream { + pub fn init(file: *File) FileOutStream { return FileOutStream{ .file = file, .stream = Stream{ .writeFn = writeFn }, }; } - fn writeFn(out_stream: &Stream, bytes: []const u8) !void { + fn writeFn(out_stream: *Stream, bytes: []const u8) !void { const self = @fieldParentPtr(FileOutStream, "stream", out_stream); return self.file.write(bytes); } @@ -82,12 +82,12 @@ pub fn InStream(comptime ReadError: type) type { /// Return the number of bytes read. If the number read is smaller than buf.len, it /// means the stream reached the end. Reaching the end of a stream is not an error /// condition. - readFn: fn (self: &Self, buffer: []u8) Error!usize, + readFn: fn (self: *Self, buffer: []u8) Error!usize, /// Replaces `buffer` contents by reading from the stream until it is finished. /// If `buffer.len()` would exceed `max_size`, `error.StreamTooLong` is returned and /// the contents read from the stream are lost. - pub fn readAllBuffer(self: &Self, buffer: &Buffer, max_size: usize) !void { + pub fn readAllBuffer(self: *Self, buffer: *Buffer, max_size: usize) !void { try buffer.resize(0); var actual_buf_len: usize = 0; @@ -111,7 +111,7 @@ pub fn InStream(comptime ReadError: type) type { /// memory would be greater than `max_size`, returns `error.StreamTooLong`. /// Caller owns returned memory. /// If this function returns an error, the contents from the stream read so far are lost. - pub fn readAllAlloc(self: &Self, allocator: &mem.Allocator, max_size: usize) ![]u8 { + pub fn readAllAlloc(self: *Self, allocator: *mem.Allocator, max_size: usize) ![]u8 { var buf = Buffer.initNull(allocator); defer buf.deinit(); @@ -123,7 +123,7 @@ pub fn InStream(comptime ReadError: type) type { /// Does not include the delimiter in the result. /// If `buffer.len()` would exceed `max_size`, `error.StreamTooLong` is returned and the contents /// read from the stream so far are lost. - pub fn readUntilDelimiterBuffer(self: &Self, buffer: &Buffer, delimiter: u8, max_size: usize) !void { + pub fn readUntilDelimiterBuffer(self: *Self, buffer: *Buffer, delimiter: u8, max_size: usize) !void { try buffer.resize(0); while (true) { @@ -145,7 +145,7 @@ pub fn InStream(comptime ReadError: type) type { /// memory would be greater than `max_size`, returns `error.StreamTooLong`. /// Caller owns returned memory. /// If this function returns an error, the contents from the stream read so far are lost. 
- pub fn readUntilDelimiterAlloc(self: &Self, allocator: &mem.Allocator, delimiter: u8, max_size: usize) ![]u8 { + pub fn readUntilDelimiterAlloc(self: *Self, allocator: *mem.Allocator, delimiter: u8, max_size: usize) ![]u8 { var buf = Buffer.initNull(allocator); defer buf.deinit(); @@ -156,43 +156,43 @@ pub fn InStream(comptime ReadError: type) type { /// Returns the number of bytes read. If the number read is smaller than buf.len, it /// means the stream reached the end. Reaching the end of a stream is not an error /// condition. - pub fn read(self: &Self, buffer: []u8) !usize { + pub fn read(self: *Self, buffer: []u8) !usize { return self.readFn(self, buffer); } /// Same as `read` but end of stream returns `error.EndOfStream`. - pub fn readNoEof(self: &Self, buf: []u8) !void { + pub fn readNoEof(self: *Self, buf: []u8) !void { const amt_read = try self.read(buf); if (amt_read < buf.len) return error.EndOfStream; } /// Reads 1 byte from the stream or returns `error.EndOfStream`. - pub fn readByte(self: &Self) !u8 { + pub fn readByte(self: *Self) !u8 { var result: [1]u8 = undefined; try self.readNoEof(result[0..]); return result[0]; } /// Same as `readByte` except the returned byte is signed. - pub fn readByteSigned(self: &Self) !i8 { + pub fn readByteSigned(self: *Self) !i8 { return @bitCast(i8, try self.readByte()); } - pub fn readIntLe(self: &Self, comptime T: type) !T { + pub fn readIntLe(self: *Self, comptime T: type) !T { return self.readInt(builtin.Endian.Little, T); } - pub fn readIntBe(self: &Self, comptime T: type) !T { + pub fn readIntBe(self: *Self, comptime T: type) !T { return self.readInt(builtin.Endian.Big, T); } - pub fn readInt(self: &Self, endian: builtin.Endian, comptime T: type) !T { + pub fn readInt(self: *Self, endian: builtin.Endian, comptime T: type) !T { var bytes: [@sizeOf(T)]u8 = undefined; try self.readNoEof(bytes[0..]); return mem.readInt(bytes, T, endian); } - pub fn readVarInt(self: &Self, endian: builtin.Endian, comptime T: type, size: usize) !T { + pub fn readVarInt(self: *Self, endian: builtin.Endian, comptime T: type, size: usize) !T { assert(size <= @sizeOf(T)); assert(size <= 8); var input_buf: [8]u8 = undefined; @@ -208,22 +208,22 @@ pub fn OutStream(comptime WriteError: type) type { const Self = this; pub const Error = WriteError; - writeFn: fn (self: &Self, bytes: []const u8) Error!void, + writeFn: fn (self: *Self, bytes: []const u8) Error!void, - pub fn print(self: &Self, comptime format: []const u8, args: ...) !void { + pub fn print(self: *Self, comptime format: []const u8, args: ...) !void { return std.fmt.format(self, Error, self.writeFn, format, args); } - pub fn write(self: &Self, bytes: []const u8) !void { + pub fn write(self: *Self, bytes: []const u8) !void { return self.writeFn(self, bytes); } - pub fn writeByte(self: &Self, byte: u8) !void { + pub fn writeByte(self: *Self, byte: u8) !void { const slice = (&byte)[0..1]; return self.writeFn(self, slice); } - pub fn writeByteNTimes(self: &Self, byte: u8, n: usize) !void { + pub fn writeByteNTimes(self: *Self, byte: u8, n: usize) !void { const slice = (&byte)[0..1]; var i: usize = 0; while (i < n) : (i += 1) { @@ -234,14 +234,14 @@ pub fn OutStream(comptime WriteError: type) type { } /// `path` needs to be copied in memory to add a null terminating byte, hence the allocator. 
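The stream traits above still dispatch through their readFn/writeFn function-pointer fields; only the self pointers are respelled. A small writer-side sketch, assuming io.getStdOut() exists alongside the getStdIn() shown at the top of this file's diff (an assumption, not visible in these hunks):

    const std = @import("std");
    const io = std.io;

    test "print through a FileOutStream" {
        var stdout_file = try io.getStdOut(); // assumption: mirrors getStdIn()
        var file_stream = io.FileOutStream.init(&stdout_file);
        const stream = &file_stream.stream; // *OutStream(FileOutStream.Error)

        try stream.print("1 + 1 = {}\n", u32(2));
        try stream.write("plain bytes\n");
    }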
-pub fn writeFile(allocator: &mem.Allocator, path: []const u8, data: []const u8) !void { +pub fn writeFile(allocator: *mem.Allocator, path: []const u8, data: []const u8) !void { var file = try File.openWrite(allocator, path); defer file.close(); try file.write(data); } /// On success, caller owns returned buffer. -pub fn readFileAlloc(allocator: &mem.Allocator, path: []const u8) ![]u8 { +pub fn readFileAlloc(allocator: *mem.Allocator, path: []const u8) ![]u8 { var file = try File.openRead(allocator, path); defer file.close(); @@ -265,13 +265,13 @@ pub fn BufferedInStreamCustom(comptime buffer_size: usize, comptime Error: type) pub stream: Stream, - unbuffered_in_stream: &Stream, + unbuffered_in_stream: *Stream, buffer: [buffer_size]u8, start_index: usize, end_index: usize, - pub fn init(unbuffered_in_stream: &Stream) Self { + pub fn init(unbuffered_in_stream: *Stream) Self { return Self{ .unbuffered_in_stream = unbuffered_in_stream, .buffer = undefined, @@ -287,7 +287,7 @@ pub fn BufferedInStreamCustom(comptime buffer_size: usize, comptime Error: type) }; } - fn readFn(in_stream: &Stream, dest: []u8) !usize { + fn readFn(in_stream: *Stream, dest: []u8) !usize { const self = @fieldParentPtr(Self, "stream", in_stream); var dest_index: usize = 0; @@ -338,12 +338,12 @@ pub fn BufferedOutStreamCustom(comptime buffer_size: usize, comptime OutStreamEr pub stream: Stream, - unbuffered_out_stream: &Stream, + unbuffered_out_stream: *Stream, buffer: [buffer_size]u8, index: usize, - pub fn init(unbuffered_out_stream: &Stream) Self { + pub fn init(unbuffered_out_stream: *Stream) Self { return Self{ .unbuffered_out_stream = unbuffered_out_stream, .buffer = undefined, @@ -352,12 +352,12 @@ pub fn BufferedOutStreamCustom(comptime buffer_size: usize, comptime OutStreamEr }; } - pub fn flush(self: &Self) !void { + pub fn flush(self: *Self) !void { try self.unbuffered_out_stream.write(self.buffer[0..self.index]); self.index = 0; } - fn writeFn(out_stream: &Stream, bytes: []const u8) !void { + fn writeFn(out_stream: *Stream, bytes: []const u8) !void { const self = @fieldParentPtr(Self, "stream", out_stream); if (bytes.len >= self.buffer.len) { @@ -383,20 +383,20 @@ pub fn BufferedOutStreamCustom(comptime buffer_size: usize, comptime OutStreamEr /// Implementation of OutStream trait for Buffer pub const BufferOutStream = struct { - buffer: &Buffer, + buffer: *Buffer, stream: Stream, pub const Error = error{OutOfMemory}; pub const Stream = OutStream(Error); - pub fn init(buffer: &Buffer) BufferOutStream { + pub fn init(buffer: *Buffer) BufferOutStream { return BufferOutStream{ .buffer = buffer, .stream = Stream{ .writeFn = writeFn }, }; } - fn writeFn(out_stream: &Stream, bytes: []const u8) !void { + fn writeFn(out_stream: *Stream, bytes: []const u8) !void { const self = @fieldParentPtr(BufferOutStream, "stream", out_stream); return self.buffer.append(bytes); } @@ -407,7 +407,7 @@ pub const BufferedAtomicFile = struct { file_stream: FileOutStream, buffered_stream: BufferedOutStream(FileOutStream.Error), - pub fn create(allocator: &mem.Allocator, dest_path: []const u8) !&BufferedAtomicFile { + pub fn create(allocator: *mem.Allocator, dest_path: []const u8) !*BufferedAtomicFile { // TODO with well defined copy elision we don't need this allocation var self = try allocator.create(BufferedAtomicFile); errdefer allocator.destroy(self); @@ -427,18 +427,18 @@ pub const BufferedAtomicFile = struct { } /// always call destroy, even after successful finish() - pub fn destroy(self: &BufferedAtomicFile) void { + pub fn 
destroy(self: *BufferedAtomicFile) void { const allocator = self.atomic_file.allocator; self.atomic_file.deinit(); allocator.destroy(self); } - pub fn finish(self: &BufferedAtomicFile) !void { + pub fn finish(self: *BufferedAtomicFile) !void { try self.buffered_stream.flush(); try self.atomic_file.finish(); } - pub fn stream(self: &BufferedAtomicFile) &OutStream(FileOutStream.Error) { + pub fn stream(self: *BufferedAtomicFile) *OutStream(FileOutStream.Error) { return &self.buffered_stream.stream; } }; diff --git a/std/json.zig b/std/json.zig index 9de8f0b53e..c8aef7688b 100644 --- a/std/json.zig +++ b/std/json.zig @@ -76,7 +76,7 @@ pub const Token = struct { } // Slice into the underlying input string. - pub fn slice(self: &const Token, input: []const u8, i: usize) []const u8 { + pub fn slice(self: *const Token, input: []const u8, i: usize) []const u8 { return input[i + self.offset - self.count .. i + self.offset]; } }; @@ -115,7 +115,7 @@ pub const StreamingJsonParser = struct { return p; } - pub fn reset(p: &StreamingJsonParser) void { + pub fn reset(p: *StreamingJsonParser) void { p.state = State.TopLevelBegin; p.count = 0; // Set before ever read in main transition function @@ -205,7 +205,7 @@ pub const StreamingJsonParser = struct { // tokens. token2 is always null if token1 is null. // // There is currently no error recovery on a bad stream. - pub fn feed(p: &StreamingJsonParser, c: u8, token1: &?Token, token2: &?Token) Error!void { + pub fn feed(p: *StreamingJsonParser, c: u8, token1: *?Token, token2: *?Token) Error!void { token1.* = null; token2.* = null; p.count += 1; @@ -217,7 +217,7 @@ pub const StreamingJsonParser = struct { } // Perform a single transition on the state machine and return any possible token. - fn transition(p: &StreamingJsonParser, c: u8, token: &?Token) Error!bool { + fn transition(p: *StreamingJsonParser, c: u8, token: *?Token) Error!bool { switch (p.state) { State.TopLevelBegin => switch (c) { '{' => { @@ -861,7 +861,7 @@ pub fn validate(s: []const u8) bool { var token1: ?Token = undefined; var token2: ?Token = undefined; - p.feed(c, &token1, &token2) catch |err| { + p.feed(c, &token1, &token2) catch |err| { return false; }; } @@ -878,7 +878,7 @@ pub const ValueTree = struct { arena: ArenaAllocator, root: Value, - pub fn deinit(self: &ValueTree) void { + pub fn deinit(self: *ValueTree) void { self.arena.deinit(); } }; @@ -894,7 +894,7 @@ pub const Value = union(enum) { Array: ArrayList(Value), Object: ObjectMap, - pub fn dump(self: &const Value) void { + pub fn dump(self: *const Value) void { switch (self.*) { Value.Null => { std.debug.warn("null"); @@ -941,7 +941,7 @@ pub const Value = union(enum) { } } - pub fn dumpIndent(self: &const Value, indent: usize) void { + pub fn dumpIndent(self: *const Value, indent: usize) void { if (indent == 0) { self.dump(); } else { @@ -949,7 +949,7 @@ pub const Value = union(enum) { } } - fn dumpIndentLevel(self: &const Value, indent: usize, level: usize) void { + fn dumpIndentLevel(self: *const Value, indent: usize, level: usize) void { switch (self.*) { Value.Null => { std.debug.warn("null"); @@ -1013,7 +1013,7 @@ pub const Value = union(enum) { // A non-stream JSON parser which constructs a tree of Value's. pub const JsonParser = struct { - allocator: &Allocator, + allocator: *Allocator, state: State, copy_strings: bool, // Stores parent nodes and un-combined Values.
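StreamingJsonParser is a push parser: validate() in the hunk above feeds it one byte at a time, and feed() reports completed tokens through its two *?Token out-parameters (call sites still pass addresses with &). A driving loop in the same shape, assuming the parser and Token are reachable as std.json:

    const std = @import("std");
    const json = std.json;

    fn feedAll(p: *json.StreamingJsonParser, s: []const u8) !void {
        for (s) |c| {
            var token1: ?json.Token = undefined;
            var token2: ?json.Token = undefined;
            // The parameter types are *?Token now, but the call site still takes & of the locals.
            try p.feed(c, &token1, &token2);
            // token1/token2 are non-null whenever a complete token ended at this byte.
        }
    }

    test "feed a small document byte by byte" {
        var p = json.StreamingJsonParser.init();
        try feedAll(&p, "{\"key\": [1, 2, 3]}");
    }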
@@ -1026,7 +1026,7 @@ pub const JsonParser = struct { Simple, }; - pub fn init(allocator: &Allocator, copy_strings: bool) JsonParser { + pub fn init(allocator: *Allocator, copy_strings: bool) JsonParser { return JsonParser{ .allocator = allocator, .state = State.Simple, @@ -1035,16 +1035,16 @@ pub const JsonParser = struct { }; } - pub fn deinit(p: &JsonParser) void { + pub fn deinit(p: *JsonParser) void { p.stack.deinit(); } - pub fn reset(p: &JsonParser) void { + pub fn reset(p: *JsonParser) void { p.state = State.Simple; p.stack.shrink(0); } - pub fn parse(p: &JsonParser, input: []const u8) !ValueTree { + pub fn parse(p: *JsonParser, input: []const u8) !ValueTree { var mp = StreamingJsonParser.init(); var arena = ArenaAllocator.init(p.allocator); @@ -1090,7 +1090,7 @@ pub const JsonParser = struct { // Even though p.allocator exists, we take an explicit allocator so that allocation state // can be cleaned up on error correctly during a `parse` on call. - fn transition(p: &JsonParser, allocator: &Allocator, input: []const u8, i: usize, token: &const Token) !void { + fn transition(p: *JsonParser, allocator: *Allocator, input: []const u8, i: usize, token: *const Token) !void { switch (p.state) { State.ObjectKey => switch (token.id) { Token.Id.ObjectEnd => { @@ -1223,7 +1223,7 @@ pub const JsonParser = struct { } } - fn pushToParent(p: &JsonParser, value: &const Value) !void { + fn pushToParent(p: *JsonParser, value: *const Value) !void { switch (p.stack.at(p.stack.len - 1)) { // Object Parent -> [ ..., object, , value ] Value.String => |key| { @@ -1244,14 +1244,14 @@ pub const JsonParser = struct { } } - fn parseString(p: &JsonParser, allocator: &Allocator, token: &const Token, input: []const u8, i: usize) !Value { + fn parseString(p: *JsonParser, allocator: *Allocator, token: *const Token, input: []const u8, i: usize) !Value { // TODO: We don't strictly have to copy values which do not contain any escape // characters if flagged with the option. const slice = token.slice(input, i); return Value{ .String = try mem.dupe(p.allocator, u8, slice) }; } - fn parseNumber(p: &JsonParser, token: &const Token, input: []const u8, i: usize) !Value { + fn parseNumber(p: *JsonParser, token: *const Token, input: []const u8, i: usize) !Value { return if (token.number_is_integer) Value{ .Integer = try std.fmt.parseInt(i64, token.slice(input, i), 10) } else diff --git a/std/linked_list.zig b/std/linked_list.zig index c6be08171e..fbc0a0c42a 100644 --- a/std/linked_list.zig +++ b/std/linked_list.zig @@ -21,11 +21,11 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na /// Node inside the linked list wrapping the actual data. pub const Node = struct { - prev: ?&Node, - next: ?&Node, + prev: ?*Node, + next: ?*Node, data: T, - pub fn init(value: &const T) Node { + pub fn init(value: *const T) Node { return Node{ .prev = null, .next = null, @@ -38,14 +38,14 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na return Node.init({}); } - pub fn toData(node: &Node) &ParentType { + pub fn toData(node: *Node) *ParentType { comptime assert(isIntrusive()); return @fieldParentPtr(ParentType, field_name, node); } }; - first: ?&Node, - last: ?&Node, + first: ?*Node, + last: ?*Node, len: usize, /// Initialize a linked list. @@ -69,7 +69,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na /// Arguments: /// node: Pointer to a node in the list. /// new_node: Pointer to the new node to insert. 
- pub fn insertAfter(list: &Self, node: &Node, new_node: &Node) void { + pub fn insertAfter(list: *Self, node: *Node, new_node: *Node) void { new_node.prev = node; if (node.next) |next_node| { // Intermediate node. @@ -90,7 +90,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na /// Arguments: /// node: Pointer to a node in the list. /// new_node: Pointer to the new node to insert. - pub fn insertBefore(list: &Self, node: &Node, new_node: &Node) void { + pub fn insertBefore(list: *Self, node: *Node, new_node: *Node) void { new_node.next = node; if (node.prev) |prev_node| { // Intermediate node. @@ -110,7 +110,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na /// /// Arguments: /// new_node: Pointer to the new node to insert. - pub fn append(list: &Self, new_node: &Node) void { + pub fn append(list: *Self, new_node: *Node) void { if (list.last) |last| { // Insert after last. list.insertAfter(last, new_node); @@ -124,7 +124,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na /// /// Arguments: /// new_node: Pointer to the new node to insert. - pub fn prepend(list: &Self, new_node: &Node) void { + pub fn prepend(list: *Self, new_node: *Node) void { if (list.first) |first| { // Insert before first. list.insertBefore(first, new_node); @@ -143,7 +143,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na /// /// Arguments: /// node: Pointer to the node to be removed. - pub fn remove(list: &Self, node: &Node) void { + pub fn remove(list: *Self, node: *Node) void { if (node.prev) |prev_node| { // Intermediate node. prev_node.next = node.next; @@ -168,7 +168,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na /// /// Returns: /// A pointer to the last node in the list. - pub fn pop(list: &Self) ?&Node { + pub fn pop(list: *Self) ?*Node { const last = list.last ?? return null; list.remove(last); return last; @@ -178,7 +178,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na /// /// Returns: /// A pointer to the first node in the list. - pub fn popFirst(list: &Self) ?&Node { + pub fn popFirst(list: *Self) ?*Node { const first = list.first ?? return null; list.remove(first); return first; @@ -191,7 +191,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na /// /// Returns: /// A pointer to the new node. - pub fn allocateNode(list: &Self, allocator: &Allocator) !&Node { + pub fn allocateNode(list: *Self, allocator: *Allocator) !*Node { comptime assert(!isIntrusive()); return allocator.create(Node); } @@ -201,7 +201,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na /// Arguments: /// node: Pointer to the node to deallocate. /// allocator: Dynamic memory allocator. - pub fn destroyNode(list: &Self, node: &Node, allocator: &Allocator) void { + pub fn destroyNode(list: *Self, node: *Node, allocator: *Allocator) void { comptime assert(!isIntrusive()); allocator.destroy(node); } @@ -214,7 +214,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na /// /// Returns: /// A pointer to the new node. 
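The list operations above keep their shape; only the node pointers become ?*Node and *Node. A sketch of the non-intrusive wrapper, assuming std.LinkedList(T) is the public face of BaseLinkedList (the std.LinkedList([]u8) use in std/heap.zig earlier in this patch suggests as much) and that init() builds an empty list:

    const std = @import("std");
    const assert = std.debug.assert;

    test "append, prepend, pop with *Node pointers" {
        const L = std.LinkedList(u32);
        var list = L.init(); // assumed empty-list constructor

        var a = L.Node{ .prev = null, .next = null, .data = 1 };
        var b = L.Node{ .prev = null, .next = null, .data = 2 };

        list.append(&a);  // append(list: *Self, new_node: *Node)
        list.prepend(&b); // prepend(list: *Self, new_node: *Node)
        assert(list.len == 2);

        const last = ??list.pop(); // pop() returns ?*Node
        assert(last.data == 1);
    }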
- pub fn createNode(list: &Self, data: &const T, allocator: &Allocator) !&Node { + pub fn createNode(list: *Self, data: *const T, allocator: *Allocator) !*Node { comptime assert(!isIntrusive()); var node = try list.allocateNode(allocator); node.* = Node.init(data); diff --git a/std/macho.zig b/std/macho.zig index 615569e4b4..e71ac76b1a 100644 --- a/std/macho.zig +++ b/std/macho.zig @@ -42,13 +42,13 @@ pub const Symbol = struct { name: []const u8, address: u64, - fn addressLessThan(lhs: &const Symbol, rhs: &const Symbol) bool { + fn addressLessThan(lhs: *const Symbol, rhs: *const Symbol) bool { return lhs.address < rhs.address; } }; pub const SymbolTable = struct { - allocator: &mem.Allocator, + allocator: *mem.Allocator, symbols: []const Symbol, strings: []const u8, @@ -56,7 +56,7 @@ pub const SymbolTable = struct { // Ideally we'd use _mh_execute_header because it's always at 0x100000000 // in the image but as it's located in a different section than executable // code, its displacement is different. - pub fn deinit(self: &SymbolTable) void { + pub fn deinit(self: *SymbolTable) void { self.allocator.free(self.symbols); self.symbols = []const Symbol{}; @@ -64,7 +64,7 @@ pub const SymbolTable = struct { self.strings = []const u8{}; } - pub fn search(self: &const SymbolTable, address: usize) ?&const Symbol { + pub fn search(self: *const SymbolTable, address: usize) ?*const Symbol { var min: usize = 0; var max: usize = self.symbols.len - 1; // Exclude sentinel. while (min < max) { @@ -83,7 +83,7 @@ pub const SymbolTable = struct { } }; -pub fn loadSymbols(allocator: &mem.Allocator, in: &io.FileInStream) !SymbolTable { +pub fn loadSymbols(allocator: *mem.Allocator, in: *io.FileInStream) !SymbolTable { var file = in.file; try file.seekTo(0); @@ -160,13 +160,13 @@ pub fn loadSymbols(allocator: &mem.Allocator, in: &io.FileInStream) !SymbolTable }; } -fn readNoEof(in: &io.FileInStream, comptime T: type, result: []T) !void { +fn readNoEof(in: *io.FileInStream, comptime T: type, result: []T) !void { return in.stream.readNoEof(([]u8)(result)); } -fn readOneNoEof(in: &io.FileInStream, comptime T: type, result: &T) !void { +fn readOneNoEof(in: *io.FileInStream, comptime T: type, result: *T) !void { return readNoEof(in, T, result[0..1]); } -fn isSymbol(sym: &const Nlist64) bool { +fn isSymbol(sym: *const Nlist64) bool { return sym.n_value != 0 and sym.n_desc == 0; } diff --git a/std/math/complex/atan.zig b/std/math/complex/atan.zig index b7bbf930eb..9bfe5fe724 100644 --- a/std/math/complex/atan.zig +++ b/std/math/complex/atan.zig @@ -29,7 +29,7 @@ fn redupif32(x: f32) f32 { return ((x - u * DP1) - u * DP2) - t * DP3; } -fn atan32(z: &const Complex(f32)) Complex(f32) { +fn atan32(z: *const Complex(f32)) Complex(f32) { const maxnum = 1.0e38; const x = z.re; @@ -78,7 +78,7 @@ fn redupif64(x: f64) f64 { return ((x - u * DP1) - u * DP2) - t * DP3; } -fn atan64(z: &const Complex(f64)) Complex(f64) { +fn atan64(z: *const Complex(f64)) Complex(f64) { const maxnum = 1.0e308; const x = z.re; diff --git a/std/math/complex/cosh.zig b/std/math/complex/cosh.zig index 96eac68556..c2f9a47b8d 100644 --- a/std/math/complex/cosh.zig +++ b/std/math/complex/cosh.zig @@ -15,7 +15,7 @@ pub fn cosh(z: var) Complex(@typeOf(z.re)) { }; } -fn cosh32(z: &const Complex(f32)) Complex(f32) { +fn cosh32(z: *const Complex(f32)) Complex(f32) { const x = z.re; const y = z.im; @@ -78,7 +78,7 @@ fn cosh32(z: &const Complex(f32)) Complex(f32) { return Complex(f32).new((x * x) * (y - y), (x + x) * (y - y)); } -fn cosh64(z: &const 
Complex(f64)) Complex(f64) { +fn cosh64(z: *const Complex(f64)) Complex(f64) { const x = z.re; const y = z.im; diff --git a/std/math/complex/exp.zig b/std/math/complex/exp.zig index 8fe069a43d..44c354f246 100644 --- a/std/math/complex/exp.zig +++ b/std/math/complex/exp.zig @@ -16,7 +16,7 @@ pub fn exp(z: var) Complex(@typeOf(z.re)) { }; } -fn exp32(z: &const Complex(f32)) Complex(f32) { +fn exp32(z: *const Complex(f32)) Complex(f32) { @setFloatMode(this, @import("builtin").FloatMode.Strict); const exp_overflow = 0x42b17218; // max_exp * ln2 ~= 88.72283955 @@ -63,7 +63,7 @@ fn exp32(z: &const Complex(f32)) Complex(f32) { } } -fn exp64(z: &const Complex(f64)) Complex(f64) { +fn exp64(z: *const Complex(f64)) Complex(f64) { const exp_overflow = 0x40862e42; // high bits of max_exp * ln2 ~= 710 const cexp_overflow = 0x4096b8e4; // (max_exp - min_denorm_exp) * ln2 diff --git a/std/math/complex/index.zig b/std/math/complex/index.zig index 5902ffaa19..b00296beda 100644 --- a/std/math/complex/index.zig +++ b/std/math/complex/index.zig @@ -37,28 +37,28 @@ pub fn Complex(comptime T: type) type { }; } - pub fn add(self: &const Self, other: &const Self) Self { + pub fn add(self: *const Self, other: *const Self) Self { return Self{ .re = self.re + other.re, .im = self.im + other.im, }; } - pub fn sub(self: &const Self, other: &const Self) Self { + pub fn sub(self: *const Self, other: *const Self) Self { return Self{ .re = self.re - other.re, .im = self.im - other.im, }; } - pub fn mul(self: &const Self, other: &const Self) Self { + pub fn mul(self: *const Self, other: *const Self) Self { return Self{ .re = self.re * other.re - self.im * other.im, .im = self.im * other.re + self.re * other.im, }; } - pub fn div(self: &const Self, other: &const Self) Self { + pub fn div(self: *const Self, other: *const Self) Self { const re_num = self.re * other.re + self.im * other.im; const im_num = self.im * other.re - self.re * other.im; const den = other.re * other.re + other.im * other.im; @@ -69,14 +69,14 @@ pub fn Complex(comptime T: type) type { }; } - pub fn conjugate(self: &const Self) Self { + pub fn conjugate(self: *const Self) Self { return Self{ .re = self.re, .im = -self.im, }; } - pub fn reciprocal(self: &const Self) Self { + pub fn reciprocal(self: *const Self) Self { const m = self.re * self.re + self.im * self.im; return Self{ .re = self.re / m, @@ -84,7 +84,7 @@ pub fn Complex(comptime T: type) type { }; } - pub fn magnitude(self: &const Self) T { + pub fn magnitude(self: *const Self) T { return math.sqrt(self.re * self.re + self.im * self.im); } }; diff --git a/std/math/complex/ldexp.zig b/std/math/complex/ldexp.zig index 7ebefff40c..a56c2ef2eb 100644 --- a/std/math/complex/ldexp.zig +++ b/std/math/complex/ldexp.zig @@ -14,7 +14,7 @@ pub fn ldexp_cexp(z: var, expt: i32) Complex(@typeOf(z.re)) { }; } -fn frexp_exp32(x: f32, expt: &i32) f32 { +fn frexp_exp32(x: f32, expt: *i32) f32 { const k = 235; // reduction constant const kln2 = 162.88958740; // k * ln2 @@ -24,7 +24,7 @@ fn frexp_exp32(x: f32, expt: &i32) f32 { return @bitCast(f32, (hx & 0x7fffff) | ((0x7f + 127) << 23)); } -fn ldexp_cexp32(z: &const Complex(f32), expt: i32) Complex(f32) { +fn ldexp_cexp32(z: *const Complex(f32), expt: i32) Complex(f32) { var ex_expt: i32 = undefined; const exp_x = frexp_exp32(z.re, &ex_expt); const exptf = expt + ex_expt; @@ -38,7 +38,7 @@ fn ldexp_cexp32(z: &const Complex(f32), expt: i32) Complex(f32) { return Complex(f32).new(math.cos(z.im) * exp_x * scale1 * scale2, math.sin(z.im) * exp_x * scale1 * scale2); } 
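Complex(T)'s methods now take self: *const Self, but plain values still coerce to those parameters at the call site (pow.zig later in this patch calls c.mul(p) with values). A small arithmetic check, assuming the namespace is reachable as std.math.complex, which the pow.zig hunk's cmath alias implies:

    const std = @import("std");
    const assert = std.debug.assert;
    const Complex = std.math.complex.Complex;

    test "complex add and mul" {
        const a = Complex(f32).new(1.0, 2.0);
        const b = Complex(f32).new(3.0, -1.0);

        const sum = a.add(b);  // (1 + 2i) + (3 - i) = 4 + i
        const prod = a.mul(b); // (1 + 2i) * (3 - i) = 5 + 5i

        assert(sum.re == 4.0 and sum.im == 1.0);
        assert(prod.re == 5.0 and prod.im == 5.0);
    }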
-fn frexp_exp64(x: f64, expt: &i32) f64 { +fn frexp_exp64(x: f64, expt: *i32) f64 { const k = 1799; // reduction constant const kln2 = 1246.97177782734161156; // k * ln2 @@ -54,7 +54,7 @@ fn frexp_exp64(x: f64, expt: &i32) f64 { return @bitCast(f64, (u64(high_word) << 32) | lx); } -fn ldexp_cexp64(z: &const Complex(f64), expt: i32) Complex(f64) { +fn ldexp_cexp64(z: *const Complex(f64), expt: i32) Complex(f64) { var ex_expt: i32 = undefined; const exp_x = frexp_exp64(z.re, &ex_expt); const exptf = i64(expt + ex_expt); diff --git a/std/math/complex/pow.zig b/std/math/complex/pow.zig index bef9fde542..4c2cd9cf34 100644 --- a/std/math/complex/pow.zig +++ b/std/math/complex/pow.zig @@ -4,7 +4,7 @@ const math = std.math; const cmath = math.complex; const Complex = cmath.Complex; -pub fn pow(comptime T: type, z: &const T, c: &const T) T { +pub fn pow(comptime T: type, z: *const T, c: *const T) T { const p = cmath.log(z); const q = c.mul(p); return cmath.exp(q); diff --git a/std/math/complex/sinh.zig b/std/math/complex/sinh.zig index 09a62ca058..3d196bfd50 100644 --- a/std/math/complex/sinh.zig +++ b/std/math/complex/sinh.zig @@ -15,7 +15,7 @@ pub fn sinh(z: var) Complex(@typeOf(z.re)) { }; } -fn sinh32(z: &const Complex(f32)) Complex(f32) { +fn sinh32(z: *const Complex(f32)) Complex(f32) { const x = z.re; const y = z.im; @@ -78,7 +78,7 @@ fn sinh32(z: &const Complex(f32)) Complex(f32) { return Complex(f32).new((x * x) * (y - y), (x + x) * (y - y)); } -fn sinh64(z: &const Complex(f64)) Complex(f64) { +fn sinh64(z: *const Complex(f64)) Complex(f64) { const x = z.re; const y = z.im; diff --git a/std/math/complex/sqrt.zig b/std/math/complex/sqrt.zig index afda69f7c9..d4f5a67528 100644 --- a/std/math/complex/sqrt.zig +++ b/std/math/complex/sqrt.zig @@ -15,7 +15,7 @@ pub fn sqrt(z: var) Complex(@typeOf(z.re)) { }; } -fn sqrt32(z: &const Complex(f32)) Complex(f32) { +fn sqrt32(z: *const Complex(f32)) Complex(f32) { const x = z.re; const y = z.im; @@ -57,7 +57,7 @@ fn sqrt32(z: &const Complex(f32)) Complex(f32) { } } -fn sqrt64(z: &const Complex(f64)) Complex(f64) { +fn sqrt64(z: *const Complex(f64)) Complex(f64) { // may encounter overflow for im,re >= DBL_MAX / (1 + sqrt(2)) const threshold = 0x1.a827999fcef32p+1022; diff --git a/std/math/complex/tanh.zig b/std/math/complex/tanh.zig index 34250b1b4a..1d754838a3 100644 --- a/std/math/complex/tanh.zig +++ b/std/math/complex/tanh.zig @@ -13,7 +13,7 @@ pub fn tanh(z: var) Complex(@typeOf(z.re)) { }; } -fn tanh32(z: &const Complex(f32)) Complex(f32) { +fn tanh32(z: *const Complex(f32)) Complex(f32) { const x = z.re; const y = z.im; @@ -51,7 +51,7 @@ fn tanh32(z: &const Complex(f32)) Complex(f32) { return Complex(f32).new((beta * rho * s) / den, t / den); } -fn tanh64(z: &const Complex(f64)) Complex(f64) { +fn tanh64(z: *const Complex(f64)) Complex(f64) { const x = z.re; const y = z.im; diff --git a/std/math/hypot.zig b/std/math/hypot.zig index fe0de3a1ea..494df22ba6 100644 --- a/std/math/hypot.zig +++ b/std/math/hypot.zig @@ -52,7 +52,7 @@ fn hypot32(x: f32, y: f32) f32 { return z * math.sqrt(f32(f64(x) * x + f64(y) * y)); } -fn sq(hi: &f64, lo: &f64, x: f64) void { +fn sq(hi: *f64, lo: *f64, x: f64) void { const split: f64 = 0x1.0p27 + 1.0; const xc = x * split; const xh = x - xc + xc; diff --git a/std/math/index.zig b/std/math/index.zig index 847e972500..33bc1082f7 100644 --- a/std/math/index.zig +++ b/std/math/index.zig @@ -46,12 +46,12 @@ pub fn forceEval(value: var) void { switch (T) { f32 => { var x: f32 = undefined; - const p = @ptrCast(&volatile 
f32, &x); + const p = @ptrCast(*volatile f32, &x); p.* = x; }, f64 => { var x: f64 = undefined; - const p = @ptrCast(&volatile f64, &x); + const p = @ptrCast(*volatile f64, &x); p.* = x; }, else => { diff --git a/std/mem.zig b/std/mem.zig index f4696cff9f..aec24e8491 100644 --- a/std/mem.zig +++ b/std/mem.zig @@ -13,7 +13,7 @@ pub const Allocator = struct { /// The returned newly allocated memory is undefined. /// `alignment` is guaranteed to be >= 1 /// `alignment` is guaranteed to be a power of 2 - allocFn: fn (self: &Allocator, byte_count: usize, alignment: u29) Error![]u8, + allocFn: fn (self: *Allocator, byte_count: usize, alignment: u29) Error![]u8, /// If `new_byte_count > old_mem.len`: /// * `old_mem.len` is the same as what was returned from allocFn or reallocFn. @@ -26,22 +26,22 @@ pub const Allocator = struct { /// The returned newly allocated memory is undefined. /// `alignment` is guaranteed to be >= 1 /// `alignment` is guaranteed to be a power of 2 - reallocFn: fn (self: &Allocator, old_mem: []u8, new_byte_count: usize, alignment: u29) Error![]u8, + reallocFn: fn (self: *Allocator, old_mem: []u8, new_byte_count: usize, alignment: u29) Error![]u8, /// Guaranteed: `old_mem.len` is the same as what was returned from `allocFn` or `reallocFn` - freeFn: fn (self: &Allocator, old_mem: []u8) void, + freeFn: fn (self: *Allocator, old_mem: []u8) void, - fn create(self: &Allocator, comptime T: type) !&T { - if (@sizeOf(T) == 0) return &{}; + fn create(self: *Allocator, comptime T: type) !*T { + if (@sizeOf(T) == 0) return &{}; const slice = try self.alloc(T, 1); return &slice[0]; } // TODO once #733 is solved, this will replace create - fn construct(self: &Allocator, init: var) t: { + fn construct(self: *Allocator, init: var) t: { // TODO this is a workaround for type getting parsed as Error!&const T const T = @typeOf(init).Child; - break :t Error!&T; + break :t Error!*T; } { const T = @typeOf(init).Child; if (@sizeOf(T) == 0) return &{}; @@ -51,17 +51,17 @@ pub const Allocator = struct { return ptr; } - fn destroy(self: &Allocator, ptr: var) void { + fn destroy(self: *Allocator, ptr: var) void { self.free(ptr[0..1]); } - fn alloc(self: &Allocator, comptime T: type, n: usize) ![]T { + fn alloc(self: *Allocator, comptime T: type, n: usize) ![]T { return self.alignedAlloc(T, @alignOf(T), n); } - fn alignedAlloc(self: &Allocator, comptime T: type, comptime alignment: u29, n: usize) ![]align(alignment) T { + fn alignedAlloc(self: *Allocator, comptime T: type, comptime alignment: u29, n: usize) ![]align(alignment) T { if (n == 0) { - return (&align(alignment) T)(undefined)[0..0]; + return (*align(alignment) T)(undefined)[0..0]; } const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory; const byte_slice = try self.allocFn(self, byte_count, alignment); @@ -73,17 +73,17 @@ pub const Allocator = struct { return ([]align(alignment) T)(@alignCast(alignment, byte_slice)); } - fn realloc(self: &Allocator, comptime T: type, old_mem: []T, n: usize) ![]T { + fn realloc(self: *Allocator, comptime T: type, old_mem: []T, n: usize) ![]T { return self.alignedRealloc(T, @alignOf(T), @alignCast(@alignOf(T), old_mem), n); } - fn alignedRealloc(self: &Allocator, comptime T: type, comptime alignment: u29, old_mem: []align(alignment) T, n: usize) ![]align(alignment) T { + fn alignedRealloc(self: *Allocator, comptime T: type, comptime alignment: u29, old_mem: []align(alignment) T, n: usize) ![]align(alignment) T { if (old_mem.len == 0) { return self.alloc(T, n); } if (n == 0) {
self.free(old_mem); - return (&align(alignment) T)(undefined)[0..0]; + return (*align(alignment) T)(undefined)[0..0]; } const old_byte_slice = ([]u8)(old_mem); @@ -102,11 +102,11 @@ pub const Allocator = struct { /// Reallocate, but `n` must be less than or equal to `old_mem.len`. /// Unlike `realloc`, this function cannot fail. /// Shrinking to 0 is the same as calling `free`. - fn shrink(self: &Allocator, comptime T: type, old_mem: []T, n: usize) []T { + fn shrink(self: *Allocator, comptime T: type, old_mem: []T, n: usize) []T { return self.alignedShrink(T, @alignOf(T), @alignCast(@alignOf(T), old_mem), n); } - fn alignedShrink(self: &Allocator, comptime T: type, comptime alignment: u29, old_mem: []align(alignment) T, n: usize) []align(alignment) T { + fn alignedShrink(self: *Allocator, comptime T: type, comptime alignment: u29, old_mem: []align(alignment) T, n: usize) []align(alignment) T { if (n == 0) { self.free(old_mem); return old_mem[0..0]; @@ -123,10 +123,10 @@ pub const Allocator = struct { return ([]align(alignment) T)(@alignCast(alignment, byte_slice)); } - fn free(self: &Allocator, memory: var) void { + fn free(self: *Allocator, memory: var) void { const bytes = ([]const u8)(memory); if (bytes.len == 0) return; - const non_const_ptr = @intToPtr(&u8, @ptrToInt(bytes.ptr)); + const non_const_ptr = @intToPtr(*u8, @ptrToInt(bytes.ptr)); self.freeFn(self, non_const_ptr[0..bytes.len]); } }; @@ -186,7 +186,7 @@ pub fn allEqual(comptime T: type, slice: []const T, scalar: T) bool { } /// Copies ::m to newly allocated memory. Caller is responsible to free it. -pub fn dupe(allocator: &Allocator, comptime T: type, m: []const T) ![]T { +pub fn dupe(allocator: *Allocator, comptime T: type, m: []const T) ![]T { const new_buf = try allocator.alloc(T, m.len); copy(T, new_buf, m); return new_buf; @@ -457,7 +457,7 @@ pub const SplitIterator = struct { split_bytes: []const u8, index: usize, - pub fn next(self: &SplitIterator) ?[]const u8 { + pub fn next(self: *SplitIterator) ?[]const u8 { // move to beginning of token while (self.index < self.buffer.len and self.isSplitByte(self.buffer[self.index])) : (self.index += 1) {} const start = self.index; @@ -473,14 +473,14 @@ pub const SplitIterator = struct { } /// Returns a slice of the remaining bytes. Does not affect iterator state. - pub fn rest(self: &const SplitIterator) []const u8 { + pub fn rest(self: *const SplitIterator) []const u8 { // move to beginning of token var index: usize = self.index; while (index < self.buffer.len and self.isSplitByte(self.buffer[index])) : (index += 1) {} return self.buffer[index..]; } - fn isSplitByte(self: &const SplitIterator, byte: u8) bool { + fn isSplitByte(self: *const SplitIterator, byte: u8) bool { for (self.split_bytes) |split_byte| { if (byte == split_byte) { return true; @@ -492,7 +492,7 @@ pub const SplitIterator = struct { /// Naively combines a series of strings with a separator. /// Allocates memory for the result, which must be freed by the caller. -pub fn join(allocator: &Allocator, sep: u8, strings: ...) ![]u8 { +pub fn join(allocator: *Allocator, sep: u8, strings: ...) 
![]u8 { comptime assert(strings.len >= 1); var total_strings_len: usize = strings.len; // 1 sep per string { @@ -649,7 +649,7 @@ test "mem.max" { assert(max(u8, "abcdefg") == 'g'); } -pub fn swap(comptime T: type, a: &T, b: &T) void { +pub fn swap(comptime T: type, a: *T, b: *T) void { const tmp = a.*; a.* = b.*; b.* = tmp; diff --git a/std/net.zig b/std/net.zig index 3af4e0b525..bfe4b1c2a0 100644 --- a/std/net.zig +++ b/std/net.zig @@ -31,7 +31,7 @@ pub const Address = struct { }; } - pub fn initIp6(ip6: &const Ip6Addr, port: u16) Address { + pub fn initIp6(ip6: *const Ip6Addr, port: u16) Address { return Address{ .family = posix.AF_INET6, .os_addr = posix.sockaddr{ @@ -46,15 +46,15 @@ pub const Address = struct { }; } - pub fn initPosix(addr: &const posix.sockaddr) Address { + pub fn initPosix(addr: *const posix.sockaddr) Address { return Address{ .os_addr = addr.* }; } - pub fn format(self: &const Address, out_stream: var) !void { + pub fn format(self: *const Address, out_stream: var) !void { switch (self.os_addr.in.family) { posix.AF_INET => { const native_endian_port = std.mem.endianSwapIfLe(u16, self.os_addr.in.port); - const bytes = ([]const u8)((&self.os_addr.in.addr)[0..1]); + const bytes = ([]const u8)((&self.os_addr.in.addr)[0..1]); try out_stream.print("{}.{}.{}.{}:{}", bytes[0], bytes[1], bytes[2], bytes[3], native_endian_port); }, posix.AF_INET6 => { diff --git a/std/os/child_process.zig b/std/os/child_process.zig index 51f1bd96e5..30a2fd1408 100644 --- a/std/os/child_process.zig +++ b/std/os/child_process.zig @@ -20,7 +20,7 @@ pub const ChildProcess = struct { pub handle: if (is_windows) windows.HANDLE else void, pub thread_handle: if (is_windows) windows.HANDLE else void, - pub allocator: &mem.Allocator, + pub allocator: *mem.Allocator, pub stdin: ?os.File, pub stdout: ?os.File, @@ -31,7 +31,7 @@ pub const ChildProcess = struct { pub argv: []const []const u8, /// Leave as null to use the current env map using the supplied allocator. - pub env_map: ?&const BufMap, + pub env_map: ?*const BufMap, pub stdin_behavior: StdIo, pub stdout_behavior: StdIo, @@ -47,7 +47,7 @@ pub const ChildProcess = struct { pub cwd: ?[]const u8, err_pipe: if (is_windows) void else [2]i32, - llnode: if (is_windows) void else LinkedList(&ChildProcess).Node, + llnode: if (is_windows) void else LinkedList(*ChildProcess).Node, pub const SpawnError = error{ ProcessFdQuotaExceeded, @@ -84,7 +84,7 @@ pub const ChildProcess = struct { /// First argument in argv is the executable. /// On success must call deinit. - pub fn init(argv: []const []const u8, allocator: &mem.Allocator) !&ChildProcess { + pub fn init(argv: []const []const u8, allocator: *mem.Allocator) !*ChildProcess { const child = try allocator.create(ChildProcess); errdefer allocator.destroy(child); @@ -114,14 +114,14 @@ pub const ChildProcess = struct { return child; } - pub fn setUserName(self: &ChildProcess, name: []const u8) !void { + pub fn setUserName(self: *ChildProcess, name: []const u8) !void { const user_info = try os.getUserInfo(name); self.uid = user_info.uid; self.gid = user_info.gid; } /// On success must call `kill` or `wait`.
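ChildProcess keeps its init -> spawn/spawnAndWait -> deinit life cycle; these hunks only change the pointer spellings. A sketch of running a program and inspecting the Term result, assuming the struct is exported as std.os.ChildProcess and that Term is its public tagged-union result type; the /bin/true path is only a placeholder:

    const std = @import("std");
    const ChildProcess = std.os.ChildProcess; // assumed public path

    fn runAndReport(allocator: *std.mem.Allocator) !void {
        const argv = [][]const u8{"/bin/true"}; // placeholder executable
        var child = try ChildProcess.init(argv[0..], allocator); // init returns !*ChildProcess
        defer child.deinit();

        const term = try child.spawnAndWait();
        switch (term) {
            ChildProcess.Term.Exited => |code| std.debug.warn("exited with code {}\n", code),
            else => std.debug.warn("terminated abnormally\n"),
        }
    }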
- pub fn spawn(self: &ChildProcess) !void { + pub fn spawn(self: *ChildProcess) !void { if (is_windows) { return self.spawnWindows(); } else { @@ -129,13 +129,13 @@ pub const ChildProcess = struct { } } - pub fn spawnAndWait(self: &ChildProcess) !Term { + pub fn spawnAndWait(self: *ChildProcess) !Term { try self.spawn(); return self.wait(); } /// Forcibly terminates child process and then cleans up all resources. - pub fn kill(self: &ChildProcess) !Term { + pub fn kill(self: *ChildProcess) !Term { if (is_windows) { return self.killWindows(1); } else { @@ -143,7 +143,7 @@ pub const ChildProcess = struct { } } - pub fn killWindows(self: &ChildProcess, exit_code: windows.UINT) !Term { + pub fn killWindows(self: *ChildProcess, exit_code: windows.UINT) !Term { if (self.term) |term| { self.cleanupStreams(); return term; @@ -159,7 +159,7 @@ pub const ChildProcess = struct { return ??self.term; } - pub fn killPosix(self: &ChildProcess) !Term { + pub fn killPosix(self: *ChildProcess) !Term { if (self.term) |term| { self.cleanupStreams(); return term; @@ -179,7 +179,7 @@ pub const ChildProcess = struct { } /// Blocks until child process terminates and then cleans up all resources. - pub fn wait(self: &ChildProcess) !Term { + pub fn wait(self: *ChildProcess) !Term { if (is_windows) { return self.waitWindows(); } else { @@ -195,7 +195,7 @@ pub const ChildProcess = struct { /// Spawns a child process, waits for it, collecting stdout and stderr, and then returns. /// If it succeeds, the caller owns result.stdout and result.stderr memory. - pub fn exec(allocator: &mem.Allocator, argv: []const []const u8, cwd: ?[]const u8, env_map: ?&const BufMap, max_output_size: usize) !ExecResult { + pub fn exec(allocator: *mem.Allocator, argv: []const []const u8, cwd: ?[]const u8, env_map: ?*const BufMap, max_output_size: usize) !ExecResult { const child = try ChildProcess.init(argv, allocator); defer child.deinit(); @@ -225,7 +225,7 @@ pub const ChildProcess = struct { }; } - fn waitWindows(self: &ChildProcess) !Term { + fn waitWindows(self: *ChildProcess) !Term { if (self.term) |term| { self.cleanupStreams(); return term; @@ -235,7 +235,7 @@ pub const ChildProcess = struct { return ??self.term; } - fn waitPosix(self: &ChildProcess) !Term { + fn waitPosix(self: *ChildProcess) !Term { if (self.term) |term| { self.cleanupStreams(); return term; @@ -245,16 +245,16 @@ pub const ChildProcess = struct { return ??self.term; } - pub fn deinit(self: &ChildProcess) void { + pub fn deinit(self: *ChildProcess) void { self.allocator.destroy(self); } - fn waitUnwrappedWindows(self: &ChildProcess) !void { + fn waitUnwrappedWindows(self: *ChildProcess) !void { const result = os.windowsWaitSingle(self.handle, windows.INFINITE); self.term = (SpawnError!Term)(x: { var exit_code: windows.DWORD = undefined; - if (windows.GetExitCodeProcess(self.handle, &exit_code) == 0) { + if (windows.GetExitCodeProcess(self.handle, &exit_code) == 0) { break :x Term{ .Unknown = 0 }; } else { break :x Term{ .Exited = @bitCast(i32, exit_code) }; @@ -267,7 +267,7 @@ pub const ChildProcess = struct { return result; } - fn waitUnwrapped(self: &ChildProcess) void { + fn waitUnwrapped(self: *ChildProcess) void { var status: i32 = undefined; while (true) { const err = posix.getErrno(posix.waitpid(self.pid, &status, 0)); @@ -283,11 +283,11 @@ pub const ChildProcess = struct { } } - fn handleWaitResult(self: &ChildProcess, status: i32) void { + fn handleWaitResult(self: *ChildProcess, status: i32) void { self.term = self.cleanupAfterWait(status); } - fn 
cleanupStreams(self: &ChildProcess) void { + fn cleanupStreams(self: *ChildProcess) void { if (self.stdin) |*stdin| { stdin.close(); self.stdin = null; @@ -302,7 +302,7 @@ pub const ChildProcess = struct { } } - fn cleanupAfterWait(self: &ChildProcess, status: i32) !Term { + fn cleanupAfterWait(self: *ChildProcess, status: i32) !Term { defer { os.close(self.err_pipe[0]); os.close(self.err_pipe[1]); @@ -335,7 +335,7 @@ pub const ChildProcess = struct { Term{ .Unknown = status }; } - fn spawnPosix(self: &ChildProcess) !void { + fn spawnPosix(self: *ChildProcess) !void { const stdin_pipe = if (self.stdin_behavior == StdIo.Pipe) try makePipe() else undefined; errdefer if (self.stdin_behavior == StdIo.Pipe) { destroyPipe(stdin_pipe); @@ -432,7 +432,7 @@ pub const ChildProcess = struct { self.pid = pid; self.err_pipe = err_pipe; - self.llnode = LinkedList(&ChildProcess).Node.init(self); + self.llnode = LinkedList(*ChildProcess).Node.init(self); self.term = null; if (self.stdin_behavior == StdIo.Pipe) { @@ -446,7 +446,7 @@ pub const ChildProcess = struct { } } - fn spawnWindows(self: &ChildProcess) !void { + fn spawnWindows(self: *ChildProcess) !void { const saAttr = windows.SECURITY_ATTRIBUTES{ .nLength = @sizeOf(windows.SECURITY_ATTRIBUTES), .bInheritHandle = windows.TRUE, @@ -639,8 +639,8 @@ pub const ChildProcess = struct { } }; -fn windowsCreateProcess(app_name: &u8, cmd_line: &u8, envp_ptr: ?&u8, cwd_ptr: ?&u8, lpStartupInfo: &windows.STARTUPINFOA, lpProcessInformation: &windows.PROCESS_INFORMATION) !void { - if (windows.CreateProcessA(app_name, cmd_line, null, null, windows.TRUE, 0, @ptrCast(?&c_void, envp_ptr), cwd_ptr, lpStartupInfo, lpProcessInformation) == 0) { +fn windowsCreateProcess(app_name: *u8, cmd_line: *u8, envp_ptr: ?*u8, cwd_ptr: ?*u8, lpStartupInfo: *windows.STARTUPINFOA, lpProcessInformation: *windows.PROCESS_INFORMATION) !void { + if (windows.CreateProcessA(app_name, cmd_line, null, null, windows.TRUE, 0, @ptrCast(?*c_void, envp_ptr), cwd_ptr, lpStartupInfo, lpProcessInformation) == 0) { const err = windows.GetLastError(); return switch (err) { windows.ERROR.FILE_NOT_FOUND, windows.ERROR.PATH_NOT_FOUND => error.FileNotFound, @@ -653,7 +653,7 @@ fn windowsCreateProcess(app_name: &u8, cmd_line: &u8, envp_ptr: ?&u8, cwd_ptr: ? /// Caller must dealloc. /// Guarantees a null byte at result[result.len]. 
-fn windowsCreateCommandLine(allocator: &mem.Allocator, argv: []const []const u8) ![]u8 { +fn windowsCreateCommandLine(allocator: *mem.Allocator, argv: []const []const u8) ![]u8 { var buf = try Buffer.initSize(allocator, 0); defer buf.deinit(); @@ -698,7 +698,7 @@ fn windowsDestroyPipe(rd: ?windows.HANDLE, wr: ?windows.HANDLE) void { // a namespace field lookup const SECURITY_ATTRIBUTES = windows.SECURITY_ATTRIBUTES; -fn windowsMakePipe(rd: &windows.HANDLE, wr: &windows.HANDLE, sattr: &const SECURITY_ATTRIBUTES) !void { +fn windowsMakePipe(rd: *windows.HANDLE, wr: *windows.HANDLE, sattr: *const SECURITY_ATTRIBUTES) !void { if (windows.CreatePipe(rd, wr, sattr, 0) == 0) { const err = windows.GetLastError(); return switch (err) { @@ -716,7 +716,7 @@ fn windowsSetHandleInfo(h: windows.HANDLE, mask: windows.DWORD, flags: windows.D } } -fn windowsMakePipeIn(rd: &?windows.HANDLE, wr: &?windows.HANDLE, sattr: &const SECURITY_ATTRIBUTES) !void { +fn windowsMakePipeIn(rd: *?windows.HANDLE, wr: *?windows.HANDLE, sattr: *const SECURITY_ATTRIBUTES) !void { var rd_h: windows.HANDLE = undefined; var wr_h: windows.HANDLE = undefined; try windowsMakePipe(&rd_h, &wr_h, sattr); @@ -726,7 +726,7 @@ fn windowsMakePipeIn(rd: &?windows.HANDLE, wr: &?windows.HANDLE, sattr: &const S wr.* = wr_h; } -fn windowsMakePipeOut(rd: &?windows.HANDLE, wr: &?windows.HANDLE, sattr: &const SECURITY_ATTRIBUTES) !void { +fn windowsMakePipeOut(rd: *?windows.HANDLE, wr: *?windows.HANDLE, sattr: *const SECURITY_ATTRIBUTES) !void { var rd_h: windows.HANDLE = undefined; var wr_h: windows.HANDLE = undefined; try windowsMakePipe(&rd_h, &wr_h, sattr); @@ -748,7 +748,7 @@ fn makePipe() ![2]i32 { return fds; } -fn destroyPipe(pipe: &const [2]i32) void { +fn destroyPipe(pipe: *const [2]i32) void { os.close((pipe.*)[0]); os.close((pipe.*)[1]); } diff --git a/std/os/darwin.zig b/std/os/darwin.zig index a3fc230ac5..77e8b6bb6a 100644 --- a/std/os/darwin.zig +++ b/std/os/darwin.zig @@ -309,7 +309,7 @@ pub fn isatty(fd: i32) bool { return c.isatty(fd) != 0; } -pub fn fstat(fd: i32, buf: &c.Stat) usize { +pub fn fstat(fd: i32, buf: *c.Stat) usize { return errnoWrap(c.@"fstat$INODE64"(fd, buf)); } @@ -317,7 +317,7 @@ pub fn lseek(fd: i32, offset: isize, whence: c_int) usize { return errnoWrap(c.lseek(fd, offset, whence)); } -pub fn open(path: &const u8, flags: u32, mode: usize) usize { +pub fn open(path: *const u8, flags: u32, mode: usize) usize { return errnoWrap(c.open(path, @bitCast(c_int, flags), mode)); } @@ -325,79 +325,79 @@ pub fn raise(sig: i32) usize { return errnoWrap(c.raise(sig)); } -pub fn read(fd: i32, buf: &u8, nbyte: usize) usize { - return errnoWrap(c.read(fd, @ptrCast(&c_void, buf), nbyte)); +pub fn read(fd: i32, buf: *u8, nbyte: usize) usize { + return errnoWrap(c.read(fd, @ptrCast(*c_void, buf), nbyte)); } -pub fn stat(noalias path: &const u8, noalias buf: &stat) usize { +pub fn stat(noalias path: *const u8, noalias buf: *stat) usize { return errnoWrap(c.stat(path, buf)); } -pub fn write(fd: i32, buf: &const u8, nbyte: usize) usize { - return errnoWrap(c.write(fd, @ptrCast(&const c_void, buf), nbyte)); +pub fn write(fd: i32, buf: *const u8, nbyte: usize) usize { + return errnoWrap(c.write(fd, @ptrCast(*const c_void, buf), nbyte)); } -pub fn mmap(address: ?&u8, length: usize, prot: usize, flags: u32, fd: i32, offset: isize) usize { - const ptr_result = c.mmap(@ptrCast(&c_void, address), length, @bitCast(c_int, c_uint(prot)), @bitCast(c_int, c_uint(flags)), fd, offset); +pub fn mmap(address: ?*u8, length: usize, prot: usize, 
flags: u32, fd: i32, offset: isize) usize { + const ptr_result = c.mmap(@ptrCast(*c_void, address), length, @bitCast(c_int, c_uint(prot)), @bitCast(c_int, c_uint(flags)), fd, offset); const isize_result = @bitCast(isize, @ptrToInt(ptr_result)); return errnoWrap(isize_result); } pub fn munmap(address: usize, length: usize) usize { - return errnoWrap(c.munmap(@intToPtr(&c_void, address), length)); + return errnoWrap(c.munmap(@intToPtr(*c_void, address), length)); } -pub fn unlink(path: &const u8) usize { +pub fn unlink(path: *const u8) usize { return errnoWrap(c.unlink(path)); } -pub fn getcwd(buf: &u8, size: usize) usize { +pub fn getcwd(buf: *u8, size: usize) usize { return if (c.getcwd(buf, size) == null) @bitCast(usize, -isize(c._errno().*)) else 0; } -pub fn waitpid(pid: i32, status: &i32, options: u32) usize { +pub fn waitpid(pid: i32, status: *i32, options: u32) usize { comptime assert(i32.bit_count == c_int.bit_count); - return errnoWrap(c.waitpid(pid, @ptrCast(&c_int, status), @bitCast(c_int, options))); + return errnoWrap(c.waitpid(pid, @ptrCast(*c_int, status), @bitCast(c_int, options))); } pub fn fork() usize { return errnoWrap(c.fork()); } -pub fn access(path: &const u8, mode: u32) usize { +pub fn access(path: *const u8, mode: u32) usize { return errnoWrap(c.access(path, mode)); } -pub fn pipe(fds: &[2]i32) usize { +pub fn pipe(fds: *[2]i32) usize { comptime assert(i32.bit_count == c_int.bit_count); - return errnoWrap(c.pipe(@ptrCast(&c_int, fds))); + return errnoWrap(c.pipe(@ptrCast(*c_int, fds))); } -pub fn getdirentries64(fd: i32, buf_ptr: &u8, buf_len: usize, basep: &i64) usize { +pub fn getdirentries64(fd: i32, buf_ptr: *u8, buf_len: usize, basep: *i64) usize { return errnoWrap(@bitCast(isize, c.__getdirentries64(fd, buf_ptr, buf_len, basep))); } -pub fn mkdir(path: &const u8, mode: u32) usize { +pub fn mkdir(path: *const u8, mode: u32) usize { return errnoWrap(c.mkdir(path, mode)); } -pub fn symlink(existing: &const u8, new: &const u8) usize { +pub fn symlink(existing: *const u8, new: *const u8) usize { return errnoWrap(c.symlink(existing, new)); } -pub fn rename(old: &const u8, new: &const u8) usize { +pub fn rename(old: *const u8, new: *const u8) usize { return errnoWrap(c.rename(old, new)); } -pub fn rmdir(path: &const u8) usize { +pub fn rmdir(path: *const u8) usize { return errnoWrap(c.rmdir(path)); } -pub fn chdir(path: &const u8) usize { +pub fn chdir(path: *const u8) usize { return errnoWrap(c.chdir(path)); } -pub fn execve(path: &const u8, argv: &const ?&const u8, envp: &const ?&const u8) usize { +pub fn execve(path: *const u8, argv: *const ?*const u8, envp: *const ?*const u8) usize { return errnoWrap(c.execve(path, argv, envp)); } @@ -405,19 +405,19 @@ pub fn dup2(old: i32, new: i32) usize { return errnoWrap(c.dup2(old, new)); } -pub fn readlink(noalias path: &const u8, noalias buf_ptr: &u8, buf_len: usize) usize { +pub fn readlink(noalias path: *const u8, noalias buf_ptr: *u8, buf_len: usize) usize { return errnoWrap(c.readlink(path, buf_ptr, buf_len)); } -pub fn gettimeofday(tv: ?&timeval, tz: ?&timezone) usize { +pub fn gettimeofday(tv: ?*timeval, tz: ?*timezone) usize { return errnoWrap(c.gettimeofday(tv, tz)); } -pub fn nanosleep(req: &const timespec, rem: ?&timespec) usize { +pub fn nanosleep(req: *const timespec, rem: ?*timespec) usize { return errnoWrap(c.nanosleep(req, rem)); } -pub fn realpath(noalias filename: &const u8, noalias resolved_name: &u8) usize { +pub fn realpath(noalias filename: *const u8, noalias resolved_name: *u8) usize { return if 
(c.realpath(filename, resolved_name) == null) @bitCast(usize, -isize(c._errno().*)) else 0; } @@ -429,11 +429,11 @@ pub fn setregid(rgid: u32, egid: u32) usize { return errnoWrap(c.setregid(rgid, egid)); } -pub fn sigprocmask(flags: u32, noalias set: &const sigset_t, noalias oldset: ?&sigset_t) usize { +pub fn sigprocmask(flags: u32, noalias set: *const sigset_t, noalias oldset: ?*sigset_t) usize { return errnoWrap(c.sigprocmask(@bitCast(c_int, flags), set, oldset)); } -pub fn sigaction(sig: u5, noalias act: &const Sigaction, noalias oact: ?&Sigaction) usize { +pub fn sigaction(sig: u5, noalias act: *const Sigaction, noalias oact: ?*Sigaction) usize { assert(sig != SIGKILL); assert(sig != SIGSTOP); var cact = c.Sigaction{ @@ -442,7 +442,7 @@ pub fn sigaction(sig: u5, noalias act: &const Sigaction, noalias oact: ?&Sigacti .sa_mask = act.mask, }; var coact: c.Sigaction = undefined; - const result = errnoWrap(c.sigaction(sig, &cact, &coact)); + const result = errnoWrap(c.sigaction(sig, &cact, &coact)); if (result != 0) { return result; } @@ -473,7 +473,7 @@ pub const Sigaction = struct { flags: u32, }; -pub fn sigaddset(set: &sigset_t, signo: u5) void { +pub fn sigaddset(set: *sigset_t, signo: u5) void { set.* |= u32(1) << (signo - 1); } diff --git a/std/os/file.zig b/std/os/file.zig index c07e2c5c8b..d943da30ca 100644 --- a/std/os/file.zig +++ b/std/os/file.zig @@ -19,7 +19,7 @@ pub const File = struct { /// `path` needs to be copied in memory to add a null terminating byte, hence the allocator. /// Call close to clean up. - pub fn openRead(allocator: &mem.Allocator, path: []const u8) OpenError!File { + pub fn openRead(allocator: *mem.Allocator, path: []const u8) OpenError!File { if (is_posix) { const flags = posix.O_LARGEFILE | posix.O_RDONLY; const fd = try os.posixOpen(allocator, path, flags, 0); @@ -40,7 +40,7 @@ pub const File = struct { } /// Calls `openWriteMode` with os.default_file_mode for the mode. - pub fn openWrite(allocator: &mem.Allocator, path: []const u8) OpenError!File { + pub fn openWrite(allocator: *mem.Allocator, path: []const u8) OpenError!File { return openWriteMode(allocator, path, os.default_file_mode); } @@ -48,7 +48,7 @@ pub const File = struct { /// If a file already exists in the destination it will be truncated. /// `path` needs to be copied in memory to add a null terminating byte, hence the allocator. /// Call close to clean up. - pub fn openWriteMode(allocator: &mem.Allocator, path: []const u8, file_mode: os.FileMode) OpenError!File { + pub fn openWriteMode(allocator: *mem.Allocator, path: []const u8, file_mode: os.FileMode) OpenError!File { if (is_posix) { const flags = posix.O_LARGEFILE | posix.O_WRONLY | posix.O_CREAT | posix.O_CLOEXEC | posix.O_TRUNC; const fd = try os.posixOpen(allocator, path, flags, file_mode); @@ -72,7 +72,7 @@ pub const File = struct { /// If a file already exists in the destination this returns OpenError.PathAlreadyExists /// `path` needs to be copied in memory to add a null terminating byte, hence the allocator. /// Call close to clean up. 
- pub fn openWriteNoClobber(allocator: &mem.Allocator, path: []const u8, file_mode: os.FileMode) OpenError!File { + pub fn openWriteNoClobber(allocator: *mem.Allocator, path: []const u8, file_mode: os.FileMode) OpenError!File { if (is_posix) { const flags = posix.O_LARGEFILE | posix.O_WRONLY | posix.O_CREAT | posix.O_CLOEXEC | posix.O_EXCL; const fd = try os.posixOpen(allocator, path, flags, file_mode); @@ -96,7 +96,7 @@ pub const File = struct { return File{ .handle = handle }; } - pub fn access(allocator: &mem.Allocator, path: []const u8, file_mode: os.FileMode) !bool { + pub fn access(allocator: *mem.Allocator, path: []const u8, file_mode: os.FileMode) !bool { const path_with_null = try std.cstr.addNullByte(allocator, path); defer allocator.free(path_with_null); @@ -140,17 +140,17 @@ pub const File = struct { /// Upon success, the stream is in an uninitialized state. To continue using it, /// you must use the open() function. - pub fn close(self: &File) void { + pub fn close(self: *File) void { os.close(self.handle); self.handle = undefined; } /// Calls `os.isTty` on `self.handle`. - pub fn isTty(self: &File) bool { + pub fn isTty(self: *File) bool { return os.isTty(self.handle); } - pub fn seekForward(self: &File, amount: isize) !void { + pub fn seekForward(self: *File, amount: isize) !void { switch (builtin.os) { Os.linux, Os.macosx, Os.ios => { const result = posix.lseek(self.handle, amount, posix.SEEK_CUR); @@ -179,7 +179,7 @@ pub const File = struct { } } - pub fn seekTo(self: &File, pos: usize) !void { + pub fn seekTo(self: *File, pos: usize) !void { switch (builtin.os) { Os.linux, Os.macosx, Os.ios => { const ipos = try math.cast(isize, pos); @@ -210,7 +210,7 @@ pub const File = struct { } } - pub fn getPos(self: &File) !usize { + pub fn getPos(self: *File) !usize { switch (builtin.os) { Os.linux, Os.macosx, Os.ios => { const result = posix.lseek(self.handle, 0, posix.SEEK_CUR); @@ -229,7 +229,7 @@ pub const File = struct { }, Os.windows => { var pos: windows.LARGE_INTEGER = undefined; - if (windows.SetFilePointerEx(self.handle, 0, &pos, windows.FILE_CURRENT) == 0) { + if (windows.SetFilePointerEx(self.handle, 0, &pos, windows.FILE_CURRENT) == 0) { const err = windows.GetLastError(); return switch (err) { windows.ERROR.INVALID_PARAMETER => error.BadFd, @@ -250,7 +250,7 @@ pub const File = struct { } } - pub fn getEndPos(self: &File) !usize { + pub fn getEndPos(self: *File) !usize { if (is_posix) { var stat: posix.Stat = undefined; const err = posix.getErrno(posix.fstat(self.handle, &stat)); @@ -285,7 +285,7 @@ pub const File = struct { Unexpected, }; - fn mode(self: &File) ModeError!os.FileMode { + fn mode(self: *File) ModeError!os.FileMode { if (is_posix) { var stat: posix.Stat = undefined; const err = posix.getErrno(posix.fstat(self.handle, &stat)); @@ -309,7 +309,7 @@ pub const File = struct { pub const ReadError = error{}; - pub fn read(self: &File, buffer: []u8) !usize { + pub fn read(self: *File, buffer: []u8) !usize { if (is_posix) { var index: usize = 0; while (index < buffer.len) { @@ -334,7 +334,7 @@ pub const File = struct { while (index < buffer.len) { const want_read_count = windows.DWORD(math.min(windows.DWORD(@maxValue(windows.DWORD)), buffer.len - index)); var amt_read: windows.DWORD = undefined; - if (windows.ReadFile(self.handle, @ptrCast(&c_void, &buffer[index]), want_read_count, &amt_read, null) == 0) { + if (windows.ReadFile(self.handle, @ptrCast(*c_void, &buffer[index]), want_read_count, &amt_read, null) == 0) { const err = windows.GetLastError(); return 
switch (err) { windows.ERROR.OPERATION_ABORTED => continue, @@ -353,7 +353,7 @@ pub const File = struct { pub const WriteError = os.WindowsWriteError || os.PosixWriteError; - fn write(self: &File, bytes: []const u8) WriteError!void { + fn write(self: *File, bytes: []const u8) WriteError!void { if (is_posix) { try os.posixWrite(self.handle, bytes); } else if (is_windows) { diff --git a/std/os/get_user_id.zig b/std/os/get_user_id.zig index 2a15e1d495..c0c1b1cc4b 100644 --- a/std/os/get_user_id.zig +++ b/std/os/get_user_id.zig @@ -77,8 +77,8 @@ pub fn posixGetUserInfo(name: []const u8) !UserInfo { '0'...'9' => byte - '0', else => return error.CorruptPasswordFile, }; - if (@mulWithOverflow(u32, uid, 10, &uid)) return error.CorruptPasswordFile; - if (@addWithOverflow(u32, uid, digit, &uid)) return error.CorruptPasswordFile; + if (@mulWithOverflow(u32, uid, 10, &uid)) return error.CorruptPasswordFile; + if (@addWithOverflow(u32, uid, digit, &uid)) return error.CorruptPasswordFile; }, }, State.ReadGroupId => switch (byte) { @@ -93,8 +93,8 @@ pub fn posixGetUserInfo(name: []const u8) !UserInfo { '0'...'9' => byte - '0', else => return error.CorruptPasswordFile, }; - if (@mulWithOverflow(u32, gid, 10, &gid)) return error.CorruptPasswordFile; - if (@addWithOverflow(u32, gid, digit, &gid)) return error.CorruptPasswordFile; + if (@mulWithOverflow(u32, gid, 10, &gid)) return error.CorruptPasswordFile; + if (@addWithOverflow(u32, gid, digit, &gid)) return error.CorruptPasswordFile; }, }, } diff --git a/std/os/index.zig b/std/os/index.zig index 70e654bcd9..ff638c670b 100644 --- a/std/os/index.zig +++ b/std/os/index.zig @@ -321,14 +321,14 @@ pub const PosixOpenError = error{ /// ::file_path needs to be copied in memory to add a null terminating byte. /// Calls POSIX open, keeps trying if it gets interrupted, and translates /// the return value into zig errors. -pub fn posixOpen(allocator: &Allocator, file_path: []const u8, flags: u32, perm: usize) PosixOpenError!i32 { +pub fn posixOpen(allocator: *Allocator, file_path: []const u8, flags: u32, perm: usize) PosixOpenError!i32 { const path_with_null = try cstr.addNullByte(allocator, file_path); defer allocator.free(path_with_null); return posixOpenC(path_with_null.ptr, flags, perm); } -pub fn posixOpenC(file_path: &const u8, flags: u32, perm: usize) !i32 { +pub fn posixOpenC(file_path: *const u8, flags: u32, perm: usize) !i32 { while (true) { const result = posix.open(file_path, flags, perm); const err = posix.getErrno(result); @@ -374,10 +374,10 @@ pub fn posixDup2(old_fd: i32, new_fd: i32) !void { } } -pub fn createNullDelimitedEnvMap(allocator: &Allocator, env_map: &const BufMap) ![]?&u8 { +pub fn createNullDelimitedEnvMap(allocator: *Allocator, env_map: *const BufMap) ![]?*u8 { const envp_count = env_map.count(); - const envp_buf = try allocator.alloc(?&u8, envp_count + 1); - mem.set(?&u8, envp_buf, null); + const envp_buf = try allocator.alloc(?*u8, envp_count + 1); + mem.set(?*u8, envp_buf, null); errdefer freeNullDelimitedEnvMap(allocator, envp_buf); { var it = env_map.iterator(); @@ -397,7 +397,7 @@ pub fn createNullDelimitedEnvMap(allocator: &Allocator, env_map: &const BufMap) return envp_buf; } -pub fn freeNullDelimitedEnvMap(allocator: &Allocator, envp_buf: []?&u8) void { +pub fn freeNullDelimitedEnvMap(allocator: *Allocator, envp_buf: []?*u8) void { for (envp_buf) |env| { const env_buf = if (env) |ptr| ptr[0 .. 
cstr.len(ptr) + 1] else break; allocator.free(env_buf); @@ -410,9 +410,9 @@ pub fn freeNullDelimitedEnvMap(allocator: &Allocator, envp_buf: []?&u8) void { /// pointers after the args and after the environment variables. /// `argv[0]` is the executable path. /// This function also uses the PATH environment variable to get the full path to the executable. -pub fn posixExecve(argv: []const []const u8, env_map: &const BufMap, allocator: &Allocator) !void { - const argv_buf = try allocator.alloc(?&u8, argv.len + 1); - mem.set(?&u8, argv_buf, null); +pub fn posixExecve(argv: []const []const u8, env_map: *const BufMap, allocator: *Allocator) !void { + const argv_buf = try allocator.alloc(?*u8, argv.len + 1); + mem.set(?*u8, argv_buf, null); defer { for (argv_buf) |arg| { const arg_buf = if (arg) |ptr| cstr.toSlice(ptr) else break; @@ -494,10 +494,10 @@ fn posixExecveErrnoToErr(err: usize) PosixExecveError { } pub var linux_aux_raw = []usize{0} ** 38; -pub var posix_environ_raw: []&u8 = undefined; +pub var posix_environ_raw: []*u8 = undefined; /// Caller must free result when done. -pub fn getEnvMap(allocator: &Allocator) !BufMap { +pub fn getEnvMap(allocator: *Allocator) !BufMap { var result = BufMap.init(allocator); errdefer result.deinit(); @@ -557,7 +557,7 @@ pub fn getEnvPosix(key: []const u8) ?[]const u8 { } /// Caller must free returned memory. -pub fn getEnvVarOwned(allocator: &mem.Allocator, key: []const u8) ![]u8 { +pub fn getEnvVarOwned(allocator: *mem.Allocator, key: []const u8) ![]u8 { if (is_windows) { const key_with_null = try cstr.addNullByte(allocator, key); defer allocator.free(key_with_null); @@ -591,7 +591,7 @@ pub fn getEnvVarOwned(allocator: &mem.Allocator, key: []const u8) ![]u8 { } /// Caller must free the returned memory. -pub fn getCwd(allocator: &Allocator) ![]u8 { +pub fn getCwd(allocator: *Allocator) ![]u8 { switch (builtin.os) { Os.windows => { var buf = try allocator.alloc(u8, 256); @@ -640,7 +640,7 @@ test "os.getCwd" { pub const SymLinkError = PosixSymLinkError || WindowsSymLinkError; -pub fn symLink(allocator: &Allocator, existing_path: []const u8, new_path: []const u8) SymLinkError!void { +pub fn symLink(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) SymLinkError!void { if (is_windows) { return symLinkWindows(allocator, existing_path, new_path); } else { @@ -653,7 +653,7 @@ pub const WindowsSymLinkError = error{ Unexpected, }; -pub fn symLinkWindows(allocator: &Allocator, existing_path: []const u8, new_path: []const u8) WindowsSymLinkError!void { +pub fn symLinkWindows(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) WindowsSymLinkError!void { const existing_with_null = try cstr.addNullByte(allocator, existing_path); defer allocator.free(existing_with_null); const new_with_null = try cstr.addNullByte(allocator, new_path); @@ -683,7 +683,7 @@ pub const PosixSymLinkError = error{ Unexpected, }; -pub fn symLinkPosix(allocator: &Allocator, existing_path: []const u8, new_path: []const u8) PosixSymLinkError!void { +pub fn symLinkPosix(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) PosixSymLinkError!void { const full_buf = try allocator.alloc(u8, existing_path.len + new_path.len + 2); defer allocator.free(full_buf); @@ -718,7 +718,7 @@ pub fn symLinkPosix(allocator: &Allocator, existing_path: []const u8, new_path: // here we replace the standard +/ with -_ so that it can be used in a file name const b64_fs_encoder = 
base64.Base64Encoder.init("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_", base64.standard_pad_char); -pub fn atomicSymLink(allocator: &Allocator, existing_path: []const u8, new_path: []const u8) !void { +pub fn atomicSymLink(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) !void { if (symLink(allocator, existing_path, new_path)) { return; } else |err| switch (err) { @@ -746,7 +746,7 @@ pub fn atomicSymLink(allocator: &Allocator, existing_path: []const u8, new_path: } } -pub fn deleteFile(allocator: &Allocator, file_path: []const u8) !void { +pub fn deleteFile(allocator: *Allocator, file_path: []const u8) !void { if (builtin.os == Os.windows) { return deleteFileWindows(allocator, file_path); } else { @@ -754,7 +754,7 @@ pub fn deleteFile(allocator: &Allocator, file_path: []const u8) !void { } } -pub fn deleteFileWindows(allocator: &Allocator, file_path: []const u8) !void { +pub fn deleteFileWindows(allocator: *Allocator, file_path: []const u8) !void { const buf = try allocator.alloc(u8, file_path.len + 1); defer allocator.free(buf); @@ -772,7 +772,7 @@ pub fn deleteFileWindows(allocator: &Allocator, file_path: []const u8) !void { } } -pub fn deleteFilePosix(allocator: &Allocator, file_path: []const u8) !void { +pub fn deleteFilePosix(allocator: *Allocator, file_path: []const u8) !void { const buf = try allocator.alloc(u8, file_path.len + 1); defer allocator.free(buf); @@ -803,7 +803,7 @@ pub fn deleteFilePosix(allocator: &Allocator, file_path: []const u8) !void { /// there is a possibility of power loss or application termination leaving temporary files present /// in the same directory as dest_path. /// Destination file will have the same mode as the source file. -pub fn copyFile(allocator: &Allocator, source_path: []const u8, dest_path: []const u8) !void { +pub fn copyFile(allocator: *Allocator, source_path: []const u8, dest_path: []const u8) !void { var in_file = try os.File.openRead(allocator, source_path); defer in_file.close(); @@ -825,7 +825,7 @@ pub fn copyFile(allocator: &Allocator, source_path: []const u8, dest_path: []con /// Guaranteed to be atomic. 
However until https://patchwork.kernel.org/patch/9636735/ is /// merged and readily available, /// there is a possibility of power loss or application termination leaving temporary files present -pub fn copyFileMode(allocator: &Allocator, source_path: []const u8, dest_path: []const u8, mode: FileMode) !void { +pub fn copyFileMode(allocator: *Allocator, source_path: []const u8, dest_path: []const u8, mode: FileMode) !void { var in_file = try os.File.openRead(allocator, source_path); defer in_file.close(); @@ -843,7 +843,7 @@ pub fn copyFileMode(allocator: &Allocator, source_path: []const u8, dest_path: [ } pub const AtomicFile = struct { - allocator: &Allocator, + allocator: *Allocator, file: os.File, tmp_path: []u8, dest_path: []const u8, @@ -851,7 +851,7 @@ pub const AtomicFile = struct { /// dest_path must remain valid for the lifetime of AtomicFile /// call finish to atomically replace dest_path with contents - pub fn init(allocator: &Allocator, dest_path: []const u8, mode: FileMode) !AtomicFile { + pub fn init(allocator: *Allocator, dest_path: []const u8, mode: FileMode) !AtomicFile { const dirname = os.path.dirname(dest_path); var rand_buf: [12]u8 = undefined; @@ -888,7 +888,7 @@ pub const AtomicFile = struct { } /// always call deinit, even after successful finish() - pub fn deinit(self: &AtomicFile) void { + pub fn deinit(self: *AtomicFile) void { if (!self.finished) { self.file.close(); deleteFile(self.allocator, self.tmp_path) catch {}; @@ -897,7 +897,7 @@ pub const AtomicFile = struct { } } - pub fn finish(self: &AtomicFile) !void { + pub fn finish(self: *AtomicFile) !void { assert(!self.finished); self.file.close(); try rename(self.allocator, self.tmp_path, self.dest_path); @@ -906,7 +906,7 @@ pub const AtomicFile = struct { } }; -pub fn rename(allocator: &Allocator, old_path: []const u8, new_path: []const u8) !void { +pub fn rename(allocator: *Allocator, old_path: []const u8, new_path: []const u8) !void { const full_buf = try allocator.alloc(u8, old_path.len + new_path.len + 2); defer allocator.free(full_buf); @@ -951,7 +951,7 @@ pub fn rename(allocator: &Allocator, old_path: []const u8, new_path: []const u8) } } -pub fn makeDir(allocator: &Allocator, dir_path: []const u8) !void { +pub fn makeDir(allocator: *Allocator, dir_path: []const u8) !void { if (is_windows) { return makeDirWindows(allocator, dir_path); } else { @@ -959,7 +959,7 @@ pub fn makeDir(allocator: &Allocator, dir_path: []const u8) !void { } } -pub fn makeDirWindows(allocator: &Allocator, dir_path: []const u8) !void { +pub fn makeDirWindows(allocator: *Allocator, dir_path: []const u8) !void { const path_buf = try cstr.addNullByte(allocator, dir_path); defer allocator.free(path_buf); @@ -973,7 +973,7 @@ pub fn makeDirWindows(allocator: &Allocator, dir_path: []const u8) !void { } } -pub fn makeDirPosix(allocator: &Allocator, dir_path: []const u8) !void { +pub fn makeDirPosix(allocator: *Allocator, dir_path: []const u8) !void { const path_buf = try cstr.addNullByte(allocator, dir_path); defer allocator.free(path_buf); @@ -999,7 +999,7 @@ pub fn makeDirPosix(allocator: &Allocator, dir_path: []const u8) !void { /// Calls makeDir recursively to make an entire path. Returns success if the path /// already exists and is a directory. 
-pub fn makePath(allocator: &Allocator, full_path: []const u8) !void { +pub fn makePath(allocator: *Allocator, full_path: []const u8) !void { const resolved_path = try path.resolve(allocator, full_path); defer allocator.free(resolved_path); @@ -1033,7 +1033,7 @@ pub fn makePath(allocator: &Allocator, full_path: []const u8) !void { /// Returns ::error.DirNotEmpty if the directory is not empty. /// To delete a directory recursively, see ::deleteTree -pub fn deleteDir(allocator: &Allocator, dir_path: []const u8) !void { +pub fn deleteDir(allocator: *Allocator, dir_path: []const u8) !void { const path_buf = try allocator.alloc(u8, dir_path.len + 1); defer allocator.free(path_buf); @@ -1084,7 +1084,7 @@ const DeleteTreeError = error{ DirNotEmpty, Unexpected, }; -pub fn deleteTree(allocator: &Allocator, full_path: []const u8) DeleteTreeError!void { +pub fn deleteTree(allocator: *Allocator, full_path: []const u8) DeleteTreeError!void { start_over: while (true) { var got_access_denied = false; // First, try deleting the item as a file. This way we don't follow sym links. @@ -1153,7 +1153,7 @@ pub fn deleteTree(allocator: &Allocator, full_path: []const u8) DeleteTreeError! pub const Dir = struct { fd: i32, darwin_seek: darwin_seek_t, - allocator: &Allocator, + allocator: *Allocator, buf: []u8, index: usize, end_index: usize, @@ -1180,7 +1180,7 @@ pub const Dir = struct { }; }; - pub fn open(allocator: &Allocator, dir_path: []const u8) !Dir { + pub fn open(allocator: *Allocator, dir_path: []const u8) !Dir { const fd = switch (builtin.os) { Os.windows => @compileError("TODO support Dir.open for windows"), Os.linux => try posixOpen(allocator, dir_path, posix.O_RDONLY | posix.O_DIRECTORY | posix.O_CLOEXEC, 0), @@ -1206,14 +1206,14 @@ pub const Dir = struct { }; } - pub fn close(self: &Dir) void { + pub fn close(self: *Dir) void { self.allocator.free(self.buf); os.close(self.fd); } /// Memory such as file names referenced in this returned entry becomes invalid /// with subsequent calls to next, as well as when this ::Dir is deinitialized. 
- pub fn next(self: &Dir) !?Entry { + pub fn next(self: *Dir) !?Entry { switch (builtin.os) { Os.linux => return self.nextLinux(), Os.macosx, Os.ios => return self.nextDarwin(), @@ -1222,7 +1222,7 @@ pub const Dir = struct { } } - fn nextDarwin(self: &Dir) !?Entry { + fn nextDarwin(self: *Dir) !?Entry { start_over: while (true) { if (self.index >= self.end_index) { if (self.buf.len == 0) { @@ -1248,7 +1248,7 @@ pub const Dir = struct { break; } } - const darwin_entry = @ptrCast(&align(1) posix.dirent, &self.buf[self.index]); + const darwin_entry = @ptrCast(*align(1) posix.dirent, &self.buf[self.index]); const next_index = self.index + darwin_entry.d_reclen; self.index = next_index; @@ -1277,11 +1277,11 @@ pub const Dir = struct { } } - fn nextWindows(self: &Dir) !?Entry { + fn nextWindows(self: *Dir) !?Entry { @compileError("TODO support Dir.next for windows"); } - fn nextLinux(self: &Dir) !?Entry { + fn nextLinux(self: *Dir) !?Entry { start_over: while (true) { if (self.index >= self.end_index) { if (self.buf.len == 0) { @@ -1307,7 +1307,7 @@ pub const Dir = struct { break; } } - const linux_entry = @ptrCast(&align(1) posix.dirent, &self.buf[self.index]); + const linux_entry = @ptrCast(*align(1) posix.dirent, &self.buf[self.index]); const next_index = self.index + linux_entry.d_reclen; self.index = next_index; @@ -1337,7 +1337,7 @@ pub const Dir = struct { } }; -pub fn changeCurDir(allocator: &Allocator, dir_path: []const u8) !void { +pub fn changeCurDir(allocator: *Allocator, dir_path: []const u8) !void { const path_buf = try allocator.alloc(u8, dir_path.len + 1); defer allocator.free(path_buf); @@ -1361,7 +1361,7 @@ pub fn changeCurDir(allocator: &Allocator, dir_path: []const u8) !void { } /// Read value of a symbolic link. -pub fn readLink(allocator: &Allocator, pathname: []const u8) ![]u8 { +pub fn readLink(allocator: *Allocator, pathname: []const u8) ![]u8 { const path_buf = try allocator.alloc(u8, pathname.len + 1); defer allocator.free(path_buf); @@ -1468,7 +1468,7 @@ pub const ArgIteratorPosix = struct { }; } - pub fn next(self: &ArgIteratorPosix) ?[]const u8 { + pub fn next(self: *ArgIteratorPosix) ?[]const u8 { if (self.index == self.count) return null; const s = raw[self.index]; @@ -1476,7 +1476,7 @@ pub const ArgIteratorPosix = struct { return cstr.toSlice(s); } - pub fn skip(self: &ArgIteratorPosix) bool { + pub fn skip(self: *ArgIteratorPosix) bool { if (self.index == self.count) return false; self.index += 1; @@ -1485,12 +1485,12 @@ pub const ArgIteratorPosix = struct { /// This is marked as public but actually it's only meant to be used /// internally by zig's startup code. - pub var raw: []&u8 = undefined; + pub var raw: []*u8 = undefined; }; pub const ArgIteratorWindows = struct { index: usize, - cmd_line: &const u8, + cmd_line: *const u8, in_quote: bool, quote_count: usize, seen_quote_count: usize, @@ -1501,7 +1501,7 @@ pub const ArgIteratorWindows = struct { return initWithCmdLine(windows.GetCommandLineA()); } - pub fn initWithCmdLine(cmd_line: &const u8) ArgIteratorWindows { + pub fn initWithCmdLine(cmd_line: *const u8) ArgIteratorWindows { return ArgIteratorWindows{ .index = 0, .cmd_line = cmd_line, @@ -1512,7 +1512,7 @@ pub const ArgIteratorWindows = struct { } /// You must free the returned memory when done. 
- pub fn next(self: &ArgIteratorWindows, allocator: &Allocator) ?(NextError![]u8) { + pub fn next(self: *ArgIteratorWindows, allocator: *Allocator) ?(NextError![]u8) { // march forward over whitespace while (true) : (self.index += 1) { const byte = self.cmd_line[self.index]; @@ -1526,7 +1526,7 @@ pub const ArgIteratorWindows = struct { return self.internalNext(allocator); } - pub fn skip(self: &ArgIteratorWindows) bool { + pub fn skip(self: *ArgIteratorWindows) bool { // march forward over whitespace while (true) : (self.index += 1) { const byte = self.cmd_line[self.index]; @@ -1565,7 +1565,7 @@ pub const ArgIteratorWindows = struct { } } - fn internalNext(self: &ArgIteratorWindows, allocator: &Allocator) NextError![]u8 { + fn internalNext(self: *ArgIteratorWindows, allocator: *Allocator) NextError![]u8 { var buf = try Buffer.initSize(allocator, 0); defer buf.deinit(); @@ -1609,14 +1609,14 @@ pub const ArgIteratorWindows = struct { } } - fn emitBackslashes(self: &ArgIteratorWindows, buf: &Buffer, emit_count: usize) !void { + fn emitBackslashes(self: *ArgIteratorWindows, buf: *Buffer, emit_count: usize) !void { var i: usize = 0; while (i < emit_count) : (i += 1) { try buf.appendByte('\\'); } } - fn countQuotes(cmd_line: &const u8) usize { + fn countQuotes(cmd_line: *const u8) usize { var result: usize = 0; var backslash_count: usize = 0; var index: usize = 0; @@ -1649,7 +1649,7 @@ pub const ArgIterator = struct { pub const NextError = ArgIteratorWindows.NextError; /// You must free the returned memory when done. - pub fn next(self: &ArgIterator, allocator: &Allocator) ?(NextError![]u8) { + pub fn next(self: *ArgIterator, allocator: *Allocator) ?(NextError![]u8) { if (builtin.os == Os.windows) { return self.inner.next(allocator); } else { @@ -1658,13 +1658,13 @@ pub const ArgIterator = struct { } /// If you only are targeting posix you can call this and not need an allocator. - pub fn nextPosix(self: &ArgIterator) ?[]const u8 { + pub fn nextPosix(self: *ArgIterator) ?[]const u8 { return self.inner.next(); } /// Parse past 1 argument without capturing it. /// Returns `true` if skipped an arg, `false` if we are at the end. - pub fn skip(self: &ArgIterator) bool { + pub fn skip(self: *ArgIterator) bool { return self.inner.skip(); } }; @@ -1674,7 +1674,7 @@ pub fn args() ArgIterator { } /// Caller must call freeArgs on result. -pub fn argsAlloc(allocator: &mem.Allocator) ![]const []u8 { +pub fn argsAlloc(allocator: *mem.Allocator) ![]const []u8 { // TODO refactor to only make 1 allocation. 
var it = args(); var contents = try Buffer.initSize(allocator, 0); @@ -1711,12 +1711,12 @@ pub fn argsAlloc(allocator: &mem.Allocator) ![]const []u8 { return result_slice_list; } -pub fn argsFree(allocator: &mem.Allocator, args_alloc: []const []u8) void { +pub fn argsFree(allocator: *mem.Allocator, args_alloc: []const []u8) void { var total_bytes: usize = 0; for (args_alloc) |arg| { total_bytes += @sizeOf([]u8) + arg.len; } - const unaligned_allocated_buf = @ptrCast(&const u8, args_alloc.ptr)[0..total_bytes]; + const unaligned_allocated_buf = @ptrCast(*const u8, args_alloc.ptr)[0..total_bytes]; const aligned_allocated_buf = @alignCast(@alignOf([]u8), unaligned_allocated_buf); return allocator.free(aligned_allocated_buf); } @@ -1765,7 +1765,7 @@ test "windows arg parsing" { }); } -fn testWindowsCmdLine(input_cmd_line: &const u8, expected_args: []const []const u8) void { +fn testWindowsCmdLine(input_cmd_line: *const u8, expected_args: []const []const u8) void { var it = ArgIteratorWindows.initWithCmdLine(input_cmd_line); for (expected_args) |expected_arg| { const arg = ??it.next(debug.global_allocator) catch unreachable; @@ -1832,7 +1832,7 @@ test "openSelfExe" { /// This function may return an error if the current executable /// was deleted after spawning. /// Caller owns returned memory. -pub fn selfExePath(allocator: &mem.Allocator) ![]u8 { +pub fn selfExePath(allocator: *mem.Allocator) ![]u8 { switch (builtin.os) { Os.linux => { // If the currently executing binary has been deleted, @@ -1875,7 +1875,7 @@ pub fn selfExePath(allocator: &mem.Allocator) ![]u8 { /// Get the directory path that contains the current executable. /// Caller owns returned memory. -pub fn selfExeDirPath(allocator: &mem.Allocator) ![]u8 { +pub fn selfExeDirPath(allocator: *mem.Allocator) ![]u8 { switch (builtin.os) { Os.linux => { // If the currently executing binary has been deleted, @@ -2001,7 +2001,7 @@ pub const PosixBindError = error{ }; /// addr is `&const T` where T is one of the sockaddr -pub fn posixBind(fd: i32, addr: &const posix.sockaddr) PosixBindError!void { +pub fn posixBind(fd: i32, addr: *const posix.sockaddr) PosixBindError!void { const rc = posix.bind(fd, addr, @sizeOf(posix.sockaddr)); const err = posix.getErrno(rc); switch (err) { @@ -2096,7 +2096,7 @@ pub const PosixAcceptError = error{ Unexpected, }; -pub fn posixAccept(fd: i32, addr: &posix.sockaddr, flags: u32) PosixAcceptError!i32 { +pub fn posixAccept(fd: i32, addr: *posix.sockaddr, flags: u32) PosixAcceptError!i32 { while (true) { var sockaddr_size = u32(@sizeOf(posix.sockaddr)); const rc = posix.accept4(fd, addr, &sockaddr_size, flags); @@ -2195,7 +2195,7 @@ pub const LinuxEpollCtlError = error{ Unexpected, }; -pub fn linuxEpollCtl(epfd: i32, op: u32, fd: i32, event: &linux.epoll_event) LinuxEpollCtlError!void { +pub fn linuxEpollCtl(epfd: i32, op: u32, fd: i32, event: *linux.epoll_event) LinuxEpollCtlError!void { const rc = posix.epoll_ctl(epfd, op, fd, event); const err = posix.getErrno(rc); switch (err) { @@ -2288,7 +2288,7 @@ pub const PosixConnectError = error{ Unexpected, }; -pub fn posixConnect(sockfd: i32, sockaddr: &const posix.sockaddr) PosixConnectError!void { +pub fn posixConnect(sockfd: i32, sockaddr: *const posix.sockaddr) PosixConnectError!void { while (true) { const rc = posix.connect(sockfd, sockaddr, @sizeOf(posix.sockaddr)); const err = posix.getErrno(rc); @@ -2319,7 +2319,7 @@ pub fn posixConnect(sockfd: i32, sockaddr: &const posix.sockaddr) PosixConnectEr /// Same as posixConnect except it is for blocking socket file 
descriptors. /// It expects to receive EINPROGRESS. -pub fn posixConnectAsync(sockfd: i32, sockaddr: &const posix.sockaddr) PosixConnectError!void { +pub fn posixConnectAsync(sockfd: i32, sockaddr: *const posix.sockaddr) PosixConnectError!void { while (true) { const rc = posix.connect(sockfd, sockaddr, @sizeOf(posix.sockaddr)); const err = posix.getErrno(rc); @@ -2350,7 +2350,7 @@ pub fn posixConnectAsync(sockfd: i32, sockaddr: &const posix.sockaddr) PosixConn pub fn posixGetSockOptConnectError(sockfd: i32) PosixConnectError!void { var err_code: i32 = undefined; var size: u32 = @sizeOf(i32); - const rc = posix.getsockopt(sockfd, posix.SOL_SOCKET, posix.SO_ERROR, @ptrCast(&u8, &err_code), &size); + const rc = posix.getsockopt(sockfd, posix.SOL_SOCKET, posix.SO_ERROR, @ptrCast(*u8, &err_code), &size); assert(size == 4); const err = posix.getErrno(rc); switch (err) { @@ -2401,13 +2401,13 @@ pub const Thread = struct { }, builtin.Os.windows => struct { handle: windows.HANDLE, - alloc_start: &c_void, + alloc_start: *c_void, heap_handle: windows.HANDLE, }, else => @compileError("Unsupported OS"), }; - pub fn wait(self: &const Thread) void { + pub fn wait(self: *const Thread) void { if (use_pthreads) { const err = c.pthread_join(self.data.handle, null); switch (err) { @@ -2473,7 +2473,7 @@ pub const SpawnThreadError = error{ /// fn startFn(@typeOf(context)) T /// where T is u8, noreturn, void, or !void /// caller must call wait on the returned thread -pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!&Thread { +pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!*Thread { // TODO compile-time call graph analysis to determine stack upper bound // https://github.com/ziglang/zig/issues/157 const default_stack_size = 8 * 1024 * 1024; @@ -2491,7 +2491,7 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!&Thread if (@sizeOf(Context) == 0) { return startFn({}); } else { - return startFn(@ptrCast(&Context, @alignCast(@alignOf(Context), arg)).*); + return startFn(@ptrCast(*Context, @alignCast(@alignOf(Context), arg)).*); } } }; @@ -2500,13 +2500,13 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!&Thread const byte_count = @alignOf(WinThread.OuterContext) + @sizeOf(WinThread.OuterContext); const bytes_ptr = windows.HeapAlloc(heap_handle, 0, byte_count) ?? return SpawnThreadError.OutOfMemory; errdefer assert(windows.HeapFree(heap_handle, 0, bytes_ptr) != 0); - const bytes = @ptrCast(&u8, bytes_ptr)[0..byte_count]; + const bytes = @ptrCast(*u8, bytes_ptr)[0..byte_count]; const outer_context = std.heap.FixedBufferAllocator.init(bytes).allocator.create(WinThread.OuterContext) catch unreachable; outer_context.inner = context; outer_context.thread.data.heap_handle = heap_handle; outer_context.thread.data.alloc_start = bytes_ptr; - const parameter = if (@sizeOf(Context) == 0) null else @ptrCast(&c_void, &outer_context.inner); + const parameter = if (@sizeOf(Context) == 0) null else @ptrCast(*c_void, &outer_context.inner); outer_context.thread.data.handle = windows.CreateThread(null, default_stack_size, WinThread.threadMain, parameter, 0, null) ?? 
{ const err = windows.GetLastError(); return switch (err) { @@ -2521,15 +2521,15 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!&Thread if (@sizeOf(Context) == 0) { return startFn({}); } else { - return startFn(@intToPtr(&const Context, ctx_addr).*); + return startFn(@intToPtr(*const Context, ctx_addr).*); } } - extern fn posixThreadMain(ctx: ?&c_void) ?&c_void { + extern fn posixThreadMain(ctx: ?*c_void) ?*c_void { if (@sizeOf(Context) == 0) { _ = startFn({}); return null; } else { - _ = startFn(@ptrCast(&const Context, @alignCast(@alignOf(Context), ctx)).*); + _ = startFn(@ptrCast(*const Context, @alignCast(@alignOf(Context), ctx)).*); return null; } } @@ -2548,7 +2548,7 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!&Thread stack_end -= @sizeOf(Context); stack_end -= stack_end % @alignOf(Context); assert(stack_end >= stack_addr); - const context_ptr = @alignCast(@alignOf(Context), @intToPtr(&Context, stack_end)); + const context_ptr = @alignCast(@alignOf(Context), @intToPtr(*Context, stack_end)); context_ptr.* = context; arg = stack_end; } @@ -2556,7 +2556,7 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!&Thread stack_end -= @sizeOf(Thread); stack_end -= stack_end % @alignOf(Thread); assert(stack_end >= stack_addr); - const thread_ptr = @alignCast(@alignOf(Thread), @intToPtr(&Thread, stack_end)); + const thread_ptr = @alignCast(@alignOf(Thread), @intToPtr(*Thread, stack_end)); thread_ptr.data.stack_addr = stack_addr; thread_ptr.data.stack_len = mmap_len; @@ -2572,9 +2572,9 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!&Thread // align to page stack_end -= stack_end % os.page_size; - assert(c.pthread_attr_setstack(&attr, @intToPtr(&c_void, stack_addr), stack_end - stack_addr) == 0); + assert(c.pthread_attr_setstack(&attr, @intToPtr(*c_void, stack_addr), stack_end - stack_addr) == 0); - const err = c.pthread_create(&thread_ptr.data.handle, &attr, MainFuncs.posixThreadMain, @intToPtr(&c_void, arg)); + const err = c.pthread_create(&thread_ptr.data.handle, &attr, MainFuncs.posixThreadMain, @intToPtr(*c_void, arg)); switch (err) { 0 => return thread_ptr, posix.EAGAIN => return SpawnThreadError.SystemResources, diff --git a/std/os/linux/index.zig b/std/os/linux/index.zig index 5186ff32d3..3e7b836ac7 100644 --- a/std/os/linux/index.zig +++ b/std/os/linux/index.zig @@ -665,15 +665,15 @@ pub fn dup2(old: i32, new: i32) usize { return syscall2(SYS_dup2, usize(old), usize(new)); } -pub fn chdir(path: &const u8) usize { +pub fn chdir(path: *const u8) usize { return syscall1(SYS_chdir, @ptrToInt(path)); } -pub fn chroot(path: &const u8) usize { +pub fn chroot(path: *const u8) usize { return syscall1(SYS_chroot, @ptrToInt(path)); } -pub fn execve(path: &const u8, argv: &const ?&const u8, envp: &const ?&const u8) usize { +pub fn execve(path: *const u8, argv: *const ?*const u8, envp: *const ?*const u8) usize { return syscall3(SYS_execve, @ptrToInt(path), @ptrToInt(argv), @ptrToInt(envp)); } @@ -681,15 +681,15 @@ pub fn fork() usize { return syscall0(SYS_fork); } -pub fn futex_wait(uaddr: usize, futex_op: u32, val: i32, timeout: ?&timespec) usize { +pub fn futex_wait(uaddr: usize, futex_op: u32, val: i32, timeout: ?*timespec) usize { return syscall4(SYS_futex, uaddr, futex_op, @bitCast(u32, val), @ptrToInt(timeout)); } -pub fn getcwd(buf: &u8, size: usize) usize { +pub fn getcwd(buf: *u8, size: usize) usize { return syscall2(SYS_getcwd, @ptrToInt(buf), size); } -pub fn getdents(fd: i32, dirp: 
&u8, count: usize) usize { +pub fn getdents(fd: i32, dirp: *u8, count: usize) usize { return syscall3(SYS_getdents, usize(fd), @ptrToInt(dirp), count); } @@ -698,27 +698,27 @@ pub fn isatty(fd: i32) bool { return syscall3(SYS_ioctl, usize(fd), TIOCGWINSZ, @ptrToInt(&wsz)) == 0; } -pub fn readlink(noalias path: &const u8, noalias buf_ptr: &u8, buf_len: usize) usize { +pub fn readlink(noalias path: *const u8, noalias buf_ptr: *u8, buf_len: usize) usize { return syscall3(SYS_readlink, @ptrToInt(path), @ptrToInt(buf_ptr), buf_len); } -pub fn mkdir(path: &const u8, mode: u32) usize { +pub fn mkdir(path: *const u8, mode: u32) usize { return syscall2(SYS_mkdir, @ptrToInt(path), mode); } -pub fn mount(special: &const u8, dir: &const u8, fstype: &const u8, flags: usize, data: usize) usize { +pub fn mount(special: *const u8, dir: *const u8, fstype: *const u8, flags: usize, data: usize) usize { return syscall5(SYS_mount, @ptrToInt(special), @ptrToInt(dir), @ptrToInt(fstype), flags, data); } -pub fn umount(special: &const u8) usize { +pub fn umount(special: *const u8) usize { return syscall2(SYS_umount2, @ptrToInt(special), 0); } -pub fn umount2(special: &const u8, flags: u32) usize { +pub fn umount2(special: *const u8, flags: u32) usize { return syscall2(SYS_umount2, @ptrToInt(special), flags); } -pub fn mmap(address: ?&u8, length: usize, prot: usize, flags: u32, fd: i32, offset: isize) usize { +pub fn mmap(address: ?*u8, length: usize, prot: usize, flags: u32, fd: i32, offset: isize) usize { return syscall6(SYS_mmap, @ptrToInt(address), length, prot, flags, usize(fd), @bitCast(usize, offset)); } @@ -726,60 +726,60 @@ pub fn munmap(address: usize, length: usize) usize { return syscall2(SYS_munmap, address, length); } -pub fn read(fd: i32, buf: &u8, count: usize) usize { +pub fn read(fd: i32, buf: *u8, count: usize) usize { return syscall3(SYS_read, usize(fd), @ptrToInt(buf), count); } -pub fn rmdir(path: &const u8) usize { +pub fn rmdir(path: *const u8) usize { return syscall1(SYS_rmdir, @ptrToInt(path)); } -pub fn symlink(existing: &const u8, new: &const u8) usize { +pub fn symlink(existing: *const u8, new: *const u8) usize { return syscall2(SYS_symlink, @ptrToInt(existing), @ptrToInt(new)); } -pub fn pread(fd: i32, buf: &u8, count: usize, offset: usize) usize { +pub fn pread(fd: i32, buf: *u8, count: usize, offset: usize) usize { return syscall4(SYS_pread, usize(fd), @ptrToInt(buf), count, offset); } -pub fn access(path: &const u8, mode: u32) usize { +pub fn access(path: *const u8, mode: u32) usize { return syscall2(SYS_access, @ptrToInt(path), mode); } -pub fn pipe(fd: &[2]i32) usize { +pub fn pipe(fd: *[2]i32) usize { return pipe2(fd, 0); } -pub fn pipe2(fd: &[2]i32, flags: usize) usize { +pub fn pipe2(fd: *[2]i32, flags: usize) usize { return syscall2(SYS_pipe2, @ptrToInt(fd), flags); } -pub fn write(fd: i32, buf: &const u8, count: usize) usize { +pub fn write(fd: i32, buf: *const u8, count: usize) usize { return syscall3(SYS_write, usize(fd), @ptrToInt(buf), count); } -pub fn pwrite(fd: i32, buf: &const u8, count: usize, offset: usize) usize { +pub fn pwrite(fd: i32, buf: *const u8, count: usize, offset: usize) usize { return syscall4(SYS_pwrite, usize(fd), @ptrToInt(buf), count, offset); } -pub fn rename(old: &const u8, new: &const u8) usize { +pub fn rename(old: *const u8, new: *const u8) usize { return syscall2(SYS_rename, @ptrToInt(old), @ptrToInt(new)); } -pub fn open(path: &const u8, flags: u32, perm: usize) usize { +pub fn open(path: *const u8, flags: u32, perm: usize) usize { return 
syscall3(SYS_open, @ptrToInt(path), flags, perm); } -pub fn create(path: &const u8, perm: usize) usize { +pub fn create(path: *const u8, perm: usize) usize { return syscall2(SYS_creat, @ptrToInt(path), perm); } -pub fn openat(dirfd: i32, path: &const u8, flags: usize, mode: usize) usize { +pub fn openat(dirfd: i32, path: *const u8, flags: usize, mode: usize) usize { return syscall4(SYS_openat, usize(dirfd), @ptrToInt(path), flags, mode); } /// See also `clone` (from the arch-specific include) -pub fn clone5(flags: usize, child_stack_ptr: usize, parent_tid: &i32, child_tid: &i32, newtls: usize) usize { +pub fn clone5(flags: usize, child_stack_ptr: usize, parent_tid: *i32, child_tid: *i32, newtls: usize) usize { return syscall5(SYS_clone, flags, child_stack_ptr, @ptrToInt(parent_tid), @ptrToInt(child_tid), newtls); } @@ -801,7 +801,7 @@ pub fn exit(status: i32) noreturn { unreachable; } -pub fn getrandom(buf: &u8, count: usize, flags: u32) usize { +pub fn getrandom(buf: *u8, count: usize, flags: u32) usize { return syscall3(SYS_getrandom, @ptrToInt(buf), count, usize(flags)); } @@ -809,15 +809,15 @@ pub fn kill(pid: i32, sig: i32) usize { return syscall2(SYS_kill, @bitCast(usize, isize(pid)), usize(sig)); } -pub fn unlink(path: &const u8) usize { +pub fn unlink(path: *const u8) usize { return syscall1(SYS_unlink, @ptrToInt(path)); } -pub fn waitpid(pid: i32, status: &i32, options: i32) usize { +pub fn waitpid(pid: i32, status: *i32, options: i32) usize { return syscall4(SYS_wait4, @bitCast(usize, isize(pid)), @ptrToInt(status), @bitCast(usize, isize(options)), 0); } -pub fn clock_gettime(clk_id: i32, tp: &timespec) usize { +pub fn clock_gettime(clk_id: i32, tp: *timespec) usize { if (VDSO_CGT_SYM.len != 0) { const f = @atomicLoad(@typeOf(init_vdso_clock_gettime), &vdso_clock_gettime, builtin.AtomicOrder.Unordered); if (@ptrToInt(f) != 0) { @@ -831,7 +831,7 @@ pub fn clock_gettime(clk_id: i32, tp: &timespec) usize { return syscall2(SYS_clock_gettime, @bitCast(usize, isize(clk_id)), @ptrToInt(tp)); } var vdso_clock_gettime = init_vdso_clock_gettime; -extern fn init_vdso_clock_gettime(clk: i32, ts: &timespec) usize { +extern fn init_vdso_clock_gettime(clk: i32, ts: *timespec) usize { const addr = vdso.lookup(VDSO_CGT_VER, VDSO_CGT_SYM); var f = @intToPtr(@typeOf(init_vdso_clock_gettime), addr); _ = @cmpxchgStrong(@typeOf(init_vdso_clock_gettime), &vdso_clock_gettime, init_vdso_clock_gettime, f, builtin.AtomicOrder.Monotonic, builtin.AtomicOrder.Monotonic); @@ -839,23 +839,23 @@ extern fn init_vdso_clock_gettime(clk: i32, ts: &timespec) usize { return f(clk, ts); } -pub fn clock_getres(clk_id: i32, tp: &timespec) usize { +pub fn clock_getres(clk_id: i32, tp: *timespec) usize { return syscall2(SYS_clock_getres, @bitCast(usize, isize(clk_id)), @ptrToInt(tp)); } -pub fn clock_settime(clk_id: i32, tp: &const timespec) usize { +pub fn clock_settime(clk_id: i32, tp: *const timespec) usize { return syscall2(SYS_clock_settime, @bitCast(usize, isize(clk_id)), @ptrToInt(tp)); } -pub fn gettimeofday(tv: &timeval, tz: &timezone) usize { +pub fn gettimeofday(tv: *timeval, tz: *timezone) usize { return syscall2(SYS_gettimeofday, @ptrToInt(tv), @ptrToInt(tz)); } -pub fn settimeofday(tv: &const timeval, tz: &const timezone) usize { +pub fn settimeofday(tv: *const timeval, tz: *const timezone) usize { return syscall2(SYS_settimeofday, @ptrToInt(tv), @ptrToInt(tz)); } -pub fn nanosleep(req: &const timespec, rem: ?&timespec) usize { +pub fn nanosleep(req: *const timespec, rem: ?*timespec) usize { return syscall2(SYS_nanosleep, @ptrToInt(req), 
@ptrToInt(rem)); } @@ -899,11 +899,11 @@ pub fn setegid(egid: u32) usize { return syscall1(SYS_setegid, egid); } -pub fn getresuid(ruid: &u32, euid: &u32, suid: &u32) usize { +pub fn getresuid(ruid: *u32, euid: *u32, suid: *u32) usize { return syscall3(SYS_getresuid, @ptrToInt(ruid), @ptrToInt(euid), @ptrToInt(suid)); } -pub fn getresgid(rgid: &u32, egid: &u32, sgid: &u32) usize { +pub fn getresgid(rgid: *u32, egid: *u32, sgid: *u32) usize { return syscall3(SYS_getresgid, @ptrToInt(rgid), @ptrToInt(egid), @ptrToInt(sgid)); } @@ -915,11 +915,11 @@ pub fn setresgid(rgid: u32, egid: u32, sgid: u32) usize { return syscall3(SYS_setresgid, rgid, egid, sgid); } -pub fn getgroups(size: usize, list: &u32) usize { +pub fn getgroups(size: usize, list: *u32) usize { return syscall2(SYS_getgroups, size, @ptrToInt(list)); } -pub fn setgroups(size: usize, list: &const u32) usize { +pub fn setgroups(size: usize, list: *const u32) usize { return syscall2(SYS_setgroups, size, @ptrToInt(list)); } @@ -927,11 +927,11 @@ pub fn getpid() i32 { return @bitCast(i32, u32(syscall0(SYS_getpid))); } -pub fn sigprocmask(flags: u32, noalias set: &const sigset_t, noalias oldset: ?&sigset_t) usize { +pub fn sigprocmask(flags: u32, noalias set: *const sigset_t, noalias oldset: ?*sigset_t) usize { return syscall4(SYS_rt_sigprocmask, flags, @ptrToInt(set), @ptrToInt(oldset), NSIG / 8); } -pub fn sigaction(sig: u6, noalias act: &const Sigaction, noalias oact: ?&Sigaction) usize { +pub fn sigaction(sig: u6, noalias act: *const Sigaction, noalias oact: ?*Sigaction) usize { assert(sig >= 1); assert(sig != SIGKILL); assert(sig != SIGSTOP); @@ -942,8 +942,8 @@ pub fn sigaction(sig: u6, noalias act: &const Sigaction, noalias oact: ?&Sigacti .restorer = @ptrCast(extern fn () void, restore_rt), }; var ksa_old: k_sigaction = undefined; - @memcpy(@ptrCast(&u8, &ksa.mask), @ptrCast(&const u8, &act.mask), 8); - const result = syscall4(SYS_rt_sigaction, sig, @ptrToInt(&ksa), @ptrToInt(&ksa_old), @sizeOf(@typeOf(ksa.mask))); + @memcpy(@ptrCast(*u8, *ksa.mask), @ptrCast(*const u8, *act.mask), 8); + const result = syscall4(SYS_rt_sigaction, sig, @ptrToInt(*ksa), @ptrToInt(*ksa_old), @sizeOf(@typeOf(ksa.mask))); const err = getErrno(result); if (err != 0) { return result; @@ -951,7 +951,7 @@ pub fn sigaction(sig: u6, noalias act: &const Sigaction, noalias oact: ?&Sigacti if (oact) |old| { old.handler = ksa_old.handler; old.flags = @truncate(u32, ksa_old.flags); - @memcpy(@ptrCast(&u8, &old.mask), @ptrCast(&const u8, &ksa_old.mask), @sizeOf(@typeOf(ksa_old.mask))); + @memcpy(@ptrCast(*u8, *old.mask), @ptrCast(*const u8, *ksa_old.mask), @sizeOf(@typeOf(ksa_old.mask))); } return 0; } @@ -989,24 +989,24 @@ pub fn raise(sig: i32) usize { return ret; } -fn blockAllSignals(set: &sigset_t) void { +fn blockAllSignals(set: *sigset_t) void { _ = syscall4(SYS_rt_sigprocmask, SIG_BLOCK, @ptrToInt(&all_mask), @ptrToInt(set), NSIG / 8); } -fn blockAppSignals(set: &sigset_t) void { +fn blockAppSignals(set: *sigset_t) void { _ = syscall4(SYS_rt_sigprocmask, SIG_BLOCK, @ptrToInt(&app_mask), @ptrToInt(set), NSIG / 8); } -fn restoreSignals(set: &sigset_t) void { +fn restoreSignals(set: *sigset_t) void { _ = syscall4(SYS_rt_sigprocmask, SIG_SETMASK, @ptrToInt(set), 0, NSIG / 8); } -pub fn sigaddset(set: &sigset_t, sig: u6) void { +pub fn sigaddset(set: *sigset_t, sig: u6) void { const s = sig - 1; (set.*)[usize(s) / usize.bit_count] |= usize(1) << (s & (usize.bit_count - 1)); } -pub fn sigismember(set: &const sigset_t, sig: u6) bool { +pub fn sigismember(set: 
*const sigset_t, sig: u6) bool { const s = sig - 1; return ((set.*)[usize(s) / usize.bit_count] & (usize(1) << (s & (usize.bit_count - 1)))) != 0; } @@ -1036,15 +1036,15 @@ pub const sockaddr_in6 = extern struct { }; pub const iovec = extern struct { - iov_base: &u8, + iov_base: *u8, iov_len: usize, }; -pub fn getsockname(fd: i32, noalias addr: &sockaddr, noalias len: &socklen_t) usize { +pub fn getsockname(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize { return syscall3(SYS_getsockname, usize(fd), @ptrToInt(addr), @ptrToInt(len)); } -pub fn getpeername(fd: i32, noalias addr: &sockaddr, noalias len: &socklen_t) usize { +pub fn getpeername(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize { return syscall3(SYS_getpeername, usize(fd), @ptrToInt(addr), @ptrToInt(len)); } @@ -1052,27 +1052,27 @@ pub fn socket(domain: u32, socket_type: u32, protocol: u32) usize { return syscall3(SYS_socket, domain, socket_type, protocol); } -pub fn setsockopt(fd: i32, level: u32, optname: u32, optval: &const u8, optlen: socklen_t) usize { +pub fn setsockopt(fd: i32, level: u32, optname: u32, optval: *const u8, optlen: socklen_t) usize { return syscall5(SYS_setsockopt, usize(fd), level, optname, usize(optval), @ptrToInt(optlen)); } -pub fn getsockopt(fd: i32, level: u32, optname: u32, noalias optval: &u8, noalias optlen: &socklen_t) usize { +pub fn getsockopt(fd: i32, level: u32, optname: u32, noalias optval: *u8, noalias optlen: *socklen_t) usize { return syscall5(SYS_getsockopt, usize(fd), level, optname, @ptrToInt(optval), @ptrToInt(optlen)); } -pub fn sendmsg(fd: i32, msg: &const msghdr, flags: u32) usize { +pub fn sendmsg(fd: i32, msg: *const msghdr, flags: u32) usize { return syscall3(SYS_sendmsg, usize(fd), @ptrToInt(msg), flags); } -pub fn connect(fd: i32, addr: &const sockaddr, len: socklen_t) usize { +pub fn connect(fd: i32, addr: *const sockaddr, len: socklen_t) usize { return syscall3(SYS_connect, usize(fd), @ptrToInt(addr), usize(len)); } -pub fn recvmsg(fd: i32, msg: &msghdr, flags: u32) usize { +pub fn recvmsg(fd: i32, msg: *msghdr, flags: u32) usize { return syscall3(SYS_recvmsg, usize(fd), @ptrToInt(msg), flags); } -pub fn recvfrom(fd: i32, noalias buf: &u8, len: usize, flags: u32, noalias addr: ?&sockaddr, noalias alen: ?&socklen_t) usize { +pub fn recvfrom(fd: i32, noalias buf: *u8, len: usize, flags: u32, noalias addr: ?*sockaddr, noalias alen: ?*socklen_t) usize { return syscall6(SYS_recvfrom, usize(fd), @ptrToInt(buf), len, flags, @ptrToInt(addr), @ptrToInt(alen)); } @@ -1080,7 +1080,7 @@ pub fn shutdown(fd: i32, how: i32) usize { return syscall2(SYS_shutdown, usize(fd), usize(how)); } -pub fn bind(fd: i32, addr: &const sockaddr, len: socklen_t) usize { +pub fn bind(fd: i32, addr: *const sockaddr, len: socklen_t) usize { return syscall3(SYS_bind, usize(fd), @ptrToInt(addr), usize(len)); } @@ -1088,79 +1088,79 @@ pub fn listen(fd: i32, backlog: u32) usize { return syscall2(SYS_listen, usize(fd), backlog); } -pub fn sendto(fd: i32, buf: &const u8, len: usize, flags: u32, addr: ?&const sockaddr, alen: socklen_t) usize { +pub fn sendto(fd: i32, buf: *const u8, len: usize, flags: u32, addr: ?*const sockaddr, alen: socklen_t) usize { return syscall6(SYS_sendto, usize(fd), @ptrToInt(buf), len, flags, @ptrToInt(addr), usize(alen)); } pub fn socketpair(domain: i32, socket_type: i32, protocol: i32, fd: [2]i32) usize { - return syscall4(SYS_socketpair, usize(domain), usize(socket_type), usize(protocol), @ptrToInt(&fd[0])); + return syscall4(SYS_socketpair, 
usize(domain), usize(socket_type), usize(protocol), @ptrToInt(*fd[0])); } -pub fn accept(fd: i32, noalias addr: &sockaddr, noalias len: &socklen_t) usize { +pub fn accept(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize { return accept4(fd, addr, len, 0); } -pub fn accept4(fd: i32, noalias addr: &sockaddr, noalias len: &socklen_t, flags: u32) usize { +pub fn accept4(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t, flags: u32) usize { return syscall4(SYS_accept4, usize(fd), @ptrToInt(addr), @ptrToInt(len), flags); } -pub fn fstat(fd: i32, stat_buf: &Stat) usize { +pub fn fstat(fd: i32, stat_buf: *Stat) usize { return syscall2(SYS_fstat, usize(fd), @ptrToInt(stat_buf)); } -pub fn stat(pathname: &const u8, statbuf: &Stat) usize { +pub fn stat(pathname: *const u8, statbuf: *Stat) usize { return syscall2(SYS_stat, @ptrToInt(pathname), @ptrToInt(statbuf)); } -pub fn lstat(pathname: &const u8, statbuf: &Stat) usize { +pub fn lstat(pathname: *const u8, statbuf: *Stat) usize { return syscall2(SYS_lstat, @ptrToInt(pathname), @ptrToInt(statbuf)); } -pub fn listxattr(path: &const u8, list: &u8, size: usize) usize { +pub fn listxattr(path: *const u8, list: *u8, size: usize) usize { return syscall3(SYS_listxattr, @ptrToInt(path), @ptrToInt(list), size); } -pub fn llistxattr(path: &const u8, list: &u8, size: usize) usize { +pub fn llistxattr(path: *const u8, list: *u8, size: usize) usize { return syscall3(SYS_llistxattr, @ptrToInt(path), @ptrToInt(list), size); } -pub fn flistxattr(fd: usize, list: &u8, size: usize) usize { +pub fn flistxattr(fd: usize, list: *u8, size: usize) usize { return syscall3(SYS_flistxattr, fd, @ptrToInt(list), size); } -pub fn getxattr(path: &const u8, name: &const u8, value: &void, size: usize) usize { +pub fn getxattr(path: *const u8, name: *const u8, value: *void, size: usize) usize { return syscall4(SYS_getxattr, @ptrToInt(path), @ptrToInt(name), @ptrToInt(value), size); } -pub fn lgetxattr(path: &const u8, name: &const u8, value: &void, size: usize) usize { +pub fn lgetxattr(path: *const u8, name: *const u8, value: *void, size: usize) usize { return syscall4(SYS_lgetxattr, @ptrToInt(path), @ptrToInt(name), @ptrToInt(value), size); } -pub fn fgetxattr(fd: usize, name: &const u8, value: &void, size: usize) usize { +pub fn fgetxattr(fd: usize, name: *const u8, value: *void, size: usize) usize { return syscall4(SYS_lgetxattr, fd, @ptrToInt(name), @ptrToInt(value), size); } -pub fn setxattr(path: &const u8, name: &const u8, value: &const void, size: usize, flags: usize) usize { +pub fn setxattr(path: *const u8, name: *const u8, value: *const void, size: usize, flags: usize) usize { return syscall5(SYS_setxattr, @ptrToInt(path), @ptrToInt(name), @ptrToInt(value), size, flags); } -pub fn lsetxattr(path: &const u8, name: &const u8, value: &const void, size: usize, flags: usize) usize { +pub fn lsetxattr(path: *const u8, name: *const u8, value: *const void, size: usize, flags: usize) usize { return syscall5(SYS_lsetxattr, @ptrToInt(path), @ptrToInt(name), @ptrToInt(value), size, flags); } -pub fn fsetxattr(fd: usize, name: &const u8, value: &const void, size: usize, flags: usize) usize { +pub fn fsetxattr(fd: usize, name: *const u8, value: *const void, size: usize, flags: usize) usize { return syscall5(SYS_fsetxattr, fd, @ptrToInt(name), @ptrToInt(value), size, flags); } -pub fn removexattr(path: &const u8, name: &const u8) usize { +pub fn removexattr(path: *const u8, name: *const u8) usize { return syscall2(SYS_removexattr, @ptrToInt(path), 
@ptrToInt(name)); } -pub fn lremovexattr(path: &const u8, name: &const u8) usize { +pub fn lremovexattr(path: *const u8, name: *const u8) usize { return syscall2(SYS_lremovexattr, @ptrToInt(path), @ptrToInt(name)); } -pub fn fremovexattr(fd: usize, name: &const u8) usize { +pub fn fremovexattr(fd: usize, name: *const u8) usize { return syscall2(SYS_fremovexattr, fd, @ptrToInt(name)); } @@ -1184,11 +1184,11 @@ pub fn epoll_create1(flags: usize) usize { return syscall1(SYS_epoll_create1, flags); } -pub fn epoll_ctl(epoll_fd: i32, op: u32, fd: i32, ev: &epoll_event) usize { +pub fn epoll_ctl(epoll_fd: i32, op: u32, fd: i32, ev: *epoll_event) usize { return syscall4(SYS_epoll_ctl, usize(epoll_fd), usize(op), usize(fd), @ptrToInt(ev)); } -pub fn epoll_wait(epoll_fd: i32, events: &epoll_event, maxevents: u32, timeout: i32) usize { +pub fn epoll_wait(epoll_fd: i32, events: *epoll_event, maxevents: u32, timeout: i32) usize { return syscall4(SYS_epoll_wait, usize(epoll_fd), @ptrToInt(events), usize(maxevents), usize(timeout)); } @@ -1201,11 +1201,11 @@ pub const itimerspec = extern struct { it_value: timespec, }; -pub fn timerfd_gettime(fd: i32, curr_value: &itimerspec) usize { +pub fn timerfd_gettime(fd: i32, curr_value: *itimerspec) usize { return syscall2(SYS_timerfd_gettime, usize(fd), @ptrToInt(curr_value)); } -pub fn timerfd_settime(fd: i32, flags: u32, new_value: &const itimerspec, old_value: ?&itimerspec) usize { +pub fn timerfd_settime(fd: i32, flags: u32, new_value: *const itimerspec, old_value: ?*itimerspec) usize { return syscall4(SYS_timerfd_settime, usize(fd), usize(flags), @ptrToInt(new_value), @ptrToInt(old_value)); } @@ -1300,8 +1300,8 @@ pub fn CAP_TO_INDEX(cap: u8) u8 { } pub const cap_t = extern struct { - hdrp: &cap_user_header_t, - datap: &cap_user_data_t, + hdrp: *cap_user_header_t, + datap: *cap_user_data_t, }; pub const cap_user_header_t = extern struct { @@ -1319,11 +1319,11 @@ pub fn unshare(flags: usize) usize { return syscall1(SYS_unshare, usize(flags)); } -pub fn capget(hdrp: &cap_user_header_t, datap: &cap_user_data_t) usize { +pub fn capget(hdrp: *cap_user_header_t, datap: *cap_user_data_t) usize { return syscall2(SYS_capget, @ptrToInt(hdrp), @ptrToInt(datap)); } -pub fn capset(hdrp: &cap_user_header_t, datap: &const cap_user_data_t) usize { +pub fn capset(hdrp: *cap_user_header_t, datap: *const cap_user_data_t) usize { return syscall2(SYS_capset, @ptrToInt(hdrp), @ptrToInt(datap)); } diff --git a/std/os/linux/vdso.zig b/std/os/linux/vdso.zig index 8e0a285841..1317da6388 100644 --- a/std/os/linux/vdso.zig +++ b/std/os/linux/vdso.zig @@ -8,11 +8,11 @@ pub fn lookup(vername: []const u8, name: []const u8) usize { const vdso_addr = std.os.linux_aux_raw[std.elf.AT_SYSINFO_EHDR]; if (vdso_addr == 0) return 0; - const eh = @intToPtr(&elf.Ehdr, vdso_addr); + const eh = @intToPtr(*elf.Ehdr, vdso_addr); var ph_addr: usize = vdso_addr + eh.e_phoff; - const ph = @intToPtr(&elf.Phdr, ph_addr); + const ph = @intToPtr(*elf.Phdr, ph_addr); - var maybe_dynv: ?&usize = null; + var maybe_dynv: ?*usize = null; var base: usize = @maxValue(usize); { var i: usize = 0; @@ -20,10 +20,10 @@ pub fn lookup(vername: []const u8, name: []const u8) usize { i += 1; ph_addr += eh.e_phentsize; }) { - const this_ph = @intToPtr(&elf.Phdr, ph_addr); + const this_ph = @intToPtr(*elf.Phdr, ph_addr); switch (this_ph.p_type) { elf.PT_LOAD => base = vdso_addr + this_ph.p_offset - this_ph.p_vaddr, - elf.PT_DYNAMIC => maybe_dynv = @intToPtr(&usize, vdso_addr + this_ph.p_offset), + elf.PT_DYNAMIC => maybe_dynv = 
@intToPtr(*usize, vdso_addr + this_ph.p_offset), else => {}, } } @@ -31,22 +31,22 @@ pub fn lookup(vername: []const u8, name: []const u8) usize { const dynv = maybe_dynv ?? return 0; if (base == @maxValue(usize)) return 0; - var maybe_strings: ?&u8 = null; - var maybe_syms: ?&elf.Sym = null; - var maybe_hashtab: ?&linux.Elf_Symndx = null; - var maybe_versym: ?&u16 = null; - var maybe_verdef: ?&elf.Verdef = null; + var maybe_strings: ?*u8 = null; + var maybe_syms: ?*elf.Sym = null; + var maybe_hashtab: ?*linux.Elf_Symndx = null; + var maybe_versym: ?*u16 = null; + var maybe_verdef: ?*elf.Verdef = null; { var i: usize = 0; while (dynv[i] != 0) : (i += 2) { const p = base + dynv[i + 1]; switch (dynv[i]) { - elf.DT_STRTAB => maybe_strings = @intToPtr(&u8, p), - elf.DT_SYMTAB => maybe_syms = @intToPtr(&elf.Sym, p), - elf.DT_HASH => maybe_hashtab = @intToPtr(&linux.Elf_Symndx, p), - elf.DT_VERSYM => maybe_versym = @intToPtr(&u16, p), - elf.DT_VERDEF => maybe_verdef = @intToPtr(&elf.Verdef, p), + elf.DT_STRTAB => maybe_strings = @intToPtr(*u8, p), + elf.DT_SYMTAB => maybe_syms = @intToPtr(*elf.Sym, p), + elf.DT_HASH => maybe_hashtab = @intToPtr(*linux.Elf_Symndx, p), + elf.DT_VERSYM => maybe_versym = @intToPtr(*u16, p), + elf.DT_VERDEF => maybe_verdef = @intToPtr(*elf.Verdef, p), else => {}, } } @@ -76,7 +76,7 @@ pub fn lookup(vername: []const u8, name: []const u8) usize { return 0; } -fn checkver(def_arg: &elf.Verdef, vsym_arg: i32, vername: []const u8, strings: &u8) bool { +fn checkver(def_arg: *elf.Verdef, vsym_arg: i32, vername: []const u8, strings: *u8) bool { var def = def_arg; const vsym = @bitCast(u32, vsym_arg) & 0x7fff; while (true) { @@ -84,8 +84,8 @@ fn checkver(def_arg: &elf.Verdef, vsym_arg: i32, vername: []const u8, strings: & break; if (def.vd_next == 0) return false; - def = @intToPtr(&elf.Verdef, @ptrToInt(def) + def.vd_next); + def = @intToPtr(*elf.Verdef, @ptrToInt(def) + def.vd_next); } - const aux = @intToPtr(&elf.Verdaux, @ptrToInt(def) + def.vd_aux); + const aux = @intToPtr(*elf.Verdaux, @ptrToInt(def) + def.vd_aux); return mem.eql(u8, vername, cstr.toSliceConst(&strings[aux.vda_name])); } diff --git a/std/os/linux/x86_64.zig b/std/os/linux/x86_64.zig index b43a642038..9a90e64757 100644 --- a/std/os/linux/x86_64.zig +++ b/std/os/linux/x86_64.zig @@ -463,7 +463,7 @@ pub fn syscall6( } /// This matches the libc clone function. -pub extern fn clone(func: extern fn (arg: usize) u8, stack: usize, flags: usize, arg: usize, ptid: &i32, tls: usize, ctid: &i32) usize; +pub extern fn clone(func: extern fn (arg: usize) u8, stack: usize, flags: usize, arg: usize, ptid: *i32, tls: usize, ctid: *i32) usize; pub nakedcc fn restore_rt() void { return asm volatile ("syscall" @@ -474,12 +474,12 @@ pub nakedcc fn restore_rt() void { } pub const msghdr = extern struct { - msg_name: &u8, + msg_name: *u8, msg_namelen: socklen_t, - msg_iov: &iovec, + msg_iov: *iovec, msg_iovlen: i32, __pad1: i32, - msg_control: &u8, + msg_control: *u8, msg_controllen: socklen_t, __pad2: socklen_t, msg_flags: i32, diff --git a/std/os/path.zig b/std/os/path.zig index 162faffc42..4df6179bf5 100644 --- a/std/os/path.zig +++ b/std/os/path.zig @@ -32,7 +32,7 @@ pub fn isSep(byte: u8) bool { /// Naively combines a series of paths with the native path seperator. /// Allocates memory for the result, which must be freed by the caller. -pub fn join(allocator: &Allocator, paths: ...) ![]u8 { +pub fn join(allocator: *Allocator, paths: ...) 
![]u8 { if (is_windows) { return joinWindows(allocator, paths); } else { @@ -40,11 +40,11 @@ pub fn join(allocator: &Allocator, paths: ...) ![]u8 { } } -pub fn joinWindows(allocator: &Allocator, paths: ...) ![]u8 { +pub fn joinWindows(allocator: *Allocator, paths: ...) ![]u8 { return mem.join(allocator, sep_windows, paths); } -pub fn joinPosix(allocator: &Allocator, paths: ...) ![]u8 { +pub fn joinPosix(allocator: *Allocator, paths: ...) ![]u8 { return mem.join(allocator, sep_posix, paths); } @@ -310,7 +310,7 @@ fn asciiEqlIgnoreCase(s1: []const u8, s2: []const u8) bool { } /// Converts the command line arguments into a slice and calls `resolveSlice`. -pub fn resolve(allocator: &Allocator, args: ...) ![]u8 { +pub fn resolve(allocator: *Allocator, args: ...) ![]u8 { var paths: [args.len][]const u8 = undefined; comptime var arg_i = 0; inline while (arg_i < args.len) : (arg_i += 1) { @@ -320,7 +320,7 @@ pub fn resolve(allocator: &Allocator, args: ...) ![]u8 { } /// On Windows, this calls `resolveWindows` and on POSIX it calls `resolvePosix`. -pub fn resolveSlice(allocator: &Allocator, paths: []const []const u8) ![]u8 { +pub fn resolveSlice(allocator: *Allocator, paths: []const []const u8) ![]u8 { if (is_windows) { return resolveWindows(allocator, paths); } else { @@ -334,7 +334,7 @@ pub fn resolveSlice(allocator: &Allocator, paths: []const []const u8) ![]u8 { /// If all paths are relative it uses the current working directory as a starting point. /// Each drive has its own current working directory. /// Path separators are canonicalized to '\\' and drives are canonicalized to capital letters. -pub fn resolveWindows(allocator: &Allocator, paths: []const []const u8) ![]u8 { +pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 { if (paths.len == 0) { assert(is_windows); // resolveWindows called on non windows can't use getCwd return os.getCwd(allocator); @@ -513,7 +513,7 @@ pub fn resolveWindows(allocator: &Allocator, paths: []const []const u8) ![]u8 { /// It resolves "." and "..". /// The result does not have a trailing path separator. /// If all paths are relative it uses the current working directory as a starting point. -pub fn resolvePosix(allocator: &Allocator, paths: []const []const u8) ![]u8 { +pub fn resolvePosix(allocator: *Allocator, paths: []const []const u8) ![]u8 { if (paths.len == 0) { assert(!is_windows); // resolvePosix called on windows can't use getCwd return os.getCwd(allocator); @@ -883,7 +883,7 @@ fn testBasenameWindows(input: []const u8, expected_output: []const u8) void { /// resolve to the same path (after calling `resolve` on each), a zero-length /// string is returned. /// On Windows this canonicalizes the drive to a capital letter and paths to `\\`. 
-pub fn relative(allocator: &Allocator, from: []const u8, to: []const u8) ![]u8 { +pub fn relative(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 { if (is_windows) { return relativeWindows(allocator, from, to); } else { @@ -891,7 +891,7 @@ pub fn relative(allocator: &Allocator, from: []const u8, to: []const u8) ![]u8 { } } -pub fn relativeWindows(allocator: &Allocator, from: []const u8, to: []const u8) ![]u8 { +pub fn relativeWindows(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 { const resolved_from = try resolveWindows(allocator, [][]const u8{from}); defer allocator.free(resolved_from); @@ -964,7 +964,7 @@ pub fn relativeWindows(allocator: &Allocator, from: []const u8, to: []const u8) return []u8{}; } -pub fn relativePosix(allocator: &Allocator, from: []const u8, to: []const u8) ![]u8 { +pub fn relativePosix(allocator: *Allocator, from: []const u8, to: []const u8) ![]u8 { const resolved_from = try resolvePosix(allocator, [][]const u8{from}); defer allocator.free(resolved_from); @@ -1063,7 +1063,7 @@ fn testRelativeWindows(from: []const u8, to: []const u8, expected_output: []cons /// Expands all symbolic links and resolves references to `.`, `..`, and /// extra `/` characters in ::pathname. /// Caller must deallocate result. -pub fn real(allocator: &Allocator, pathname: []const u8) ![]u8 { +pub fn real(allocator: *Allocator, pathname: []const u8) ![]u8 { switch (builtin.os) { Os.windows => { const pathname_buf = try allocator.alloc(u8, pathname.len + 1); diff --git a/std/os/test.zig b/std/os/test.zig index 4dfe76224a..4aa3535829 100644 --- a/std/os/test.zig +++ b/std/os/test.zig @@ -63,7 +63,7 @@ fn start1(ctx: void) u8 { return 0; } -fn start2(ctx: &i32) u8 { +fn start2(ctx: *i32) u8 { _ = @atomicRmw(i32, ctx, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst); return 0; } diff --git a/std/os/time.zig b/std/os/time.zig index 9a7c682483..8629504323 100644 --- a/std/os/time.zig +++ b/std/os/time.zig @@ -200,7 +200,7 @@ pub const Timer = struct { } /// Reads the timer value since start or the last reset in nanoseconds - pub fn read(self: &Timer) u64 { + pub fn read(self: *Timer) u64 { var clock = clockNative() - self.start_time; return switch (builtin.os) { Os.windows => @divFloor(clock * ns_per_s, self.frequency), @@ -211,12 +211,12 @@ pub const Timer = struct { } /// Resets the timer value to 0/now. 
- pub fn reset(self: &Timer) void { + pub fn reset(self: *Timer) void { self.start_time = clockNative(); } /// Returns the current value of the timer in nanoseconds, then resets it - pub fn lap(self: &Timer) u64 { + pub fn lap(self: *Timer) u64 { var now = clockNative(); var lap_time = self.read(); self.start_time = now; diff --git a/std/os/windows/index.zig b/std/os/windows/index.zig index 264ea391c4..85f69836d5 100644 --- a/std/os/windows/index.zig +++ b/std/os/windows/index.zig @@ -1,7 +1,7 @@ pub const ERROR = @import("error.zig"); pub extern "advapi32" stdcallcc fn CryptAcquireContextA( - phProv: &HCRYPTPROV, + phProv: *HCRYPTPROV, pszContainer: ?LPCSTR, pszProvider: ?LPCSTR, dwProvType: DWORD, @@ -10,13 +10,13 @@ pub extern "advapi32" stdcallcc fn CryptAcquireContextA( pub extern "advapi32" stdcallcc fn CryptReleaseContext(hProv: HCRYPTPROV, dwFlags: DWORD) BOOL; -pub extern "advapi32" stdcallcc fn CryptGenRandom(hProv: HCRYPTPROV, dwLen: DWORD, pbBuffer: &BYTE) BOOL; +pub extern "advapi32" stdcallcc fn CryptGenRandom(hProv: HCRYPTPROV, dwLen: DWORD, pbBuffer: *BYTE) BOOL; pub extern "kernel32" stdcallcc fn CloseHandle(hObject: HANDLE) BOOL; pub extern "kernel32" stdcallcc fn CreateDirectoryA( lpPathName: LPCSTR, - lpSecurityAttributes: ?&SECURITY_ATTRIBUTES, + lpSecurityAttributes: ?*SECURITY_ATTRIBUTES, ) BOOL; pub extern "kernel32" stdcallcc fn CreateFileA( @@ -30,23 +30,23 @@ pub extern "kernel32" stdcallcc fn CreateFileA( ) HANDLE; pub extern "kernel32" stdcallcc fn CreatePipe( - hReadPipe: &HANDLE, - hWritePipe: &HANDLE, - lpPipeAttributes: &const SECURITY_ATTRIBUTES, + hReadPipe: *HANDLE, + hWritePipe: *HANDLE, + lpPipeAttributes: *const SECURITY_ATTRIBUTES, nSize: DWORD, ) BOOL; pub extern "kernel32" stdcallcc fn CreateProcessA( lpApplicationName: ?LPCSTR, lpCommandLine: LPSTR, - lpProcessAttributes: ?&SECURITY_ATTRIBUTES, - lpThreadAttributes: ?&SECURITY_ATTRIBUTES, + lpProcessAttributes: ?*SECURITY_ATTRIBUTES, + lpThreadAttributes: ?*SECURITY_ATTRIBUTES, bInheritHandles: BOOL, dwCreationFlags: DWORD, - lpEnvironment: ?&c_void, + lpEnvironment: ?*c_void, lpCurrentDirectory: ?LPCSTR, - lpStartupInfo: &STARTUPINFOA, - lpProcessInformation: &PROCESS_INFORMATION, + lpStartupInfo: *STARTUPINFOA, + lpProcessInformation: *PROCESS_INFORMATION, ) BOOL; pub extern "kernel32" stdcallcc fn CreateSymbolicLinkA( @@ -65,7 +65,7 @@ pub extern "kernel32" stdcallcc fn FreeEnvironmentStringsA(penv: LPCH) BOOL; pub extern "kernel32" stdcallcc fn GetCommandLineA() LPSTR; -pub extern "kernel32" stdcallcc fn GetConsoleMode(in_hConsoleHandle: HANDLE, out_lpMode: &DWORD) BOOL; +pub extern "kernel32" stdcallcc fn GetConsoleMode(in_hConsoleHandle: HANDLE, out_lpMode: *DWORD) BOOL; pub extern "kernel32" stdcallcc fn GetCurrentDirectoryA(nBufferLength: WORD, lpBuffer: ?LPSTR) DWORD; @@ -73,9 +73,9 @@ pub extern "kernel32" stdcallcc fn GetEnvironmentStringsA() ?LPCH; pub extern "kernel32" stdcallcc fn GetEnvironmentVariableA(lpName: LPCSTR, lpBuffer: LPSTR, nSize: DWORD) DWORD; -pub extern "kernel32" stdcallcc fn GetExitCodeProcess(hProcess: HANDLE, lpExitCode: &DWORD) BOOL; +pub extern "kernel32" stdcallcc fn GetExitCodeProcess(hProcess: HANDLE, lpExitCode: *DWORD) BOOL; -pub extern "kernel32" stdcallcc fn GetFileSizeEx(hFile: HANDLE, lpFileSize: &LARGE_INTEGER) BOOL; +pub extern "kernel32" stdcallcc fn GetFileSizeEx(hFile: HANDLE, lpFileSize: *LARGE_INTEGER) BOOL; pub extern "kernel32" stdcallcc fn GetModuleFileNameA(hModule: ?HMODULE, lpFilename: LPSTR, nSize: DWORD) DWORD; @@ -84,7 +84,7 @@ pub 
extern "kernel32" stdcallcc fn GetLastError() DWORD; pub extern "kernel32" stdcallcc fn GetFileInformationByHandleEx( in_hFile: HANDLE, in_FileInformationClass: FILE_INFO_BY_HANDLE_CLASS, - out_lpFileInformation: &c_void, + out_lpFileInformation: *c_void, in_dwBufferSize: DWORD, ) BOOL; @@ -97,21 +97,21 @@ pub extern "kernel32" stdcallcc fn GetFinalPathNameByHandleA( pub extern "kernel32" stdcallcc fn GetProcessHeap() ?HANDLE; -pub extern "kernel32" stdcallcc fn GetSystemTimeAsFileTime(?&FILETIME) void; +pub extern "kernel32" stdcallcc fn GetSystemTimeAsFileTime(?*FILETIME) void; pub extern "kernel32" stdcallcc fn HeapCreate(flOptions: DWORD, dwInitialSize: SIZE_T, dwMaximumSize: SIZE_T) ?HANDLE; pub extern "kernel32" stdcallcc fn HeapDestroy(hHeap: HANDLE) BOOL; -pub extern "kernel32" stdcallcc fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: &c_void, dwBytes: SIZE_T) ?&c_void; -pub extern "kernel32" stdcallcc fn HeapSize(hHeap: HANDLE, dwFlags: DWORD, lpMem: &const c_void) SIZE_T; -pub extern "kernel32" stdcallcc fn HeapValidate(hHeap: HANDLE, dwFlags: DWORD, lpMem: &const c_void) BOOL; +pub extern "kernel32" stdcallcc fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: *c_void, dwBytes: SIZE_T) ?*c_void; +pub extern "kernel32" stdcallcc fn HeapSize(hHeap: HANDLE, dwFlags: DWORD, lpMem: *const c_void) SIZE_T; +pub extern "kernel32" stdcallcc fn HeapValidate(hHeap: HANDLE, dwFlags: DWORD, lpMem: *const c_void) BOOL; pub extern "kernel32" stdcallcc fn HeapCompact(hHeap: HANDLE, dwFlags: DWORD) SIZE_T; pub extern "kernel32" stdcallcc fn HeapSummary(hHeap: HANDLE, dwFlags: DWORD, lpSummary: LPHEAP_SUMMARY) BOOL; pub extern "kernel32" stdcallcc fn GetStdHandle(in_nStdHandle: DWORD) ?HANDLE; -pub extern "kernel32" stdcallcc fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) ?&c_void; +pub extern "kernel32" stdcallcc fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) ?*c_void; -pub extern "kernel32" stdcallcc fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: &c_void) BOOL; +pub extern "kernel32" stdcallcc fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: *c_void) BOOL; pub extern "kernel32" stdcallcc fn MoveFileExA( lpExistingFileName: LPCSTR, @@ -119,24 +119,24 @@ pub extern "kernel32" stdcallcc fn MoveFileExA( dwFlags: DWORD, ) BOOL; -pub extern "kernel32" stdcallcc fn QueryPerformanceCounter(lpPerformanceCount: &LARGE_INTEGER) BOOL; +pub extern "kernel32" stdcallcc fn QueryPerformanceCounter(lpPerformanceCount: *LARGE_INTEGER) BOOL; -pub extern "kernel32" stdcallcc fn QueryPerformanceFrequency(lpFrequency: &LARGE_INTEGER) BOOL; +pub extern "kernel32" stdcallcc fn QueryPerformanceFrequency(lpFrequency: *LARGE_INTEGER) BOOL; pub extern "kernel32" stdcallcc fn PathFileExists(pszPath: ?LPCTSTR) BOOL; pub extern "kernel32" stdcallcc fn ReadFile( in_hFile: HANDLE, - out_lpBuffer: &c_void, + out_lpBuffer: *c_void, in_nNumberOfBytesToRead: DWORD, - out_lpNumberOfBytesRead: &DWORD, - in_out_lpOverlapped: ?&OVERLAPPED, + out_lpNumberOfBytesRead: *DWORD, + in_out_lpOverlapped: ?*OVERLAPPED, ) BOOL; pub extern "kernel32" stdcallcc fn SetFilePointerEx( in_fFile: HANDLE, in_liDistanceToMove: LARGE_INTEGER, - out_opt_ldNewFilePointer: ?&LARGE_INTEGER, + out_opt_ldNewFilePointer: ?*LARGE_INTEGER, in_dwMoveMethod: DWORD, ) BOOL; @@ -150,10 +150,10 @@ pub extern "kernel32" stdcallcc fn WaitForSingleObject(hHandle: HANDLE, dwMillis pub extern "kernel32" stdcallcc fn WriteFile( in_hFile: HANDLE, - in_lpBuffer: &const c_void, + in_lpBuffer: *const c_void, in_nNumberOfBytesToWrite: DWORD, 
- out_lpNumberOfBytesWritten: ?&DWORD, - in_out_lpOverlapped: ?&OVERLAPPED, + out_lpNumberOfBytesWritten: ?*DWORD, + in_out_lpOverlapped: ?*OVERLAPPED, ) BOOL; //TODO: call unicode versions instead of relying on ANSI code page @@ -171,23 +171,23 @@ pub const BYTE = u8; pub const CHAR = u8; pub const DWORD = u32; pub const FLOAT = f32; -pub const HANDLE = &c_void; +pub const HANDLE = *c_void; pub const HCRYPTPROV = ULONG_PTR; -pub const HINSTANCE = &@OpaqueType(); -pub const HMODULE = &@OpaqueType(); +pub const HINSTANCE = *@OpaqueType(); +pub const HMODULE = *@OpaqueType(); pub const INT = c_int; -pub const LPBYTE = &BYTE; -pub const LPCH = &CHAR; -pub const LPCSTR = &const CHAR; -pub const LPCTSTR = &const TCHAR; -pub const LPCVOID = &const c_void; -pub const LPDWORD = &DWORD; -pub const LPSTR = &CHAR; +pub const LPBYTE = *BYTE; +pub const LPCH = *CHAR; +pub const LPCSTR = *const CHAR; +pub const LPCTSTR = *const TCHAR; +pub const LPCVOID = *const c_void; +pub const LPDWORD = *DWORD; +pub const LPSTR = *CHAR; pub const LPTSTR = if (UNICODE) LPWSTR else LPSTR; -pub const LPVOID = &c_void; -pub const LPWSTR = &WCHAR; -pub const PVOID = &c_void; -pub const PWSTR = &WCHAR; +pub const LPVOID = *c_void; +pub const LPWSTR = *WCHAR; +pub const PVOID = *c_void; +pub const PWSTR = *WCHAR; pub const SIZE_T = usize; pub const TCHAR = if (UNICODE) WCHAR else u8; pub const UINT = c_uint; @@ -218,7 +218,7 @@ pub const OVERLAPPED = extern struct { Pointer: PVOID, hEvent: HANDLE, }; -pub const LPOVERLAPPED = &OVERLAPPED; +pub const LPOVERLAPPED = *OVERLAPPED; pub const MAX_PATH = 260; @@ -271,11 +271,11 @@ pub const VOLUME_NAME_NT = 0x2; pub const SECURITY_ATTRIBUTES = extern struct { nLength: DWORD, - lpSecurityDescriptor: ?&c_void, + lpSecurityDescriptor: ?*c_void, bInheritHandle: BOOL, }; -pub const PSECURITY_ATTRIBUTES = &SECURITY_ATTRIBUTES; -pub const LPSECURITY_ATTRIBUTES = &SECURITY_ATTRIBUTES; +pub const PSECURITY_ATTRIBUTES = *SECURITY_ATTRIBUTES; +pub const LPSECURITY_ATTRIBUTES = *SECURITY_ATTRIBUTES; pub const GENERIC_READ = 0x80000000; pub const GENERIC_WRITE = 0x40000000; diff --git a/std/os/windows/util.zig b/std/os/windows/util.zig index 2bd8a157e4..7170346108 100644 --- a/std/os/windows/util.zig +++ b/std/os/windows/util.zig @@ -42,7 +42,7 @@ pub const WriteError = error{ }; pub fn windowsWrite(handle: windows.HANDLE, bytes: []const u8) WriteError!void { - if (windows.WriteFile(handle, @ptrCast(&const c_void, bytes.ptr), u32(bytes.len), null, null) == 0) { + if (windows.WriteFile(handle, @ptrCast(*const c_void, bytes.ptr), u32(bytes.len), null, null) == 0) { const err = windows.GetLastError(); return switch (err) { windows.ERROR.INVALID_USER_BUFFER => WriteError.SystemResources, @@ -68,11 +68,11 @@ pub fn windowsIsCygwinPty(handle: windows.HANDLE) bool { const size = @sizeOf(windows.FILE_NAME_INFO); var name_info_bytes align(@alignOf(windows.FILE_NAME_INFO)) = []u8{0} ** (size + windows.MAX_PATH); - if (windows.GetFileInformationByHandleEx(handle, windows.FileNameInfo, @ptrCast(&c_void, &name_info_bytes[0]), u32(name_info_bytes.len)) == 0) { + if (windows.GetFileInformationByHandleEx(handle, windows.FileNameInfo, @ptrCast(*c_void, &name_info_bytes[0]), u32(name_info_bytes.len)) == 0) { return true; } - const name_info = @ptrCast(&const windows.FILE_NAME_INFO, &name_info_bytes[0]); + const name_info = @ptrCast(*const windows.FILE_NAME_INFO, &name_info_bytes[0]); const name_bytes = name_info_bytes[size .. 
size + usize(name_info.FileNameLength)]; const name_wide = ([]u16)(name_bytes); return mem.indexOf(u16, name_wide, []u16{ 'm', 's', 'y', 's', '-' }) != null or @@ -91,7 +91,7 @@ pub const OpenError = error{ /// `file_path` needs to be copied in memory to add a null terminating byte, hence the allocator. pub fn windowsOpen( - allocator: &mem.Allocator, + allocator: *mem.Allocator, file_path: []const u8, desired_access: windows.DWORD, share_mode: windows.DWORD, @@ -119,7 +119,7 @@ pub fn windowsOpen( } /// Caller must free result. -pub fn createWindowsEnvBlock(allocator: &mem.Allocator, env_map: &const BufMap) ![]u8 { +pub fn createWindowsEnvBlock(allocator: *mem.Allocator, env_map: *const BufMap) ![]u8 { // count bytes needed const bytes_needed = x: { var bytes_needed: usize = 1; // 1 for the final null byte @@ -150,7 +150,7 @@ pub fn createWindowsEnvBlock(allocator: &mem.Allocator, env_map: &const BufMap) return result; } -pub fn windowsLoadDll(allocator: &mem.Allocator, dll_path: []const u8) !windows.HMODULE { +pub fn windowsLoadDll(allocator: *mem.Allocator, dll_path: []const u8) !windows.HMODULE { const padded_buff = try cstr.addNullByte(allocator, dll_path); defer allocator.free(padded_buff); return windows.LoadLibraryA(padded_buff.ptr) ?? error.DllNotFound; diff --git a/std/os/zen.zig b/std/os/zen.zig index 2411c5363e..2312b36dea 100644 --- a/std/os/zen.zig +++ b/std/os/zen.zig @@ -8,7 +8,7 @@ pub const Message = struct { type: usize, payload: usize, - pub fn from(mailbox_id: &const MailboxId) Message { + pub fn from(mailbox_id: *const MailboxId) Message { return Message{ .sender = MailboxId.Undefined, .receiver = *mailbox_id, @@ -17,7 +17,7 @@ pub const Message = struct { }; } - pub fn to(mailbox_id: &const MailboxId, msg_type: usize) Message { + pub fn to(mailbox_id: *const MailboxId, msg_type: usize) Message { return Message{ .sender = MailboxId.This, .receiver = *mailbox_id, @@ -26,7 +26,7 @@ pub const Message = struct { }; } - pub fn withData(mailbox_id: &const MailboxId, msg_type: usize, payload: usize) Message { + pub fn withData(mailbox_id: *const MailboxId, msg_type: usize, payload: usize) Message { return Message{ .sender = MailboxId.This, .receiver = *mailbox_id, @@ -67,7 +67,7 @@ pub const getErrno = @import("linux/index.zig").getErrno; use @import("linux/errno.zig"); // TODO: implement this correctly. -pub fn read(fd: i32, buf: &u8, count: usize) usize { +pub fn read(fd: i32, buf: *u8, count: usize) usize { switch (fd) { STDIN_FILENO => { var i: usize = 0; @@ -75,7 +75,7 @@ pub fn read(fd: i32, buf: &u8, count: usize) usize { send(Message.to(Server.Keyboard, 0)); var message = Message.from(MailboxId.This); - receive(&message); + receive(*message); buf[i] = u8(message.payload); } @@ -86,7 +86,7 @@ pub fn read(fd: i32, buf: &u8, count: usize) usize { } // TODO: implement this correctly. 
-pub fn write(fd: i32, buf: &const u8, count: usize) usize { +pub fn write(fd: i32, buf: *const u8, count: usize) usize { switch (fd) { STDOUT_FILENO, STDERR_FILENO => { var i: usize = 0; @@ -126,22 +126,22 @@ pub fn exit(status: i32) noreturn { unreachable; } -pub fn createPort(mailbox_id: &const MailboxId) void { +pub fn createPort(mailbox_id: *const MailboxId) void { _ = switch (*mailbox_id) { MailboxId.Port => |id| syscall1(Syscall.createPort, id), else => unreachable, }; } -pub fn send(message: &const Message) void { +pub fn send(message: *const Message) void { _ = syscall1(Syscall.send, @ptrToInt(message)); } -pub fn receive(destination: &Message) void { +pub fn receive(destination: *Message) void { _ = syscall1(Syscall.receive, @ptrToInt(destination)); } -pub fn subscribeIRQ(irq: u8, mailbox_id: &const MailboxId) void { +pub fn subscribeIRQ(irq: u8, mailbox_id: *const MailboxId) void { _ = syscall2(Syscall.subscribeIRQ, irq, @ptrToInt(mailbox_id)); } diff --git a/std/rand/index.zig b/std/rand/index.zig index c32309a0fd..3a1a559cd9 100644 --- a/std/rand/index.zig +++ b/std/rand/index.zig @@ -28,15 +28,15 @@ pub const DefaultPrng = Xoroshiro128; pub const DefaultCsprng = Isaac64; pub const Random = struct { - fillFn: fn (r: &Random, buf: []u8) void, + fillFn: fn (r: *Random, buf: []u8) void, /// Read random bytes into the specified buffer until fill. - pub fn bytes(r: &Random, buf: []u8) void { + pub fn bytes(r: *Random, buf: []u8) void { r.fillFn(r, buf); } /// Return a random integer/boolean type. - pub fn scalar(r: &Random, comptime T: type) T { + pub fn scalar(r: *Random, comptime T: type) T { var rand_bytes: [@sizeOf(T)]u8 = undefined; r.bytes(rand_bytes[0..]); @@ -50,7 +50,7 @@ pub const Random = struct { /// Get a random unsigned integer with even distribution between `start` /// inclusive and `end` exclusive. - pub fn range(r: &Random, comptime T: type, start: T, end: T) T { + pub fn range(r: *Random, comptime T: type, start: T, end: T) T { assert(start <= end); if (T.is_signed) { const uint = @IntType(false, T.bit_count); @@ -92,7 +92,7 @@ pub const Random = struct { } /// Return a floating point value evenly distributed in the range [0, 1). - pub fn float(r: &Random, comptime T: type) T { + pub fn float(r: *Random, comptime T: type) T { // Generate a uniform value between [1, 2) and scale down to [0, 1). // Note: The lowest mantissa bit is always set to 0 so we only use half the available range. switch (T) { @@ -113,7 +113,7 @@ pub const Random = struct { /// Return a floating point value normally distributed with mean = 0, stddev = 1. /// /// To use different parameters, use: floatNorm(...) * desiredStddev + desiredMean. - pub fn floatNorm(r: &Random, comptime T: type) T { + pub fn floatNorm(r: *Random, comptime T: type) T { const value = ziggurat.next_f64(r, ziggurat.NormDist); switch (T) { f32 => return f32(value), @@ -125,7 +125,7 @@ pub const Random = struct { /// Return an exponentially distributed float with a rate parameter of 1. /// /// To use a different rate parameter, use: floatExp(...) / desiredRate. - pub fn floatExp(r: &Random, comptime T: type) T { + pub fn floatExp(r: *Random, comptime T: type) T { const value = ziggurat.next_f64(r, ziggurat.ExpDist); switch (T) { f32 => return f32(value), @@ -135,7 +135,7 @@ pub const Random = struct { } /// Shuffle a slice into a random order. 
- pub fn shuffle(r: &Random, comptime T: type, buf: []T) void { + pub fn shuffle(r: *Random, comptime T: type, buf: []T) void { if (buf.len < 2) { return; } @@ -159,7 +159,7 @@ const SplitMix64 = struct { return SplitMix64{ .s = seed }; } - pub fn next(self: &SplitMix64) u64 { + pub fn next(self: *SplitMix64) u64 { self.s +%= 0x9e3779b97f4a7c15; var z = self.s; @@ -208,7 +208,7 @@ pub const Pcg = struct { return pcg; } - fn next(self: &Pcg) u32 { + fn next(self: *Pcg) u32 { const l = self.s; self.s = l *% default_multiplier +% (self.i | 1); @@ -218,13 +218,13 @@ pub const Pcg = struct { return (xor_s >> u5(rot)) | (xor_s << u5((0 -% rot) & 31)); } - fn seed(self: &Pcg, init_s: u64) void { + fn seed(self: *Pcg, init_s: u64) void { // Pcg requires 128-bits of seed. var gen = SplitMix64.init(init_s); self.seedTwo(gen.next(), gen.next()); } - fn seedTwo(self: &Pcg, init_s: u64, init_i: u64) void { + fn seedTwo(self: *Pcg, init_s: u64, init_i: u64) void { self.s = 0; self.i = (init_s << 1) | 1; self.s = self.s *% default_multiplier +% self.i; @@ -232,7 +232,7 @@ pub const Pcg = struct { self.s = self.s *% default_multiplier +% self.i; } - fn fill(r: &Random, buf: []u8) void { + fn fill(r: *Random, buf: []u8) void { const self = @fieldParentPtr(Pcg, "random", r); var i: usize = 0; @@ -297,7 +297,7 @@ pub const Xoroshiro128 = struct { return x; } - fn next(self: &Xoroshiro128) u64 { + fn next(self: *Xoroshiro128) u64 { const s0 = self.s[0]; var s1 = self.s[1]; const r = s0 +% s1; @@ -310,7 +310,7 @@ pub const Xoroshiro128 = struct { } // Skip 2^64 places ahead in the sequence - fn jump(self: &Xoroshiro128) void { + fn jump(self: *Xoroshiro128) void { var s0: u64 = 0; var s1: u64 = 0; @@ -334,7 +334,7 @@ pub const Xoroshiro128 = struct { self.s[1] = s1; } - fn seed(self: &Xoroshiro128, init_s: u64) void { + fn seed(self: *Xoroshiro128, init_s: u64) void { // Xoroshiro requires 128-bits of seed. var gen = SplitMix64.init(init_s); @@ -342,7 +342,7 @@ pub const Xoroshiro128 = struct { self.s[1] = gen.next(); } - fn fill(r: &Random, buf: []u8) void { + fn fill(r: *Random, buf: []u8) void { const self = @fieldParentPtr(Xoroshiro128, "random", r); var i: usize = 0; @@ -435,7 +435,7 @@ pub const Isaac64 = struct { return isaac; } - fn step(self: &Isaac64, mix: u64, base: usize, comptime m1: usize, comptime m2: usize) void { + fn step(self: *Isaac64, mix: u64, base: usize, comptime m1: usize, comptime m2: usize) void { const x = self.m[base + m1]; self.a = mix +% self.m[base + m2]; @@ -446,7 +446,7 @@ pub const Isaac64 = struct { self.r[self.r.len - 1 - base - m1] = self.b; } - fn refill(self: &Isaac64) void { + fn refill(self: *Isaac64) void { const midpoint = self.r.len / 2; self.c +%= 1; @@ -475,7 +475,7 @@ pub const Isaac64 = struct { self.i = 0; } - fn next(self: &Isaac64) u64 { + fn next(self: *Isaac64) u64 { if (self.i >= self.r.len) { self.refill(); } @@ -485,7 +485,7 @@ pub const Isaac64 = struct { return value; } - fn seed(self: &Isaac64, init_s: u64, comptime rounds: usize) void { + fn seed(self: *Isaac64, init_s: u64, comptime rounds: usize) void { // We ignore the multi-pass requirement since we don't currently expose full access to // seeding the self.m array completely. 
mem.set(u64, self.m[0..], 0); @@ -551,7 +551,7 @@ pub const Isaac64 = struct { self.i = self.r.len; // trigger refill on first value } - fn fill(r: &Random, buf: []u8) void { + fn fill(r: *Random, buf: []u8) void { const self = @fieldParentPtr(Isaac64, "random", r); var i: usize = 0; @@ -666,7 +666,7 @@ test "Random range" { testRange(&prng.random, 10, 14); } -fn testRange(r: &Random, start: i32, end: i32) void { +fn testRange(r: *Random, start: i32, end: i32) void { const count = usize(end - start); var values_buffer = []bool{false} ** 20; const values = values_buffer[0..count]; diff --git a/std/rand/ziggurat.zig b/std/rand/ziggurat.zig index 7daeb59165..774d3bd52a 100644 --- a/std/rand/ziggurat.zig +++ b/std/rand/ziggurat.zig @@ -12,7 +12,7 @@ const std = @import("../index.zig"); const math = std.math; const Random = std.rand.Random; -pub fn next_f64(random: &Random, comptime tables: &const ZigTable) f64 { +pub fn next_f64(random: *Random, comptime tables: *const ZigTable) f64 { while (true) { // We manually construct a float from parts as we can avoid an extra random lookup here by // using the unused exponent for the lookup table entry. @@ -60,7 +60,7 @@ pub const ZigTable = struct { // whether the distribution is symmetric is_symmetric: bool, // fallback calculation in the case we are in the 0 block - zero_case: fn (&Random, f64) f64, + zero_case: fn (*Random, f64) f64, }; // zigNorInit @@ -70,7 +70,7 @@ fn ZigTableGen( comptime v: f64, comptime f: fn (f64) f64, comptime f_inv: fn (f64) f64, - comptime zero_case: fn (&Random, f64) f64, + comptime zero_case: fn (*Random, f64) f64, ) ZigTable { var tables: ZigTable = undefined; @@ -110,7 +110,7 @@ fn norm_f(x: f64) f64 { fn norm_f_inv(y: f64) f64 { return math.sqrt(-2.0 * math.ln(y)); } -fn norm_zero_case(random: &Random, u: f64) f64 { +fn norm_zero_case(random: *Random, u: f64) f64 { var x: f64 = 1; var y: f64 = 0; @@ -149,7 +149,7 @@ fn exp_f(x: f64) f64 { fn exp_f_inv(y: f64) f64 { return -math.ln(y); } -fn exp_zero_case(random: &Random, _: f64) f64 { +fn exp_zero_case(random: *Random, _: f64) f64 { return exp_r - math.ln(random.float(f64)); } diff --git a/std/segmented_list.zig b/std/segmented_list.zig index d755135fe8..be9a2071a0 100644 --- a/std/segmented_list.zig +++ b/std/segmented_list.zig @@ -87,49 +87,49 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type const ShelfIndex = std.math.Log2Int(usize); prealloc_segment: [prealloc_item_count]T, - dynamic_segments: []&T, - allocator: &Allocator, + dynamic_segments: []*T, + allocator: *Allocator, len: usize, pub const prealloc_count = prealloc_item_count; /// Deinitialize with `deinit` - pub fn init(allocator: &Allocator) Self { + pub fn init(allocator: *Allocator) Self { return Self{ .allocator = allocator, .len = 0, .prealloc_segment = undefined, - .dynamic_segments = []&T{}, + .dynamic_segments = []*T{}, }; } - pub fn deinit(self: &Self) void { + pub fn deinit(self: *Self) void { self.freeShelves(ShelfIndex(self.dynamic_segments.len), 0); self.allocator.free(self.dynamic_segments); self.* = undefined; } - pub fn at(self: &Self, i: usize) &T { + pub fn at(self: *Self, i: usize) *T { assert(i < self.len); return self.uncheckedAt(i); } - pub fn count(self: &const Self) usize { + pub fn count(self: *const Self) usize { return self.len; } - pub fn push(self: &Self, item: &const T) !void { + pub fn push(self: *Self, item: *const T) !void { const new_item_ptr = try self.addOne(); new_item_ptr.* = item.*; } - pub fn pushMany(self: &Self, items: []const T) 
!void { + pub fn pushMany(self: *Self, items: []const T) !void { for (items) |item| { try self.push(item); } } - pub fn pop(self: &Self) ?T { + pub fn pop(self: *Self) ?T { if (self.len == 0) return null; const index = self.len - 1; @@ -138,7 +138,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type return result; } - pub fn addOne(self: &Self) !&T { + pub fn addOne(self: *Self) !*T { const new_length = self.len + 1; try self.growCapacity(new_length); const result = self.uncheckedAt(self.len); @@ -147,7 +147,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type } /// Grows or shrinks capacity to match usage. - pub fn setCapacity(self: &Self, new_capacity: usize) !void { + pub fn setCapacity(self: *Self, new_capacity: usize) !void { if (new_capacity <= usize(1) << (prealloc_exp + self.dynamic_segments.len)) { return self.shrinkCapacity(new_capacity); } else { @@ -156,15 +156,15 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type } /// Only grows capacity, or retains current capacity - pub fn growCapacity(self: &Self, new_capacity: usize) !void { + pub fn growCapacity(self: *Self, new_capacity: usize) !void { const new_cap_shelf_count = shelfCount(new_capacity); const old_shelf_count = ShelfIndex(self.dynamic_segments.len); if (new_cap_shelf_count > old_shelf_count) { - self.dynamic_segments = try self.allocator.realloc(&T, self.dynamic_segments, new_cap_shelf_count); + self.dynamic_segments = try self.allocator.realloc(*T, self.dynamic_segments, new_cap_shelf_count); var i = old_shelf_count; errdefer { self.freeShelves(i, old_shelf_count); - self.dynamic_segments = self.allocator.shrink(&T, self.dynamic_segments, old_shelf_count); + self.dynamic_segments = self.allocator.shrink(*T, self.dynamic_segments, old_shelf_count); } while (i < new_cap_shelf_count) : (i += 1) { self.dynamic_segments[i] = (try self.allocator.alloc(T, shelfSize(i))).ptr; @@ -173,12 +173,12 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type } /// Only shrinks capacity or retains current capacity - pub fn shrinkCapacity(self: &Self, new_capacity: usize) void { + pub fn shrinkCapacity(self: *Self, new_capacity: usize) void { if (new_capacity <= prealloc_item_count) { const len = ShelfIndex(self.dynamic_segments.len); self.freeShelves(len, 0); self.allocator.free(self.dynamic_segments); - self.dynamic_segments = []&T{}; + self.dynamic_segments = []*T{}; return; } @@ -190,10 +190,10 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type } self.freeShelves(old_shelf_count, new_cap_shelf_count); - self.dynamic_segments = self.allocator.shrink(&T, self.dynamic_segments, new_cap_shelf_count); + self.dynamic_segments = self.allocator.shrink(*T, self.dynamic_segments, new_cap_shelf_count); } - pub fn uncheckedAt(self: &Self, index: usize) &T { + pub fn uncheckedAt(self: *Self, index: usize) *T { if (index < prealloc_item_count) { return &self.prealloc_segment[index]; } @@ -230,7 +230,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type return list_index + prealloc_item_count - (usize(1) << ((prealloc_exp + 1) + shelf_index)); } - fn freeShelves(self: &Self, from_count: ShelfIndex, to_count: ShelfIndex) void { + fn freeShelves(self: *Self, from_count: ShelfIndex, to_count: ShelfIndex) void { var i = from_count; while (i != to_count) { i -= 1; @@ -239,13 +239,13 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) 
type } pub const Iterator = struct { - list: &Self, + list: *Self, index: usize, box_index: usize, shelf_index: ShelfIndex, shelf_size: usize, - pub fn next(it: &Iterator) ?&T { + pub fn next(it: *Iterator) ?*T { if (it.index >= it.list.len) return null; if (it.index < prealloc_item_count) { const ptr = &it.list.prealloc_segment[it.index]; @@ -269,7 +269,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type return ptr; } - pub fn prev(it: &Iterator) ?&T { + pub fn prev(it: *Iterator) ?*T { if (it.index == 0) return null; it.index -= 1; @@ -286,7 +286,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type return &it.list.dynamic_segments[it.shelf_index][it.box_index]; } - pub fn peek(it: &Iterator) ?&T { + pub fn peek(it: *Iterator) ?*T { if (it.index >= it.list.len) return null; if (it.index < prealloc_item_count) @@ -295,7 +295,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type return &it.list.dynamic_segments[it.shelf_index][it.box_index]; } - pub fn set(it: &Iterator, index: usize) void { + pub fn set(it: *Iterator, index: usize) void { it.index = index; if (index < prealloc_item_count) return; it.shelf_index = shelfIndex(index); @@ -304,7 +304,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type } }; - pub fn iterator(self: &Self, start_index: usize) Iterator { + pub fn iterator(self: *Self, start_index: usize) Iterator { var it = Iterator{ .list = self, .index = undefined, @@ -331,7 +331,7 @@ test "std.SegmentedList" { try testSegmentedList(16, a); } -fn testSegmentedList(comptime prealloc: usize, allocator: &Allocator) !void { +fn testSegmentedList(comptime prealloc: usize, allocator: *Allocator) !void { var list = SegmentedList(i32, prealloc).init(allocator); defer list.deinit(); diff --git a/std/sort.zig b/std/sort.zig index 4e17718241..1b44c18dd9 100644 --- a/std/sort.zig +++ b/std/sort.zig @@ -5,7 +5,7 @@ const math = std.math; const builtin = @import("builtin"); /// Stable in-place sort. O(n) best case, O(pow(n, 2)) worst case. O(1) memory (no allocator required). -pub fn insertionSort(comptime T: type, items: []T, lessThan: fn (lhs: &const T, rhs: &const T) bool) void { +pub fn insertionSort(comptime T: type, items: []T, lessThan: fn (lhs: *const T, rhs: *const T) bool) void { { var i: usize = 1; while (i < items.len) : (i += 1) { @@ -30,7 +30,7 @@ const Range = struct { }; } - fn length(self: &const Range) usize { + fn length(self: *const Range) usize { return self.end - self.start; } }; @@ -58,12 +58,12 @@ const Iterator = struct { }; } - fn begin(self: &Iterator) void { + fn begin(self: *Iterator) void { self.numerator = 0; self.decimal = 0; } - fn nextRange(self: &Iterator) Range { + fn nextRange(self: *Iterator) Range { const start = self.decimal; self.decimal += self.decimal_step; @@ -79,11 +79,11 @@ const Iterator = struct { }; } - fn finished(self: &Iterator) bool { + fn finished(self: *Iterator) bool { return self.decimal >= self.size; } - fn nextLevel(self: &Iterator) bool { + fn nextLevel(self: *Iterator) bool { self.decimal_step += self.decimal_step; self.numerator_step += self.numerator_step; if (self.numerator_step >= self.denominator) { @@ -94,7 +94,7 @@ const Iterator = struct { return (self.decimal_step < self.size); } - fn length(self: &Iterator) usize { + fn length(self: *Iterator) usize { return self.decimal_step; } }; @@ -108,7 +108,7 @@ const Pull = struct { /// Stable in-place sort. 
O(n) best case, O(n*log(n)) worst case and average case. O(1) memory (no allocator required). /// Currently implemented as block sort. -pub fn sort(comptime T: type, items: []T, lessThan: fn (lhs: &const T, rhs: &const T) bool) void { +pub fn sort(comptime T: type, items: []T, lessThan: fn (lhs: *const T, rhs: *const T) bool) void { // Implementation ported from https://github.com/BonzaiThePenguin/WikiSort/blob/master/WikiSort.c var cache: [512]T = undefined; @@ -741,7 +741,7 @@ pub fn sort(comptime T: type, items: []T, lessThan: fn (lhs: &const T, rhs: &con } // merge operation without a buffer -fn mergeInPlace(comptime T: type, items: []T, A_arg: &const Range, B_arg: &const Range, lessThan: fn (&const T, &const T) bool) void { +fn mergeInPlace(comptime T: type, items: []T, A_arg: *const Range, B_arg: *const Range, lessThan: fn (*const T, *const T) bool) void { if (A_arg.length() == 0 or B_arg.length() == 0) return; // this just repeatedly binary searches into B and rotates A into position. @@ -783,7 +783,7 @@ fn mergeInPlace(comptime T: type, items: []T, A_arg: &const Range, B_arg: &const } // merge operation using an internal buffer -fn mergeInternal(comptime T: type, items: []T, A: &const Range, B: &const Range, lessThan: fn (&const T, &const T) bool, buffer: &const Range) void { +fn mergeInternal(comptime T: type, items: []T, A: *const Range, B: *const Range, lessThan: fn (*const T, *const T) bool, buffer: *const Range) void { // whenever we find a value to add to the final array, swap it with the value that's already in that spot // when this algorithm is finished, 'buffer' will contain its original contents, but in a different order var A_count: usize = 0; @@ -819,7 +819,7 @@ fn blockSwap(comptime T: type, items: []T, start1: usize, start2: usize, block_s // combine a linear search with a binary search to reduce the number of comparisons in situations // where have some idea as to how many unique values there are and where the next value might be -fn findFirstForward(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn (&const T, &const T) bool, unique: usize) usize { +fn findFirstForward(comptime T: type, items: []T, value: *const T, range: *const Range, lessThan: fn (*const T, *const T) bool, unique: usize) usize { if (range.length() == 0) return range.start; const skip = math.max(range.length() / unique, usize(1)); @@ -833,7 +833,7 @@ fn findFirstForward(comptime T: type, items: []T, value: &const T, range: &const return binaryFirst(T, items, value, Range.init(index - skip, index), lessThan); } -fn findFirstBackward(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn (&const T, &const T) bool, unique: usize) usize { +fn findFirstBackward(comptime T: type, items: []T, value: *const T, range: *const Range, lessThan: fn (*const T, *const T) bool, unique: usize) usize { if (range.length() == 0) return range.start; const skip = math.max(range.length() / unique, usize(1)); @@ -847,7 +847,7 @@ fn findFirstBackward(comptime T: type, items: []T, value: &const T, range: &cons return binaryFirst(T, items, value, Range.init(index, index + skip), lessThan); } -fn findLastForward(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn (&const T, &const T) bool, unique: usize) usize { +fn findLastForward(comptime T: type, items: []T, value: *const T, range: *const Range, lessThan: fn (*const T, *const T) bool, unique: usize) usize { if (range.length() == 0) return range.start; const skip = math.max(range.length() 
/ unique, usize(1)); @@ -861,7 +861,7 @@ fn findLastForward(comptime T: type, items: []T, value: &const T, range: &const return binaryLast(T, items, value, Range.init(index - skip, index), lessThan); } -fn findLastBackward(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn (&const T, &const T) bool, unique: usize) usize { +fn findLastBackward(comptime T: type, items: []T, value: *const T, range: *const Range, lessThan: fn (*const T, *const T) bool, unique: usize) usize { if (range.length() == 0) return range.start; const skip = math.max(range.length() / unique, usize(1)); @@ -875,7 +875,7 @@ fn findLastBackward(comptime T: type, items: []T, value: &const T, range: &const return binaryLast(T, items, value, Range.init(index, index + skip), lessThan); } -fn binaryFirst(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn (&const T, &const T) bool) usize { +fn binaryFirst(comptime T: type, items: []T, value: *const T, range: *const Range, lessThan: fn (*const T, *const T) bool) usize { var start = range.start; var end = range.end - 1; if (range.start >= range.end) return range.end; @@ -893,7 +893,7 @@ fn binaryFirst(comptime T: type, items: []T, value: &const T, range: &const Rang return start; } -fn binaryLast(comptime T: type, items: []T, value: &const T, range: &const Range, lessThan: fn (&const T, &const T) bool) usize { +fn binaryLast(comptime T: type, items: []T, value: *const T, range: *const Range, lessThan: fn (*const T, *const T) bool) usize { var start = range.start; var end = range.end - 1; if (range.start >= range.end) return range.end; @@ -911,7 +911,7 @@ fn binaryLast(comptime T: type, items: []T, value: &const T, range: &const Range return start; } -fn mergeInto(comptime T: type, from: []T, A: &const Range, B: &const Range, lessThan: fn (&const T, &const T) bool, into: []T) void { +fn mergeInto(comptime T: type, from: []T, A: *const Range, B: *const Range, lessThan: fn (*const T, *const T) bool, into: []T) void { var A_index: usize = A.start; var B_index: usize = B.start; const A_last = A.end; @@ -941,7 +941,7 @@ fn mergeInto(comptime T: type, from: []T, A: &const Range, B: &const Range, less } } -fn mergeExternal(comptime T: type, items: []T, A: &const Range, B: &const Range, lessThan: fn (&const T, &const T) bool, cache: []T) void { +fn mergeExternal(comptime T: type, items: []T, A: *const Range, B: *const Range, lessThan: fn (*const T, *const T) bool, cache: []T) void { // A fits into the cache, so use that instead of the internal buffer var A_index: usize = 0; var B_index: usize = B.start; @@ -969,26 +969,26 @@ fn mergeExternal(comptime T: type, items: []T, A: &const Range, B: &const Range, mem.copy(T, items[insert_index..], cache[A_index..A_last]); } -fn swap(comptime T: type, items: []T, lessThan: fn (lhs: &const T, rhs: &const T) bool, order: &[8]u8, x: usize, y: usize) void { +fn swap(comptime T: type, items: []T, lessThan: fn (lhs: *const T, rhs: *const T) bool, order: *[8]u8, x: usize, y: usize) void { if (lessThan(items[y], items[x]) or ((order.*)[x] > (order.*)[y] and !lessThan(items[x], items[y]))) { mem.swap(T, &items[x], &items[y]); mem.swap(u8, &(order.*)[x], &(order.*)[y]); } } -fn i32asc(lhs: &const i32, rhs: &const i32) bool { +fn i32asc(lhs: *const i32, rhs: *const i32) bool { return lhs.* < rhs.*; } -fn i32desc(lhs: &const i32, rhs: &const i32) bool { +fn i32desc(lhs: *const i32, rhs: *const i32) bool { return rhs.* < lhs.*; } -fn u8asc(lhs: &const u8, rhs: &const u8) bool { +fn u8asc(lhs: *const u8, 
rhs: *const u8) bool { return lhs.* < rhs.*; } -fn u8desc(lhs: &const u8, rhs: &const u8) bool { +fn u8desc(lhs: *const u8, rhs: *const u8) bool { return rhs.* < lhs.*; } @@ -1125,7 +1125,7 @@ const IdAndValue = struct { id: usize, value: i32, }; -fn cmpByValue(a: &const IdAndValue, b: &const IdAndValue) bool { +fn cmpByValue(a: *const IdAndValue, b: *const IdAndValue) bool { return i32asc(a.value, b.value); } @@ -1324,7 +1324,7 @@ test "sort fuzz testing" { var fixed_buffer_mem: [100 * 1024]u8 = undefined; -fn fuzzTest(rng: &std.rand.Random) void { +fn fuzzTest(rng: *std.rand.Random) void { const array_size = rng.range(usize, 0, 1000); var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]); var array = fixed_allocator.allocator.alloc(IdAndValue, array_size) catch unreachable; @@ -1345,7 +1345,7 @@ fn fuzzTest(rng: &std.rand.Random) void { } } -pub fn min(comptime T: type, items: []T, lessThan: fn (lhs: &const T, rhs: &const T) bool) T { +pub fn min(comptime T: type, items: []T, lessThan: fn (lhs: *const T, rhs: *const T) bool) T { var i: usize = 0; var smallest = items[0]; for (items[1..]) |item| { @@ -1356,7 +1356,7 @@ pub fn min(comptime T: type, items: []T, lessThan: fn (lhs: &const T, rhs: &cons return smallest; } -pub fn max(comptime T: type, items: []T, lessThan: fn (lhs: &const T, rhs: &const T) bool) T { +pub fn max(comptime T: type, items: []T, lessThan: fn (lhs: *const T, rhs: *const T) bool) T { var i: usize = 0; var biggest = items[0]; for (items[1..]) |item| { diff --git a/std/special/bootstrap.zig b/std/special/bootstrap.zig index c10f4aa806..5ed7874ca5 100644 --- a/std/special/bootstrap.zig +++ b/std/special/bootstrap.zig @@ -5,7 +5,7 @@ const root = @import("@root"); const std = @import("std"); const builtin = @import("builtin"); -var argc_ptr: &usize = undefined; +var argc_ptr: *usize = undefined; comptime { const strong_linkage = builtin.GlobalLinkage.Strong; @@ -28,12 +28,12 @@ nakedcc fn _start() noreturn { switch (builtin.arch) { builtin.Arch.x86_64 => { argc_ptr = asm ("lea (%%rsp), %[argc]" - : [argc] "=r" (-> &usize) + : [argc] "=r" (-> *usize) ); }, builtin.Arch.i386 => { argc_ptr = asm ("lea (%%esp), %[argc]" - : [argc] "=r" (-> &usize) + : [argc] "=r" (-> *usize) ); }, else => @compileError("unsupported arch"), @@ -51,13 +51,13 @@ extern fn WinMainCRTStartup() noreturn { fn posixCallMainAndExit() noreturn { const argc = argc_ptr.*; - const argv = @ptrCast(&&u8, &argc_ptr[1]); - const envp_nullable = @ptrCast(&?&u8, &argv[argc + 1]); + const argv = @ptrCast(**u8, &argc_ptr[1]); + const envp_nullable = @ptrCast(*?*u8, &argv[argc + 1]); var envp_count: usize = 0; while (envp_nullable[envp_count]) |_| : (envp_count += 1) {} - const envp = @ptrCast(&&u8, envp_nullable)[0..envp_count]; + const envp = @ptrCast(**u8, envp_nullable)[0..envp_count]; if (builtin.os == builtin.Os.linux) { - const auxv = &@ptrCast(&usize, envp.ptr)[envp_count + 1]; + const auxv = &@ptrCast(*usize, envp.ptr)[envp_count + 1]; var i: usize = 0; while (auxv[i] != 0) : (i += 2) { if (auxv[i] < std.os.linux_aux_raw.len) std.os.linux_aux_raw[auxv[i]] = auxv[i + 1]; @@ -68,16 +68,16 @@ fn posixCallMainAndExit() noreturn { std.os.posix.exit(callMainWithArgs(argc, argv, envp)); } -fn callMainWithArgs(argc: usize, argv: &&u8, envp: []&u8) u8 { +fn callMainWithArgs(argc: usize, argv: **u8, envp: []*u8) u8 { std.os.ArgIteratorPosix.raw = argv[0..argc]; std.os.posix_environ_raw = envp; return callMain(); } -extern fn main(c_argc: i32, c_argv: &&u8, c_envp: &?&u8) i32 { +extern fn 
main(c_argc: i32, c_argv: **u8, c_envp: *?*u8) i32 {
     var env_count: usize = 0;
     while (c_envp[env_count] != null) : (env_count += 1) {}
-    const envp = @ptrCast(&&u8, c_envp)[0..env_count];
+    const envp = @ptrCast(**u8, c_envp)[0..env_count];
     return callMainWithArgs(usize(c_argc), c_argv, envp);
 }
diff --git a/std/special/build_file_template.zig b/std/special/build_file_template.zig
index 1c06c93cdc..1e3eb01136 100644
--- a/std/special/build_file_template.zig
+++ b/std/special/build_file_template.zig
@@ -1,10 +1,10 @@
 const Builder = @import("std").build.Builder;
 
-pub fn build(b: &Builder) void {
+pub fn build(b: *Builder) void {
     const mode = b.standardReleaseOptions();
     const exe = b.addExecutable("YOUR_NAME_HERE", "src/main.zig");
     exe.setBuildMode(mode);
 
-    b.default_step.dependOn(&exe.step);
+    b.default_step.dependOn(&exe.step);
     b.installArtifact(exe);
 }
diff --git a/std/special/build_runner.zig b/std/special/build_runner.zig
index 3ff11bbee4..3471d6ed21 100644
--- a/std/special/build_runner.zig
+++ b/std/special/build_runner.zig
@@ -129,7 +129,7 @@ pub fn main() !void {
     };
 }
 
-fn runBuild(builder: &Builder) error!void {
+fn runBuild(builder: *Builder) error!void {
     switch (@typeId(@typeOf(root.build).ReturnType)) {
         builtin.TypeId.Void => root.build(builder),
         builtin.TypeId.ErrorUnion => try root.build(builder),
@@ -137,7 +137,7 @@ fn runBuild(builder: &Builder) error!void {
     }
 }
 
-fn usage(builder: &Builder, already_ran_build: bool, out_stream: var) !void {
+fn usage(builder: *Builder, already_ran_build: bool, out_stream: var) !void {
     // run the build script to collect the options
     if (!already_ran_build) {
         builder.setInstallPrefix(null);
@@ -195,7 +195,7 @@ fn usage(builder: &Builder, already_ran_build: bool, out_stream: var) !void {
     );
 }
 
-fn usageAndErr(builder: &Builder, already_ran_build: bool, out_stream: var) error {
+fn usageAndErr(builder: *Builder, already_ran_build: bool, out_stream: var) error {
     usage(builder, already_ran_build, out_stream) catch {};
     return error.InvalidArgs;
 }
diff --git a/std/special/builtin.zig b/std/special/builtin.zig
index 63149d5161..9c9cd35103 100644
--- a/std/special/builtin.zig
+++ b/std/special/builtin.zig
@@ -5,7 +5,7 @@ const builtin = @import("builtin");
 
 // Avoid dragging in the runtime safety mechanisms into this .o file,
 // unless we're trying to test this file.
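Note that the rewrite in this patch is purely syntactic: "&T" becomes "*T" only in type position (parameters, fields, return types), while address-of expressions such as &exe.step or &items[x] and postfix dereference (ptr.*) keep their old spelling. A minimal sketch of the rule; global_counter and counter_ptr are made-up names for illustration, and i32asc is the comparator from std/sort.zig above:

    // Pointer types now use '*' (illustrative sketch, not part of the patch):
    var global_counter: usize = 0;
    var counter_ptr: *usize = &global_counter; // type is '*usize'; '&' still takes the address
    fn i32asc(lhs: *const i32, rhs: *const i32) bool {
        return lhs.* < rhs.*; // postfix dereference is unchanged
    }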
-pub fn panic(msg: []const u8, error_return_trace: ?&builtin.StackTrace) noreturn { +pub fn panic(msg: []const u8, error_return_trace: ?*builtin.StackTrace) noreturn { if (builtin.is_test) { @setCold(true); @import("std").debug.panic("{}", msg); @@ -14,7 +14,7 @@ pub fn panic(msg: []const u8, error_return_trace: ?&builtin.StackTrace) noreturn } } -export fn memset(dest: ?&u8, c: u8, n: usize) ?&u8 { +export fn memset(dest: ?*u8, c: u8, n: usize) ?*u8 { @setRuntimeSafety(false); var index: usize = 0; @@ -24,7 +24,7 @@ export fn memset(dest: ?&u8, c: u8, n: usize) ?&u8 { return dest; } -export fn memcpy(noalias dest: ?&u8, noalias src: ?&const u8, n: usize) ?&u8 { +export fn memcpy(noalias dest: ?*u8, noalias src: ?*const u8, n: usize) ?*u8 { @setRuntimeSafety(false); var index: usize = 0; @@ -34,7 +34,7 @@ export fn memcpy(noalias dest: ?&u8, noalias src: ?&const u8, n: usize) ?&u8 { return dest; } -export fn memmove(dest: ?&u8, src: ?&const u8, n: usize) ?&u8 { +export fn memmove(dest: ?*u8, src: ?*const u8, n: usize) ?*u8 { @setRuntimeSafety(false); if (@ptrToInt(dest) < @ptrToInt(src)) { diff --git a/std/special/compiler_rt/index.zig b/std/special/compiler_rt/index.zig index 3e014d4d16..d328324320 100644 --- a/std/special/compiler_rt/index.zig +++ b/std/special/compiler_rt/index.zig @@ -78,7 +78,7 @@ const __udivmoddi4 = @import("udivmoddi4.zig").__udivmoddi4; // Avoid dragging in the runtime safety mechanisms into this .o file, // unless we're trying to test this file. -pub fn panic(msg: []const u8, error_return_trace: ?&builtin.StackTrace) noreturn { +pub fn panic(msg: []const u8, error_return_trace: ?*builtin.StackTrace) noreturn { @setCold(true); if (is_test) { std.debug.panic("{}", msg); @@ -284,7 +284,7 @@ nakedcc fn ___chkstk_ms() align(4) void { ); } -extern fn __udivmodsi4(a: u32, b: u32, rem: &u32) u32 { +extern fn __udivmodsi4(a: u32, b: u32, rem: *u32) u32 { @setRuntimeSafety(is_test); const d = __udivsi3(a, b); diff --git a/std/special/compiler_rt/udivmod.zig b/std/special/compiler_rt/udivmod.zig index 0dee5e45f6..894dd02239 100644 --- a/std/special/compiler_rt/udivmod.zig +++ b/std/special/compiler_rt/udivmod.zig @@ -7,15 +7,15 @@ const low = switch (builtin.endian) { }; const high = 1 - low; -pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem: ?&DoubleInt) DoubleInt { +pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem: ?*DoubleInt) DoubleInt { @setRuntimeSafety(is_test); const SingleInt = @IntType(false, @divExact(DoubleInt.bit_count, 2)); const SignedDoubleInt = @IntType(true, DoubleInt.bit_count); const Log2SingleInt = @import("std").math.Log2Int(SingleInt); - const n = @ptrCast(&const [2]SingleInt, &a).*; // TODO issue #421 - const d = @ptrCast(&const [2]SingleInt, &b).*; // TODO issue #421 + const n = @ptrCast(*const [2]SingleInt, &a).*; // TODO issue #421 + const d = @ptrCast(*const [2]SingleInt, &b).*; // TODO issue #421 var q: [2]SingleInt = undefined; var r: [2]SingleInt = undefined; var sr: c_uint = undefined; @@ -57,7 +57,7 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem: if (maybe_rem) |rem| { r[high] = n[high] % d[high]; r[low] = 0; - rem.* = @ptrCast(&align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421 + rem.* = @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421 } return n[high] / d[high]; } @@ -69,7 +69,7 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem: if (maybe_rem) |rem| { r[low] = n[low]; 
r[high] = n[high] & (d[high] - 1); - rem.* = @ptrCast(&align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421 + rem.* = @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421 } return n[high] >> Log2SingleInt(@ctz(d[high])); } @@ -109,7 +109,7 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem: sr = @ctz(d[low]); q[high] = n[high] >> Log2SingleInt(sr); q[low] = (n[high] << Log2SingleInt(SingleInt.bit_count - sr)) | (n[low] >> Log2SingleInt(sr)); - return @ptrCast(&align(@alignOf(SingleInt)) DoubleInt, &q[0]).*; // TODO issue #421 + return @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &q[0]).*; // TODO issue #421 } // K X // --- @@ -183,13 +183,13 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem: // r.all -= b; // carry = 1; // } - r_all = @ptrCast(&align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421 + r_all = @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421 const s: SignedDoubleInt = SignedDoubleInt(b -% r_all -% 1) >> (DoubleInt.bit_count - 1); carry = u32(s & 1); r_all -= b & @bitCast(DoubleInt, s); - r = @ptrCast(&[2]SingleInt, &r_all).*; // TODO issue #421 + r = @ptrCast(*[2]SingleInt, &r_all).*; // TODO issue #421 } - const q_all = ((@ptrCast(&align(@alignOf(SingleInt)) DoubleInt, &q[0]).*) << 1) | carry; // TODO issue #421 + const q_all = ((@ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &q[0]).*) << 1) | carry; // TODO issue #421 if (maybe_rem) |rem| { rem.* = r_all; } diff --git a/std/special/compiler_rt/udivmoddi4.zig b/std/special/compiler_rt/udivmoddi4.zig index 6cc54bb6bf..de86c845e5 100644 --- a/std/special/compiler_rt/udivmoddi4.zig +++ b/std/special/compiler_rt/udivmoddi4.zig @@ -1,7 +1,7 @@ const udivmod = @import("udivmod.zig").udivmod; const builtin = @import("builtin"); -pub extern fn __udivmoddi4(a: u64, b: u64, maybe_rem: ?&u64) u64 { +pub extern fn __udivmoddi4(a: u64, b: u64, maybe_rem: ?*u64) u64 { @setRuntimeSafety(builtin.is_test); return udivmod(u64, a, b, maybe_rem); } diff --git a/std/special/compiler_rt/udivmodti4.zig b/std/special/compiler_rt/udivmodti4.zig index 816f82b900..3fa596442f 100644 --- a/std/special/compiler_rt/udivmodti4.zig +++ b/std/special/compiler_rt/udivmodti4.zig @@ -2,12 +2,12 @@ const udivmod = @import("udivmod.zig").udivmod; const builtin = @import("builtin"); const compiler_rt = @import("index.zig"); -pub extern fn __udivmodti4(a: u128, b: u128, maybe_rem: ?&u128) u128 { +pub extern fn __udivmodti4(a: u128, b: u128, maybe_rem: ?*u128) u128 { @setRuntimeSafety(builtin.is_test); return udivmod(u128, a, b, maybe_rem); } -pub extern fn __udivmodti4_windows_x86_64(a: &const u128, b: &const u128, maybe_rem: ?&u128) void { +pub extern fn __udivmodti4_windows_x86_64(a: *const u128, b: *const u128, maybe_rem: ?*u128) void { @setRuntimeSafety(builtin.is_test); compiler_rt.setXmm0(u128, udivmod(u128, a.*, b.*, maybe_rem)); } diff --git a/std/special/compiler_rt/udivti3.zig b/std/special/compiler_rt/udivti3.zig index ad0f09e733..510e21ac1d 100644 --- a/std/special/compiler_rt/udivti3.zig +++ b/std/special/compiler_rt/udivti3.zig @@ -6,7 +6,7 @@ pub extern fn __udivti3(a: u128, b: u128) u128 { return udivmodti4.__udivmodti4(a, b, null); } -pub extern fn __udivti3_windows_x86_64(a: &const u128, b: &const u128) void { +pub extern fn __udivti3_windows_x86_64(a: *const u128, b: *const u128) void { @setRuntimeSafety(builtin.is_test); udivmodti4.__udivmodti4_windows_x86_64(a, b, null); } diff --git 
a/std/special/compiler_rt/umodti3.zig b/std/special/compiler_rt/umodti3.zig index 11e2955bb3..9551e63a6f 100644 --- a/std/special/compiler_rt/umodti3.zig +++ b/std/special/compiler_rt/umodti3.zig @@ -9,7 +9,7 @@ pub extern fn __umodti3(a: u128, b: u128) u128 { return r; } -pub extern fn __umodti3_windows_x86_64(a: &const u128, b: &const u128) void { +pub extern fn __umodti3_windows_x86_64(a: *const u128, b: *const u128) void { @setRuntimeSafety(builtin.is_test); compiler_rt.setXmm0(u128, __umodti3(a.*, b.*)); } diff --git a/std/special/panic.zig b/std/special/panic.zig index 8f933ddd97..ca1caea73c 100644 --- a/std/special/panic.zig +++ b/std/special/panic.zig @@ -6,7 +6,7 @@ const builtin = @import("builtin"); const std = @import("std"); -pub fn panic(msg: []const u8, error_return_trace: ?&builtin.StackTrace) noreturn { +pub fn panic(msg: []const u8, error_return_trace: ?*builtin.StackTrace) noreturn { @setCold(true); switch (builtin.os) { // TODO: fix panic in zen. diff --git a/std/unicode.zig b/std/unicode.zig index 36f04778f4..3d1bebdb55 100644 --- a/std/unicode.zig +++ b/std/unicode.zig @@ -196,7 +196,7 @@ pub const Utf8View = struct { } } - pub fn iterator(s: &const Utf8View) Utf8Iterator { + pub fn iterator(s: *const Utf8View) Utf8Iterator { return Utf8Iterator{ .bytes = s.bytes, .i = 0, @@ -208,7 +208,7 @@ const Utf8Iterator = struct { bytes: []const u8, i: usize, - pub fn nextCodepointSlice(it: &Utf8Iterator) ?[]const u8 { + pub fn nextCodepointSlice(it: *Utf8Iterator) ?[]const u8 { if (it.i >= it.bytes.len) { return null; } @@ -219,7 +219,7 @@ const Utf8Iterator = struct { return it.bytes[it.i - cp_len .. it.i]; } - pub fn nextCodepoint(it: &Utf8Iterator) ?u32 { + pub fn nextCodepoint(it: *Utf8Iterator) ?u32 { const slice = it.nextCodepointSlice() ?? 
return null; switch (slice.len) { diff --git a/std/zig/ast.zig b/std/zig/ast.zig index 56d4f9c393..4d25ceb7db 100644 --- a/std/zig/ast.zig +++ b/std/zig/ast.zig @@ -9,26 +9,26 @@ pub const TokenIndex = usize; pub const Tree = struct { source: []const u8, tokens: TokenList, - root_node: &Node.Root, + root_node: *Node.Root, arena_allocator: std.heap.ArenaAllocator, errors: ErrorList, pub const TokenList = SegmentedList(Token, 64); pub const ErrorList = SegmentedList(Error, 0); - pub fn deinit(self: &Tree) void { + pub fn deinit(self: *Tree) void { self.arena_allocator.deinit(); } - pub fn renderError(self: &Tree, parse_error: &Error, stream: var) !void { + pub fn renderError(self: *Tree, parse_error: *Error, stream: var) !void { return parse_error.render(&self.tokens, stream); } - pub fn tokenSlice(self: &Tree, token_index: TokenIndex) []const u8 { + pub fn tokenSlice(self: *Tree, token_index: TokenIndex) []const u8 { return self.tokenSlicePtr(self.tokens.at(token_index)); } - pub fn tokenSlicePtr(self: &Tree, token: &const Token) []const u8 { + pub fn tokenSlicePtr(self: *Tree, token: *const Token) []const u8 { return self.source[token.start..token.end]; } @@ -39,7 +39,7 @@ pub const Tree = struct { line_end: usize, }; - pub fn tokenLocationPtr(self: &Tree, start_index: usize, token: &const Token) Location { + pub fn tokenLocationPtr(self: *Tree, start_index: usize, token: *const Token) Location { var loc = Location{ .line = 0, .column = 0, @@ -64,24 +64,24 @@ pub const Tree = struct { return loc; } - pub fn tokenLocation(self: &Tree, start_index: usize, token_index: TokenIndex) Location { + pub fn tokenLocation(self: *Tree, start_index: usize, token_index: TokenIndex) Location { return self.tokenLocationPtr(start_index, self.tokens.at(token_index)); } - pub fn tokensOnSameLine(self: &Tree, token1_index: TokenIndex, token2_index: TokenIndex) bool { + pub fn tokensOnSameLine(self: *Tree, token1_index: TokenIndex, token2_index: TokenIndex) bool { return self.tokensOnSameLinePtr(self.tokens.at(token1_index), self.tokens.at(token2_index)); } - pub fn tokensOnSameLinePtr(self: &Tree, token1: &const Token, token2: &const Token) bool { + pub fn tokensOnSameLinePtr(self: *Tree, token1: *const Token, token2: *const Token) bool { return mem.indexOfScalar(u8, self.source[token1.end..token2.start], '\n') == null; } - pub fn dump(self: &Tree) void { + pub fn dump(self: *Tree) void { self.root_node.base.dump(0); } /// Skips over comments - pub fn prevToken(self: &Tree, token_index: TokenIndex) TokenIndex { + pub fn prevToken(self: *Tree, token_index: TokenIndex) TokenIndex { var index = token_index - 1; while (self.tokens.at(index).id == Token.Id.LineComment) { index -= 1; @@ -90,7 +90,7 @@ pub const Tree = struct { } /// Skips over comments - pub fn nextToken(self: &Tree, token_index: TokenIndex) TokenIndex { + pub fn nextToken(self: *Tree, token_index: TokenIndex) TokenIndex { var index = token_index + 1; while (self.tokens.at(index).id == Token.Id.LineComment) { index += 1; @@ -120,7 +120,7 @@ pub const Error = union(enum) { ExpectedToken: ExpectedToken, ExpectedCommaOrEnd: ExpectedCommaOrEnd, - pub fn render(self: &const Error, tokens: &Tree.TokenList, stream: var) !void { + pub fn render(self: *const Error, tokens: *Tree.TokenList, stream: var) !void { switch (self.*) { // TODO https://github.com/ziglang/zig/issues/683 @TagType(Error).InvalidToken => |*x| return x.render(tokens, stream), @@ -145,7 +145,7 @@ pub const Error = union(enum) { } } - pub fn loc(self: &const Error) TokenIndex { + pub fn 
loc(self: *const Error) TokenIndex { switch (self.*) { // TODO https://github.com/ziglang/zig/issues/683 @TagType(Error).InvalidToken => |x| return x.token, @@ -188,17 +188,17 @@ pub const Error = union(enum) { pub const ExtraVolatileQualifier = SimpleError("Extra volatile qualifier"); pub const ExpectedCall = struct { - node: &Node, + node: *Node, - pub fn render(self: &const ExpectedCall, tokens: &Tree.TokenList, stream: var) !void { + pub fn render(self: *const ExpectedCall, tokens: *Tree.TokenList, stream: var) !void { return stream.print("expected " ++ @tagName(@TagType(Node.SuffixOp.Op).Call) ++ ", found {}", @tagName(self.node.id)); } }; pub const ExpectedCallOrFnProto = struct { - node: &Node, + node: *Node, - pub fn render(self: &const ExpectedCallOrFnProto, tokens: &Tree.TokenList, stream: var) !void { + pub fn render(self: *const ExpectedCallOrFnProto, tokens: *Tree.TokenList, stream: var) !void { return stream.print("expected " ++ @tagName(@TagType(Node.SuffixOp.Op).Call) ++ " or " ++ @tagName(Node.Id.FnProto) ++ ", found {}", @tagName(self.node.id)); } }; @@ -207,7 +207,7 @@ pub const Error = union(enum) { token: TokenIndex, expected_id: @TagType(Token.Id), - pub fn render(self: &const ExpectedToken, tokens: &Tree.TokenList, stream: var) !void { + pub fn render(self: *const ExpectedToken, tokens: *Tree.TokenList, stream: var) !void { const token_name = @tagName(tokens.at(self.token).id); return stream.print("expected {}, found {}", @tagName(self.expected_id), token_name); } @@ -217,7 +217,7 @@ pub const Error = union(enum) { token: TokenIndex, end_id: @TagType(Token.Id), - pub fn render(self: &const ExpectedCommaOrEnd, tokens: &Tree.TokenList, stream: var) !void { + pub fn render(self: *const ExpectedCommaOrEnd, tokens: *Tree.TokenList, stream: var) !void { const token_name = @tagName(tokens.at(self.token).id); return stream.print("expected ',' or {}, found {}", @tagName(self.end_id), token_name); } @@ -229,7 +229,7 @@ pub const Error = union(enum) { token: TokenIndex, - pub fn render(self: &const ThisError, tokens: &Tree.TokenList, stream: var) !void { + pub fn render(self: *const ThisError, tokens: *Tree.TokenList, stream: var) !void { const token_name = @tagName(tokens.at(self.token).id); return stream.print(msg, token_name); } @@ -242,7 +242,7 @@ pub const Error = union(enum) { token: TokenIndex, - pub fn render(self: &const ThisError, tokens: &Tree.TokenList, stream: var) !void { + pub fn render(self: *const ThisError, tokens: *Tree.TokenList, stream: var) !void { return stream.write(msg); } }; @@ -320,14 +320,14 @@ pub const Node = struct { FieldInitializer, }; - pub fn cast(base: &Node, comptime T: type) ?&T { + pub fn cast(base: *Node, comptime T: type) ?*T { if (base.id == comptime typeToId(T)) { return @fieldParentPtr(T, "base", base); } return null; } - pub fn iterate(base: &Node, index: usize) ?&Node { + pub fn iterate(base: *Node, index: usize) ?*Node { comptime var i = 0; inline while (i < @memberCount(Id)) : (i += 1) { if (base.id == @field(Id, @memberName(Id, i))) { @@ -338,7 +338,7 @@ pub const Node = struct { unreachable; } - pub fn firstToken(base: &Node) TokenIndex { + pub fn firstToken(base: *Node) TokenIndex { comptime var i = 0; inline while (i < @memberCount(Id)) : (i += 1) { if (base.id == @field(Id, @memberName(Id, i))) { @@ -349,7 +349,7 @@ pub const Node = struct { unreachable; } - pub fn lastToken(base: &Node) TokenIndex { + pub fn lastToken(base: *Node) TokenIndex { comptime var i = 0; inline while (i < @memberCount(Id)) : (i += 1) { if (base.id 
== @field(Id, @memberName(Id, i))) { @@ -370,7 +370,7 @@ pub const Node = struct { unreachable; } - pub fn requireSemiColon(base: &const Node) bool { + pub fn requireSemiColon(base: *const Node) bool { var n = base; while (true) { switch (n.id) { @@ -443,7 +443,7 @@ pub const Node = struct { } } - pub fn dump(self: &Node, indent: usize) void { + pub fn dump(self: *Node, indent: usize) void { { var i: usize = 0; while (i < indent) : (i += 1) { @@ -460,44 +460,44 @@ pub const Node = struct { pub const Root = struct { base: Node, - doc_comments: ?&DocComment, + doc_comments: ?*DocComment, decls: DeclList, eof_token: TokenIndex, - pub const DeclList = SegmentedList(&Node, 4); + pub const DeclList = SegmentedList(*Node, 4); - pub fn iterate(self: &Root, index: usize) ?&Node { + pub fn iterate(self: *Root, index: usize) ?*Node { if (index < self.decls.len) { return self.decls.at(index).*; } return null; } - pub fn firstToken(self: &Root) TokenIndex { + pub fn firstToken(self: *Root) TokenIndex { return if (self.decls.len == 0) self.eof_token else (self.decls.at(0).*).firstToken(); } - pub fn lastToken(self: &Root) TokenIndex { + pub fn lastToken(self: *Root) TokenIndex { return if (self.decls.len == 0) self.eof_token else (self.decls.at(self.decls.len - 1).*).lastToken(); } }; pub const VarDecl = struct { base: Node, - doc_comments: ?&DocComment, + doc_comments: ?*DocComment, visib_token: ?TokenIndex, name_token: TokenIndex, eq_token: TokenIndex, mut_token: TokenIndex, comptime_token: ?TokenIndex, extern_export_token: ?TokenIndex, - lib_name: ?&Node, - type_node: ?&Node, - align_node: ?&Node, - init_node: ?&Node, + lib_name: ?*Node, + type_node: ?*Node, + align_node: ?*Node, + init_node: ?*Node, semicolon_token: TokenIndex, - pub fn iterate(self: &VarDecl, index: usize) ?&Node { + pub fn iterate(self: *VarDecl, index: usize) ?*Node { var i = index; if (self.type_node) |type_node| { @@ -518,7 +518,7 @@ pub const Node = struct { return null; } - pub fn firstToken(self: &VarDecl) TokenIndex { + pub fn firstToken(self: *VarDecl) TokenIndex { if (self.visib_token) |visib_token| return visib_token; if (self.comptime_token) |comptime_token| return comptime_token; if (self.extern_export_token) |extern_export_token| return extern_export_token; @@ -526,20 +526,20 @@ pub const Node = struct { return self.mut_token; } - pub fn lastToken(self: &VarDecl) TokenIndex { + pub fn lastToken(self: *VarDecl) TokenIndex { return self.semicolon_token; } }; pub const Use = struct { base: Node, - doc_comments: ?&DocComment, + doc_comments: ?*DocComment, visib_token: ?TokenIndex, use_token: TokenIndex, - expr: &Node, + expr: *Node, semicolon_token: TokenIndex, - pub fn iterate(self: &Use, index: usize) ?&Node { + pub fn iterate(self: *Use, index: usize) ?*Node { var i = index; if (i < 1) return self.expr; @@ -548,12 +548,12 @@ pub const Node = struct { return null; } - pub fn firstToken(self: &Use) TokenIndex { + pub fn firstToken(self: *Use) TokenIndex { if (self.visib_token) |visib_token| return visib_token; return self.use_token; } - pub fn lastToken(self: &Use) TokenIndex { + pub fn lastToken(self: *Use) TokenIndex { return self.semicolon_token; } }; @@ -564,9 +564,9 @@ pub const Node = struct { decls: DeclList, rbrace_token: TokenIndex, - pub const DeclList = SegmentedList(&Node, 2); + pub const DeclList = SegmentedList(*Node, 2); - pub fn iterate(self: &ErrorSetDecl, index: usize) ?&Node { + pub fn iterate(self: *ErrorSetDecl, index: usize) ?*Node { var i = index; if (i < self.decls.len) return self.decls.at(i).*; 
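The Node type above is a hand-rolled tagged base struct: cast() recovers the concrete node from a *Node via @fieldParentPtr on its "base" field, and iterate() exposes the children by index. A short sketch of how a consumer might walk a tree with this API; the countVarDecls helper and the std.zig.ast import path are assumptions for illustration, not part of the patch:

    const std = @import("std");
    const ast = std.zig.ast;

    // Recursively count VarDecl nodes beneath `node`, using only the
    // iterate() and cast() methods shown in the diff above.
    fn countVarDecls(node: *ast.Node) usize {
        var total: usize = 0;
        if (node.cast(ast.Node.VarDecl) != null) total += 1;
        var i: usize = 0;
        while (node.iterate(i)) |child| : (i += 1) {
            total += countVarDecls(child);
        }
        return total;
    }

    // Usage, given a parsed tree: countVarDecls(&tree.root_node.base)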
@@ -575,11 +575,11 @@ pub const Node = struct { return null; } - pub fn firstToken(self: &ErrorSetDecl) TokenIndex { + pub fn firstToken(self: *ErrorSetDecl) TokenIndex { return self.error_token; } - pub fn lastToken(self: &ErrorSetDecl) TokenIndex { + pub fn lastToken(self: *ErrorSetDecl) TokenIndex { return self.rbrace_token; } }; @@ -597,11 +597,11 @@ pub const Node = struct { const InitArg = union(enum) { None, - Enum: ?&Node, - Type: &Node, + Enum: ?*Node, + Type: *Node, }; - pub fn iterate(self: &ContainerDecl, index: usize) ?&Node { + pub fn iterate(self: *ContainerDecl, index: usize) ?*Node { var i = index; switch (self.init_arg_expr) { @@ -618,26 +618,26 @@ pub const Node = struct { return null; } - pub fn firstToken(self: &ContainerDecl) TokenIndex { + pub fn firstToken(self: *ContainerDecl) TokenIndex { if (self.layout_token) |layout_token| { return layout_token; } return self.kind_token; } - pub fn lastToken(self: &ContainerDecl) TokenIndex { + pub fn lastToken(self: *ContainerDecl) TokenIndex { return self.rbrace_token; } }; pub const StructField = struct { base: Node, - doc_comments: ?&DocComment, + doc_comments: ?*DocComment, visib_token: ?TokenIndex, name_token: TokenIndex, - type_expr: &Node, + type_expr: *Node, - pub fn iterate(self: &StructField, index: usize) ?&Node { + pub fn iterate(self: *StructField, index: usize) ?*Node { var i = index; if (i < 1) return self.type_expr; @@ -646,24 +646,24 @@ pub const Node = struct { return null; } - pub fn firstToken(self: &StructField) TokenIndex { + pub fn firstToken(self: *StructField) TokenIndex { if (self.visib_token) |visib_token| return visib_token; return self.name_token; } - pub fn lastToken(self: &StructField) TokenIndex { + pub fn lastToken(self: *StructField) TokenIndex { return self.type_expr.lastToken(); } }; pub const UnionTag = struct { base: Node, - doc_comments: ?&DocComment, + doc_comments: ?*DocComment, name_token: TokenIndex, - type_expr: ?&Node, - value_expr: ?&Node, + type_expr: ?*Node, + value_expr: ?*Node, - pub fn iterate(self: &UnionTag, index: usize) ?&Node { + pub fn iterate(self: *UnionTag, index: usize) ?*Node { var i = index; if (self.type_expr) |type_expr| { @@ -679,11 +679,11 @@ pub const Node = struct { return null; } - pub fn firstToken(self: &UnionTag) TokenIndex { + pub fn firstToken(self: *UnionTag) TokenIndex { return self.name_token; } - pub fn lastToken(self: &UnionTag) TokenIndex { + pub fn lastToken(self: *UnionTag) TokenIndex { if (self.value_expr) |value_expr| { return value_expr.lastToken(); } @@ -697,11 +697,11 @@ pub const Node = struct { pub const EnumTag = struct { base: Node, - doc_comments: ?&DocComment, + doc_comments: ?*DocComment, name_token: TokenIndex, - value: ?&Node, + value: ?*Node, - pub fn iterate(self: &EnumTag, index: usize) ?&Node { + pub fn iterate(self: *EnumTag, index: usize) ?*Node { var i = index; if (self.value) |value| { @@ -712,11 +712,11 @@ pub const Node = struct { return null; } - pub fn firstToken(self: &EnumTag) TokenIndex { + pub fn firstToken(self: *EnumTag) TokenIndex { return self.name_token; } - pub fn lastToken(self: &EnumTag) TokenIndex { + pub fn lastToken(self: *EnumTag) TokenIndex { if (self.value) |value| { return value.lastToken(); } @@ -727,25 +727,25 @@ pub const Node = struct { pub const ErrorTag = struct { base: Node, - doc_comments: ?&DocComment, + doc_comments: ?*DocComment, name_token: TokenIndex, - pub fn iterate(self: &ErrorTag, index: usize) ?&Node { + pub fn iterate(self: *ErrorTag, index: usize) ?*Node { var i = index; if 
(self.doc_comments) |comments| { - if (i < 1) return &comments.base; + if (i < 1) return *comments.base; i -= 1; } return null; } - pub fn firstToken(self: &ErrorTag) TokenIndex { + pub fn firstToken(self: *ErrorTag) TokenIndex { return self.name_token; } - pub fn lastToken(self: &ErrorTag) TokenIndex { + pub fn lastToken(self: *ErrorTag) TokenIndex { return self.name_token; } }; @@ -754,15 +754,15 @@ pub const Node = struct { base: Node, token: TokenIndex, - pub fn iterate(self: &Identifier, index: usize) ?&Node { + pub fn iterate(self: *Identifier, index: usize) ?*Node { return null; } - pub fn firstToken(self: &Identifier) TokenIndex { + pub fn firstToken(self: *Identifier) TokenIndex { return self.token; } - pub fn lastToken(self: &Identifier) TokenIndex { + pub fn lastToken(self: *Identifier) TokenIndex { return self.token; } }; @@ -770,10 +770,10 @@ pub const Node = struct { pub const AsyncAttribute = struct { base: Node, async_token: TokenIndex, - allocator_type: ?&Node, + allocator_type: ?*Node, rangle_bracket: ?TokenIndex, - pub fn iterate(self: &AsyncAttribute, index: usize) ?&Node { + pub fn iterate(self: *AsyncAttribute, index: usize) ?*Node { var i = index; if (self.allocator_type) |allocator_type| { @@ -784,11 +784,11 @@ pub const Node = struct { return null; } - pub fn firstToken(self: &AsyncAttribute) TokenIndex { + pub fn firstToken(self: *AsyncAttribute) TokenIndex { return self.async_token; } - pub fn lastToken(self: &AsyncAttribute) TokenIndex { + pub fn lastToken(self: *AsyncAttribute) TokenIndex { if (self.rangle_bracket) |rangle_bracket| { return rangle_bracket; } @@ -799,7 +799,7 @@ pub const Node = struct { pub const FnProto = struct { base: Node, - doc_comments: ?&DocComment, + doc_comments: ?*DocComment, visib_token: ?TokenIndex, fn_token: TokenIndex, name_token: ?TokenIndex, @@ -808,19 +808,19 @@ pub const Node = struct { var_args_token: ?TokenIndex, extern_export_inline_token: ?TokenIndex, cc_token: ?TokenIndex, - async_attr: ?&AsyncAttribute, - body_node: ?&Node, - lib_name: ?&Node, // populated if this is an extern declaration - align_expr: ?&Node, // populated if align(A) is present + async_attr: ?*AsyncAttribute, + body_node: ?*Node, + lib_name: ?*Node, // populated if this is an extern declaration + align_expr: ?*Node, // populated if align(A) is present - pub const ParamList = SegmentedList(&Node, 2); + pub const ParamList = SegmentedList(*Node, 2); pub const ReturnType = union(enum) { - Explicit: &Node, - InferErrorSet: &Node, + Explicit: *Node, + InferErrorSet: *Node, }; - pub fn iterate(self: &FnProto, index: usize) ?&Node { + pub fn iterate(self: *FnProto, index: usize) ?*Node { var i = index; if (self.lib_name) |lib_name| { @@ -856,7 +856,7 @@ pub const Node = struct { return null; } - pub fn firstToken(self: &FnProto) TokenIndex { + pub fn firstToken(self: *FnProto) TokenIndex { if (self.visib_token) |visib_token| return visib_token; if (self.extern_export_inline_token) |extern_export_inline_token| return extern_export_inline_token; assert(self.lib_name == null); @@ -864,7 +864,7 @@ pub const Node = struct { return self.fn_token; } - pub fn lastToken(self: &FnProto) TokenIndex { + pub fn lastToken(self: *FnProto) TokenIndex { if (self.body_node) |body_node| return body_node.lastToken(); switch (self.return_type) { // TODO allow this and next prong to share bodies since the types are the same @@ -881,10 +881,10 @@ pub const Node = struct { pub const Result = struct { arrow_token: TokenIndex, - return_type: &Node, + return_type: *Node, }; - pub fn 
iterate(self: &PromiseType, index: usize) ?&Node { + pub fn iterate(self: *PromiseType, index: usize) ?*Node { var i = index; if (self.result) |result| { @@ -895,11 +895,11 @@ pub const Node = struct { return null; } - pub fn firstToken(self: &PromiseType) TokenIndex { + pub fn firstToken(self: *PromiseType) TokenIndex { return self.promise_token; } - pub fn lastToken(self: &PromiseType) TokenIndex { + pub fn lastToken(self: *PromiseType) TokenIndex { if (self.result) |result| return result.return_type.lastToken(); return self.promise_token; } @@ -910,10 +910,10 @@ pub const Node = struct { comptime_token: ?TokenIndex, noalias_token: ?TokenIndex, name_token: ?TokenIndex, - type_node: &Node, + type_node: *Node, var_args_token: ?TokenIndex, - pub fn iterate(self: &ParamDecl, index: usize) ?&Node { + pub fn iterate(self: *ParamDecl, index: usize) ?*Node { var i = index; if (i < 1) return self.type_node; @@ -922,14 +922,14 @@ pub const Node = struct { return null; } - pub fn firstToken(self: &ParamDecl) TokenIndex { + pub fn firstToken(self: *ParamDecl) TokenIndex { if (self.comptime_token) |comptime_token| return comptime_token; if (self.noalias_token) |noalias_token| return noalias_token; if (self.name_token) |name_token| return name_token; return self.type_node.firstToken(); } - pub fn lastToken(self: &ParamDecl) TokenIndex { + pub fn lastToken(self: *ParamDecl) TokenIndex { if (self.var_args_token) |var_args_token| return var_args_token; return self.type_node.lastToken(); } @@ -944,7 +944,7 @@ pub const Node = struct { pub const StatementList = Root.DeclList; - pub fn iterate(self: &Block, index: usize) ?&Node { + pub fn iterate(self: *Block, index: usize) ?*Node { var i = index; if (i < self.statements.len) return self.statements.at(i).*; @@ -953,7 +953,7 @@ pub const Node = struct { return null; } - pub fn firstToken(self: &Block) TokenIndex { + pub fn firstToken(self: *Block) TokenIndex { if (self.label) |label| { return label; } @@ -961,7 +961,7 @@ pub const Node = struct { return self.lbrace; } - pub fn lastToken(self: &Block) TokenIndex { + pub fn lastToken(self: *Block) TokenIndex { return self.rbrace; } }; @@ -970,14 +970,14 @@ pub const Node = struct { base: Node, defer_token: TokenIndex, kind: Kind, - expr: &Node, + expr: *Node, const Kind = enum { Error, Unconditional, }; - pub fn iterate(self: &Defer, index: usize) ?&Node { + pub fn iterate(self: *Defer, index: usize) ?*Node { var i = index; if (i < 1) return self.expr; @@ -986,22 +986,22 @@ pub const Node = struct { return null; } - pub fn firstToken(self: &Defer) TokenIndex { + pub fn firstToken(self: *Defer) TokenIndex { return self.defer_token; } - pub fn lastToken(self: &Defer) TokenIndex { + pub fn lastToken(self: *Defer) TokenIndex { return self.expr.lastToken(); } }; pub const Comptime = struct { base: Node, - doc_comments: ?&DocComment, + doc_comments: ?*DocComment, comptime_token: TokenIndex, - expr: &Node, + expr: *Node, - pub fn iterate(self: &Comptime, index: usize) ?&Node { + pub fn iterate(self: *Comptime, index: usize) ?*Node { var i = index; if (i < 1) return self.expr; @@ -1010,11 +1010,11 @@ pub const Node = struct { return null; } - pub fn firstToken(self: &Comptime) TokenIndex { + pub fn firstToken(self: *Comptime) TokenIndex { return self.comptime_token; } - pub fn lastToken(self: &Comptime) TokenIndex { + pub fn lastToken(self: *Comptime) TokenIndex { return self.expr.lastToken(); } }; @@ -1022,10 +1022,10 @@ pub const Node = struct { pub const Payload = struct { base: Node, lpipe: TokenIndex, - 
error_symbol: &Node, + error_symbol: *Node, rpipe: TokenIndex, - pub fn iterate(self: &Payload, index: usize) ?&Node { + pub fn iterate(self: *Payload, index: usize) ?*Node { var i = index; if (i < 1) return self.error_symbol; @@ -1034,11 +1034,11 @@ pub const Node = struct { return null; } - pub fn firstToken(self: &Payload) TokenIndex { + pub fn firstToken(self: *Payload) TokenIndex { return self.lpipe; } - pub fn lastToken(self: &Payload) TokenIndex { + pub fn lastToken(self: *Payload) TokenIndex { return self.rpipe; } }; @@ -1047,10 +1047,10 @@ pub const Node = struct { base: Node, lpipe: TokenIndex, ptr_token: ?TokenIndex, - value_symbol: &Node, + value_symbol: *Node, rpipe: TokenIndex, - pub fn iterate(self: &PointerPayload, index: usize) ?&Node { + pub fn iterate(self: *PointerPayload, index: usize) ?*Node { var i = index; if (i < 1) return self.value_symbol; @@ -1059,11 +1059,11 @@ pub const Node = struct { return null; } - pub fn firstToken(self: &PointerPayload) TokenIndex { + pub fn firstToken(self: *PointerPayload) TokenIndex { return self.lpipe; } - pub fn lastToken(self: &PointerPayload) TokenIndex { + pub fn lastToken(self: *PointerPayload) TokenIndex { return self.rpipe; } }; @@ -1072,11 +1072,11 @@ pub const Node = struct { base: Node, lpipe: TokenIndex, ptr_token: ?TokenIndex, - value_symbol: &Node, - index_symbol: ?&Node, + value_symbol: *Node, + index_symbol: ?*Node, rpipe: TokenIndex, - pub fn iterate(self: &PointerIndexPayload, index: usize) ?&Node { + pub fn iterate(self: *PointerIndexPayload, index: usize) ?*Node { var i = index; if (i < 1) return self.value_symbol; @@ -1090,11 +1090,11 @@ pub const Node = struct { return null; } - pub fn firstToken(self: &PointerIndexPayload) TokenIndex { + pub fn firstToken(self: *PointerIndexPayload) TokenIndex { return self.lpipe; } - pub fn lastToken(self: &PointerIndexPayload) TokenIndex { + pub fn lastToken(self: *PointerIndexPayload) TokenIndex { return self.rpipe; } }; @@ -1102,10 +1102,10 @@ pub const Node = struct { pub const Else = struct { base: Node, else_token: TokenIndex, - payload: ?&Node, - body: &Node, + payload: ?*Node, + body: *Node, - pub fn iterate(self: &Else, index: usize) ?&Node { + pub fn iterate(self: *Else, index: usize) ?*Node { var i = index; if (self.payload) |payload| { @@ -1119,11 +1119,11 @@ pub const Node = struct { return null; } - pub fn firstToken(self: &Else) TokenIndex { + pub fn firstToken(self: *Else) TokenIndex { return self.else_token; } - pub fn lastToken(self: &Else) TokenIndex { + pub fn lastToken(self: *Else) TokenIndex { return self.body.lastToken(); } }; @@ -1131,15 +1131,15 @@ pub const Node = struct { pub const Switch = struct { base: Node, switch_token: TokenIndex, - expr: &Node, + expr: *Node, /// these must be SwitchCase nodes cases: CaseList, rbrace: TokenIndex, - pub const CaseList = SegmentedList(&Node, 2); + pub const CaseList = SegmentedList(*Node, 2); - pub fn iterate(self: &Switch, index: usize) ?&Node { + pub fn iterate(self: *Switch, index: usize) ?*Node { var i = index; if (i < 1) return self.expr; @@ -1151,11 +1151,11 @@ pub const Node = struct { return null; } - pub fn firstToken(self: &Switch) TokenIndex { + pub fn firstToken(self: *Switch) TokenIndex { return self.switch_token; } - pub fn lastToken(self: &Switch) TokenIndex { + pub fn lastToken(self: *Switch) TokenIndex { return self.rbrace; } }; @@ -1164,12 +1164,12 @@ pub const Node = struct { base: Node, items: ItemList, arrow_token: TokenIndex, - payload: ?&Node, - expr: &Node, + payload: ?*Node, + expr: 
*Node, - pub const ItemList = SegmentedList(&Node, 1); + pub const ItemList = SegmentedList(*Node, 1); - pub fn iterate(self: &SwitchCase, index: usize) ?&Node { + pub fn iterate(self: *SwitchCase, index: usize) ?*Node { var i = index; if (i < self.items.len) return self.items.at(i).*; @@ -1186,11 +1186,11 @@ pub const Node = struct { return null; } - pub fn firstToken(self: &SwitchCase) TokenIndex { + pub fn firstToken(self: *SwitchCase) TokenIndex { return (self.items.at(0).*).firstToken(); } - pub fn lastToken(self: &SwitchCase) TokenIndex { + pub fn lastToken(self: *SwitchCase) TokenIndex { return self.expr.lastToken(); } }; @@ -1199,15 +1199,15 @@ pub const Node = struct { base: Node, token: TokenIndex, - pub fn iterate(self: &SwitchElse, index: usize) ?&Node { + pub fn iterate(self: *SwitchElse, index: usize) ?*Node { return null; } - pub fn firstToken(self: &SwitchElse) TokenIndex { + pub fn firstToken(self: *SwitchElse) TokenIndex { return self.token; } - pub fn lastToken(self: &SwitchElse) TokenIndex { + pub fn lastToken(self: *SwitchElse) TokenIndex { return self.token; } }; @@ -1217,13 +1217,13 @@ pub const Node = struct { label: ?TokenIndex, inline_token: ?TokenIndex, while_token: TokenIndex, - condition: &Node, - payload: ?&Node, - continue_expr: ?&Node, - body: &Node, - @"else": ?&Else, + condition: *Node, + payload: ?*Node, + continue_expr: ?*Node, + body: *Node, + @"else": ?*Else, - pub fn iterate(self: &While, index: usize) ?&Node { + pub fn iterate(self: *While, index: usize) ?*Node { var i = index; if (i < 1) return self.condition; @@ -1243,14 +1243,14 @@ pub const Node = struct { i -= 1; if (self.@"else") |@"else"| { - if (i < 1) return &@"else".base; + if (i < 1) return *@"else".base; i -= 1; } return null; } - pub fn firstToken(self: &While) TokenIndex { + pub fn firstToken(self: *While) TokenIndex { if (self.label) |label| { return label; } @@ -1262,7 +1262,7 @@ pub const Node = struct { return self.while_token; } - pub fn lastToken(self: &While) TokenIndex { + pub fn lastToken(self: *While) TokenIndex { if (self.@"else") |@"else"| { return @"else".body.lastToken(); } @@ -1276,12 +1276,12 @@ pub const Node = struct { label: ?TokenIndex, inline_token: ?TokenIndex, for_token: TokenIndex, - array_expr: &Node, - payload: ?&Node, - body: &Node, - @"else": ?&Else, + array_expr: *Node, + payload: ?*Node, + body: *Node, + @"else": ?*Else, - pub fn iterate(self: &For, index: usize) ?&Node { + pub fn iterate(self: *For, index: usize) ?*Node { var i = index; if (i < 1) return self.array_expr; @@ -1296,14 +1296,14 @@ pub const Node = struct { i -= 1; if (self.@"else") |@"else"| { - if (i < 1) return &@"else".base; + if (i < 1) return *@"else".base; i -= 1; } return null; } - pub fn firstToken(self: &For) TokenIndex { + pub fn firstToken(self: *For) TokenIndex { if (self.label) |label| { return label; } @@ -1315,7 +1315,7 @@ pub const Node = struct { return self.for_token; } - pub fn lastToken(self: &For) TokenIndex { + pub fn lastToken(self: *For) TokenIndex { if (self.@"else") |@"else"| { return @"else".body.lastToken(); } @@ -1327,12 +1327,12 @@ pub const Node = struct { pub const If = struct { base: Node, if_token: TokenIndex, - condition: &Node, - payload: ?&Node, - body: &Node, - @"else": ?&Else, + condition: *Node, + payload: ?*Node, + body: *Node, + @"else": ?*Else, - pub fn iterate(self: &If, index: usize) ?&Node { + pub fn iterate(self: *If, index: usize) ?*Node { var i = index; if (i < 1) return self.condition; @@ -1347,18 +1347,18 @@ pub const Node = struct { i -= 1; 
if (self.@"else") |@"else"| { - if (i < 1) return &@"else".base; + if (i < 1) return *@"else".base; i -= 1; } return null; } - pub fn firstToken(self: &If) TokenIndex { + pub fn firstToken(self: *If) TokenIndex { return self.if_token; } - pub fn lastToken(self: &If) TokenIndex { + pub fn lastToken(self: *If) TokenIndex { if (self.@"else") |@"else"| { return @"else".body.lastToken(); } @@ -1370,9 +1370,9 @@ pub const Node = struct { pub const InfixOp = struct { base: Node, op_token: TokenIndex, - lhs: &Node, + lhs: *Node, op: Op, - rhs: &Node, + rhs: *Node, pub const Op = union(enum) { Add, @@ -1401,7 +1401,7 @@ pub const Node = struct { BitXor, BoolAnd, BoolOr, - Catch: ?&Node, + Catch: ?*Node, Div, EqualEqual, ErrorUnion, @@ -1420,7 +1420,7 @@ pub const Node = struct { UnwrapMaybe, }; - pub fn iterate(self: &InfixOp, index: usize) ?&Node { + pub fn iterate(self: *InfixOp, index: usize) ?*Node { var i = index; if (i < 1) return self.lhs; @@ -1485,11 +1485,11 @@ pub const Node = struct { return null; } - pub fn firstToken(self: &InfixOp) TokenIndex { + pub fn firstToken(self: *InfixOp) TokenIndex { return self.lhs.firstToken(); } - pub fn lastToken(self: &InfixOp) TokenIndex { + pub fn lastToken(self: *InfixOp) TokenIndex { return self.rhs.lastToken(); } }; @@ -1498,11 +1498,11 @@ pub const Node = struct { base: Node, op_token: TokenIndex, op: Op, - rhs: &Node, + rhs: *Node, pub const Op = union(enum) { AddrOf: AddrOfInfo, - ArrayType: &Node, + ArrayType: *Node, Await, BitNot, BoolNot, @@ -1523,17 +1523,17 @@ pub const Node = struct { volatile_token: ?TokenIndex, pub const Align = struct { - node: &Node, + node: *Node, bit_range: ?BitRange, pub const BitRange = struct { - start: &Node, - end: &Node, + start: *Node, + end: *Node, }; }; }; - pub fn iterate(self: &PrefixOp, index: usize) ?&Node { + pub fn iterate(self: *PrefixOp, index: usize) ?*Node { var i = index; switch (self.op) { @@ -1573,11 +1573,11 @@ pub const Node = struct { return null; } - pub fn firstToken(self: &PrefixOp) TokenIndex { + pub fn firstToken(self: *PrefixOp) TokenIndex { return self.op_token; } - pub fn lastToken(self: &PrefixOp) TokenIndex { + pub fn lastToken(self: *PrefixOp) TokenIndex { return self.rhs.lastToken(); } }; @@ -1586,9 +1586,9 @@ pub const Node = struct { base: Node, period_token: TokenIndex, name_token: TokenIndex, - expr: &Node, + expr: *Node, - pub fn iterate(self: &FieldInitializer, index: usize) ?&Node { + pub fn iterate(self: *FieldInitializer, index: usize) ?*Node { var i = index; if (i < 1) return self.expr; @@ -1597,45 +1597,45 @@ pub const Node = struct { return null; } - pub fn firstToken(self: &FieldInitializer) TokenIndex { + pub fn firstToken(self: *FieldInitializer) TokenIndex { return self.period_token; } - pub fn lastToken(self: &FieldInitializer) TokenIndex { + pub fn lastToken(self: *FieldInitializer) TokenIndex { return self.expr.lastToken(); } }; pub const SuffixOp = struct { base: Node, - lhs: &Node, + lhs: *Node, op: Op, rtoken: TokenIndex, pub const Op = union(enum) { Call: Call, - ArrayAccess: &Node, + ArrayAccess: *Node, Slice: Slice, ArrayInitializer: InitList, StructInitializer: InitList, Deref, - pub const InitList = SegmentedList(&Node, 2); + pub const InitList = SegmentedList(*Node, 2); pub const Call = struct { params: ParamList, - async_attr: ?&AsyncAttribute, + async_attr: ?*AsyncAttribute, - pub const ParamList = SegmentedList(&Node, 2); + pub const ParamList = SegmentedList(*Node, 2); }; pub const Slice = struct { - start: &Node, - end: ?&Node, + start: *Node, + 
end: ?*Node, }; }; - pub fn iterate(self: &SuffixOp, index: usize) ?&Node { + pub fn iterate(self: *SuffixOp, index: usize) ?*Node { var i = index; if (i < 1) return self.lhs; @@ -1673,7 +1673,7 @@ pub const Node = struct { return null; } - pub fn firstToken(self: &SuffixOp) TokenIndex { + pub fn firstToken(self: *SuffixOp) TokenIndex { switch (self.op) { @TagType(Op).Call => |*call_info| if (call_info.async_attr) |async_attr| return async_attr.firstToken(), else => {}, @@ -1681,7 +1681,7 @@ pub const Node = struct { return self.lhs.firstToken(); } - pub fn lastToken(self: &SuffixOp) TokenIndex { + pub fn lastToken(self: *SuffixOp) TokenIndex { return self.rtoken; } }; @@ -1689,10 +1689,10 @@ pub const Node = struct { pub const GroupedExpression = struct { base: Node, lparen: TokenIndex, - expr: &Node, + expr: *Node, rparen: TokenIndex, - pub fn iterate(self: &GroupedExpression, index: usize) ?&Node { + pub fn iterate(self: *GroupedExpression, index: usize) ?*Node { var i = index; if (i < 1) return self.expr; @@ -1701,11 +1701,11 @@ pub const Node = struct { return null; } - pub fn firstToken(self: &GroupedExpression) TokenIndex { + pub fn firstToken(self: *GroupedExpression) TokenIndex { return self.lparen; } - pub fn lastToken(self: &GroupedExpression) TokenIndex { + pub fn lastToken(self: *GroupedExpression) TokenIndex { return self.rparen; } }; @@ -1714,15 +1714,15 @@ pub const Node = struct { base: Node, ltoken: TokenIndex, kind: Kind, - rhs: ?&Node, + rhs: ?*Node, const Kind = union(enum) { - Break: ?&Node, - Continue: ?&Node, + Break: ?*Node, + Continue: ?*Node, Return, }; - pub fn iterate(self: &ControlFlowExpression, index: usize) ?&Node { + pub fn iterate(self: *ControlFlowExpression, index: usize) ?*Node { var i = index; switch (self.kind) { @@ -1749,11 +1749,11 @@ pub const Node = struct { return null; } - pub fn firstToken(self: &ControlFlowExpression) TokenIndex { + pub fn firstToken(self: *ControlFlowExpression) TokenIndex { return self.ltoken; } - pub fn lastToken(self: &ControlFlowExpression) TokenIndex { + pub fn lastToken(self: *ControlFlowExpression) TokenIndex { if (self.rhs) |rhs| { return rhs.lastToken(); } @@ -1780,10 +1780,10 @@ pub const Node = struct { base: Node, label: ?TokenIndex, suspend_token: TokenIndex, - payload: ?&Node, - body: ?&Node, + payload: ?*Node, + body: ?*Node, - pub fn iterate(self: &Suspend, index: usize) ?&Node { + pub fn iterate(self: *Suspend, index: usize) ?*Node { var i = index; if (self.payload) |payload| { @@ -1799,12 +1799,12 @@ pub const Node = struct { return null; } - pub fn firstToken(self: &Suspend) TokenIndex { + pub fn firstToken(self: *Suspend) TokenIndex { if (self.label) |label| return label; return self.suspend_token; } - pub fn lastToken(self: &Suspend) TokenIndex { + pub fn lastToken(self: *Suspend) TokenIndex { if (self.body) |body| { return body.lastToken(); } @@ -1821,15 +1821,15 @@ pub const Node = struct { base: Node, token: TokenIndex, - pub fn iterate(self: &IntegerLiteral, index: usize) ?&Node { + pub fn iterate(self: *IntegerLiteral, index: usize) ?*Node { return null; } - pub fn firstToken(self: &IntegerLiteral) TokenIndex { + pub fn firstToken(self: *IntegerLiteral) TokenIndex { return self.token; } - pub fn lastToken(self: &IntegerLiteral) TokenIndex { + pub fn lastToken(self: *IntegerLiteral) TokenIndex { return self.token; } }; @@ -1838,15 +1838,15 @@ pub const Node = struct { base: Node, token: TokenIndex, - pub fn iterate(self: &FloatLiteral, index: usize) ?&Node { + pub fn iterate(self: *FloatLiteral, 
index: usize) ?*Node { return null; } - pub fn firstToken(self: &FloatLiteral) TokenIndex { + pub fn firstToken(self: *FloatLiteral) TokenIndex { return self.token; } - pub fn lastToken(self: &FloatLiteral) TokenIndex { + pub fn lastToken(self: *FloatLiteral) TokenIndex { return self.token; } }; @@ -1857,9 +1857,9 @@ pub const Node = struct { params: ParamList, rparen_token: TokenIndex, - pub const ParamList = SegmentedList(&Node, 2); + pub const ParamList = SegmentedList(*Node, 2); - pub fn iterate(self: &BuiltinCall, index: usize) ?&Node { + pub fn iterate(self: *BuiltinCall, index: usize) ?*Node { var i = index; if (i < self.params.len) return self.params.at(i).*; @@ -1868,11 +1868,11 @@ pub const Node = struct { return null; } - pub fn firstToken(self: &BuiltinCall) TokenIndex { + pub fn firstToken(self: *BuiltinCall) TokenIndex { return self.builtin_token; } - pub fn lastToken(self: &BuiltinCall) TokenIndex { + pub fn lastToken(self: *BuiltinCall) TokenIndex { return self.rparen_token; } }; @@ -1881,15 +1881,15 @@ pub const Node = struct { base: Node, token: TokenIndex, - pub fn iterate(self: &StringLiteral, index: usize) ?&Node { + pub fn iterate(self: *StringLiteral, index: usize) ?*Node { return null; } - pub fn firstToken(self: &StringLiteral) TokenIndex { + pub fn firstToken(self: *StringLiteral) TokenIndex { return self.token; } - pub fn lastToken(self: &StringLiteral) TokenIndex { + pub fn lastToken(self: *StringLiteral) TokenIndex { return self.token; } }; @@ -1900,15 +1900,15 @@ pub const Node = struct { pub const LineList = SegmentedList(TokenIndex, 4); - pub fn iterate(self: &MultilineStringLiteral, index: usize) ?&Node { + pub fn iterate(self: *MultilineStringLiteral, index: usize) ?*Node { return null; } - pub fn firstToken(self: &MultilineStringLiteral) TokenIndex { + pub fn firstToken(self: *MultilineStringLiteral) TokenIndex { return self.lines.at(0).*; } - pub fn lastToken(self: &MultilineStringLiteral) TokenIndex { + pub fn lastToken(self: *MultilineStringLiteral) TokenIndex { return self.lines.at(self.lines.len - 1).*; } }; @@ -1917,15 +1917,15 @@ pub const Node = struct { base: Node, token: TokenIndex, - pub fn iterate(self: &CharLiteral, index: usize) ?&Node { + pub fn iterate(self: *CharLiteral, index: usize) ?*Node { return null; } - pub fn firstToken(self: &CharLiteral) TokenIndex { + pub fn firstToken(self: *CharLiteral) TokenIndex { return self.token; } - pub fn lastToken(self: &CharLiteral) TokenIndex { + pub fn lastToken(self: *CharLiteral) TokenIndex { return self.token; } }; @@ -1934,15 +1934,15 @@ pub const Node = struct { base: Node, token: TokenIndex, - pub fn iterate(self: &BoolLiteral, index: usize) ?&Node { + pub fn iterate(self: *BoolLiteral, index: usize) ?*Node { return null; } - pub fn firstToken(self: &BoolLiteral) TokenIndex { + pub fn firstToken(self: *BoolLiteral) TokenIndex { return self.token; } - pub fn lastToken(self: &BoolLiteral) TokenIndex { + pub fn lastToken(self: *BoolLiteral) TokenIndex { return self.token; } }; @@ -1951,15 +1951,15 @@ pub const Node = struct { base: Node, token: TokenIndex, - pub fn iterate(self: &NullLiteral, index: usize) ?&Node { + pub fn iterate(self: *NullLiteral, index: usize) ?*Node { return null; } - pub fn firstToken(self: &NullLiteral) TokenIndex { + pub fn firstToken(self: *NullLiteral) TokenIndex { return self.token; } - pub fn lastToken(self: &NullLiteral) TokenIndex { + pub fn lastToken(self: *NullLiteral) TokenIndex { return self.token; } }; @@ -1968,15 +1968,15 @@ pub const Node = struct { base: 
Node, token: TokenIndex, - pub fn iterate(self: &UndefinedLiteral, index: usize) ?&Node { + pub fn iterate(self: *UndefinedLiteral, index: usize) ?*Node { return null; } - pub fn firstToken(self: &UndefinedLiteral) TokenIndex { + pub fn firstToken(self: *UndefinedLiteral) TokenIndex { return self.token; } - pub fn lastToken(self: &UndefinedLiteral) TokenIndex { + pub fn lastToken(self: *UndefinedLiteral) TokenIndex { return self.token; } }; @@ -1985,15 +1985,15 @@ pub const Node = struct { base: Node, token: TokenIndex, - pub fn iterate(self: &ThisLiteral, index: usize) ?&Node { + pub fn iterate(self: *ThisLiteral, index: usize) ?*Node { return null; } - pub fn firstToken(self: &ThisLiteral) TokenIndex { + pub fn firstToken(self: *ThisLiteral) TokenIndex { return self.token; } - pub fn lastToken(self: &ThisLiteral) TokenIndex { + pub fn lastToken(self: *ThisLiteral) TokenIndex { return self.token; } }; @@ -2001,17 +2001,17 @@ pub const Node = struct { pub const AsmOutput = struct { base: Node, lbracket: TokenIndex, - symbolic_name: &Node, - constraint: &Node, + symbolic_name: *Node, + constraint: *Node, kind: Kind, rparen: TokenIndex, const Kind = union(enum) { - Variable: &Identifier, - Return: &Node, + Variable: *Identifier, + Return: *Node, }; - pub fn iterate(self: &AsmOutput, index: usize) ?&Node { + pub fn iterate(self: *AsmOutput, index: usize) ?*Node { var i = index; if (i < 1) return self.symbolic_name; @@ -2022,7 +2022,7 @@ pub const Node = struct { switch (self.kind) { Kind.Variable => |variable_name| { - if (i < 1) return &variable_name.base; + if (i < 1) return &variable_name.base; i -= 1; }, Kind.Return => |return_type| { @@ -2034,11 +2034,11 @@ pub const Node = struct { return null; } - pub fn firstToken(self: &AsmOutput) TokenIndex { + pub fn firstToken(self: *AsmOutput) TokenIndex { return self.lbracket; } - pub fn lastToken(self: &AsmOutput) TokenIndex { + pub fn lastToken(self: *AsmOutput) TokenIndex { return self.rparen; } }; @@ -2046,12 +2046,12 @@ pub const Node = struct { pub const AsmInput = struct { base: Node, lbracket: TokenIndex, - symbolic_name: &Node, - constraint: &Node, - expr: &Node, + symbolic_name: *Node, + constraint: *Node, + expr: *Node, rparen: TokenIndex, - pub fn iterate(self: &AsmInput, index: usize) ?&Node { + pub fn iterate(self: *AsmInput, index: usize) ?*Node { var i = index; if (i < 1) return self.symbolic_name; @@ -2066,11 +2066,11 @@ pub const Node = struct { return null; } - pub fn firstToken(self: &AsmInput) TokenIndex { + pub fn firstToken(self: *AsmInput) TokenIndex { return self.lbracket; } - pub fn lastToken(self: &AsmInput) TokenIndex { + pub fn lastToken(self: *AsmInput) TokenIndex { return self.rparen; } }; @@ -2079,33 +2079,33 @@ pub const Node = struct { base: Node, asm_token: TokenIndex, volatile_token: ?TokenIndex, - template: &Node, + template: *Node, outputs: OutputList, inputs: InputList, clobbers: ClobberList, rparen: TokenIndex, - const OutputList = SegmentedList(&AsmOutput, 2); - const InputList = SegmentedList(&AsmInput, 2); + const OutputList = SegmentedList(*AsmOutput, 2); + const InputList = SegmentedList(*AsmInput, 2); const ClobberList = SegmentedList(TokenIndex, 2); - pub fn iterate(self: &Asm, index: usize) ?&Node { + pub fn iterate(self: *Asm, index: usize) ?*Node { var i = index; - if (i < self.outputs.len) return &(self.outputs.at(index).*).base; + if (i < self.outputs.len) return &(self.outputs.at(index).*).base; i -= self.outputs.len; - if (i < self.inputs.len) return &(self.inputs.at(index).*).base; + if (i < 
self.inputs.len) return &(self.inputs.at(index).*).base; i -= self.inputs.len; return null; } - pub fn firstToken(self: &Asm) TokenIndex { + pub fn firstToken(self: *Asm) TokenIndex { return self.asm_token; } - pub fn lastToken(self: &Asm) TokenIndex { + pub fn lastToken(self: *Asm) TokenIndex { return self.rparen; } }; @@ -2114,15 +2114,15 @@ pub const Node = struct { base: Node, token: TokenIndex, - pub fn iterate(self: &Unreachable, index: usize) ?&Node { + pub fn iterate(self: *Unreachable, index: usize) ?*Node { return null; } - pub fn firstToken(self: &Unreachable) TokenIndex { + pub fn firstToken(self: *Unreachable) TokenIndex { return self.token; } - pub fn lastToken(self: &Unreachable) TokenIndex { + pub fn lastToken(self: *Unreachable) TokenIndex { return self.token; } }; @@ -2131,15 +2131,15 @@ pub const Node = struct { base: Node, token: TokenIndex, - pub fn iterate(self: &ErrorType, index: usize) ?&Node { + pub fn iterate(self: *ErrorType, index: usize) ?*Node { return null; } - pub fn firstToken(self: &ErrorType) TokenIndex { + pub fn firstToken(self: *ErrorType) TokenIndex { return self.token; } - pub fn lastToken(self: &ErrorType) TokenIndex { + pub fn lastToken(self: *ErrorType) TokenIndex { return self.token; } }; @@ -2148,15 +2148,15 @@ pub const Node = struct { base: Node, token: TokenIndex, - pub fn iterate(self: &VarType, index: usize) ?&Node { + pub fn iterate(self: *VarType, index: usize) ?*Node { return null; } - pub fn firstToken(self: &VarType) TokenIndex { + pub fn firstToken(self: *VarType) TokenIndex { return self.token; } - pub fn lastToken(self: &VarType) TokenIndex { + pub fn lastToken(self: *VarType) TokenIndex { return self.token; } }; @@ -2167,27 +2167,27 @@ pub const Node = struct { pub const LineList = SegmentedList(TokenIndex, 4); - pub fn iterate(self: &DocComment, index: usize) ?&Node { + pub fn iterate(self: *DocComment, index: usize) ?*Node { return null; } - pub fn firstToken(self: &DocComment) TokenIndex { + pub fn firstToken(self: *DocComment) TokenIndex { return self.lines.at(0).*; } - pub fn lastToken(self: &DocComment) TokenIndex { + pub fn lastToken(self: *DocComment) TokenIndex { return self.lines.at(self.lines.len - 1).*; } }; pub const TestDecl = struct { base: Node, - doc_comments: ?&DocComment, + doc_comments: ?*DocComment, test_token: TokenIndex, - name: &Node, - body_node: &Node, + name: *Node, + body_node: *Node, - pub fn iterate(self: &TestDecl, index: usize) ?&Node { + pub fn iterate(self: *TestDecl, index: usize) ?*Node { var i = index; if (i < 1) return self.body_node; @@ -2196,11 +2196,11 @@ pub const Node = struct { return null; } - pub fn firstToken(self: &TestDecl) TokenIndex { + pub fn firstToken(self: *TestDecl) TokenIndex { return self.test_token; } - pub fn lastToken(self: &TestDecl) TokenIndex { + pub fn lastToken(self: *TestDecl) TokenIndex { return self.body_node.lastToken(); } }; diff --git a/std/zig/bench.zig b/std/zig/bench.zig index c3b6b0d3d3..59392889a6 100644 --- a/std/zig/bench.zig +++ b/std/zig/bench.zig @@ -24,15 +24,15 @@ pub fn main() !void { const mb_per_sec = bytes_per_sec / (1024 * 1024); var stdout_file = try std.io.getStdOut(); - const stdout = &std.io.FileOutStream.init(&stdout_file).stream; + const stdout = &std.io.FileOutStream.init(&stdout_file).stream; try stdout.print("{.3} MB/s, {} KB used \n", mb_per_sec, memory_used / 1024); } fn testOnce() usize { var fixed_buf_alloc = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]); - var allocator = &fixed_buf_alloc.allocator; + var allocator = 
&fixed_buf_alloc.allocator; var tokenizer = Tokenizer.init(source); - var parser = Parser.init(&tokenizer, allocator, "(memory buffer)"); + var parser = Parser.init(&tokenizer, allocator, "(memory buffer)"); _ = parser.parse() catch @panic("parse failure"); return fixed_buf_alloc.end_index; } diff --git a/std/zig/parse.zig b/std/zig/parse.zig index 05554f5d34..6d29300aed 100644 --- a/std/zig/parse.zig +++ b/std/zig/parse.zig @@ -9,7 +9,7 @@ const Error = ast.Error; /// Result should be freed with tree.deinit() when there are /// no more references to any of the tokens or nodes. -pub fn parse(allocator: &mem.Allocator, source: []const u8) !ast.Tree { +pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree { var tree_arena = std.heap.ArenaAllocator.init(allocator); errdefer tree_arena.deinit(); @@ -2754,16 +2754,16 @@ pub fn parse(allocator: &mem.Allocator, source: []const u8) !ast.Tree { } const AnnotatedToken = struct { - ptr: &Token, + ptr: *Token, index: TokenIndex, }; const TopLevelDeclCtx = struct { - decls: &ast.Node.Root.DeclList, + decls: *ast.Node.Root.DeclList, visib_token: ?TokenIndex, extern_export_inline_token: ?AnnotatedToken, - lib_name: ?&ast.Node, - comments: ?&ast.Node.DocComment, + lib_name: ?*ast.Node, + comments: ?*ast.Node.DocComment, }; const VarDeclCtx = struct { @@ -2771,21 +2771,21 @@ const VarDeclCtx = struct { visib_token: ?TokenIndex, comptime_token: ?TokenIndex, extern_export_token: ?TokenIndex, - lib_name: ?&ast.Node, - list: &ast.Node.Root.DeclList, - comments: ?&ast.Node.DocComment, + lib_name: ?*ast.Node, + list: *ast.Node.Root.DeclList, + comments: ?*ast.Node.DocComment, }; const TopLevelExternOrFieldCtx = struct { visib_token: TokenIndex, - container_decl: &ast.Node.ContainerDecl, - comments: ?&ast.Node.DocComment, + container_decl: *ast.Node.ContainerDecl, + comments: ?*ast.Node.DocComment, }; const ExternTypeCtx = struct { opt_ctx: OptionalCtx, extern_token: TokenIndex, - comments: ?&ast.Node.DocComment, + comments: ?*ast.Node.DocComment, }; const ContainerKindCtx = struct { @@ -2795,24 +2795,24 @@ const ContainerKindCtx = struct { const ExpectTokenSave = struct { id: @TagType(Token.Id), - ptr: &TokenIndex, + ptr: *TokenIndex, }; const OptionalTokenSave = struct { id: @TagType(Token.Id), - ptr: &?TokenIndex, + ptr: *?TokenIndex, }; const ExprListCtx = struct { - list: &ast.Node.SuffixOp.Op.InitList, + list: *ast.Node.SuffixOp.Op.InitList, end: Token.Id, - ptr: &TokenIndex, + ptr: *TokenIndex, }; fn ListSave(comptime List: type) type { return struct { - list: &List, - ptr: &TokenIndex, + list: *List, + ptr: *TokenIndex, }; } @@ -2841,7 +2841,7 @@ const LoopCtx = struct { const AsyncEndCtx = struct { ctx: OptionalCtx, - attribute: &ast.Node.AsyncAttribute, + attribute: *ast.Node.AsyncAttribute, }; const ErrorTypeOrSetDeclCtx = struct { @@ -2850,21 +2850,21 @@ const ErrorTypeOrSetDeclCtx = struct { }; const ParamDeclEndCtx = struct { - fn_proto: &ast.Node.FnProto, - param_decl: &ast.Node.ParamDecl, + fn_proto: *ast.Node.FnProto, + param_decl: *ast.Node.ParamDecl, }; const ComptimeStatementCtx = struct { comptime_token: TokenIndex, - block: &ast.Node.Block, + block: *ast.Node.Block, }; const OptionalCtx = union(enum) { - Optional: &?&ast.Node, - RequiredNull: &?&ast.Node, - Required: &&ast.Node, + Optional: *?*ast.Node, + RequiredNull: *?*ast.Node, + Required: **ast.Node, - pub fn store(self: &const OptionalCtx, value: &ast.Node) void { + pub fn store(self: *const OptionalCtx, value: *ast.Node) void { switch (self.*) { OptionalCtx.Optional => 
|ptr| ptr.* = value, OptionalCtx.RequiredNull => |ptr| ptr.* = value, @@ -2872,7 +2872,7 @@ const OptionalCtx = union(enum) { } } - pub fn get(self: &const OptionalCtx) ?&ast.Node { + pub fn get(self: *const OptionalCtx) ?*ast.Node { switch (self.*) { OptionalCtx.Optional => |ptr| return ptr.*, OptionalCtx.RequiredNull => |ptr| return ??ptr.*, @@ -2880,7 +2880,7 @@ const OptionalCtx = union(enum) { } } - pub fn toRequired(self: &const OptionalCtx) OptionalCtx { + pub fn toRequired(self: *const OptionalCtx) OptionalCtx { switch (self.*) { OptionalCtx.Optional => |ptr| { return OptionalCtx{ .RequiredNull = ptr }; @@ -2892,8 +2892,8 @@ const OptionalCtx = union(enum) { }; const AddCommentsCtx = struct { - node_ptr: &&ast.Node, - comments: ?&ast.Node.DocComment, + node_ptr: **ast.Node, + comments: ?*ast.Node.DocComment, }; const State = union(enum) { @@ -2904,67 +2904,67 @@ const State = union(enum) { TopLevelExternOrField: TopLevelExternOrFieldCtx, ContainerKind: ContainerKindCtx, - ContainerInitArgStart: &ast.Node.ContainerDecl, - ContainerInitArg: &ast.Node.ContainerDecl, - ContainerDecl: &ast.Node.ContainerDecl, + ContainerInitArgStart: *ast.Node.ContainerDecl, + ContainerInitArg: *ast.Node.ContainerDecl, + ContainerDecl: *ast.Node.ContainerDecl, VarDecl: VarDeclCtx, - VarDeclAlign: &ast.Node.VarDecl, - VarDeclEq: &ast.Node.VarDecl, - VarDeclSemiColon: &ast.Node.VarDecl, - - FnDef: &ast.Node.FnProto, - FnProto: &ast.Node.FnProto, - FnProtoAlign: &ast.Node.FnProto, - FnProtoReturnType: &ast.Node.FnProto, - - ParamDecl: &ast.Node.FnProto, - ParamDeclAliasOrComptime: &ast.Node.ParamDecl, - ParamDeclName: &ast.Node.ParamDecl, + VarDeclAlign: *ast.Node.VarDecl, + VarDeclEq: *ast.Node.VarDecl, + VarDeclSemiColon: *ast.Node.VarDecl, + + FnDef: *ast.Node.FnProto, + FnProto: *ast.Node.FnProto, + FnProtoAlign: *ast.Node.FnProto, + FnProtoReturnType: *ast.Node.FnProto, + + ParamDecl: *ast.Node.FnProto, + ParamDeclAliasOrComptime: *ast.Node.ParamDecl, + ParamDeclName: *ast.Node.ParamDecl, ParamDeclEnd: ParamDeclEndCtx, - ParamDeclComma: &ast.Node.FnProto, + ParamDeclComma: *ast.Node.FnProto, MaybeLabeledExpression: MaybeLabeledExpressionCtx, LabeledExpression: LabelCtx, Inline: InlineCtx, While: LoopCtx, - WhileContinueExpr: &?&ast.Node, + WhileContinueExpr: *?*ast.Node, For: LoopCtx, - Else: &?&ast.Node.Else, + Else: *?*ast.Node.Else, - Block: &ast.Node.Block, - Statement: &ast.Node.Block, + Block: *ast.Node.Block, + Statement: *ast.Node.Block, ComptimeStatement: ComptimeStatementCtx, - Semicolon: &&ast.Node, + Semicolon: **ast.Node, - AsmOutputItems: &ast.Node.Asm.OutputList, - AsmOutputReturnOrType: &ast.Node.AsmOutput, - AsmInputItems: &ast.Node.Asm.InputList, - AsmClobberItems: &ast.Node.Asm.ClobberList, + AsmOutputItems: *ast.Node.Asm.OutputList, + AsmOutputReturnOrType: *ast.Node.AsmOutput, + AsmInputItems: *ast.Node.Asm.InputList, + AsmClobberItems: *ast.Node.Asm.ClobberList, ExprListItemOrEnd: ExprListCtx, ExprListCommaOrEnd: ExprListCtx, FieldInitListItemOrEnd: ListSave(ast.Node.SuffixOp.Op.InitList), FieldInitListCommaOrEnd: ListSave(ast.Node.SuffixOp.Op.InitList), - FieldListCommaOrEnd: &ast.Node.ContainerDecl, + FieldListCommaOrEnd: *ast.Node.ContainerDecl, FieldInitValue: OptionalCtx, ErrorTagListItemOrEnd: ListSave(ast.Node.ErrorSetDecl.DeclList), ErrorTagListCommaOrEnd: ListSave(ast.Node.ErrorSetDecl.DeclList), SwitchCaseOrEnd: ListSave(ast.Node.Switch.CaseList), SwitchCaseCommaOrEnd: ListSave(ast.Node.Switch.CaseList), - SwitchCaseFirstItem: &ast.Node.SwitchCase, - 
SwitchCaseItemCommaOrEnd: &ast.Node.SwitchCase, - SwitchCaseItemOrEnd: &ast.Node.SwitchCase, + SwitchCaseFirstItem: *ast.Node.SwitchCase, + SwitchCaseItemCommaOrEnd: *ast.Node.SwitchCase, + SwitchCaseItemOrEnd: *ast.Node.SwitchCase, - SuspendBody: &ast.Node.Suspend, - AsyncAllocator: &ast.Node.AsyncAttribute, + SuspendBody: *ast.Node.Suspend, + AsyncAllocator: *ast.Node.AsyncAttribute, AsyncEnd: AsyncEndCtx, ExternType: ExternTypeCtx, - SliceOrArrayAccess: &ast.Node.SuffixOp, - SliceOrArrayType: &ast.Node.PrefixOp, - AddrOfModifiers: &ast.Node.PrefixOp.AddrOfInfo, - AlignBitRange: &ast.Node.PrefixOp.AddrOfInfo.Align, + SliceOrArrayAccess: *ast.Node.SuffixOp, + SliceOrArrayType: *ast.Node.PrefixOp, + AddrOfModifiers: *ast.Node.PrefixOp.AddrOfInfo, + AlignBitRange: *ast.Node.PrefixOp.AddrOfInfo.Align, Payload: OptionalCtx, PointerPayload: OptionalCtx, @@ -3007,7 +3007,7 @@ const State = union(enum) { ErrorTypeOrSetDecl: ErrorTypeOrSetDeclCtx, StringLiteral: OptionalCtx, Identifier: OptionalCtx, - ErrorTag: &&ast.Node, + ErrorTag: **ast.Node, IfToken: @TagType(Token.Id), IfTokenSave: ExpectTokenSave, @@ -3016,7 +3016,7 @@ const State = union(enum) { OptionalTokenSave: OptionalTokenSave, }; -fn pushDocComment(arena: &mem.Allocator, line_comment: TokenIndex, result: &?&ast.Node.DocComment) !void { +fn pushDocComment(arena: *mem.Allocator, line_comment: TokenIndex, result: *?*ast.Node.DocComment) !void { const node = blk: { if (result.*) |comment_node| { break :blk comment_node; @@ -3032,8 +3032,8 @@ fn pushDocComment(arena: &mem.Allocator, line_comment: TokenIndex, result: &?&as try node.lines.push(line_comment); } -fn eatDocComments(arena: &mem.Allocator, tok_it: &ast.Tree.TokenList.Iterator, tree: &ast.Tree) !?&ast.Node.DocComment { - var result: ?&ast.Node.DocComment = null; +fn eatDocComments(arena: *mem.Allocator, tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree) !?*ast.Node.DocComment { + var result: ?*ast.Node.DocComment = null; while (true) { if (eatToken(tok_it, tree, Token.Id.DocComment)) |line_comment| { try pushDocComment(arena, line_comment, &result); @@ -3044,7 +3044,7 @@ fn eatDocComments(arena: &mem.Allocator, tok_it: &ast.Tree.TokenList.Iterator, t return result; } -fn parseStringLiteral(arena: &mem.Allocator, tok_it: &ast.Tree.TokenList.Iterator, token_ptr: &const Token, token_index: TokenIndex, tree: &ast.Tree) !?&ast.Node { +fn parseStringLiteral(arena: *mem.Allocator, tok_it: *ast.Tree.TokenList.Iterator, token_ptr: *const Token, token_index: TokenIndex, tree: *ast.Tree) !?*ast.Node { switch (token_ptr.id) { Token.Id.StringLiteral => { return &(try createLiteral(arena, ast.Node.StringLiteral, token_index)).base; @@ -3071,11 +3071,11 @@ fn parseStringLiteral(arena: &mem.Allocator, tok_it: &ast.Tree.TokenList.Iterato }, // TODO: We shouldn't need a cast, but: // zig: /home/jc/Documents/zig/src/ir.cpp:7962: TypeTableEntry* ir_resolve_peer_types(IrAnalyze*, AstNode*, IrInstruction**, size_t): Assertion `err_set_type != nullptr' failed. 
- else => return (?&ast.Node)(null), + else => return (?*ast.Node)(null), } } -fn parseBlockExpr(stack: &std.ArrayList(State), arena: &mem.Allocator, ctx: &const OptionalCtx, token_ptr: &const Token, token_index: TokenIndex) !bool { +fn parseBlockExpr(stack: *std.ArrayList(State), arena: *mem.Allocator, ctx: *const OptionalCtx, token_ptr: *const Token, token_index: TokenIndex) !bool { switch (token_ptr.id) { Token.Id.Keyword_suspend => { const node = try arena.construct(ast.Node.Suspend{ @@ -3189,7 +3189,7 @@ const ExpectCommaOrEndResult = union(enum) { parse_error: Error, }; -fn expectCommaOrEnd(tok_it: &ast.Tree.TokenList.Iterator, tree: &ast.Tree, end: @TagType(Token.Id)) ExpectCommaOrEndResult { +fn expectCommaOrEnd(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree, end: @TagType(Token.Id)) ExpectCommaOrEndResult { const token = nextToken(tok_it, tree); const token_index = token.index; const token_ptr = token.ptr; @@ -3212,7 +3212,7 @@ fn expectCommaOrEnd(tok_it: &ast.Tree.TokenList.Iterator, tree: &ast.Tree, end: } } -fn tokenIdToAssignment(id: &const Token.Id) ?ast.Node.InfixOp.Op { +fn tokenIdToAssignment(id: *const Token.Id) ?ast.Node.InfixOp.Op { // TODO: We have to cast all cases because of this: // error: expected type '?InfixOp', found '?@TagType(InfixOp)' return switch (id.*) { @@ -3307,21 +3307,21 @@ fn tokenIdToPrefixOp(id: @TagType(Token.Id)) ?ast.Node.PrefixOp.Op { }; } -fn createLiteral(arena: &mem.Allocator, comptime T: type, token_index: TokenIndex) !&T { +fn createLiteral(arena: *mem.Allocator, comptime T: type, token_index: TokenIndex) !*T { return arena.construct(T{ .base = ast.Node{ .id = ast.Node.typeToId(T) }, .token = token_index, }); } -fn createToCtxLiteral(arena: &mem.Allocator, opt_ctx: &const OptionalCtx, comptime T: type, token_index: TokenIndex) !&T { +fn createToCtxLiteral(arena: *mem.Allocator, opt_ctx: *const OptionalCtx, comptime T: type, token_index: TokenIndex) !*T { const node = try createLiteral(arena, T, token_index); opt_ctx.store(&node.base); return node; } -fn eatToken(tok_it: &ast.Tree.TokenList.Iterator, tree: &ast.Tree, id: @TagType(Token.Id)) ?TokenIndex { +fn eatToken(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree, id: @TagType(Token.Id)) ?TokenIndex { const token = ??tok_it.peek(); if (token.id == id) { @@ -3331,7 +3331,7 @@ fn eatToken(tok_it: &ast.Tree.TokenList.Iterator, tree: &ast.Tree, id: @TagType( return null; } -fn nextToken(tok_it: &ast.Tree.TokenList.Iterator, tree: &ast.Tree) AnnotatedToken { +fn nextToken(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree) AnnotatedToken { const result = AnnotatedToken{ .index = tok_it.index, .ptr = ??tok_it.next(), @@ -3345,7 +3345,7 @@ fn nextToken(tok_it: &ast.Tree.TokenList.Iterator, tree: &ast.Tree) AnnotatedTok } } -fn prevToken(tok_it: &ast.Tree.TokenList.Iterator, tree: &ast.Tree) void { +fn prevToken(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree) void { while (true) { const prev_tok = tok_it.prev() ?? 
return; if (prev_tok.id == Token.Id.LineComment) continue; diff --git a/std/zig/parser_test.zig b/std/zig/parser_test.zig index 69903bc3fd..8507470bcc 100644 --- a/std/zig/parser_test.zig +++ b/std/zig/parser_test.zig @@ -1803,7 +1803,7 @@ const io = std.io; var fixed_buffer_mem: [100 * 1024]u8 = undefined; -fn testParse(source: []const u8, allocator: &mem.Allocator, anything_changed: &bool) ![]u8 { +fn testParse(source: []const u8, allocator: *mem.Allocator, anything_changed: *bool) ![]u8 { var stderr_file = try io.getStdErr(); var stderr = &io.FileOutStream.init(&stderr_file).stream; diff --git a/std/zig/render.zig b/std/zig/render.zig index ac07917ff1..07e01241b7 100644 --- a/std/zig/render.zig +++ b/std/zig/render.zig @@ -13,7 +13,7 @@ pub const Error = error{ }; /// Returns whether anything changed -pub fn render(allocator: &mem.Allocator, stream: var, tree: &ast.Tree) (@typeOf(stream).Child.Error || Error)!bool { +pub fn render(allocator: *mem.Allocator, stream: var, tree: *ast.Tree) (@typeOf(stream).Child.Error || Error)!bool { comptime assert(@typeId(@typeOf(stream)) == builtin.TypeId.Pointer); var anything_changed: bool = false; @@ -24,13 +24,13 @@ pub fn render(allocator: &mem.Allocator, stream: var, tree: &ast.Tree) (@typeOf( const StreamError = @typeOf(stream).Child.Error; const Stream = std.io.OutStream(StreamError); - anything_changed_ptr: &bool, + anything_changed_ptr: *bool, child_stream: @typeOf(stream), stream: Stream, source_index: usize, source: []const u8, - fn write(iface_stream: &Stream, bytes: []const u8) StreamError!void { + fn write(iface_stream: *Stream, bytes: []const u8) StreamError!void { const self = @fieldParentPtr(MyStream, "stream", iface_stream); if (!self.anything_changed_ptr.*) { @@ -63,9 +63,9 @@ pub fn render(allocator: &mem.Allocator, stream: var, tree: &ast.Tree) (@typeOf( } fn renderRoot( - allocator: &mem.Allocator, + allocator: *mem.Allocator, stream: var, - tree: &ast.Tree, + tree: *ast.Tree, ) (@typeOf(stream).Child.Error || Error)!void { // render all the line comments at the beginning of the file var tok_it = tree.tokens.iterator(0); @@ -90,7 +90,7 @@ fn renderRoot( } } -fn renderExtraNewline(tree: &ast.Tree, stream: var, start_col: &usize, node: &ast.Node) !void { +fn renderExtraNewline(tree: *ast.Tree, stream: var, start_col: *usize, node: *ast.Node) !void { const first_token = node.firstToken(); var prev_token = first_token; while (tree.tokens.at(prev_token - 1).id == Token.Id.DocComment) { @@ -104,7 +104,7 @@ fn renderExtraNewline(tree: &ast.Tree, stream: var, start_col: &usize, node: &as } } -fn renderTopLevelDecl(allocator: &mem.Allocator, stream: var, tree: &ast.Tree, indent: usize, start_col: &usize, decl: &ast.Node) (@typeOf(stream).Child.Error || Error)!void { +fn renderTopLevelDecl(allocator: *mem.Allocator, stream: var, tree: *ast.Tree, indent: usize, start_col: *usize, decl: *ast.Node) (@typeOf(stream).Child.Error || Error)!void { switch (decl.id) { ast.Node.Id.FnProto => { const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", decl); @@ -214,12 +214,12 @@ fn renderTopLevelDecl(allocator: &mem.Allocator, stream: var, tree: &ast.Tree, i } fn renderExpression( - allocator: &mem.Allocator, + allocator: *mem.Allocator, stream: var, - tree: &ast.Tree, + tree: *ast.Tree, indent: usize, - start_col: &usize, - base: &ast.Node, + start_col: *usize, + base: *ast.Node, space: Space, ) (@typeOf(stream).Child.Error || Error)!void { switch (base.id) { @@ -1640,12 +1640,12 @@ fn renderExpression( } fn renderVarDecl( - allocator: 
&mem.Allocator, + allocator: *mem.Allocator, stream: var, - tree: &ast.Tree, + tree: *ast.Tree, indent: usize, - start_col: &usize, - var_decl: &ast.Node.VarDecl, + start_col: *usize, + var_decl: *ast.Node.VarDecl, ) (@typeOf(stream).Child.Error || Error)!void { if (var_decl.visib_token) |visib_token| { try renderToken(tree, stream, visib_token, indent, start_col, Space.Space); // pub @@ -1696,12 +1696,12 @@ fn renderVarDecl( } fn renderParamDecl( - allocator: &mem.Allocator, + allocator: *mem.Allocator, stream: var, - tree: &ast.Tree, + tree: *ast.Tree, indent: usize, - start_col: &usize, - base: &ast.Node, + start_col: *usize, + base: *ast.Node, space: Space, ) (@typeOf(stream).Child.Error || Error)!void { const param_decl = @fieldParentPtr(ast.Node.ParamDecl, "base", base); @@ -1724,12 +1724,12 @@ fn renderParamDecl( } fn renderStatement( - allocator: &mem.Allocator, + allocator: *mem.Allocator, stream: var, - tree: &ast.Tree, + tree: *ast.Tree, indent: usize, - start_col: &usize, - base: &ast.Node, + start_col: *usize, + base: *ast.Node, ) (@typeOf(stream).Child.Error || Error)!void { switch (base.id) { ast.Node.Id.VarDecl => { @@ -1761,7 +1761,7 @@ const Space = enum { BlockStart, }; -fn renderToken(tree: &ast.Tree, stream: var, token_index: ast.TokenIndex, indent: usize, start_col: &usize, space: Space) (@typeOf(stream).Child.Error || Error)!void { +fn renderToken(tree: *ast.Tree, stream: var, token_index: ast.TokenIndex, indent: usize, start_col: *usize, space: Space) (@typeOf(stream).Child.Error || Error)!void { if (space == Space.BlockStart) { if (start_col.* < indent + indent_delta) return renderToken(tree, stream, token_index, indent, start_col, Space.Space); @@ -1928,11 +1928,11 @@ fn renderToken(tree: &ast.Tree, stream: var, token_index: ast.TokenIndex, indent } fn renderDocComments( - tree: &ast.Tree, + tree: *ast.Tree, stream: var, node: var, indent: usize, - start_col: &usize, + start_col: *usize, ) (@typeOf(stream).Child.Error || Error)!void { const comment = node.doc_comments ?? return; var it = comment.lines.iterator(0); @@ -1949,7 +1949,7 @@ fn renderDocComments( } } -fn nodeIsBlock(base: &const ast.Node) bool { +fn nodeIsBlock(base: *const ast.Node) bool { return switch (base.id) { ast.Node.Id.Block, ast.Node.Id.If, @@ -1961,7 +1961,7 @@ fn nodeIsBlock(base: &const ast.Node) bool { }; } -fn nodeCausesSliceOpSpace(base: &ast.Node) bool { +fn nodeCausesSliceOpSpace(base: *ast.Node) bool { const infix_op = base.cast(ast.Node.InfixOp) ?? 
return false; return switch (infix_op.op) { ast.Node.InfixOp.Op.Period => false, diff --git a/std/zig/tokenizer.zig b/std/zig/tokenizer.zig index 7c3b3210fb..8378a9011d 100644 --- a/std/zig/tokenizer.zig +++ b/std/zig/tokenizer.zig @@ -200,7 +200,7 @@ pub const Tokenizer = struct { pending_invalid_token: ?Token, /// For debugging purposes - pub fn dump(self: &Tokenizer, token: &const Token) void { + pub fn dump(self: *Tokenizer, token: *const Token) void { std.debug.warn("{} \"{}\"\n", @tagName(token.id), self.buffer[token.start..token.end]); } @@ -265,7 +265,7 @@ pub const Tokenizer = struct { SawAtSign, }; - pub fn next(self: &Tokenizer) Token { + pub fn next(self: *Tokenizer) Token { if (self.pending_invalid_token) |token| { self.pending_invalid_token = null; return token; @@ -1089,7 +1089,7 @@ pub const Tokenizer = struct { return result; } - fn checkLiteralCharacter(self: &Tokenizer) void { + fn checkLiteralCharacter(self: *Tokenizer) void { if (self.pending_invalid_token != null) return; const invalid_length = self.getInvalidCharacterLength(); if (invalid_length == 0) return; @@ -1100,7 +1100,7 @@ pub const Tokenizer = struct { }; } - fn getInvalidCharacterLength(self: &Tokenizer) u3 { + fn getInvalidCharacterLength(self: *Tokenizer) u3 { const c0 = self.buffer[self.index]; if (c0 < 0x80) { if (c0 < 0x20 or c0 == 0x7f) { diff --git a/test/assemble_and_link.zig b/test/assemble_and_link.zig index 2593f3306a..8c727e87b5 100644 --- a/test/assemble_and_link.zig +++ b/test/assemble_and_link.zig @@ -1,7 +1,7 @@ const builtin = @import("builtin"); const tests = @import("tests.zig"); -pub fn addCases(cases: &tests.CompareOutputContext) void { +pub fn addCases(cases: *tests.CompareOutputContext) void { if (builtin.os == builtin.Os.linux and builtin.arch == builtin.Arch.x86_64) { cases.addAsm("hello world linux x86_64", \\.text diff --git a/test/build_examples.zig b/test/build_examples.zig index 7a4c0f35d9..1ba0ca46cf 100644 --- a/test/build_examples.zig +++ b/test/build_examples.zig @@ -2,7 +2,7 @@ const tests = @import("tests.zig"); const builtin = @import("builtin"); const is_windows = builtin.os == builtin.Os.windows; -pub fn addCases(cases: &tests.BuildExamplesContext) void { +pub fn addCases(cases: *tests.BuildExamplesContext) void { cases.add("example/hello_world/hello.zig"); cases.addC("example/hello_world/hello_libc.zig"); cases.add("example/cat/main.zig"); diff --git a/test/cases/align.zig b/test/cases/align.zig index 582063766f..99bdcdf940 100644 --- a/test/cases/align.zig +++ b/test/cases/align.zig @@ -5,7 +5,7 @@ var foo: u8 align(4) = 100; test "global variable alignment" { assert(@typeOf(&foo).alignment == 4); - assert(@typeOf(&foo) == &align(4) u8); + assert(@typeOf(&foo) == *align(4) u8); const slice = (&foo)[0..1]; assert(@typeOf(slice) == []align(4) u8); } @@ -30,7 +30,7 @@ var baz: packed struct { } = undefined; test "packed struct alignment" { - assert(@typeOf(&baz.b) == &align(1) u32); + assert(@typeOf(&baz.b) == *align(1) u32); } const blah: packed struct { @@ -40,11 +40,11 @@ const blah: packed struct { } = undefined; test "bit field alignment" { - assert(@typeOf(&blah.b) == &align(1:3:6) const u3); + assert(@typeOf(&blah.b) == *align(1:3:6) const u3); } test "default alignment allows unspecified in type syntax" { - assert(&u32 == &align(@alignOf(u32)) u32); + assert(*u32 == *align(@alignOf(u32)) u32); } test "implicitly decreasing pointer alignment" { @@ -53,7 +53,7 @@ test "implicitly decreasing pointer alignment" { assert(addUnaligned(&a, &b) == 7); } -fn 
addUnaligned(a: &align(1) const u32, b: &align(1) const u32) u32 { +fn addUnaligned(a: *align(1) const u32, b: *align(1) const u32) u32 { return a.* + b.*; } @@ -76,7 +76,7 @@ fn testBytesAlign(b: u8) void { b, b, }; - const ptr = @ptrCast(&u32, &bytes[0]); + const ptr = @ptrCast(*u32, &bytes[0]); assert(ptr.* == 0x33333333); } @@ -99,10 +99,10 @@ test "@alignCast pointers" { expectsOnly1(&x); assert(x == 2); } -fn expectsOnly1(x: &align(1) u32) void { +fn expectsOnly1(x: *align(1) u32) void { expects4(@alignCast(4, x)); } -fn expects4(x: &align(4) u32) void { +fn expects4(x: *align(4) u32) void { x.* += 1; } @@ -163,8 +163,8 @@ fn whyWouldYouEverDoThis(comptime align_bytes: u8) align(align_bytes) u8 { test "@ptrCast preserves alignment of bigger source" { var x: u32 align(16) = 1234; - const ptr = @ptrCast(&u8, &x); - assert(@typeOf(ptr) == &align(16) u8); + const ptr = @ptrCast(*u8, &x); + assert(@typeOf(ptr) == *align(16) u8); } test "compile-time known array index has best alignment possible" { @@ -175,10 +175,10 @@ test "compile-time known array index has best alignment possible" { 3, 4, }; - assert(@typeOf(&array[0]) == &align(4) u8); - assert(@typeOf(&array[1]) == &u8); - assert(@typeOf(&array[2]) == &align(2) u8); - assert(@typeOf(&array[3]) == &u8); + assert(@typeOf(&array[0]) == *align(4) u8); + assert(@typeOf(&array[1]) == *u8); + assert(@typeOf(&array[2]) == *align(2) u8); + assert(@typeOf(&array[3]) == *u8); // because align is too small but we still figure out to use 2 var bigger align(2) = []u64{ @@ -187,10 +187,10 @@ test "compile-time known array index has best alignment possible" { 3, 4, }; - assert(@typeOf(&bigger[0]) == &align(2) u64); - assert(@typeOf(&bigger[1]) == &align(2) u64); - assert(@typeOf(&bigger[2]) == &align(2) u64); - assert(@typeOf(&bigger[3]) == &align(2) u64); + assert(@typeOf(&bigger[0]) == *align(2) u64); + assert(@typeOf(&bigger[1]) == *align(2) u64); + assert(@typeOf(&bigger[2]) == *align(2) u64); + assert(@typeOf(&bigger[3]) == *align(2) u64); // because pointer is align 2 and u32 align % 2 == 0 we can assume align 2 var smaller align(2) = []u32{ @@ -199,21 +199,21 @@ test "compile-time known array index has best alignment possible" { 3, 4, }; - testIndex(&smaller[0], 0, &align(2) u32); - testIndex(&smaller[0], 1, &align(2) u32); - testIndex(&smaller[0], 2, &align(2) u32); - testIndex(&smaller[0], 3, &align(2) u32); + testIndex(&smaller[0], 0, *align(2) u32); + testIndex(&smaller[0], 1, *align(2) u32); + testIndex(&smaller[0], 2, *align(2) u32); + testIndex(&smaller[0], 3, *align(2) u32); // has to use ABI alignment because index known at runtime only - testIndex2(&array[0], 0, &u8); - testIndex2(&array[0], 1, &u8); - testIndex2(&array[0], 2, &u8); - testIndex2(&array[0], 3, &u8); + testIndex2(&array[0], 0, *u8); + testIndex2(&array[0], 1, *u8); + testIndex2(&array[0], 2, *u8); + testIndex2(&array[0], 3, *u8); } -fn testIndex(smaller: &align(2) u32, index: usize, comptime T: type) void { +fn testIndex(smaller: *align(2) u32, index: usize, comptime T: type) void { assert(@typeOf(&smaller[index]) == T); } -fn testIndex2(ptr: &align(4) u8, index: usize, comptime T: type) void { +fn testIndex2(ptr: *align(4) u8, index: usize, comptime T: type) void { assert(@typeOf(&ptr[index]) == T); } diff --git a/test/cases/atomics.zig b/test/cases/atomics.zig index d406285d29..67c9ab3dd1 100644 --- a/test/cases/atomics.zig +++ b/test/cases/atomics.zig @@ -34,7 +34,7 @@ test "atomicrmw and atomicload" { testAtomicLoad(&data); } -fn testAtomicRmw(ptr: &u8) void { +fn 
testAtomicRmw(ptr: *u8) void { const prev_value = @atomicRmw(u8, ptr, AtomicRmwOp.Xchg, 42, AtomicOrder.SeqCst); assert(prev_value == 200); comptime { @@ -45,7 +45,7 @@ fn testAtomicRmw(ptr: &u8) void { } } -fn testAtomicLoad(ptr: &u8) void { +fn testAtomicLoad(ptr: *u8) void { const x = @atomicLoad(u8, ptr, AtomicOrder.SeqCst); assert(x == 42); } @@ -54,18 +54,18 @@ test "cmpxchg with ptr" { var data1: i32 = 1234; var data2: i32 = 5678; var data3: i32 = 9101; - var x: &i32 = &data1; - if (@cmpxchgWeak(&i32, &x, &data2, &data3, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) |x1| { + var x: *i32 = &data1; + if (@cmpxchgWeak(*i32, &x, &data2, &data3, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) |x1| { assert(x1 == &data1); } else { @panic("cmpxchg should have failed"); } - while (@cmpxchgWeak(&i32, &x, &data1, &data3, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) |x1| { + while (@cmpxchgWeak(*i32, &x, &data1, &data3, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) |x1| { assert(x1 == &data1); } assert(x == &data3); - assert(@cmpxchgStrong(&i32, &x, &data3, &data2, AtomicOrder.SeqCst, AtomicOrder.SeqCst) == null); + assert(@cmpxchgStrong(*i32, &x, &data3, &data2, AtomicOrder.SeqCst, AtomicOrder.SeqCst) == null); assert(x == &data2); } diff --git a/test/cases/bugs/655.zig b/test/cases/bugs/655.zig index 4431767d5c..50374d4e6d 100644 --- a/test/cases/bugs/655.zig +++ b/test/cases/bugs/655.zig @@ -3,10 +3,10 @@ const other_file = @import("655_other_file.zig"); test "function with &const parameter with type dereferenced by namespace" { const x: other_file.Integer = 1234; - comptime std.debug.assert(@typeOf(&x) == &const other_file.Integer); + comptime std.debug.assert(@typeOf(&x) == *const other_file.Integer); foo(x); } -fn foo(x: &const other_file.Integer) void { +fn foo(x: *const other_file.Integer) void { std.debug.assert(x.* == 1234); } diff --git a/test/cases/bugs/828.zig b/test/cases/bugs/828.zig index 10d7370b90..50ae0fd279 100644 --- a/test/cases/bugs/828.zig +++ b/test/cases/bugs/828.zig @@ -3,7 +3,7 @@ const CountBy = struct { const One = CountBy{ .a = 1 }; - pub fn counter(self: &const CountBy) Counter { + pub fn counter(self: *const CountBy) Counter { return Counter{ .i = 0 }; } }; @@ -11,13 +11,13 @@ const CountBy = struct { const Counter = struct { i: usize, - pub fn count(self: &Counter) bool { + pub fn count(self: *Counter) bool { self.i += 1; return self.i <= 10; } }; -fn constCount(comptime cb: &const CountBy, comptime unused: u32) void { +fn constCount(comptime cb: *const CountBy, comptime unused: u32) void { comptime { var cnt = cb.counter(); if (cnt.i != 0) @compileError("Counter instance reused!"); diff --git a/test/cases/bugs/920.zig b/test/cases/bugs/920.zig index c315206072..2903f05a29 100644 --- a/test/cases/bugs/920.zig +++ b/test/cases/bugs/920.zig @@ -9,10 +9,10 @@ const ZigTable = struct { pdf: fn (f64) f64, is_symmetric: bool, - zero_case: fn (&Random, f64) f64, + zero_case: fn (*Random, f64) f64, }; -fn ZigTableGen(comptime is_symmetric: bool, comptime r: f64, comptime v: f64, comptime f: fn (f64) f64, comptime f_inv: fn (f64) f64, comptime zero_case: fn (&Random, f64) f64) ZigTable { +fn ZigTableGen(comptime is_symmetric: bool, comptime r: f64, comptime v: f64, comptime f: fn (f64) f64, comptime f_inv: fn (f64) f64, comptime zero_case: fn (*Random, f64) f64) ZigTable { var tables: ZigTable = undefined; tables.is_symmetric = is_symmetric; @@ -45,7 +45,7 @@ fn norm_f(x: f64) f64 { fn norm_f_inv(y: f64) f64 { return math.sqrt(-2.0 * math.ln(y)); } -fn norm_zero_case(random: &Random, u: 
f64) f64 { +fn norm_zero_case(random: *Random, u: f64) f64 { return 0.0; } diff --git a/test/cases/cast.zig b/test/cases/cast.zig index e37451ea93..7358a4ffd8 100644 --- a/test/cases/cast.zig +++ b/test/cases/cast.zig @@ -3,20 +3,20 @@ const mem = @import("std").mem; test "int to ptr cast" { const x = usize(13); - const y = @intToPtr(&u8, x); + const y = @intToPtr(*u8, x); const z = @ptrToInt(y); assert(z == 13); } test "integer literal to pointer cast" { - const vga_mem = @intToPtr(&u16, 0xB8000); + const vga_mem = @intToPtr(*u16, 0xB8000); assert(@ptrToInt(vga_mem) == 0xB8000); } test "pointer reinterpret const float to int" { const float: f64 = 5.99999999999994648725e-01; const float_ptr = &float; - const int_ptr = @ptrCast(&const i32, float_ptr); + const int_ptr = @ptrCast(*const i32, float_ptr); const int_val = int_ptr.*; assert(int_val == 858993411); } @@ -28,7 +28,7 @@ test "implicitly cast a pointer to a const pointer of it" { assert(x == 2); } -fn funcWithConstPtrPtr(x: &const &i32) void { +fn funcWithConstPtrPtr(x: *const *i32) void { x.*.* += 1; } @@ -66,11 +66,11 @@ fn Struct(comptime T: type) type { const Self = this; x: T, - fn pointer(self: &const Self) Self { + fn pointer(self: *const Self) Self { return self.*; } - fn maybePointer(self: ?&const Self) Self { + fn maybePointer(self: ?*const Self) Self { const none = Self{ .x = if (T == void) void{} else 0 }; return (self ?? &none).*; } @@ -80,11 +80,11 @@ fn Struct(comptime T: type) type { const Union = union { x: u8, - fn pointer(self: &const Union) Union { + fn pointer(self: *const Union) Union { return self.*; } - fn maybePointer(self: ?&const Union) Union { + fn maybePointer(self: ?*const Union) Union { const none = Union{ .x = 0 }; return (self ?? &none).*; } @@ -94,11 +94,11 @@ const Enum = enum { None, Some, - fn pointer(self: &const Enum) Enum { + fn pointer(self: *const Enum) Enum { return self.*; } - fn maybePointer(self: ?&const Enum) Enum { + fn maybePointer(self: ?*const Enum) Enum { return (self ?? 
&Enum.None).*; } }; @@ -107,16 +107,16 @@ test "implicitly cast indirect pointer to maybe-indirect pointer" { const S = struct { const Self = this; x: u8, - fn constConst(p: &const &const Self) u8 { + fn constConst(p: *const *const Self) u8 { return (p.*).x; } - fn maybeConstConst(p: ?&const &const Self) u8 { + fn maybeConstConst(p: ?*const *const Self) u8 { return ((??p).*).x; } - fn constConstConst(p: &const &const &const Self) u8 { + fn constConstConst(p: *const *const *const Self) u8 { return (p.*.*).x; } - fn maybeConstConstConst(p: ?&const &const &const Self) u8 { + fn maybeConstConstConst(p: ?*const *const *const Self) u8 { return ((??p).*.*).x; } }; @@ -166,12 +166,12 @@ fn testPeerResolveArrayConstSlice(b: bool) void { } test "integer literal to &const int" { - const x: &const i32 = 3; + const x: *const i32 = 3; assert(x.* == 3); } test "string literal to &const []const u8" { - const x: &const []const u8 = "hello"; + const x: *const []const u8 = "hello"; assert(mem.eql(u8, x.*, "hello")); } @@ -209,11 +209,11 @@ test "return null from fn() error!?&T" { const b = returnNullLitFromMaybeTypeErrorRef(); assert((try a) == null and (try b) == null); } -fn returnNullFromMaybeTypeErrorRef() error!?&A { - const a: ?&A = null; +fn returnNullFromMaybeTypeErrorRef() error!?*A { + const a: ?*A = null; return a; } -fn returnNullLitFromMaybeTypeErrorRef() error!?&A { +fn returnNullLitFromMaybeTypeErrorRef() error!?*A { return null; } @@ -312,7 +312,7 @@ test "implicit cast from &const [N]T to []const T" { fn testCastConstArrayRefToConstSlice() void { const blah = "aoeu"; const const_array_ref = &blah; - assert(@typeOf(const_array_ref) == &const [4]u8); + assert(@typeOf(const_array_ref) == *const [4]u8); const slice: []const u8 = const_array_ref; assert(mem.eql(u8, slice, "aoeu")); } @@ -322,7 +322,7 @@ test "var args implicitly casts by value arg to const ref" { } fn foo(args: ...) 
void { - assert(@typeOf(args[0]) == &const [5]u8); + assert(@typeOf(args[0]) == *const [5]u8); } test "peer type resolution: error and [N]T" { diff --git a/test/cases/const_slice_child.zig b/test/cases/const_slice_child.zig index a92c589186..e012c729a0 100644 --- a/test/cases/const_slice_child.zig +++ b/test/cases/const_slice_child.zig @@ -1,10 +1,10 @@ const debug = @import("std").debug; const assert = debug.assert; -var argv: &const &const u8 = undefined; +var argv: *const *const u8 = undefined; test "const slice child" { - const strs = ([]&const u8){ + const strs = ([]*const u8){ c"one", c"two", c"three", @@ -29,7 +29,7 @@ fn bar(argc: usize) void { foo(args); } -fn strlen(ptr: &const u8) usize { +fn strlen(ptr: *const u8) usize { var count: usize = 0; while (ptr[count] != 0) : (count += 1) {} return count; diff --git a/test/cases/coroutines.zig b/test/cases/coroutines.zig index 8a071c6aad..4d2aa54a69 100644 --- a/test/cases/coroutines.zig +++ b/test/cases/coroutines.zig @@ -154,7 +154,7 @@ test "async function with dot syntax" { test "async fn pointer in a struct field" { var data: i32 = 1; const Foo = struct { - bar: async<&std.mem.Allocator> fn (&i32) void, + bar: async<*std.mem.Allocator> fn (*i32) void, }; var foo = Foo{ .bar = simpleAsyncFn2 }; const p = (async foo.bar(&data)) catch unreachable; @@ -162,7 +162,7 @@ test "async fn pointer in a struct field" { cancel p; assert(data == 4); } -async<&std.mem.Allocator> fn simpleAsyncFn2(y: &i32) void { +async<*std.mem.Allocator> fn simpleAsyncFn2(y: *i32) void { defer y.* += 2; y.* += 1; suspend; @@ -220,7 +220,7 @@ test "break from suspend" { cancel p; std.debug.assert(my_result == 2); } -async fn testBreakFromSuspend(my_result: &i32) void { +async fn testBreakFromSuspend(my_result: *i32) void { s: suspend |p| { break :s; } diff --git a/test/cases/enum.zig b/test/cases/enum.zig index cbcbc5e306..ae9f04869b 100644 --- a/test/cases/enum.zig +++ b/test/cases/enum.zig @@ -56,14 +56,14 @@ test "constant enum with payload" { shouldBeNotEmpty(full); } -fn shouldBeEmpty(x: &const AnEnumWithPayload) void { +fn shouldBeEmpty(x: *const AnEnumWithPayload) void { switch (x.*) { AnEnumWithPayload.Empty => {}, else => unreachable, } } -fn shouldBeNotEmpty(x: &const AnEnumWithPayload) void { +fn shouldBeNotEmpty(x: *const AnEnumWithPayload) void { switch (x.*) { AnEnumWithPayload.Empty => unreachable, else => {}, @@ -750,15 +750,15 @@ test "bit field access with enum fields" { assert(data.b == B.Four3); } -fn getA(data: &const BitFieldOfEnums) A { +fn getA(data: *const BitFieldOfEnums) A { return data.a; } -fn getB(data: &const BitFieldOfEnums) B { +fn getB(data: *const BitFieldOfEnums) B { return data.b; } -fn getC(data: &const BitFieldOfEnums) C { +fn getC(data: *const BitFieldOfEnums) C { return data.c; } diff --git a/test/cases/enum_with_members.zig b/test/cases/enum_with_members.zig index 8fafa70b02..18174186a9 100644 --- a/test/cases/enum_with_members.zig +++ b/test/cases/enum_with_members.zig @@ -6,7 +6,7 @@ const ET = union(enum) { SINT: i32, UINT: u32, - pub fn print(a: &const ET, buf: []u8) error!usize { + pub fn print(a: *const ET, buf: []u8) error!usize { return switch (a.*) { ET.SINT => |x| fmt.formatIntBuf(buf, x, 10, false, 0), ET.UINT => |x| fmt.formatIntBuf(buf, x, 10, false, 0), diff --git a/test/cases/eval.zig b/test/cases/eval.zig index 8a6dc25bd8..6c8bcfdbab 100644 --- a/test/cases/eval.zig +++ b/test/cases/eval.zig @@ -282,7 +282,7 @@ fn fnWithFloatMode() f32 { const SimpleStruct = struct { field: i32, - fn method(self: &const 
SimpleStruct) i32 { + fn method(self: *const SimpleStruct) i32 { return self.field + 3; } }; @@ -367,7 +367,7 @@ test "const global shares pointer with other same one" { assertEqualPtrs(&hi1[0], &hi2[0]); comptime assert(&hi1[0] == &hi2[0]); } -fn assertEqualPtrs(ptr1: &const u8, ptr2: &const u8) void { +fn assertEqualPtrs(ptr1: *const u8, ptr2: *const u8) void { assert(ptr1 == ptr2); } @@ -418,9 +418,9 @@ test "string literal used as comptime slice is memoized" { } test "comptime slice of undefined pointer of length 0" { - const slice1 = (&i32)(undefined)[0..0]; + const slice1 = (*i32)(undefined)[0..0]; assert(slice1.len == 0); - const slice2 = (&i32)(undefined)[100..100]; + const slice2 = (*i32)(undefined)[100..100]; assert(slice2.len == 0); } @@ -472,7 +472,7 @@ test "comptime function with mutable pointer is not memoized" { } } -fn increment(value: &i32) void { +fn increment(value: *i32) void { value.* += 1; } @@ -517,7 +517,7 @@ test "comptime slice of pointer preserves comptime var" { const SingleFieldStruct = struct { x: i32, - fn read_x(self: &const SingleFieldStruct) i32 { + fn read_x(self: *const SingleFieldStruct) i32 { return self.x; } }; diff --git a/test/cases/field_parent_ptr.zig b/test/cases/field_parent_ptr.zig index 1a7de9ce35..00d4e0f367 100644 --- a/test/cases/field_parent_ptr.zig +++ b/test/cases/field_parent_ptr.zig @@ -24,7 +24,7 @@ const foo = Foo{ .d = -10, }; -fn testParentFieldPtr(c: &const i32) void { +fn testParentFieldPtr(c: *const i32) void { assert(c == &foo.c); const base = @fieldParentPtr(Foo, "c", c); @@ -32,7 +32,7 @@ fn testParentFieldPtr(c: &const i32) void { assert(&base.c == c); } -fn testParentFieldPtrFirst(a: &const bool) void { +fn testParentFieldPtrFirst(a: *const bool) void { assert(a == &foo.a); const base = @fieldParentPtr(Foo, "a", a); diff --git a/test/cases/fn_in_struct_in_comptime.zig b/test/cases/fn_in_struct_in_comptime.zig index c22da71940..fabb57e9cb 100644 --- a/test/cases/fn_in_struct_in_comptime.zig +++ b/test/cases/fn_in_struct_in_comptime.zig @@ -1,9 +1,9 @@ const assert = @import("std").debug.assert; -fn get_foo() fn (&u8) usize { +fn get_foo() fn (*u8) usize { comptime { return struct { - fn func(ptr: &u8) usize { + fn func(ptr: *u8) usize { var u = @ptrToInt(ptr); return u; } @@ -13,5 +13,5 @@ fn get_foo() fn (&u8) usize { test "define a function in an anonymous struct in comptime" { const foo = get_foo(); - assert(foo(@intToPtr(&u8, 12345)) == 12345); + assert(foo(@intToPtr(*u8, 12345)) == 12345); } diff --git a/test/cases/generics.zig b/test/cases/generics.zig index 37cd1b89e4..a76990e2a1 100644 --- a/test/cases/generics.zig +++ b/test/cases/generics.zig @@ -96,8 +96,8 @@ test "generic struct" { fn GenNode(comptime T: type) type { return struct { value: T, - next: ?&GenNode(T), - fn getVal(n: &const GenNode(T)) T { + next: ?*GenNode(T), + fn getVal(n: *const GenNode(T)) T { return n.value; } }; @@ -126,11 +126,11 @@ test "generic fn with implicit cast" { 13, }) == 0); } -fn getByte(ptr: ?&const u8) u8 { +fn getByte(ptr: ?*const u8) u8 { return (??ptr).*; } fn getFirstByte(comptime T: type, mem: []const T) u8 { - return getByte(@ptrCast(&const u8, &mem[0])); + return getByte(@ptrCast(*const u8, &mem[0])); } const foos = []fn (var) bool{ diff --git a/test/cases/incomplete_struct_param_tld.zig b/test/cases/incomplete_struct_param_tld.zig index a2f57743d0..552d6ef185 100644 --- a/test/cases/incomplete_struct_param_tld.zig +++ b/test/cases/incomplete_struct_param_tld.zig @@ -11,12 +11,12 @@ const B = struct { const C = struct { x: 
i32, - fn d(c: &const C) i32 { + fn d(c: *const C) i32 { return c.x; } }; -fn foo(a: &const A) i32 { +fn foo(a: *const A) i32 { return a.b.c.d(); } diff --git a/test/cases/math.zig b/test/cases/math.zig index 0b4622702f..5f16e903b2 100644 --- a/test/cases/math.zig +++ b/test/cases/math.zig @@ -28,13 +28,13 @@ fn testDivision() void { assert(divTrunc(f32, -5.0, 3.0) == -1.0); comptime { - assert(1194735857077236777412821811143690633098347576 % 508740759824825164163191790951174292733114988 == 177254337427586449086438229241342047632117600); - assert(@rem(-1194735857077236777412821811143690633098347576, 508740759824825164163191790951174292733114988) == -177254337427586449086438229241342047632117600); - assert(1194735857077236777412821811143690633098347576 / 508740759824825164163191790951174292733114988 == 2); - assert(@divTrunc(-1194735857077236777412821811143690633098347576, 508740759824825164163191790951174292733114988) == -2); - assert(@divTrunc(1194735857077236777412821811143690633098347576, -508740759824825164163191790951174292733114988) == -2); - assert(@divTrunc(-1194735857077236777412821811143690633098347576, -508740759824825164163191790951174292733114988) == 2); - assert(4126227191251978491697987544882340798050766755606969681711 % 10 == 1); + assert(1194735857077236777412821811143690633098347576 % 508740759824825164163191790951174292733114988 == 177254337427586449086438229241342047632117600,); + assert(@rem(-1194735857077236777412821811143690633098347576, 508740759824825164163191790951174292733114988) == -177254337427586449086438229241342047632117600,); + assert(1194735857077236777412821811143690633098347576 / 508740759824825164163191790951174292733114988 == 2,); + assert(@divTrunc(-1194735857077236777412821811143690633098347576, 508740759824825164163191790951174292733114988) == -2,); + assert(@divTrunc(1194735857077236777412821811143690633098347576, -508740759824825164163191790951174292733114988) == -2,); + assert(@divTrunc(-1194735857077236777412821811143690633098347576, -508740759824825164163191790951174292733114988) == 2,); + assert(4126227191251978491697987544882340798050766755606969681711 % 10 == 1,); } } fn div(comptime T: type, a: T, b: T) T { @@ -324,8 +324,8 @@ test "big number addition" { test "big number multiplication" { comptime { - assert(45960427431263824329884196484953148229 * 128339149605334697009938835852565949723 == 5898522172026096622534201617172456926982464453350084962781392314016180490567); - assert(594491908217841670578297176641415611445982232488944558774612 * 390603545391089362063884922208143568023166603618446395589768 == 232210647056203049913662402532976186578842425262306016094292237500303028346593132411865381225871291702600263463125370016); + assert(45960427431263824329884196484953148229 * 128339149605334697009938835852565949723 == 5898522172026096622534201617172456926982464453350084962781392314016180490567,); + assert(594491908217841670578297176641415611445982232488944558774612 * 390603545391089362063884922208143568023166603618446395589768 == 232210647056203049913662402532976186578842425262306016094292237500303028346593132411865381225871291702600263463125370016,); } } diff --git a/test/cases/misc.zig b/test/cases/misc.zig index b6b2da8de5..919b978f9f 100644 --- a/test/cases/misc.zig +++ b/test/cases/misc.zig @@ -252,20 +252,20 @@ test "multiline C string" { } test "type equality" { - assert(&const u8 != &u8); + assert(*const u8 != *u8); } const global_a: i32 = 1234; -const global_b: &const i32 = &global_a; -const global_c: &const f32 = @ptrCast(&const f32, 
global_b); +const global_b: *const i32 = &global_a; +const global_c: *const f32 = @ptrCast(*const f32, global_b); test "compile time global reinterpret" { - const d = @ptrCast(&const i32, global_c); + const d = @ptrCast(*const i32, global_c); assert(d.* == 1234); } test "explicit cast maybe pointers" { - const a: ?&i32 = undefined; - const b: ?&f32 = @ptrCast(?&f32, a); + const a: ?*i32 = undefined; + const b: ?*f32 = @ptrCast(?*f32, a); } test "generic malloc free" { @@ -274,7 +274,7 @@ test "generic malloc free" { } var some_mem: [100]u8 = undefined; fn memAlloc(comptime T: type, n: usize) error![]T { - return @ptrCast(&T, &some_mem[0])[0..n]; + return @ptrCast(*T, &some_mem[0])[0..n]; } fn memFree(comptime T: type, memory: []T) void {} @@ -357,7 +357,7 @@ const test3_foo = Test3Foo{ }, }; const test3_bar = Test3Foo{ .Two = 13 }; -fn test3_1(f: &const Test3Foo) void { +fn test3_1(f: *const Test3Foo) void { switch (f.*) { Test3Foo.Three => |pt| { assert(pt.x == 3); @@ -366,7 +366,7 @@ fn test3_1(f: &const Test3Foo) void { else => unreachable, } } -fn test3_2(f: &const Test3Foo) void { +fn test3_2(f: *const Test3Foo) void { switch (f.*) { Test3Foo.Two => |x| { assert(x == 13); @@ -393,7 +393,7 @@ test "pointer comparison" { const b = &a; assert(ptrEql(b, b)); } -fn ptrEql(a: &const []const u8, b: &const []const u8) bool { +fn ptrEql(a: *const []const u8, b: *const []const u8) bool { return a == b; } @@ -446,13 +446,13 @@ fn testPointerToVoidReturnType() error!void { return a.*; } const test_pointer_to_void_return_type_x = void{}; -fn testPointerToVoidReturnType2() &const void { +fn testPointerToVoidReturnType2() *const void { return &test_pointer_to_void_return_type_x; } test "non const ptr to aliased type" { const int = i32; - assert(?&int == ?&i32); + assert(?*int == ?*i32); } test "array 2D const double ptr" { @@ -463,7 +463,7 @@ test "array 2D const double ptr" { testArray2DConstDoublePtr(&rect_2d_vertexes[0][0]); } -fn testArray2DConstDoublePtr(ptr: &const f32) void { +fn testArray2DConstDoublePtr(ptr: *const f32) void { assert(ptr[0] == 1.0); assert(ptr[1] == 2.0); } @@ -497,7 +497,7 @@ test "@typeId" { assert(@typeId(u64) == Tid.Int); assert(@typeId(f32) == Tid.Float); assert(@typeId(f64) == Tid.Float); - assert(@typeId(&f32) == Tid.Pointer); + assert(@typeId(*f32) == Tid.Pointer); assert(@typeId([2]u8) == Tid.Array); assert(@typeId(AStruct) == Tid.Struct); assert(@typeId(@typeOf(1)) == Tid.IntLiteral); @@ -540,7 +540,7 @@ test "@typeName" { }; comptime { assert(mem.eql(u8, @typeName(i64), "i64")); - assert(mem.eql(u8, @typeName(&usize), "&usize")); + assert(mem.eql(u8, @typeName(*usize), "*usize")); // https://github.com/ziglang/zig/issues/675 assert(mem.eql(u8, @typeName(TypeFromFn(u8)), "TypeFromFn(u8)")); assert(mem.eql(u8, @typeName(Struct), "Struct")); @@ -555,7 +555,7 @@ fn TypeFromFn(comptime T: type) type { test "volatile load and store" { var number: i32 = 1234; - const ptr = (&volatile i32)(&number); + const ptr = (*volatile i32)(&number); ptr.* += 1; assert(ptr.* == 1235); } @@ -587,28 +587,28 @@ var global_ptr = &gdt[0]; // can't really run this test but we can make sure it has no compile error // and generates code -const vram = @intToPtr(&volatile u8, 0x20000000)[0..0x8000]; +const vram = @intToPtr(*volatile u8, 0x20000000)[0..0x8000]; export fn writeToVRam() void { vram[0] = 'X'; } test "pointer child field" { - assert((&u32).Child == u32); + assert((*u32).Child == u32); } const OpaqueA = @OpaqueType(); const OpaqueB = @OpaqueType(); test "@OpaqueType" { - 
assert(&OpaqueA != &OpaqueB); + assert(*OpaqueA != *OpaqueB); assert(mem.eql(u8, @typeName(OpaqueA), "OpaqueA")); assert(mem.eql(u8, @typeName(OpaqueB), "OpaqueB")); } test "variable is allowed to be a pointer to an opaque type" { var x: i32 = 1234; - _ = hereIsAnOpaqueType(@ptrCast(&OpaqueA, &x)); + _ = hereIsAnOpaqueType(@ptrCast(*OpaqueA, &x)); } -fn hereIsAnOpaqueType(ptr: &OpaqueA) &OpaqueA { +fn hereIsAnOpaqueType(ptr: *OpaqueA) *OpaqueA { var a = ptr; return a; } @@ -692,7 +692,7 @@ test "packed struct, enum, union parameters in extern function" { }, PackedUnion{ .a = 1 }, PackedEnum.A); } -export fn testPackedStuff(a: &const PackedStruct, b: &const PackedUnion, c: PackedEnum) void {} +export fn testPackedStuff(a: *const PackedStruct, b: *const PackedUnion, c: PackedEnum) void {} test "slicing zero length array" { const s1 = ""[0..]; @@ -703,8 +703,8 @@ test "slicing zero length array" { assert(mem.eql(u32, s2, []u32{})); } -const addr1 = @ptrCast(&const u8, emptyFn); +const addr1 = @ptrCast(*const u8, emptyFn); test "comptime cast fn to ptr" { - const addr2 = @ptrCast(&const u8, emptyFn); + const addr2 = @ptrCast(*const u8, emptyFn); comptime assert(addr1 == addr2); } diff --git a/test/cases/null.zig b/test/cases/null.zig index 936e5fafbd..bd78990ff4 100644 --- a/test/cases/null.zig +++ b/test/cases/null.zig @@ -65,7 +65,7 @@ test "if var maybe pointer" { .d = 1, }) == 15); } -fn shouldBeAPlus1(p: &const Particle) u64 { +fn shouldBeAPlus1(p: *const Particle) u64 { var maybe_particle: ?Particle = p.*; if (maybe_particle) |*particle| { particle.a += 1; diff --git a/test/cases/reflection.zig b/test/cases/reflection.zig index b82ce6340f..48fcc9ef03 100644 --- a/test/cases/reflection.zig +++ b/test/cases/reflection.zig @@ -5,7 +5,7 @@ const reflection = this; test "reflection: array, pointer, nullable, error union type child" { comptime { assert(([10]u8).Child == u8); - assert((&u8).Child == u8); + assert((*u8).Child == u8); assert((error!u8).Payload == u8); assert((?u8).Child == u8); } diff --git a/test/cases/slice.zig b/test/cases/slice.zig index eae6fa895e..24e5239e2d 100644 --- a/test/cases/slice.zig +++ b/test/cases/slice.zig @@ -1,7 +1,7 @@ const assert = @import("std").debug.assert; const mem = @import("std").mem; -const x = @intToPtr(&i32, 0x1000)[0..0x500]; +const x = @intToPtr(*i32, 0x1000)[0..0x500]; const y = x[0x100..]; test "compile time slice of pointer to hard coded address" { assert(@ptrToInt(x.ptr) == 0x1000); diff --git a/test/cases/struct.zig b/test/cases/struct.zig index d4a1c7fbe3..0712e508de 100644 --- a/test/cases/struct.zig +++ b/test/cases/struct.zig @@ -43,7 +43,7 @@ const VoidStructFieldsFoo = struct { test "structs" { var foo: StructFoo = undefined; - @memset(@ptrCast(&u8, &foo), 0, @sizeOf(StructFoo)); + @memset(@ptrCast(*u8, &foo), 0, @sizeOf(StructFoo)); foo.a += 1; foo.b = foo.a == 1; testFoo(foo); @@ -55,16 +55,16 @@ const StructFoo = struct { b: bool, c: f32, }; -fn testFoo(foo: &const StructFoo) void { +fn testFoo(foo: *const StructFoo) void { assert(foo.b); } -fn testMutation(foo: &StructFoo) void { +fn testMutation(foo: *StructFoo) void { foo.c = 100; } const Node = struct { val: Val, - next: &Node, + next: *Node, }; const Val = struct { @@ -112,7 +112,7 @@ fn aFunc() i32 { return 13; } -fn callStructField(foo: &const Foo) i32 { +fn callStructField(foo: *const Foo) i32 { return foo.ptr(); } @@ -124,7 +124,7 @@ test "store member function in variable" { } const MemberFnTestFoo = struct { x: i32, - fn member(foo: &const MemberFnTestFoo) i32 { + fn 
member(foo: *const MemberFnTestFoo) i32 { return foo.x; } }; @@ -141,7 +141,7 @@ test "member functions" { } const MemberFnRand = struct { seed: u32, - pub fn getSeed(r: &const MemberFnRand) u32 { + pub fn getSeed(r: *const MemberFnRand) u32 { return r.seed; } }; @@ -166,7 +166,7 @@ test "empty struct method call" { assert(es.method() == 1234); } const EmptyStruct = struct { - fn method(es: &const EmptyStruct) i32 { + fn method(es: *const EmptyStruct) i32 { return 1234; } }; @@ -228,15 +228,15 @@ test "bit field access" { assert(data.b == 3); } -fn getA(data: &const BitField1) u3 { +fn getA(data: *const BitField1) u3 { return data.a; } -fn getB(data: &const BitField1) u3 { +fn getB(data: *const BitField1) u3 { return data.b; } -fn getC(data: &const BitField1) u2 { +fn getC(data: *const BitField1) u2 { return data.c; } @@ -396,8 +396,8 @@ const Bitfields = packed struct { test "native bit field understands endianness" { var all: u64 = 0x7765443322221111; var bytes: [8]u8 = undefined; - @memcpy(&bytes[0], @ptrCast(&u8, &all), 8); - var bitfields = @ptrCast(&Bitfields, &bytes[0]).*; + @memcpy(&bytes[0], @ptrCast(*u8, &all), 8); + var bitfields = @ptrCast(*Bitfields, &bytes[0]).*; assert(bitfields.f1 == 0x1111); assert(bitfields.f2 == 0x2222); @@ -415,7 +415,7 @@ test "align 1 field before self referential align 8 field as slice return type" const Expr = union(enum) { Literal: u8, - Question: &Expr, + Question: *Expr, }; fn alloc(comptime T: type) []T { diff --git a/test/cases/struct_contains_null_ptr_itself.zig b/test/cases/struct_contains_null_ptr_itself.zig index b6cb1a94cc..21175974b3 100644 --- a/test/cases/struct_contains_null_ptr_itself.zig +++ b/test/cases/struct_contains_null_ptr_itself.zig @@ -2,13 +2,13 @@ const std = @import("std"); const assert = std.debug.assert; test "struct contains null pointer which contains original struct" { - var x: ?&NodeLineComment = null; + var x: ?*NodeLineComment = null; assert(x == null); } pub const Node = struct { id: Id, - comment: ?&NodeLineComment, + comment: ?*NodeLineComment, pub const Id = enum { Root, diff --git a/test/cases/switch.zig b/test/cases/switch.zig index 495fa9f3ed..c6a4b60f09 100644 --- a/test/cases/switch.zig +++ b/test/cases/switch.zig @@ -90,7 +90,7 @@ const SwitchProngWithVarEnum = union(enum) { Two: f32, Meh: void, }; -fn switchProngWithVarFn(a: &const SwitchProngWithVarEnum) void { +fn switchProngWithVarFn(a: *const SwitchProngWithVarEnum) void { switch (a.*) { SwitchProngWithVarEnum.One => |x| { assert(x == 13); diff --git a/test/cases/this.zig b/test/cases/this.zig index 5e433b5037..ba51d0ac90 100644 --- a/test/cases/this.zig +++ b/test/cases/this.zig @@ -8,7 +8,7 @@ fn Point(comptime T: type) type { x: T, y: T, - fn addOne(self: &Self) void { + fn addOne(self: *Self) void { self.x += 1; self.y += 1; } diff --git a/test/cases/type_info.zig b/test/cases/type_info.zig index 2561d70865..921ff785a7 100644 --- a/test/cases/type_info.zig +++ b/test/cases/type_info.zig @@ -37,7 +37,7 @@ test "type info: pointer type info" { } fn testPointer() void { - const u32_ptr_info = @typeInfo(&u32); + const u32_ptr_info = @typeInfo(*u32); assert(TypeId(u32_ptr_info) == TypeId.Pointer); assert(u32_ptr_info.Pointer.is_const == false); assert(u32_ptr_info.Pointer.is_volatile == false); @@ -169,14 +169,14 @@ fn testUnion() void { assert(notag_union_info.Union.fields[1].field_type == u32); const TestExternUnion = extern union { - foo: &c_void, + foo: *c_void, }; const extern_union_info = @typeInfo(TestExternUnion); 
assert(extern_union_info.Union.layout == TypeInfo.ContainerLayout.Extern); assert(extern_union_info.Union.tag_type == @typeOf(undefined)); assert(extern_union_info.Union.fields[0].enum_field == null); - assert(extern_union_info.Union.fields[0].field_type == &c_void); + assert(extern_union_info.Union.fields[0].field_type == *c_void); } test "type info: struct info" { @@ -190,13 +190,13 @@ fn testStruct() void { assert(struct_info.Struct.layout == TypeInfo.ContainerLayout.Packed); assert(struct_info.Struct.fields.len == 3); assert(struct_info.Struct.fields[1].offset == null); - assert(struct_info.Struct.fields[2].field_type == &TestStruct); + assert(struct_info.Struct.fields[2].field_type == *TestStruct); assert(struct_info.Struct.defs.len == 2); assert(struct_info.Struct.defs[0].is_pub); assert(!struct_info.Struct.defs[0].data.Fn.is_extern); assert(struct_info.Struct.defs[0].data.Fn.lib_name == null); assert(struct_info.Struct.defs[0].data.Fn.return_type == void); - assert(struct_info.Struct.defs[0].data.Fn.fn_type == fn (&const TestStruct) void); + assert(struct_info.Struct.defs[0].data.Fn.fn_type == fn (*const TestStruct) void); } const TestStruct = packed struct { @@ -204,9 +204,9 @@ const TestStruct = packed struct { fieldA: usize, fieldB: void, - fieldC: &Self, + fieldC: *Self, - pub fn foo(self: &const Self) void {} + pub fn foo(self: *const Self) void {} }; test "type info: function type info" { @@ -227,7 +227,7 @@ fn testFunction() void { const test_instance: TestStruct = undefined; const bound_fn_info = @typeInfo(@typeOf(test_instance.foo)); assert(TypeId(bound_fn_info) == TypeId.BoundFn); - assert(bound_fn_info.BoundFn.args[0].arg_type == &const TestStruct); + assert(bound_fn_info.BoundFn.args[0].arg_type == *const TestStruct); } fn foo(comptime a: usize, b: bool, args: ...) 
usize { diff --git a/test/cases/undefined.zig b/test/cases/undefined.zig index f1af10e532..83c620d211 100644 --- a/test/cases/undefined.zig +++ b/test/cases/undefined.zig @@ -27,12 +27,12 @@ test "init static array to undefined" { const Foo = struct { x: i32, - fn setFooXMethod(foo: &Foo) void { + fn setFooXMethod(foo: *Foo) void { foo.x = 3; } }; -fn setFooX(foo: &Foo) void { +fn setFooX(foo: *Foo) void { foo.x = 2; } diff --git a/test/cases/union.zig b/test/cases/union.zig index 005ad08e6a..bdcbbdb452 100644 --- a/test/cases/union.zig +++ b/test/cases/union.zig @@ -68,11 +68,11 @@ test "init union with runtime value" { assert(foo.int == 42); } -fn setFloat(foo: &Foo, x: f64) void { +fn setFloat(foo: *Foo, x: f64) void { foo.* = Foo{ .float = x }; } -fn setInt(foo: &Foo, x: i32) void { +fn setInt(foo: *Foo, x: i32) void { foo.* = Foo{ .int = x }; } @@ -108,7 +108,7 @@ fn doTest() void { assert(bar(Payload{ .A = 1234 }) == -10); } -fn bar(value: &const Payload) i32 { +fn bar(value: *const Payload) i32 { assert(Letter(value.*) == Letter.A); return switch (value.*) { Payload.A => |x| return x - 1244, @@ -147,7 +147,7 @@ test "union(enum(u32)) with specified and unspecified tag values" { comptime testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2{ .C = 123 }); } -fn testEnumWithSpecifiedAndUnspecifiedTagValues(x: &const MultipleChoice2) void { +fn testEnumWithSpecifiedAndUnspecifiedTagValues(x: *const MultipleChoice2) void { assert(u32(@TagType(MultipleChoice2)(x.*)) == 60); assert(1123 == switch (x.*) { MultipleChoice2.A => 1, @@ -163,7 +163,7 @@ fn testEnumWithSpecifiedAndUnspecifiedTagValues(x: &const MultipleChoice2) void } const ExternPtrOrInt = extern union { - ptr: &u8, + ptr: *u8, int: u64, }; test "extern union size" { @@ -171,7 +171,7 @@ test "extern union size" { } const PackedPtrOrInt = packed union { - ptr: &u8, + ptr: *u8, int: u64, }; test "extern union size" { @@ -206,7 +206,7 @@ test "cast union to tag type of union" { comptime testCastUnionToTagType(TheUnion{ .B = 1234 }); } -fn testCastUnionToTagType(x: &const TheUnion) void { +fn testCastUnionToTagType(x: *const TheUnion) void { assert(TheTag(x.*) == TheTag.B); } @@ -243,7 +243,7 @@ const TheUnion2 = union(enum) { Item2: i32, }; -fn assertIsTheUnion2Item1(value: &const TheUnion2) void { +fn assertIsTheUnion2Item1(value: *const TheUnion2) void { assert(value.* == TheUnion2.Item1); } diff --git a/test/compare_output.zig b/test/compare_output.zig index 0170477b8b..00ad4a709b 100644 --- a/test/compare_output.zig +++ b/test/compare_output.zig @@ -3,10 +3,10 @@ const std = @import("std"); const os = std.os; const tests = @import("tests.zig"); -pub fn addCases(cases: &tests.CompareOutputContext) void { +pub fn addCases(cases: *tests.CompareOutputContext) void { cases.addC("hello world with libc", \\const c = @cImport(@cInclude("stdio.h")); - \\export fn main(argc: c_int, argv: &&u8) c_int { + \\export fn main(argc: c_int, argv: **u8) c_int { \\ _ = c.puts(c"Hello, world!"); \\ return 0; \\} @@ -139,7 +139,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void { \\ @cInclude("stdio.h"); \\}); \\ - \\export fn main(argc: c_int, argv: &&u8) c_int { + \\export fn main(argc: c_int, argv: **u8) c_int { \\ if (is_windows) { \\ // we want actual \n, not \r\n \\ _ = c._setmode(1, c._O_BINARY); @@ -284,9 +284,9 @@ pub fn addCases(cases: &tests.CompareOutputContext) void { cases.addC("expose function pointer to C land", \\const c = @cImport(@cInclude("stdlib.h")); \\ - \\export fn compare_fn(a: ?&const c_void, b: ?&const 
c_void) c_int { - \\ const a_int = @ptrCast(&align(1) const i32, a ?? unreachable); - \\ const b_int = @ptrCast(&align(1) const i32, b ?? unreachable); + \\export fn compare_fn(a: ?*const c_void, b: ?*const c_void) c_int { + \\ const a_int = @ptrCast(*align(1) const i32, a ?? unreachable); + \\ const b_int = @ptrCast(*align(1) const i32, b ?? unreachable); \\ if (a_int.* < b_int.*) { \\ return -1; \\ } else if (a_int.* > b_int.*) { @@ -299,7 +299,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void { \\export fn main() c_int { \\ var array = []u32 { 1, 7, 3, 2, 0, 9, 4, 8, 6, 5 }; \\ - \\ c.qsort(@ptrCast(&c_void, &array[0]), c_ulong(array.len), @sizeOf(i32), compare_fn); + \\ c.qsort(@ptrCast(*c_void, &array[0]), c_ulong(array.len), @sizeOf(i32), compare_fn); \\ \\ for (array) |item, i| { \\ if (item != i) { @@ -324,7 +324,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) void { \\ @cInclude("stdio.h"); \\}); \\ - \\export fn main(argc: c_int, argv: &&u8) c_int { + \\export fn main(argc: c_int, argv: **u8) c_int { \\ if (is_windows) { \\ // we want actual \n, not \r\n \\ _ = c._setmode(1, c._O_BINARY); @@ -344,13 +344,13 @@ pub fn addCases(cases: &tests.CompareOutputContext) void { \\const Foo = struct { \\ field1: Bar, \\ - \\ fn method(a: &const Foo) bool { return true; } + \\ fn method(a: *const Foo) bool { return true; } \\}; \\ \\const Bar = struct { \\ field2: i32, \\ - \\ fn method(b: &const Bar) bool { return true; } + \\ fn method(b: *const Bar) bool { return true; } \\}; \\ \\pub fn main() void { diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 5215953d0a..1297ed29ab 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -1,6 +1,6 @@ const tests = @import("tests.zig"); -pub fn addCases(cases: &tests.CompileErrorContext) void { +pub fn addCases(cases: *tests.CompileErrorContext) void { cases.add( "invalid deref on switch target", \\comptime { @@ -109,7 +109,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { "@ptrCast discards const qualifier", \\export fn entry() void { \\ const x: i32 = 1234; - \\ const y = @ptrCast(&i32, &x); + \\ const y = @ptrCast(*i32, &x); \\} , ".tmp_source.zig:3:15: error: cast discards const qualifier", @@ -118,7 +118,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { cases.add( "comptime slice of undefined pointer non-zero len", \\export fn entry() void { - \\ const slice = (&i32)(undefined)[0..1]; + \\ const slice = (*i32)(undefined)[0..1]; \\} , ".tmp_source.zig:2:36: error: non-zero length slice of undefined pointer", @@ -126,7 +126,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { cases.add( "type checking function pointers", - \\fn a(b: fn (&const u8) void) void { + \\fn a(b: fn (*const u8) void) void { \\ b('a'); \\} \\fn c(d: u8) void { @@ -136,7 +136,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { \\ a(c); \\} , - ".tmp_source.zig:8:7: error: expected type 'fn(&const u8) void', found 'fn(u8) void'", + ".tmp_source.zig:8:7: error: expected type 'fn(*const u8) void', found 'fn(u8) void'", ); cases.add( @@ -594,15 +594,15 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { cases.add( "attempt to use 0 bit type in extern fn", - \\extern fn foo(ptr: extern fn(&void) void) void; + \\extern fn foo(ptr: extern fn(*void) void) void; \\ \\export fn entry() void { \\ foo(bar); \\} \\ - \\extern fn bar(x: &void) void { } + \\extern fn bar(x: *void) void { } , - ".tmp_source.zig:7:18: error: parameter of type '&void' has 0 bits; not allowed in 
function with calling convention 'ccc'", + ".tmp_source.zig:7:18: error: parameter of type '*void' has 0 bits; not allowed in function with calling convention 'ccc'", ); cases.add( @@ -911,10 +911,10 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { cases.add( "pointer to noreturn", - \\fn a() &noreturn {} + \\fn a() *noreturn {} \\export fn entry() void { _ = a(); } , - ".tmp_source.zig:1:9: error: pointer to noreturn not allowed", + ".tmp_source.zig:1:8: error: pointer to noreturn not allowed", ); cases.add( @@ -985,7 +985,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { \\ return a; \\} , - ".tmp_source.zig:3:12: error: expected type 'i32', found '&const u8'", + ".tmp_source.zig:3:12: error: expected type 'i32', found '*const u8'", ); cases.add( @@ -1446,7 +1446,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { cases.add( "switch expression - switch on pointer type with no else", - \\fn foo(x: &u8) void { + \\fn foo(x: *u8) void { \\ switch (x) { \\ &y => {}, \\ } @@ -1454,7 +1454,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { \\const y: u8 = 100; \\export fn entry() usize { return @sizeOf(@typeOf(foo)); } , - ".tmp_source.zig:2:5: error: else prong required when switching on type '&u8'", + ".tmp_source.zig:2:5: error: else prong required when switching on type '*u8'", ); cases.add( @@ -1501,10 +1501,10 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { "address of number literal", \\const x = 3; \\const y = &x; - \\fn foo() &const i32 { return y; } + \\fn foo() *const i32 { return y; } \\export fn entry() usize { return @sizeOf(@typeOf(foo)); } , - ".tmp_source.zig:3:30: error: expected type '&const i32', found '&const (integer literal)'", + ".tmp_source.zig:3:30: error: expected type '*const i32', found '*const (integer literal)'", ); cases.add( @@ -1529,10 +1529,10 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { \\ a: i32, \\ b: i32, \\ - \\ fn member_a(foo: &const Foo) i32 { + \\ fn member_a(foo: *const Foo) i32 { \\ return foo.a; \\ } - \\ fn member_b(foo: &const Foo) i32 { + \\ fn member_b(foo: *const Foo) i32 { \\ return foo.b; \\ } \\}; @@ -1543,7 +1543,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { \\ Foo.member_b, \\}; \\ - \\fn f(foo: &const Foo, index: usize) void { + \\fn f(foo: *const Foo, index: usize) void { \\ const result = members[index](); \\} \\ @@ -1692,11 +1692,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { cases.add( "assign null to non-nullable pointer", - \\const a: &u8 = null; + \\const a: *u8 = null; \\ \\export fn entry() usize { return @sizeOf(@typeOf(a)); } , - ".tmp_source.zig:1:16: error: expected type '&u8', found '(null)'", + ".tmp_source.zig:1:16: error: expected type '*u8', found '(null)'", ); cases.add( @@ -1806,7 +1806,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { \\ One: void, \\ Two: i32, \\}; - \\fn bad_eql_2(a: &const EnumWithData, b: &const EnumWithData) bool { + \\fn bad_eql_2(a: *const EnumWithData, b: *const EnumWithData) bool { \\ return a.* == b.*; \\} \\ @@ -2011,9 +2011,9 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { cases.add( "wrong number of arguments for method fn call", \\const Foo = struct { - \\ fn method(self: &const Foo, a: i32) void {} + \\ fn method(self: *const Foo, a: i32) void {} \\}; - \\fn f(foo: &const Foo) void { + \\fn f(foo: *const Foo) void { \\ \\ foo.method(1, 2); \\} @@ -2062,7 +2062,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { cases.add( "misspelled type 
with pointer only reference", \\const JasonHM = u8; - \\const JasonList = &JsonNode; + \\const JasonList = *JsonNode; \\ \\const JsonOA = union(enum) { \\ JSONArray: JsonList, @@ -2113,16 +2113,16 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { \\ derp.init(); \\} , - ".tmp_source.zig:14:5: error: expected type 'i32', found '&const Foo'", + ".tmp_source.zig:14:5: error: expected type 'i32', found '*const Foo'", ); cases.add( "method call with first arg type wrong container", \\pub const List = struct { \\ len: usize, - \\ allocator: &Allocator, + \\ allocator: *Allocator, \\ - \\ pub fn init(allocator: &Allocator) List { + \\ pub fn init(allocator: *Allocator) List { \\ return List { \\ .len = 0, \\ .allocator = allocator, @@ -2143,7 +2143,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { \\ x.init(); \\} , - ".tmp_source.zig:23:5: error: expected type '&Allocator', found '&List'", + ".tmp_source.zig:23:5: error: expected type '*Allocator', found '*List'", ); cases.add( @@ -2308,17 +2308,17 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { \\ c: u2, \\}; \\ - \\fn foo(bit_field: &const BitField) u3 { + \\fn foo(bit_field: *const BitField) u3 { \\ return bar(&bit_field.b); \\} \\ - \\fn bar(x: &const u3) u3 { + \\fn bar(x: *const u3) u3 { \\ return x.*; \\} \\ \\export fn entry() usize { return @sizeOf(@typeOf(foo)); } , - ".tmp_source.zig:8:26: error: expected type '&const u3', found '&align(1:3:6) const u3'", + ".tmp_source.zig:8:26: error: expected type '*const u3', found '*align(1:3:6) const u3'", ); cases.add( @@ -2441,13 +2441,13 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { \\ const b = &a; \\ return ptrEql(b, b); \\} - \\fn ptrEql(a: &[]const u8, b: &[]const u8) bool { + \\fn ptrEql(a: *[]const u8, b: *[]const u8) bool { \\ return true; \\} \\ \\export fn entry() usize { return @sizeOf(@typeOf(foo)); } , - ".tmp_source.zig:4:19: error: expected type '&[]const u8', found '&const []const u8'", + ".tmp_source.zig:4:19: error: expected type '*[]const u8', found '*const []const u8'", ); cases.addCase(x: { @@ -2493,7 +2493,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { cases.add( "ptrcast to non-pointer", - \\export fn entry(a: &i32) usize { + \\export fn entry(a: *i32) usize { \\ return @ptrCast(usize, a); \\} , @@ -2542,16 +2542,16 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { "int to ptr of 0 bits", \\export fn foo() void { \\ var x: usize = 0x1000; - \\ var y: &void = @intToPtr(&void, x); + \\ var y: *void = @intToPtr(*void, x); \\} , - ".tmp_source.zig:3:31: error: type '&void' has 0 bits and cannot store information", + ".tmp_source.zig:3:30: error: type '*void' has 0 bits and cannot store information", ); cases.add( "@fieldParentPtr - non struct", \\const Foo = i32; - \\export fn foo(a: &i32) &Foo { + \\export fn foo(a: *i32) *Foo { \\ return @fieldParentPtr(Foo, "a", a); \\} , @@ -2563,7 +2563,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { \\const Foo = extern struct { \\ derp: i32, \\}; - \\export fn foo(a: &i32) &Foo { + \\export fn foo(a: *i32) *Foo { \\ return @fieldParentPtr(Foo, "a", a); \\} , @@ -2575,7 +2575,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { \\const Foo = extern struct { \\ a: i32, \\}; - \\export fn foo(a: i32) &Foo { + \\export fn foo(a: i32) *Foo { \\ return @fieldParentPtr(Foo, "a", a); \\} , @@ -2591,7 +2591,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { \\const foo = Foo { .a = 1, .b = 2, }; \\ \\comptime { - \\ const 
field_ptr = @intToPtr(&i32, 0x1234); + \\ const field_ptr = @intToPtr(*i32, 0x1234); \\ const another_foo_ptr = @fieldParentPtr(Foo, "b", field_ptr); \\} , @@ -2682,7 +2682,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { cases.add( "returning address of local variable - simple", - \\export fn foo() &i32 { + \\export fn foo() *i32 { \\ var a: i32 = undefined; \\ return &a; \\} @@ -2692,7 +2692,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { cases.add( "returning address of local variable - phi", - \\export fn foo(c: bool) &i32 { + \\export fn foo(c: bool) *i32 { \\ var a: i32 = undefined; \\ var b: i32 = undefined; \\ return if (c) &a else &b; @@ -3086,11 +3086,11 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { \\ bar(&foo.b); \\} \\ - \\fn bar(x: &u32) void { + \\fn bar(x: *u32) void { \\ x.* += 1; \\} , - ".tmp_source.zig:8:13: error: expected type '&u32', found '&align(1) u32'", + ".tmp_source.zig:8:13: error: expected type '*u32', found '*align(1) u32'", ); cases.add( @@ -3117,13 +3117,13 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { "increase pointer alignment in @ptrCast", \\export fn entry() u32 { \\ var bytes: [4]u8 = []u8{0x01, 0x02, 0x03, 0x04}; - \\ const ptr = @ptrCast(&u32, &bytes[0]); + \\ const ptr = @ptrCast(*u32, &bytes[0]); \\ return ptr.*; \\} , ".tmp_source.zig:3:17: error: cast increases pointer alignment", - ".tmp_source.zig:3:38: note: '&u8' has alignment 1", - ".tmp_source.zig:3:27: note: '&u32' has alignment 4", + ".tmp_source.zig:3:38: note: '*u8' has alignment 1", + ".tmp_source.zig:3:26: note: '*u32' has alignment 4", ); cases.add( @@ -3169,7 +3169,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { \\ return x == 5678; \\} , - ".tmp_source.zig:4:32: error: expected type '&i32', found '&align(1) i32'", + ".tmp_source.zig:4:32: error: expected type '*i32', found '*align(1) i32'", ); cases.add( @@ -3198,20 +3198,20 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { cases.add( "wrong pointer implicitly casted to pointer to @OpaqueType()", \\const Derp = @OpaqueType(); - \\extern fn bar(d: &Derp) void; + \\extern fn bar(d: *Derp) void; \\export fn foo() void { \\ var x = u8(1); - \\ bar(@ptrCast(&c_void, &x)); + \\ bar(@ptrCast(*c_void, &x)); \\} , - ".tmp_source.zig:5:9: error: expected type '&Derp', found '&c_void'", + ".tmp_source.zig:5:9: error: expected type '*Derp', found '*c_void'", ); cases.add( "non-const variables of things that require const variables", \\const Opaque = @OpaqueType(); \\ - \\export fn entry(opaque: &Opaque) void { + \\export fn entry(opaque: *Opaque) void { \\ var m2 = &2; \\ const y: u32 = m2.*; \\ @@ -3229,10 +3229,10 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { \\} \\ \\const Foo = struct { - \\ fn bar(self: &const Foo) void {} + \\ fn bar(self: *const Foo) void {} \\}; , - ".tmp_source.zig:4:4: error: variable of type '&const (integer literal)' must be const or comptime", + ".tmp_source.zig:4:4: error: variable of type '*const (integer literal)' must be const or comptime", ".tmp_source.zig:7:4: error: variable of type '(undefined)' must be const or comptime", ".tmp_source.zig:8:4: error: variable of type '(integer literal)' must be const or comptime", ".tmp_source.zig:9:4: error: variable of type '(float literal)' must be const or comptime", @@ -3241,7 +3241,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { ".tmp_source.zig:12:4: error: variable of type 'Opaque' must be const or comptime", ".tmp_source.zig:13:4: error: variable 
of type 'type' must be const or comptime", ".tmp_source.zig:14:4: error: variable of type '(namespace)' must be const or comptime", - ".tmp_source.zig:15:4: error: variable of type '(bound fn(&const Foo) void)' must be const or comptime", + ".tmp_source.zig:15:4: error: variable of type '(bound fn(*const Foo) void)' must be const or comptime", ".tmp_source.zig:17:4: error: unreachable code", ); @@ -3397,14 +3397,14 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { \\ \\export fn entry() bool { \\ var x: i32 = 1; - \\ return bar(@ptrCast(&MyType, &x)); + \\ return bar(@ptrCast(*MyType, &x)); \\} \\ - \\fn bar(x: &MyType) bool { + \\fn bar(x: *MyType) bool { \\ return x.blah; \\} , - ".tmp_source.zig:9:13: error: type '&MyType' does not support field access", + ".tmp_source.zig:9:13: error: type '*MyType' does not support field access", ); cases.add( @@ -3535,9 +3535,9 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { \\export fn entry() void { \\ foo("hello",); \\} - \\pub extern fn foo(format: &const u8, ...) void; + \\pub extern fn foo(format: *const u8, ...) void; , - ".tmp_source.zig:2:9: error: expected type '&const u8', found '[5]u8'", + ".tmp_source.zig:2:9: error: expected type '*const u8', found '[5]u8'", ); cases.add( @@ -3902,7 +3902,7 @@ pub fn addCases(cases: &tests.CompileErrorContext) void { \\ const a = Payload { .A = 1234 }; \\ foo(a); \\} - \\fn foo(a: &const Payload) void { + \\fn foo(a: *const Payload) void { \\ switch (a.*) { \\ Payload.A => {}, \\ else => unreachable, diff --git a/test/gen_h.zig b/test/gen_h.zig index 2def39bed7..9559c3395c 100644 --- a/test/gen_h.zig +++ b/test/gen_h.zig @@ -1,6 +1,6 @@ const tests = @import("tests.zig"); -pub fn addCases(cases: &tests.GenHContext) void { +pub fn addCases(cases: *tests.GenHContext) void { cases.add("declare enum", \\const Foo = extern enum { A, B, C }; \\export fn entry(foo: Foo) void { } diff --git a/test/runtime_safety.zig b/test/runtime_safety.zig index 1fea6347ab..71d1d68764 100644 --- a/test/runtime_safety.zig +++ b/test/runtime_safety.zig @@ -1,6 +1,6 @@ const tests = @import("tests.zig"); -pub fn addCases(cases: &tests.CompareOutputContext) void { +pub fn addCases(cases: *tests.CompareOutputContext) void { cases.addRuntimeSafety("calling panic", \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) noreturn { \\ @import("std").os.exit(126); diff --git a/test/standalone/brace_expansion/build.zig b/test/standalone/brace_expansion/build.zig index 7752f599df..64f3c08583 100644 --- a/test/standalone/brace_expansion/build.zig +++ b/test/standalone/brace_expansion/build.zig @@ -1,6 +1,6 @@ const Builder = @import("std").build.Builder; -pub fn build(b: &Builder) void { +pub fn build(b: *Builder) void { const main = b.addTest("main.zig"); main.setBuildMode(b.standardReleaseOptions()); diff --git a/test/standalone/brace_expansion/main.zig b/test/standalone/brace_expansion/main.zig index c96cc2cbb9..ccb4f6dd45 100644 --- a/test/standalone/brace_expansion/main.zig +++ b/test/standalone/brace_expansion/main.zig @@ -14,7 +14,7 @@ const Token = union(enum) { Eof, }; -var global_allocator: &mem.Allocator = undefined; +var global_allocator: *mem.Allocator = undefined; fn tokenize(input: []const u8) !ArrayList(Token) { const State = enum { @@ -73,7 +73,7 @@ const ParseError = error{ OutOfMemory, }; -fn parse(tokens: &const ArrayList(Token), token_index: &usize) ParseError!Node { +fn parse(tokens: *const ArrayList(Token), token_index: *usize) ParseError!Node { const first_token = 
tokens.items[token_index.*]; token_index.* += 1; @@ -109,7 +109,7 @@ fn parse(tokens: &const ArrayList(Token), token_index: &usize) ParseError!Node { } } -fn expandString(input: []const u8, output: &Buffer) !void { +fn expandString(input: []const u8, output: *Buffer) !void { const tokens = try tokenize(input); if (tokens.len == 1) { return output.resize(0); @@ -139,7 +139,7 @@ fn expandString(input: []const u8, output: &Buffer) !void { const ExpandNodeError = error{OutOfMemory}; -fn expandNode(node: &const Node, output: &ArrayList(Buffer)) ExpandNodeError!void { +fn expandNode(node: *const Node, output: *ArrayList(Buffer)) ExpandNodeError!void { assert(output.len == 0); switch (node.*) { Node.Scalar => |scalar| { diff --git a/test/standalone/issue_339/build.zig b/test/standalone/issue_339/build.zig index f3ab327006..733b3729c1 100644 --- a/test/standalone/issue_339/build.zig +++ b/test/standalone/issue_339/build.zig @@ -1,6 +1,6 @@ const Builder = @import("std").build.Builder; -pub fn build(b: &Builder) void { +pub fn build(b: *Builder) void { const obj = b.addObject("test", "test.zig"); const test_step = b.step("test", "Test the program"); diff --git a/test/standalone/issue_339/test.zig b/test/standalone/issue_339/test.zig index da0747b8e6..f4068dcfac 100644 --- a/test/standalone/issue_339/test.zig +++ b/test/standalone/issue_339/test.zig @@ -1,5 +1,5 @@ const StackTrace = @import("builtin").StackTrace; -pub fn panic(msg: []const u8, stack_trace: ?&StackTrace) noreturn { +pub fn panic(msg: []const u8, stack_trace: ?*StackTrace) noreturn { @breakpoint(); while (true) {} } diff --git a/test/standalone/issue_794/build.zig b/test/standalone/issue_794/build.zig index 4f5dcd7ff4..06c37a83a3 100644 --- a/test/standalone/issue_794/build.zig +++ b/test/standalone/issue_794/build.zig @@ -1,6 +1,6 @@ const Builder = @import("std").build.Builder; -pub fn build(b: &Builder) void { +pub fn build(b: *Builder) void { const test_artifact = b.addTest("main.zig"); test_artifact.addIncludeDir("a_directory"); diff --git a/test/standalone/pkg_import/build.zig b/test/standalone/pkg_import/build.zig index bb9416d3c4..e0b3885dc3 100644 --- a/test/standalone/pkg_import/build.zig +++ b/test/standalone/pkg_import/build.zig @@ -1,6 +1,6 @@ const Builder = @import("std").build.Builder; -pub fn build(b: &Builder) void { +pub fn build(b: *Builder) void { const exe = b.addExecutable("test", "test.zig"); exe.addPackagePath("my_pkg", "pkg.zig"); diff --git a/test/standalone/use_alias/build.zig b/test/standalone/use_alias/build.zig index ecbba297d8..c700d43db9 100644 --- a/test/standalone/use_alias/build.zig +++ b/test/standalone/use_alias/build.zig @@ -1,6 +1,6 @@ const Builder = @import("std").build.Builder; -pub fn build(b: &Builder) void { +pub fn build(b: *Builder) void { b.addCIncludePath("."); const main = b.addTest("main.zig"); diff --git a/test/tests.zig b/test/tests.zig index b59b954122..cc562331fe 100644 --- a/test/tests.zig +++ b/test/tests.zig @@ -47,7 +47,7 @@ const test_targets = []TestTarget{ const max_stdout_size = 1 * 1024 * 1024; // 1 MB -pub fn addCompareOutputTests(b: &build.Builder, test_filter: ?[]const u8) &build.Step { +pub fn addCompareOutputTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step { const cases = b.allocator.create(CompareOutputContext) catch unreachable; cases.* = CompareOutputContext{ .b = b, @@ -61,7 +61,7 @@ pub fn addCompareOutputTests(b: &build.Builder, test_filter: ?[]const u8) &build return cases.step; } -pub fn addRuntimeSafetyTests(b: &build.Builder, test_filter: 
?[]const u8) &build.Step { +pub fn addRuntimeSafetyTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step { const cases = b.allocator.create(CompareOutputContext) catch unreachable; cases.* = CompareOutputContext{ .b = b, @@ -75,7 +75,7 @@ pub fn addRuntimeSafetyTests(b: &build.Builder, test_filter: ?[]const u8) &build return cases.step; } -pub fn addCompileErrorTests(b: &build.Builder, test_filter: ?[]const u8) &build.Step { +pub fn addCompileErrorTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step { const cases = b.allocator.create(CompileErrorContext) catch unreachable; cases.* = CompileErrorContext{ .b = b, @@ -89,7 +89,7 @@ pub fn addCompileErrorTests(b: &build.Builder, test_filter: ?[]const u8) &build. return cases.step; } -pub fn addBuildExampleTests(b: &build.Builder, test_filter: ?[]const u8) &build.Step { +pub fn addBuildExampleTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step { const cases = b.allocator.create(BuildExamplesContext) catch unreachable; cases.* = BuildExamplesContext{ .b = b, @@ -103,7 +103,7 @@ pub fn addBuildExampleTests(b: &build.Builder, test_filter: ?[]const u8) &build. return cases.step; } -pub fn addAssembleAndLinkTests(b: &build.Builder, test_filter: ?[]const u8) &build.Step { +pub fn addAssembleAndLinkTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step { const cases = b.allocator.create(CompareOutputContext) catch unreachable; cases.* = CompareOutputContext{ .b = b, @@ -117,7 +117,7 @@ pub fn addAssembleAndLinkTests(b: &build.Builder, test_filter: ?[]const u8) &bui return cases.step; } -pub fn addTranslateCTests(b: &build.Builder, test_filter: ?[]const u8) &build.Step { +pub fn addTranslateCTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step { const cases = b.allocator.create(TranslateCContext) catch unreachable; cases.* = TranslateCContext{ .b = b, @@ -131,7 +131,7 @@ pub fn addTranslateCTests(b: &build.Builder, test_filter: ?[]const u8) &build.St return cases.step; } -pub fn addGenHTests(b: &build.Builder, test_filter: ?[]const u8) &build.Step { +pub fn addGenHTests(b: *build.Builder, test_filter: ?[]const u8) *build.Step { const cases = b.allocator.create(GenHContext) catch unreachable; cases.* = GenHContext{ .b = b, @@ -145,7 +145,7 @@ pub fn addGenHTests(b: &build.Builder, test_filter: ?[]const u8) &build.Step { return cases.step; } -pub fn addPkgTests(b: &build.Builder, test_filter: ?[]const u8, root_src: []const u8, name: []const u8, desc: []const u8, with_lldb: bool) &build.Step { +pub fn addPkgTests(b: *build.Builder, test_filter: ?[]const u8, root_src: []const u8, name: []const u8, desc: []const u8, with_lldb: bool) *build.Step { const step = b.step(b.fmt("test-{}", name), desc); for (test_targets) |test_target| { const is_native = (test_target.os == builtin.os and test_target.arch == builtin.arch); @@ -193,8 +193,8 @@ pub fn addPkgTests(b: &build.Builder, test_filter: ?[]const u8, root_src: []cons } pub const CompareOutputContext = struct { - b: &build.Builder, - step: &build.Step, + b: *build.Builder, + step: *build.Step, test_index: usize, test_filter: ?[]const u8, @@ -217,28 +217,28 @@ pub const CompareOutputContext = struct { source: []const u8, }; - pub fn addSourceFile(self: &TestCase, filename: []const u8, source: []const u8) void { + pub fn addSourceFile(self: *TestCase, filename: []const u8, source: []const u8) void { self.sources.append(SourceFile{ .filename = filename, .source = source, }) catch unreachable; } - pub fn setCommandLineArgs(self: &TestCase, args: []const []const u8) 
void { + pub fn setCommandLineArgs(self: *TestCase, args: []const []const u8) void { self.cli_args = args; } }; const RunCompareOutputStep = struct { step: build.Step, - context: &CompareOutputContext, + context: *CompareOutputContext, exe_path: []const u8, name: []const u8, expected_output: []const u8, test_index: usize, cli_args: []const []const u8, - pub fn create(context: &CompareOutputContext, exe_path: []const u8, name: []const u8, expected_output: []const u8, cli_args: []const []const u8) &RunCompareOutputStep { + pub fn create(context: *CompareOutputContext, exe_path: []const u8, name: []const u8, expected_output: []const u8, cli_args: []const []const u8) *RunCompareOutputStep { const allocator = context.b.allocator; const ptr = allocator.create(RunCompareOutputStep) catch unreachable; ptr.* = RunCompareOutputStep{ @@ -254,7 +254,7 @@ pub const CompareOutputContext = struct { return ptr; } - fn make(step: &build.Step) !void { + fn make(step: *build.Step) !void { const self = @fieldParentPtr(RunCompareOutputStep, "step", step); const b = self.context.b; @@ -321,12 +321,12 @@ pub const CompareOutputContext = struct { const RuntimeSafetyRunStep = struct { step: build.Step, - context: &CompareOutputContext, + context: *CompareOutputContext, exe_path: []const u8, name: []const u8, test_index: usize, - pub fn create(context: &CompareOutputContext, exe_path: []const u8, name: []const u8) &RuntimeSafetyRunStep { + pub fn create(context: *CompareOutputContext, exe_path: []const u8, name: []const u8) *RuntimeSafetyRunStep { const allocator = context.b.allocator; const ptr = allocator.create(RuntimeSafetyRunStep) catch unreachable; ptr.* = RuntimeSafetyRunStep{ @@ -340,7 +340,7 @@ pub const CompareOutputContext = struct { return ptr; } - fn make(step: &build.Step) !void { + fn make(step: *build.Step) !void { const self = @fieldParentPtr(RuntimeSafetyRunStep, "step", step); const b = self.context.b; @@ -382,7 +382,7 @@ pub const CompareOutputContext = struct { } }; - pub fn createExtra(self: &CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8, special: Special) TestCase { + pub fn createExtra(self: *CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8, special: Special) TestCase { var tc = TestCase{ .name = name, .sources = ArrayList(TestCase.SourceFile).init(self.b.allocator), @@ -396,32 +396,32 @@ pub const CompareOutputContext = struct { return tc; } - pub fn create(self: &CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) TestCase { + pub fn create(self: *CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) TestCase { return createExtra(self, name, source, expected_output, Special.None); } - pub fn addC(self: &CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) void { + pub fn addC(self: *CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) void { var tc = self.create(name, source, expected_output); tc.link_libc = true; self.addCase(tc); } - pub fn add(self: &CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) void { + pub fn add(self: *CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) void { const tc = self.create(name, source, expected_output); self.addCase(tc); } - pub fn addAsm(self: &CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) void { + pub fn 
addAsm(self: *CompareOutputContext, name: []const u8, source: []const u8, expected_output: []const u8) void { const tc = self.createExtra(name, source, expected_output, Special.Asm); self.addCase(tc); } - pub fn addRuntimeSafety(self: &CompareOutputContext, name: []const u8, source: []const u8) void { + pub fn addRuntimeSafety(self: *CompareOutputContext, name: []const u8, source: []const u8) void { const tc = self.createExtra(name, source, undefined, Special.RuntimeSafety); self.addCase(tc); } - pub fn addCase(self: &CompareOutputContext, case: &const TestCase) void { + pub fn addCase(self: *CompareOutputContext, case: *const TestCase) void { const b = self.b; const root_src = os.path.join(b.allocator, b.cache_root, case.sources.items[0].filename) catch unreachable; @@ -504,8 +504,8 @@ pub const CompareOutputContext = struct { }; pub const CompileErrorContext = struct { - b: &build.Builder, - step: &build.Step, + b: *build.Builder, + step: *build.Step, test_index: usize, test_filter: ?[]const u8, @@ -521,27 +521,27 @@ pub const CompileErrorContext = struct { source: []const u8, }; - pub fn addSourceFile(self: &TestCase, filename: []const u8, source: []const u8) void { + pub fn addSourceFile(self: *TestCase, filename: []const u8, source: []const u8) void { self.sources.append(SourceFile{ .filename = filename, .source = source, }) catch unreachable; } - pub fn addExpectedError(self: &TestCase, text: []const u8) void { + pub fn addExpectedError(self: *TestCase, text: []const u8) void { self.expected_errors.append(text) catch unreachable; } }; const CompileCmpOutputStep = struct { step: build.Step, - context: &CompileErrorContext, + context: *CompileErrorContext, name: []const u8, test_index: usize, - case: &const TestCase, + case: *const TestCase, build_mode: Mode, - pub fn create(context: &CompileErrorContext, name: []const u8, case: &const TestCase, build_mode: Mode) &CompileCmpOutputStep { + pub fn create(context: *CompileErrorContext, name: []const u8, case: *const TestCase, build_mode: Mode) *CompileCmpOutputStep { const allocator = context.b.allocator; const ptr = allocator.create(CompileCmpOutputStep) catch unreachable; ptr.* = CompileCmpOutputStep{ @@ -556,7 +556,7 @@ pub const CompileErrorContext = struct { return ptr; } - fn make(step: &build.Step) !void { + fn make(step: *build.Step) !void { const self = @fieldParentPtr(CompileCmpOutputStep, "step", step); const b = self.context.b; @@ -661,7 +661,7 @@ pub const CompileErrorContext = struct { warn("\n"); } - pub fn create(self: &CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) &TestCase { + pub fn create(self: *CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) *TestCase { const tc = self.b.allocator.create(TestCase) catch unreachable; tc.* = TestCase{ .name = name, @@ -678,24 +678,24 @@ pub const CompileErrorContext = struct { return tc; } - pub fn addC(self: &CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) void { + pub fn addC(self: *CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) void { var tc = self.create(name, source, expected_lines); tc.link_libc = true; self.addCase(tc); } - pub fn addExe(self: &CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) void { + pub fn addExe(self: *CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) 
void { var tc = self.create(name, source, expected_lines); tc.is_exe = true; self.addCase(tc); } - pub fn add(self: &CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) void { + pub fn add(self: *CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) void { const tc = self.create(name, source, expected_lines); self.addCase(tc); } - pub fn addCase(self: &CompileErrorContext, case: &const TestCase) void { + pub fn addCase(self: *CompileErrorContext, case: *const TestCase) void { const b = self.b; for ([]Mode{ @@ -720,20 +720,20 @@ pub const CompileErrorContext = struct { }; pub const BuildExamplesContext = struct { - b: &build.Builder, - step: &build.Step, + b: *build.Builder, + step: *build.Step, test_index: usize, test_filter: ?[]const u8, - pub fn addC(self: &BuildExamplesContext, root_src: []const u8) void { + pub fn addC(self: *BuildExamplesContext, root_src: []const u8) void { self.addAllArgs(root_src, true); } - pub fn add(self: &BuildExamplesContext, root_src: []const u8) void { + pub fn add(self: *BuildExamplesContext, root_src: []const u8) void { self.addAllArgs(root_src, false); } - pub fn addBuildFile(self: &BuildExamplesContext, build_file: []const u8) void { + pub fn addBuildFile(self: *BuildExamplesContext, build_file: []const u8) void { const b = self.b; const annotated_case_name = b.fmt("build {} (Debug)", build_file); @@ -763,7 +763,7 @@ pub const BuildExamplesContext = struct { self.step.dependOn(&log_step.step); } - pub fn addAllArgs(self: &BuildExamplesContext, root_src: []const u8, link_libc: bool) void { + pub fn addAllArgs(self: *BuildExamplesContext, root_src: []const u8, link_libc: bool) void { const b = self.b; for ([]Mode{ @@ -792,8 +792,8 @@ pub const BuildExamplesContext = struct { }; pub const TranslateCContext = struct { - b: &build.Builder, - step: &build.Step, + b: *build.Builder, + step: *build.Step, test_index: usize, test_filter: ?[]const u8, @@ -808,26 +808,26 @@ pub const TranslateCContext = struct { source: []const u8, }; - pub fn addSourceFile(self: &TestCase, filename: []const u8, source: []const u8) void { + pub fn addSourceFile(self: *TestCase, filename: []const u8, source: []const u8) void { self.sources.append(SourceFile{ .filename = filename, .source = source, }) catch unreachable; } - pub fn addExpectedLine(self: &TestCase, text: []const u8) void { + pub fn addExpectedLine(self: *TestCase, text: []const u8) void { self.expected_lines.append(text) catch unreachable; } }; const TranslateCCmpOutputStep = struct { step: build.Step, - context: &TranslateCContext, + context: *TranslateCContext, name: []const u8, test_index: usize, - case: &const TestCase, + case: *const TestCase, - pub fn create(context: &TranslateCContext, name: []const u8, case: &const TestCase) &TranslateCCmpOutputStep { + pub fn create(context: *TranslateCContext, name: []const u8, case: *const TestCase) *TranslateCCmpOutputStep { const allocator = context.b.allocator; const ptr = allocator.create(TranslateCCmpOutputStep) catch unreachable; ptr.* = TranslateCCmpOutputStep{ @@ -841,7 +841,7 @@ pub const TranslateCContext = struct { return ptr; } - fn make(step: &build.Step) !void { + fn make(step: *build.Step) !void { const self = @fieldParentPtr(TranslateCCmpOutputStep, "step", step); const b = self.context.b; @@ -935,7 +935,7 @@ pub const TranslateCContext = struct { warn("\n"); } - pub fn create(self: &TranslateCContext, allow_warnings: bool, filename: []const u8, name: []const u8, source: []const u8, expected_lines: ...) 
&TestCase { + pub fn create(self: *TranslateCContext, allow_warnings: bool, filename: []const u8, name: []const u8, source: []const u8, expected_lines: ...) *TestCase { const tc = self.b.allocator.create(TestCase) catch unreachable; tc.* = TestCase{ .name = name, @@ -951,22 +951,22 @@ pub const TranslateCContext = struct { return tc; } - pub fn add(self: &TranslateCContext, name: []const u8, source: []const u8, expected_lines: ...) void { + pub fn add(self: *TranslateCContext, name: []const u8, source: []const u8, expected_lines: ...) void { const tc = self.create(false, "source.h", name, source, expected_lines); self.addCase(tc); } - pub fn addC(self: &TranslateCContext, name: []const u8, source: []const u8, expected_lines: ...) void { + pub fn addC(self: *TranslateCContext, name: []const u8, source: []const u8, expected_lines: ...) void { const tc = self.create(false, "source.c", name, source, expected_lines); self.addCase(tc); } - pub fn addAllowWarnings(self: &TranslateCContext, name: []const u8, source: []const u8, expected_lines: ...) void { + pub fn addAllowWarnings(self: *TranslateCContext, name: []const u8, source: []const u8, expected_lines: ...) void { const tc = self.create(true, "source.h", name, source, expected_lines); self.addCase(tc); } - pub fn addCase(self: &TranslateCContext, case: &const TestCase) void { + pub fn addCase(self: *TranslateCContext, case: *const TestCase) void { const b = self.b; const annotated_case_name = fmt.allocPrint(self.b.allocator, "translate-c {}", case.name) catch unreachable; @@ -986,8 +986,8 @@ pub const TranslateCContext = struct { }; pub const GenHContext = struct { - b: &build.Builder, - step: &build.Step, + b: *build.Builder, + step: *build.Step, test_index: usize, test_filter: ?[]const u8, @@ -1001,27 +1001,27 @@ pub const GenHContext = struct { source: []const u8, }; - pub fn addSourceFile(self: &TestCase, filename: []const u8, source: []const u8) void { + pub fn addSourceFile(self: *TestCase, filename: []const u8, source: []const u8) void { self.sources.append(SourceFile{ .filename = filename, .source = source, }) catch unreachable; } - pub fn addExpectedLine(self: &TestCase, text: []const u8) void { + pub fn addExpectedLine(self: *TestCase, text: []const u8) void { self.expected_lines.append(text) catch unreachable; } }; const GenHCmpOutputStep = struct { step: build.Step, - context: &GenHContext, + context: *GenHContext, h_path: []const u8, name: []const u8, test_index: usize, - case: &const TestCase, + case: *const TestCase, - pub fn create(context: &GenHContext, h_path: []const u8, name: []const u8, case: &const TestCase) &GenHCmpOutputStep { + pub fn create(context: *GenHContext, h_path: []const u8, name: []const u8, case: *const TestCase) *GenHCmpOutputStep { const allocator = context.b.allocator; const ptr = allocator.create(GenHCmpOutputStep) catch unreachable; ptr.* = GenHCmpOutputStep{ @@ -1036,7 +1036,7 @@ pub const GenHContext = struct { return ptr; } - fn make(step: &build.Step) !void { + fn make(step: *build.Step) !void { const self = @fieldParentPtr(GenHCmpOutputStep, "step", step); const b = self.context.b; @@ -1069,7 +1069,7 @@ pub const GenHContext = struct { warn("\n"); } - pub fn create(self: &GenHContext, filename: []const u8, name: []const u8, source: []const u8, expected_lines: ...) &TestCase { + pub fn create(self: *GenHContext, filename: []const u8, name: []const u8, source: []const u8, expected_lines: ...) 
*TestCase { const tc = self.b.allocator.create(TestCase) catch unreachable; tc.* = TestCase{ .name = name, @@ -1084,12 +1084,12 @@ pub const GenHContext = struct { return tc; } - pub fn add(self: &GenHContext, name: []const u8, source: []const u8, expected_lines: ...) void { + pub fn add(self: *GenHContext, name: []const u8, source: []const u8, expected_lines: ...) void { const tc = self.create("test.zig", name, source, expected_lines); self.addCase(tc); } - pub fn addCase(self: &GenHContext, case: &const TestCase) void { + pub fn addCase(self: *GenHContext, case: *const TestCase) void { const b = self.b; const root_src = os.path.join(b.allocator, b.cache_root, case.sources.items[0].filename) catch unreachable; diff --git a/test/translate_c.zig b/test/translate_c.zig index 4cf1e047fa..9a07bc343d 100644 --- a/test/translate_c.zig +++ b/test/translate_c.zig @@ -1,6 +1,6 @@ const tests = @import("tests.zig"); -pub fn addCases(cases: &tests.TranslateCContext) void { +pub fn addCases(cases: *tests.TranslateCContext) void { cases.add("double define struct", \\typedef struct Bar Bar; \\typedef struct Foo Foo; @@ -14,11 +14,11 @@ pub fn addCases(cases: &tests.TranslateCContext) void { \\}; , \\pub const struct_Foo = extern struct { - \\ a: ?&Foo, + \\ a: ?*Foo, \\}; \\pub const Foo = struct_Foo; \\pub const struct_Bar = extern struct { - \\ a: ?&Foo, + \\ a: ?*Foo, \\}; ); @@ -99,7 +99,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void { cases.add("restrict -> noalias", \\void foo(void *restrict bar, void *restrict); , - \\pub extern fn foo(noalias bar: ?&c_void, noalias arg1: ?&c_void) void; + \\pub extern fn foo(noalias bar: ?*c_void, noalias arg1: ?*c_void) void; ); cases.add("simple struct", @@ -110,7 +110,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void { , \\const struct_Foo = extern struct { \\ x: c_int, - \\ y: ?&u8, + \\ y: ?*u8, \\}; , \\pub const Foo = struct_Foo; @@ -141,7 +141,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void { , \\pub const BarB = enum_Bar.B; , - \\pub extern fn func(a: ?&struct_Foo, b: ?&(?&enum_Bar)) void; + \\pub extern fn func(a: ?*struct_Foo, b: ?*(?*enum_Bar)) void; , \\pub const Foo = struct_Foo; , @@ -151,7 +151,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void { cases.add("constant size array", \\void func(int array[20]); , - \\pub extern fn func(array: ?&c_int) void; + \\pub extern fn func(array: ?*c_int) void; ); cases.add("self referential struct with function pointer", @@ -160,7 +160,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void { \\}; , \\pub const struct_Foo = extern struct { - \\ derp: ?extern fn(?&struct_Foo) void, + \\ derp: ?extern fn(?*struct_Foo) void, \\}; , \\pub const Foo = struct_Foo; @@ -172,7 +172,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void { , \\pub const struct_Foo = @OpaqueType(); , - \\pub extern fn some_func(foo: ?&struct_Foo, x: c_int) ?&struct_Foo; + \\pub extern fn some_func(foo: ?*struct_Foo, x: c_int) ?*struct_Foo; , \\pub const Foo = struct_Foo; ); @@ -219,11 +219,11 @@ pub fn addCases(cases: &tests.TranslateCContext) void { \\}; , \\pub const struct_Bar = extern struct { - \\ next: ?&struct_Foo, + \\ next: ?*struct_Foo, \\}; , \\pub const struct_Foo = extern struct { - \\ next: ?&struct_Bar, + \\ next: ?*struct_Bar, \\}; ); @@ -233,7 +233,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void { , \\pub const Foo = c_void; , - \\pub extern fn fun(a: ?&Foo) Foo; + \\pub extern fn fun(a: ?*Foo) Foo; ); cases.add("generate inline func for #define global extern fn", 
@@ -505,7 +505,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void { \\ return 6; \\} , - \\pub export fn and_or_none_bool(a: c_int, b: f32, c: ?&c_void) c_int { + \\pub export fn and_or_none_bool(a: c_int, b: f32, c: ?*c_void) c_int { \\ if ((a != 0) and (b != 0)) return 0; \\ if ((b != 0) and (c != null)) return 1; \\ if ((a != 0) and (c != null)) return 2; @@ -607,7 +607,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void { \\pub const struct_Foo = extern struct { \\ field: c_int, \\}; - \\pub export fn read_field(foo: ?&struct_Foo) c_int { + \\pub export fn read_field(foo: ?*struct_Foo) c_int { \\ return (??foo).field; \\} ); @@ -653,8 +653,8 @@ pub fn addCases(cases: &tests.TranslateCContext) void { \\ return x; \\} , - \\pub export fn foo(x: ?&c_ushort) ?&c_void { - \\ return @ptrCast(?&c_void, x); + \\pub export fn foo(x: ?*c_ushort) ?*c_void { + \\ return @ptrCast(?*c_void, x); \\} ); @@ -674,7 +674,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void { \\ return 0; \\} , - \\pub export fn foo() ?&c_int { + \\pub export fn foo() ?*c_int { \\ return null; \\} ); @@ -983,7 +983,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void { \\ *x = 1; \\} , - \\pub export fn foo(x: ?&c_int) void { + \\pub export fn foo(x: ?*c_int) void { \\ (??x).* = 1; \\} ); @@ -1011,7 +1011,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void { , \\pub fn foo() c_int { \\ var x: c_int = 1234; - \\ var ptr: ?&c_int = &x; + \\ var ptr: ?*c_int = &x; \\ return (??ptr).*; \\} ); @@ -1021,7 +1021,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void { \\ return "bar"; \\} , - \\pub fn foo() ?&const u8 { + \\pub fn foo() ?*const u8 { \\ return c"bar"; \\} ); @@ -1150,8 +1150,8 @@ pub fn addCases(cases: &tests.TranslateCContext) void { \\ return (float *)a; \\} , - \\fn ptrcast(a: ?&c_int) ?&f32 { - \\ return @ptrCast(?&f32, a); + \\fn ptrcast(a: ?*c_int) ?*f32 { + \\ return @ptrCast(?*f32, a); \\} ); @@ -1173,7 +1173,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void { \\ return !c; \\} , - \\pub fn foo(a: c_int, b: f32, c: ?&c_void) c_int { + \\pub fn foo(a: c_int, b: f32, c: ?*c_void) c_int { \\ return !(a == 0); \\ return !(a != 0); \\ return !(b != 0); @@ -1194,7 +1194,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void { cases.add("const ptr initializer", \\static const char *v0 = "0.0.0"; , - \\pub var v0: ?&const u8 = c"0.0.0"; + \\pub var v0: ?*const u8 = c"0.0.0"; ); cases.add("static incomplete array inside function", @@ -1203,14 +1203,14 @@ pub fn addCases(cases: &tests.TranslateCContext) void { \\} , \\pub fn foo() void { - \\ const v2: &const u8 = c"2.2.2"; + \\ const v2: *const u8 = c"2.2.2"; \\} ); cases.add("macro pointer cast", \\#define NRF_GPIO ((NRF_GPIO_Type *) NRF_GPIO_BASE) , - \\pub const NRF_GPIO = if (@typeId(@typeOf(NRF_GPIO_BASE)) == @import("builtin").TypeId.Pointer) @ptrCast(&NRF_GPIO_Type, NRF_GPIO_BASE) else if (@typeId(@typeOf(NRF_GPIO_BASE)) == @import("builtin").TypeId.Int) @intToPtr(&NRF_GPIO_Type, NRF_GPIO_BASE) else (&NRF_GPIO_Type)(NRF_GPIO_BASE); + \\pub const NRF_GPIO = if (@typeId(@typeOf(NRF_GPIO_BASE)) == @import("builtin").TypeId.Pointer) @ptrCast(*NRF_GPIO_Type, NRF_GPIO_BASE) else if (@typeId(@typeOf(NRF_GPIO_BASE)) == @import("builtin").TypeId.Int) @intToPtr(*NRF_GPIO_Type, NRF_GPIO_BASE) else (*NRF_GPIO_Type)(NRF_GPIO_BASE); ); cases.add("if on none bool", @@ -1231,7 +1231,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void { \\ B, \\ C, \\}; - \\pub fn if_none_bool(a: c_int, b: f32, c: ?&c_void, d: 
enum_SomeEnum) c_int { + \\pub fn if_none_bool(a: c_int, b: f32, c: ?*c_void, d: enum_SomeEnum) c_int { \\ if (a != 0) return 0; \\ if (b != 0) return 1; \\ if (c != null) return 2; @@ -1248,7 +1248,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void { \\ return 3; \\} , - \\pub fn while_none_bool(a: c_int, b: f32, c: ?&c_void) c_int { + \\pub fn while_none_bool(a: c_int, b: f32, c: ?*c_void) c_int { \\ while (a != 0) return 0; \\ while (b != 0) return 1; \\ while (c != null) return 2; @@ -1264,7 +1264,7 @@ pub fn addCases(cases: &tests.TranslateCContext) void { \\ return 3; \\} , - \\pub fn for_none_bool(a: c_int, b: f32, c: ?&c_void) c_int { + \\pub fn for_none_bool(a: c_int, b: f32, c: ?*c_void) c_int { \\ while (a != 0) return 0; \\ while (b != 0) return 1; \\ while (c != null) return 2; -- cgit v1.2.3 From 96164ce61377b36bcaf0c4087ca9b1ab822b9457 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 4 Jun 2018 01:09:15 -0400 Subject: disallow single-item pointer indexing add pointer arithmetic for unknown length pointer --- doc/langref.html.in | 48 ++++++----- src/all_types.hpp | 9 ++ src/analyze.cpp | 53 ++++++++---- src/analyze.hpp | 2 +- src/ast_render.cpp | 8 +- src/codegen.cpp | 49 +++++++---- src/ir.cpp | 177 ++++++++++++++++++++++++++++----------- src/parser.cpp | 1 + std/buffer.zig | 2 +- std/c/darwin.zig | 4 +- std/c/index.zig | 50 +++++------ std/c/linux.zig | 2 +- std/cstr.zig | 10 +-- std/heap.zig | 18 ++-- std/os/child_process.zig | 2 +- std/os/darwin.zig | 45 +++++----- std/os/file.zig | 4 +- std/os/index.zig | 101 +++++++--------------- std/os/linux/index.zig | 123 ++++++++++++++++----------- std/os/linux/test.zig | 3 +- std/os/linux/vdso.zig | 26 +++--- std/os/windows/index.zig | 30 +++---- std/os/windows/util.zig | 2 +- std/segmented_list.zig | 12 +-- std/special/bootstrap.zig | 22 ++--- std/special/builtin.zig | 6 +- test/cases/align.zig | 49 ++++------- test/cases/const_slice_child.zig | 9 +- test/cases/for.zig | 26 +----- test/cases/misc.zig | 11 +-- test/cases/pointers.zig | 30 +++++++ test/cases/struct.zig | 6 +- test/compare_output.zig | 16 ++-- test/compile_errors.zig | 15 +++- test/translate_c.zig | 56 ++++++------- 35 files changed, 584 insertions(+), 443 deletions(-) (limited to 'src/analyze.cpp') diff --git a/doc/langref.html.in b/doc/langref.html.in index 217f02777f..32481ade50 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -458,7 +458,7 @@ test "string literals" { // A C string literal is a null terminated pointer. const null_terminated_bytes = c"hello"; - assert(@typeOf(null_terminated_bytes) == *const u8); + assert(@typeOf(null_terminated_bytes) == [*]const u8); assert(null_terminated_bytes[5] == 0); } {#code_end#} @@ -547,7 +547,7 @@ const c_string_literal = ; {#code_end#}

-    In this example the variable c_string_literal has type *const char and
+    In this example the variable c_string_literal has type [*]const char and
     has a terminating null byte.
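A minimal sketch (not part of the patch) of what the [*]const type above means in practice, reusing the c"..." literal and @typeOf/assert style already shown in this langref diff; the identifier msg is illustrative:

const assert = @import("std").debug.assert;

test "c string literal is an unknown-length pointer" {
    const msg = c"hi";
    // After this change the literal's type is [*]const u8, not *const u8.
    assert(@typeOf(msg) == [*]const u8);
    // Unknown-length pointers may be indexed; the terminating null byte sits at index 2.
    assert(msg[0] == 'h');
    assert(msg[2] == 0);
}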

    {#see_also|@embedFile#} @@ -1288,7 +1288,7 @@ const assert = @import("std").debug.assert; const mem = @import("std").mem; // array literal -const message = []u8{'h', 'e', 'l', 'l', 'o'}; +const message = []u8{ 'h', 'e', 'l', 'l', 'o' }; // get the size of an array comptime { @@ -1324,11 +1324,11 @@ test "modify an array" { // array concatenation works if the values are known // at compile time -const part_one = []i32{1, 2, 3, 4}; -const part_two = []i32{5, 6, 7, 8}; +const part_one = []i32{ 1, 2, 3, 4 }; +const part_two = []i32{ 5, 6, 7, 8 }; const all_of_it = part_one ++ part_two; comptime { - assert(mem.eql(i32, all_of_it, []i32{1,2,3,4,5,6,7,8})); + assert(mem.eql(i32, all_of_it, []i32{ 1, 2, 3, 4, 5, 6, 7, 8 })); } // remember that string literals are arrays @@ -1357,7 +1357,7 @@ comptime { var fancy_array = init: { var initial_value: [10]Point = undefined; for (initial_value) |*pt, i| { - pt.* = Point { + pt.* = Point{ .x = i32(i), .y = i32(i) * 2, }; @@ -1377,7 +1377,7 @@ test "compile-time array initalization" { // call a function to initialize an array var more_points = []Point{makePoint(3)} ** 10; fn makePoint(x: i32) Point { - return Point { + return Point{ .x = x, .y = x * 2, }; @@ -1414,25 +1414,24 @@ test "address of syntax" { } test "pointer array access" { - // Pointers do not support pointer arithmetic. If you - // need such a thing, use array index syntax: + // Taking an address of an individual element gives a + // pointer to a single item. This kind of pointer + // does not support pointer arithmetic. var array = []u8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; - const ptr = &array[1]; + const ptr = &array[2]; + assert(@typeOf(ptr) == *u8); assert(array[2] == 3); - ptr[1] += 1; + ptr.* += 1; assert(array[2] == 4); } test "pointer slicing" { // In Zig, we prefer using slices over null-terminated pointers. - // You can turn a pointer into a slice using slice syntax: + // You can turn an array into a slice using slice syntax: var array = []u8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; - const ptr = &array[1]; - const slice = ptr[1..3]; - - assert(slice.ptr == &ptr[1]); + const slice = array[2..4]; assert(slice.len == 2); // Slices have bounds checking and are therefore protected @@ -1622,18 +1621,27 @@ fn foo(bytes: []u8) u32 { const assert = @import("std").debug.assert; test "basic slices" { - var array = []i32{1, 2, 3, 4}; + var array = []i32{ 1, 2, 3, 4 }; // A slice is a pointer and a length. The difference between an array and // a slice is that the array's length is part of the type and known at // compile-time, whereas the slice's length is known at runtime. // Both can be accessed with the `len` field. const slice = array[0..array.len]; - assert(slice.ptr == &array[0]); + assert(&slice[0] == &array[0]); assert(slice.len == array.len); + // Using the address-of operator on a slice gives a pointer to a single + // item, while using the `ptr` field gives an unknown length pointer. + assert(@typeOf(slice.ptr) == [*]i32); + assert(@typeOf(&slice[0]) == *i32); + assert(@ptrToInt(slice.ptr) == @ptrToInt(&slice[0])); + // Slices have array bounds checking. If you try to access something out // of bounds, you'll get a safety check failure: slice[10] += 1; + + // Note that `slice.ptr` does not invoke safety checking, while `&slice[0]` + // asserts that the slice has len >= 1. } {#code_end#}

    This is one reason we prefer slices to pointers.
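A minimal sketch (not part of the patch) of the distinction this commit enforces, reusing the test/assert style of the langref examples above; the names array, slice, second, and first are illustrative:

const assert = @import("std").debug.assert;

test "single-item pointers vs unknown-length pointers" {
    var array = []i32{ 1, 2, 3, 4 };
    const slice = array[0..array.len];

    // slice.ptr is a [*]i32: adding an integer offsets it by whole elements,
    // which is the pointer arithmetic added for unknown-length pointers.
    const second = slice.ptr + 1;
    assert(second[0] == 2);

    // &array[0] is a *i32 single-item pointer: indexing it is now rejected with
    // "indexing not allowed on pointer to single item"; only .* access is allowed.
    const first = &array[0];
    assert(first.* == 1);
}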

    @@ -5937,7 +5945,7 @@ pub const __zig_test_fn_slice = {}; // overwritten later {#header_open|C String Literals#} {#code_begin|exe#} {#link_libc#} -extern fn puts(*const u8) void; +extern fn puts([*]const u8) void; pub fn main() void { puts(c"this has a null terminator"); diff --git a/src/all_types.hpp b/src/all_types.hpp index 8e65cfc789..f1cf96238f 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -974,8 +974,14 @@ struct FnTypeId { uint32_t fn_type_id_hash(FnTypeId*); bool fn_type_id_eql(FnTypeId *a, FnTypeId *b); +enum PtrLen { + PtrLenUnknown, + PtrLenSingle, +}; + struct TypeTableEntryPointer { TypeTableEntry *child_type; + PtrLen ptr_len; bool is_const; bool is_volatile; uint32_t alignment; @@ -1397,6 +1403,7 @@ struct TypeId { union { struct { TypeTableEntry *child_type; + PtrLen ptr_len; bool is_const; bool is_volatile; uint32_t alignment; @@ -2268,6 +2275,7 @@ struct IrInstructionElemPtr { IrInstruction *array_ptr; IrInstruction *elem_index; + PtrLen ptr_len; bool is_const; bool safety_check_on; }; @@ -2419,6 +2427,7 @@ struct IrInstructionPtrType { IrInstruction *child_type; uint32_t bit_offset_start; uint32_t bit_offset_end; + PtrLen ptr_len; bool is_const; bool is_volatile; }; diff --git a/src/analyze.cpp b/src/analyze.cpp index a5011035c5..2b9d776e78 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -381,14 +381,14 @@ TypeTableEntry *get_promise_type(CodeGen *g, TypeTableEntry *result_type) { } TypeTableEntry *get_pointer_to_type_extra(CodeGen *g, TypeTableEntry *child_type, bool is_const, - bool is_volatile, uint32_t byte_alignment, uint32_t bit_offset, uint32_t unaligned_bit_count) + bool is_volatile, PtrLen ptr_len, uint32_t byte_alignment, uint32_t bit_offset, uint32_t unaligned_bit_count) { assert(!type_is_invalid(child_type)); TypeId type_id = {}; TypeTableEntry **parent_pointer = nullptr; uint32_t abi_alignment = get_abi_alignment(g, child_type); - if (unaligned_bit_count != 0 || is_volatile || byte_alignment != abi_alignment) { + if (unaligned_bit_count != 0 || is_volatile || byte_alignment != abi_alignment || ptr_len != PtrLenSingle) { type_id.id = TypeTableEntryIdPointer; type_id.data.pointer.child_type = child_type; type_id.data.pointer.is_const = is_const; @@ -396,6 +396,7 @@ TypeTableEntry *get_pointer_to_type_extra(CodeGen *g, TypeTableEntry *child_type type_id.data.pointer.alignment = byte_alignment; type_id.data.pointer.bit_offset = bit_offset; type_id.data.pointer.unaligned_bit_count = unaligned_bit_count; + type_id.data.pointer.ptr_len = ptr_len; auto existing_entry = g->type_table.maybe_get(type_id); if (existing_entry) @@ -414,16 +415,17 @@ TypeTableEntry *get_pointer_to_type_extra(CodeGen *g, TypeTableEntry *child_type TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdPointer); entry->is_copyable = true; + const char *star_str = ptr_len == PtrLenSingle ? "*" : "[*]"; const char *const_str = is_const ? "const " : ""; const char *volatile_str = is_volatile ? 
"volatile " : ""; buf_resize(&entry->name, 0); if (unaligned_bit_count == 0 && byte_alignment == abi_alignment) { - buf_appendf(&entry->name, "*%s%s%s", const_str, volatile_str, buf_ptr(&child_type->name)); + buf_appendf(&entry->name, "%s%s%s%s", star_str, const_str, volatile_str, buf_ptr(&child_type->name)); } else if (unaligned_bit_count == 0) { - buf_appendf(&entry->name, "*align(%" PRIu32 ") %s%s%s", byte_alignment, + buf_appendf(&entry->name, "%salign(%" PRIu32 ") %s%s%s", star_str, byte_alignment, const_str, volatile_str, buf_ptr(&child_type->name)); } else { - buf_appendf(&entry->name, "*align(%" PRIu32 ":%" PRIu32 ":%" PRIu32 ") %s%s%s", byte_alignment, + buf_appendf(&entry->name, "%salign(%" PRIu32 ":%" PRIu32 ":%" PRIu32 ") %s%s%s", star_str, byte_alignment, bit_offset, bit_offset + unaligned_bit_count, const_str, volatile_str, buf_ptr(&child_type->name)); } @@ -433,7 +435,9 @@ TypeTableEntry *get_pointer_to_type_extra(CodeGen *g, TypeTableEntry *child_type if (!entry->zero_bits) { assert(byte_alignment > 0); - if (is_const || is_volatile || unaligned_bit_count != 0 || byte_alignment != abi_alignment) { + if (is_const || is_volatile || unaligned_bit_count != 0 || byte_alignment != abi_alignment || + ptr_len != PtrLenSingle) + { TypeTableEntry *peer_type = get_pointer_to_type(g, child_type, false); entry->type_ref = peer_type->type_ref; entry->di_type = peer_type->di_type; @@ -451,6 +455,7 @@ TypeTableEntry *get_pointer_to_type_extra(CodeGen *g, TypeTableEntry *child_type entry->di_type = g->builtin_types.entry_void->di_type; } + entry->data.pointer.ptr_len = ptr_len; entry->data.pointer.child_type = child_type; entry->data.pointer.is_const = is_const; entry->data.pointer.is_volatile = is_volatile; @@ -467,7 +472,8 @@ TypeTableEntry *get_pointer_to_type_extra(CodeGen *g, TypeTableEntry *child_type } TypeTableEntry *get_pointer_to_type(CodeGen *g, TypeTableEntry *child_type, bool is_const) { - return get_pointer_to_type_extra(g, child_type, is_const, false, get_abi_alignment(g, child_type), 0, 0); + return get_pointer_to_type_extra(g, child_type, is_const, false, PtrLenSingle, + get_abi_alignment(g, child_type), 0, 0); } TypeTableEntry *get_promise_frame_type(CodeGen *g, TypeTableEntry *return_type) { @@ -757,6 +763,7 @@ static void slice_type_common_init(CodeGen *g, TypeTableEntry *pointer_type, Typ TypeTableEntry *get_slice_type(CodeGen *g, TypeTableEntry *ptr_type) { assert(ptr_type->id == TypeTableEntryIdPointer); + assert(ptr_type->data.pointer.ptr_len == PtrLenUnknown); TypeTableEntry **parent_pointer = &ptr_type->data.pointer.slice_parent; if (*parent_pointer) { @@ -768,14 +775,16 @@ TypeTableEntry *get_slice_type(CodeGen *g, TypeTableEntry *ptr_type) { // replace the & with [] to go from a ptr type name to a slice type name buf_resize(&entry->name, 0); - buf_appendf(&entry->name, "[]%s", buf_ptr(&ptr_type->name) + 1); + size_t name_offset = (ptr_type->data.pointer.ptr_len == PtrLenSingle) ? 
1 : 3; + buf_appendf(&entry->name, "[]%s", buf_ptr(&ptr_type->name) + name_offset); TypeTableEntry *child_type = ptr_type->data.pointer.child_type; - uint32_t abi_alignment; + uint32_t abi_alignment = get_abi_alignment(g, child_type); if (ptr_type->data.pointer.is_const || ptr_type->data.pointer.is_volatile || - ptr_type->data.pointer.alignment != (abi_alignment = get_abi_alignment(g, child_type))) + ptr_type->data.pointer.alignment != abi_alignment) { - TypeTableEntry *peer_ptr_type = get_pointer_to_type(g, child_type, false); + TypeTableEntry *peer_ptr_type = get_pointer_to_type_extra(g, child_type, false, false, + PtrLenUnknown, abi_alignment, 0, 0); TypeTableEntry *peer_slice_type = get_slice_type(g, peer_ptr_type); slice_type_common_init(g, ptr_type, entry); @@ -799,9 +808,11 @@ TypeTableEntry *get_slice_type(CodeGen *g, TypeTableEntry *ptr_type) { if (child_ptr_type->data.pointer.is_const || child_ptr_type->data.pointer.is_volatile || child_ptr_type->data.pointer.alignment != get_abi_alignment(g, grand_child_type)) { - TypeTableEntry *bland_child_ptr_type = get_pointer_to_type(g, grand_child_type, false); + TypeTableEntry *bland_child_ptr_type = get_pointer_to_type_extra(g, grand_child_type, false, false, + PtrLenUnknown, get_abi_alignment(g, grand_child_type), 0, 0); TypeTableEntry *bland_child_slice = get_slice_type(g, bland_child_ptr_type); - TypeTableEntry *peer_ptr_type = get_pointer_to_type(g, bland_child_slice, false); + TypeTableEntry *peer_ptr_type = get_pointer_to_type_extra(g, bland_child_slice, false, false, + PtrLenUnknown, get_abi_alignment(g, bland_child_slice), 0, 0); TypeTableEntry *peer_slice_type = get_slice_type(g, peer_ptr_type); entry->type_ref = peer_slice_type->type_ref; @@ -1284,7 +1295,8 @@ static bool analyze_const_align(CodeGen *g, Scope *scope, AstNode *node, uint32_ } static bool analyze_const_string(CodeGen *g, Scope *scope, AstNode *node, Buf **out_buffer) { - TypeTableEntry *ptr_type = get_pointer_to_type(g, g->builtin_types.entry_u8, true); + TypeTableEntry *ptr_type = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, true, false, + PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0); TypeTableEntry *str_type = get_slice_type(g, ptr_type); IrInstruction *instr = analyze_const_value(g, scope, node, str_type, nullptr); if (type_is_invalid(instr->value.type)) @@ -2954,7 +2966,8 @@ static void typecheck_panic_fn(CodeGen *g, FnTableEntry *panic_fn) { if (fn_type_id->param_count != 2) { return wrong_panic_prototype(g, proto_node, fn_type); } - TypeTableEntry *const_u8_ptr = get_pointer_to_type(g, g->builtin_types.entry_u8, true); + TypeTableEntry *const_u8_ptr = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, true, false, + PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0); TypeTableEntry *const_u8_slice = get_slice_type(g, const_u8_ptr); if (fn_type_id->param_info[0].type != const_u8_slice) { return wrong_panic_prototype(g, proto_node, fn_type); @@ -4994,7 +5007,9 @@ void init_const_c_str_lit(CodeGen *g, ConstExprValue *const_val, Buf *str) { // then make the pointer point to it const_val->special = ConstValSpecialStatic; - const_val->type = get_pointer_to_type(g, g->builtin_types.entry_u8, true); + // TODO make this `[*]null u8` instead of `[*]u8` + const_val->type = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, true, false, + PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0); const_val->data.x_ptr.special = ConstPtrSpecialBaseArray; 
const_val->data.x_ptr.data.base_array.array_val = array_val; const_val->data.x_ptr.data.base_array.elem_index = 0; @@ -5135,7 +5150,9 @@ void init_const_slice(CodeGen *g, ConstExprValue *const_val, ConstExprValue *arr { assert(array_val->type->id == TypeTableEntryIdArray); - TypeTableEntry *ptr_type = get_pointer_to_type(g, array_val->type->data.array.child_type, is_const); + TypeTableEntry *ptr_type = get_pointer_to_type_extra(g, array_val->type->data.array.child_type, + is_const, false, PtrLenUnknown, get_abi_alignment(g, array_val->type->data.array.child_type), + 0, 0); const_val->special = ConstValSpecialStatic; const_val->type = get_slice_type(g, ptr_type); @@ -5759,6 +5776,7 @@ uint32_t type_id_hash(TypeId x) { return hash_ptr(x.data.error_union.err_set_type) ^ hash_ptr(x.data.error_union.payload_type); case TypeTableEntryIdPointer: return hash_ptr(x.data.pointer.child_type) + + ((x.data.pointer.ptr_len == PtrLenSingle) ? (uint32_t)1120226602 : (uint32_t)3200913342) + (x.data.pointer.is_const ? (uint32_t)2749109194 : (uint32_t)4047371087) + (x.data.pointer.is_volatile ? (uint32_t)536730450 : (uint32_t)1685612214) + (((uint32_t)x.data.pointer.alignment) ^ (uint32_t)0x777fbe0e) + @@ -5807,6 +5825,7 @@ bool type_id_eql(TypeId a, TypeId b) { case TypeTableEntryIdPointer: return a.data.pointer.child_type == b.data.pointer.child_type && + a.data.pointer.ptr_len == b.data.pointer.ptr_len && a.data.pointer.is_const == b.data.pointer.is_const && a.data.pointer.is_volatile == b.data.pointer.is_volatile && a.data.pointer.alignment == b.data.pointer.alignment && diff --git a/src/analyze.hpp b/src/analyze.hpp index d538f042ce..905bfa86dd 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -16,7 +16,7 @@ ErrorMsg *add_error_note(CodeGen *g, ErrorMsg *parent_msg, AstNode *node, Buf *m TypeTableEntry *new_type_table_entry(TypeTableEntryId id); TypeTableEntry *get_pointer_to_type(CodeGen *g, TypeTableEntry *child_type, bool is_const); TypeTableEntry *get_pointer_to_type_extra(CodeGen *g, TypeTableEntry *child_type, bool is_const, - bool is_volatile, uint32_t byte_alignment, uint32_t bit_offset, uint32_t unaligned_bit_count); + bool is_volatile, PtrLen ptr_len, uint32_t byte_alignment, uint32_t bit_offset, uint32_t unaligned_bit_count); uint64_t type_size(CodeGen *g, TypeTableEntry *type_entry); uint64_t type_size_bits(CodeGen *g, TypeTableEntry *type_entry); TypeTableEntry **get_int_type_ptr(CodeGen *g, bool is_signed, uint32_t size_in_bits); diff --git a/src/ast_render.cpp b/src/ast_render.cpp index f356f406b0..3785cb6ca1 100644 --- a/src/ast_render.cpp +++ b/src/ast_render.cpp @@ -625,7 +625,13 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) { case NodeTypePointerType: { if (!grouped) fprintf(ar->f, "("); - fprintf(ar->f, "*"); + const char *star = "[*]"; + if (node->data.pointer_type.star_token != nullptr && + (node->data.pointer_type.star_token->id == TokenIdStar || node->data.pointer_type.star_token->id == TokenIdStarStar)) + { + star = "*"; + } + fprintf(ar->f, "%s", star); if (node->data.pointer_type.align_expr != nullptr) { fprintf(ar->f, "align("); render_node_grouped(ar, node->data.pointer_type.align_expr); diff --git a/src/codegen.cpp b/src/codegen.cpp index d07d427729..64e29a4da4 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -893,7 +893,8 @@ static LLVMValueRef get_panic_msg_ptr_val(CodeGen *g, PanicMsgId msg_id) { assert(val->global_refs->llvm_global); } - TypeTableEntry *u8_ptr_type = get_pointer_to_type(g, g->builtin_types.entry_u8, true); + 
TypeTableEntry *u8_ptr_type = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, true, false, + PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0); TypeTableEntry *str_type = get_slice_type(g, u8_ptr_type); return LLVMConstBitCast(val->global_refs->llvm_global, LLVMPointerType(str_type->type_ref, 0)); } @@ -1461,7 +1462,8 @@ static LLVMValueRef get_safety_crash_err_fn(CodeGen *g) { LLVMValueRef full_buf_ptr = LLVMConstInBoundsGEP(global_array, full_buf_ptr_indices, 2); - TypeTableEntry *u8_ptr_type = get_pointer_to_type(g, g->builtin_types.entry_u8, true); + TypeTableEntry *u8_ptr_type = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, true, false, + PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0); TypeTableEntry *str_type = get_slice_type(g, u8_ptr_type); LLVMValueRef global_slice_fields[] = { full_buf_ptr, @@ -2212,9 +2214,13 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, IrExecutable *executable, IrInstruction *op2 = bin_op_instruction->op2; assert(op1->value.type == op2->value.type || op_id == IrBinOpBitShiftLeftLossy || - op_id == IrBinOpBitShiftLeftExact || op_id == IrBinOpBitShiftRightLossy || - op_id == IrBinOpBitShiftRightExact || - (op1->value.type->id == TypeTableEntryIdErrorSet && op2->value.type->id == TypeTableEntryIdErrorSet)); + op_id == IrBinOpBitShiftLeftExact || op_id == IrBinOpBitShiftRightLossy || + op_id == IrBinOpBitShiftRightExact || + (op1->value.type->id == TypeTableEntryIdErrorSet && op2->value.type->id == TypeTableEntryIdErrorSet) || + (op1->value.type->id == TypeTableEntryIdPointer && + (op_id == IrBinOpAdd || op_id == IrBinOpSub) && + op1->value.type->data.pointer.ptr_len == PtrLenUnknown) + ); TypeTableEntry *type_entry = op1->value.type; bool want_runtime_safety = bin_op_instruction->safety_check_on && @@ -2222,6 +2228,8 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, IrExecutable *executable, LLVMValueRef op1_value = ir_llvm_value(g, op1); LLVMValueRef op2_value = ir_llvm_value(g, op2); + + switch (op_id) { case IrBinOpInvalid: case IrBinOpArrayCat: @@ -2260,7 +2268,11 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, IrExecutable *executable, } case IrBinOpAdd: case IrBinOpAddWrap: - if (type_entry->id == TypeTableEntryIdFloat) { + if (type_entry->id == TypeTableEntryIdPointer) { + assert(type_entry->data.pointer.ptr_len == PtrLenUnknown); + // TODO runtime safety + return LLVMBuildInBoundsGEP(g->builder, op1_value, &op2_value, 1, ""); + } else if (type_entry->id == TypeTableEntryIdFloat) { ZigLLVMSetFastMath(g->builder, ir_want_fast_math(g, &bin_op_instruction->base)); return LLVMBuildFAdd(g->builder, op1_value, op2_value, ""); } else if (type_entry->id == TypeTableEntryIdInt) { @@ -2323,7 +2335,12 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, IrExecutable *executable, } case IrBinOpSub: case IrBinOpSubWrap: - if (type_entry->id == TypeTableEntryIdFloat) { + if (type_entry->id == TypeTableEntryIdPointer) { + assert(type_entry->data.pointer.ptr_len == PtrLenUnknown); + // TODO runtime safety + LLVMValueRef subscript_value = LLVMBuildNeg(g->builder, op2_value, ""); + return LLVMBuildInBoundsGEP(g->builder, op1_value, &subscript_value, 1, ""); + } else if (type_entry->id == TypeTableEntryIdFloat) { ZigLLVMSetFastMath(g->builder, ir_want_fast_math(g, &bin_op_instruction->base)); return LLVMBuildFSub(g->builder, op1_value, op2_value, ""); } else if (type_entry->id == TypeTableEntryIdInt) { @@ -2770,7 +2787,7 @@ static LLVMValueRef ir_render_decl_var(CodeGen *g, IrExecutable *executable, if 
(have_init_expr) { assert(var->value->type == init_value->value.type); TypeTableEntry *var_ptr_type = get_pointer_to_type_extra(g, var->value->type, false, false, - var->align_bytes, 0, 0); + PtrLenSingle, var->align_bytes, 0, 0); gen_assign_raw(g, var->value_ref, var_ptr_type, ir_llvm_value(g, init_value)); } else { bool want_safe = ir_want_runtime_safety(g, &decl_var_instruction->base); @@ -4172,7 +4189,7 @@ static LLVMValueRef ir_render_struct_init(CodeGen *g, IrExecutable *executable, uint32_t field_align_bytes = get_abi_alignment(g, type_struct_field->type_entry); TypeTableEntry *ptr_type = get_pointer_to_type_extra(g, type_struct_field->type_entry, - false, false, field_align_bytes, + false, false, PtrLenSingle, field_align_bytes, (uint32_t)type_struct_field->packed_bits_offset, (uint32_t)type_struct_field->unaligned_bit_count); gen_assign_raw(g, field_ptr, ptr_type, value); @@ -4188,7 +4205,7 @@ static LLVMValueRef ir_render_union_init(CodeGen *g, IrExecutable *executable, I uint32_t field_align_bytes = get_abi_alignment(g, type_union_field->type_entry); TypeTableEntry *ptr_type = get_pointer_to_type_extra(g, type_union_field->type_entry, - false, false, field_align_bytes, + false, false, PtrLenSingle, field_align_bytes, 0, 0); LLVMValueRef uncasted_union_ptr; @@ -4435,7 +4452,8 @@ static LLVMValueRef get_coro_alloc_helper_fn_val(CodeGen *g, LLVMTypeRef alloc_f LLVMPositionBuilderAtEnd(g->builder, ok_block); LLVMValueRef payload_ptr = LLVMBuildStructGEP(g->builder, sret_ptr, err_union_payload_index, ""); - TypeTableEntry *u8_ptr_type = get_pointer_to_type(g, g->builtin_types.entry_u8, false); + TypeTableEntry *u8_ptr_type = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, false, false, + PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0); TypeTableEntry *slice_type = get_slice_type(g, u8_ptr_type); size_t ptr_field_index = slice_type->data.structure.fields[slice_ptr_index].gen_index; LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, payload_ptr, ptr_field_index, ""); @@ -5377,7 +5395,8 @@ static void generate_error_name_table(CodeGen *g) { assert(g->errors_by_index.length > 0); - TypeTableEntry *u8_ptr_type = get_pointer_to_type(g, g->builtin_types.entry_u8, true); + TypeTableEntry *u8_ptr_type = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, true, false, + PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0); TypeTableEntry *str_type = get_slice_type(g, u8_ptr_type); LLVMValueRef *values = allocate(g->errors_by_index.length); @@ -5415,7 +5434,8 @@ static void generate_error_name_table(CodeGen *g) { } static void generate_enum_name_tables(CodeGen *g) { - TypeTableEntry *u8_ptr_type = get_pointer_to_type(g, g->builtin_types.entry_u8, true); + TypeTableEntry *u8_ptr_type = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, true, false, + PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0); TypeTableEntry *str_type = get_slice_type(g, u8_ptr_type); TypeTableEntry *usize = g->builtin_types.entry_usize; @@ -6869,7 +6889,8 @@ static void create_test_compile_var_and_add_test_runner(CodeGen *g) { exit(0); } - TypeTableEntry *u8_ptr_type = get_pointer_to_type(g, g->builtin_types.entry_u8, true); + TypeTableEntry *u8_ptr_type = get_pointer_to_type_extra(g, g->builtin_types.entry_u8, true, false, + PtrLenUnknown, get_abi_alignment(g, g->builtin_types.entry_u8), 0, 0); TypeTableEntry *str_type = get_slice_type(g, u8_ptr_type); TypeTableEntry *fn_type = get_test_fn_type(g); diff --git a/src/ir.cpp b/src/ir.cpp 
index 5cada29076..a230c60456 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -1009,12 +1009,13 @@ static IrInstruction *ir_build_var_ptr(IrBuilder *irb, Scope *scope, AstNode *so } static IrInstruction *ir_build_elem_ptr(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *array_ptr, - IrInstruction *elem_index, bool safety_check_on) + IrInstruction *elem_index, bool safety_check_on, PtrLen ptr_len) { IrInstructionElemPtr *instruction = ir_build_instruction(irb, scope, source_node); instruction->array_ptr = array_ptr; instruction->elem_index = elem_index; instruction->safety_check_on = safety_check_on; + instruction->ptr_len = ptr_len; ir_ref_instruction(array_ptr, irb->current_basic_block); ir_ref_instruction(elem_index, irb->current_basic_block); @@ -1022,15 +1023,6 @@ static IrInstruction *ir_build_elem_ptr(IrBuilder *irb, Scope *scope, AstNode *s return &instruction->base; } -static IrInstruction *ir_build_elem_ptr_from(IrBuilder *irb, IrInstruction *old_instruction, - IrInstruction *array_ptr, IrInstruction *elem_index, bool safety_check_on) -{ - IrInstruction *new_instruction = ir_build_elem_ptr(irb, old_instruction->scope, - old_instruction->source_node, array_ptr, elem_index, safety_check_on); - ir_link_new_instruction(new_instruction, old_instruction); - return new_instruction; -} - static IrInstruction *ir_build_field_ptr_instruction(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *container_ptr, IrInstruction *field_name_expr) { @@ -1188,14 +1180,15 @@ static IrInstruction *ir_build_br_from(IrBuilder *irb, IrInstruction *old_instru } static IrInstruction *ir_build_ptr_type(IrBuilder *irb, Scope *scope, AstNode *source_node, - IrInstruction *child_type, bool is_const, bool is_volatile, IrInstruction *align_value, - uint32_t bit_offset_start, uint32_t bit_offset_end) + IrInstruction *child_type, bool is_const, bool is_volatile, PtrLen ptr_len, + IrInstruction *align_value, uint32_t bit_offset_start, uint32_t bit_offset_end) { IrInstructionPtrType *ptr_type_of_instruction = ir_build_instruction(irb, scope, source_node); ptr_type_of_instruction->align_value = align_value; ptr_type_of_instruction->child_type = child_type; ptr_type_of_instruction->is_const = is_const; ptr_type_of_instruction->is_volatile = is_volatile; + ptr_type_of_instruction->ptr_len = ptr_len; ptr_type_of_instruction->bit_offset_start = bit_offset_start; ptr_type_of_instruction->bit_offset_end = bit_offset_end; @@ -3547,7 +3540,7 @@ static IrInstruction *ir_gen_array_access(IrBuilder *irb, Scope *scope, AstNode return subscript_instruction; IrInstruction *ptr_instruction = ir_build_elem_ptr(irb, scope, node, array_ref_instruction, - subscript_instruction, true); + subscript_instruction, true, PtrLenSingle); if (lval.is_ptr) return ptr_instruction; @@ -4626,6 +4619,11 @@ static IrInstruction *ir_lval_wrap(IrBuilder *irb, Scope *scope, IrInstruction * static IrInstruction *ir_gen_pointer_type(IrBuilder *irb, Scope *scope, AstNode *node) { assert(node->type == NodeTypePointerType); + // The null check here is for C imports which don't set a token on the AST node. We could potentially + // update that code to create a fake token and then remove this check. + PtrLen ptr_len = (node->data.pointer_type.star_token != nullptr && + (node->data.pointer_type.star_token->id == TokenIdStar || + node->data.pointer_type.star_token->id == TokenIdStarStar)) ? 
PtrLenSingle : PtrLenUnknown; bool is_const = node->data.pointer_type.is_const; bool is_volatile = node->data.pointer_type.is_volatile; AstNode *expr_node = node->data.pointer_type.op_expr; @@ -4675,7 +4673,7 @@ static IrInstruction *ir_gen_pointer_type(IrBuilder *irb, Scope *scope, AstNode } return ir_build_ptr_type(irb, scope, node, child_type, is_const, is_volatile, - align_value, bit_offset_start, bit_offset_end); + ptr_len, align_value, bit_offset_start, bit_offset_end); } static IrInstruction *ir_gen_err_assert_ok(IrBuilder *irb, Scope *scope, AstNode *source_node, AstNode *expr_node, @@ -5172,7 +5170,7 @@ static IrInstruction *ir_gen_for_expr(IrBuilder *irb, Scope *parent_scope, AstNo ir_mark_gen(ir_build_cond_br(irb, child_scope, node, cond, body_block, else_block, is_comptime)); ir_set_cursor_at_end_and_append_block(irb, body_block); - IrInstruction *elem_ptr = ir_build_elem_ptr(irb, child_scope, node, array_val_ptr, index_val, false); + IrInstruction *elem_ptr = ir_build_elem_ptr(irb, child_scope, node, array_val_ptr, index_val, false, PtrLenSingle); IrInstruction *elem_val; if (node->data.for_expr.elem_is_ptr) { elem_val = elem_ptr; @@ -6811,9 +6809,13 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec ir_set_cursor_at_end_and_append_block(irb, irb->exec->coro_normal_final); if (type_has_bits(return_type)) { + IrInstruction *u8_ptr_type_unknown_len = ir_build_const_type(irb, scope, node, + get_pointer_to_type_extra(irb->codegen, irb->codegen->builtin_types.entry_u8, + false, false, PtrLenUnknown, get_abi_alignment(irb->codegen, irb->codegen->builtin_types.entry_u8), + 0, 0)); IrInstruction *result_ptr = ir_build_load_ptr(irb, scope, node, irb->exec->coro_result_ptr_field_ptr); - IrInstruction *result_ptr_as_u8_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type, result_ptr); - IrInstruction *return_value_ptr_as_u8_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type, + IrInstruction *result_ptr_as_u8_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type_unknown_len, result_ptr); + IrInstruction *return_value_ptr_as_u8_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type_unknown_len, irb->exec->coro_result_field_ptr); IrInstruction *return_type_inst = ir_build_const_type(irb, scope, node, fn_entry->type_entry->data.fn.fn_type_id.return_type); @@ -7691,6 +7693,7 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry // pointer const if (expected_type->id == TypeTableEntryIdPointer && actual_type->id == TypeTableEntryIdPointer && + (actual_type->data.pointer.ptr_len == expected_type->data.pointer.ptr_len) && (!actual_type->data.pointer.is_const || expected_type->data.pointer.is_const) && (!actual_type->data.pointer.is_volatile || expected_type->data.pointer.is_volatile) && actual_type->data.pointer.bit_offset == expected_type->data.pointer.bit_offset && @@ -8644,7 +8647,11 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod if (convert_to_const_slice) { assert(prev_inst->value.type->id == TypeTableEntryIdArray); - TypeTableEntry *ptr_type = get_pointer_to_type(ira->codegen, prev_inst->value.type->data.array.child_type, true); + TypeTableEntry *ptr_type = get_pointer_to_type_extra( + ira->codegen, prev_inst->value.type->data.array.child_type, + true, false, PtrLenUnknown, + get_abi_alignment(ira->codegen, prev_inst->value.type->data.array.child_type), + 0, 0); TypeTableEntry *slice_type = get_slice_type(ira->codegen, ptr_type); if (err_set_type != nullptr) { return 
get_error_union_type(ira->codegen, err_set_type, slice_type); @@ -8961,7 +8968,7 @@ static IrInstruction *ir_get_const_ptr(IrAnalyze *ira, IrInstruction *instructio ConstPtrMut ptr_mut, bool ptr_is_const, bool ptr_is_volatile, uint32_t ptr_align) { TypeTableEntry *ptr_type = get_pointer_to_type_extra(ira->codegen, pointee_type, - ptr_is_const, ptr_is_volatile, ptr_align, 0, 0); + ptr_is_const, ptr_is_volatile, PtrLenSingle, ptr_align, 0, 0); IrInstruction *const_instr = ir_get_const(ira, instruction); ConstExprValue *const_val = &const_instr->value; const_val->type = ptr_type; @@ -9302,7 +9309,7 @@ static IrInstruction *ir_get_ref(IrAnalyze *ira, IrInstruction *source_instructi } TypeTableEntry *ptr_type = get_pointer_to_type_extra(ira->codegen, value->value.type, - is_const, is_volatile, get_abi_alignment(ira->codegen, value->value.type), 0, 0); + is_const, is_volatile, PtrLenSingle, get_abi_alignment(ira->codegen, value->value.type), 0, 0); IrInstruction *new_instruction = ir_build_ref(&ira->new_irb, source_instruction->scope, source_instruction->source_node, value, is_const, is_volatile); new_instruction->value.type = ptr_type; @@ -10399,7 +10406,9 @@ static Buf *ir_resolve_str(IrAnalyze *ira, IrInstruction *value) { if (type_is_invalid(value->value.type)) return nullptr; - TypeTableEntry *ptr_type = get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, true); + TypeTableEntry *ptr_type = get_pointer_to_type_extra(ira->codegen, ira->codegen->builtin_types.entry_u8, + true, false, PtrLenUnknown, + get_abi_alignment(ira->codegen, ira->codegen->builtin_types.entry_u8), 0, 0); TypeTableEntry *str_type = get_slice_type(ira->codegen, ptr_type); IrInstruction *casted_value = ir_implicit_cast(ira, value, str_type); if (type_is_invalid(casted_value->value.type)) @@ -11054,11 +11063,27 @@ static TypeTableEntry *ir_analyze_bit_shift(IrAnalyze *ira, IrInstructionBinOp * static TypeTableEntry *ir_analyze_bin_op_math(IrAnalyze *ira, IrInstructionBinOp *bin_op_instruction) { IrInstruction *op1 = bin_op_instruction->op1->other; IrInstruction *op2 = bin_op_instruction->op2->other; + IrBinOp op_id = bin_op_instruction->op_id; + + // look for pointer math + if (op1->value.type->id == TypeTableEntryIdPointer && op1->value.type->data.pointer.ptr_len == PtrLenUnknown && + (op_id == IrBinOpAdd || op_id == IrBinOpSub)) + { + IrInstruction *casted_op2 = ir_implicit_cast(ira, op2, ira->codegen->builtin_types.entry_usize); + if (casted_op2 == ira->codegen->invalid_instruction) + return ira->codegen->builtin_types.entry_invalid; + + IrInstruction *result = ir_build_bin_op(&ira->new_irb, bin_op_instruction->base.scope, + bin_op_instruction->base.source_node, op_id, op1, casted_op2, true); + result->value.type = op1->value.type; + ir_link_new_instruction(result, &bin_op_instruction->base); + return result->value.type; + } + IrInstruction *instructions[] = {op1, op2}; TypeTableEntry *resolved_type = ir_resolve_peer_types(ira, bin_op_instruction->base.source_node, nullptr, instructions, 2); if (type_is_invalid(resolved_type)) return resolved_type; - IrBinOp op_id = bin_op_instruction->op_id; bool is_int = resolved_type->id == TypeTableEntryIdInt || resolved_type->id == TypeTableEntryIdNumLitInt; bool is_float = resolved_type->id == TypeTableEntryIdFloat || resolved_type->id == TypeTableEntryIdNumLitFloat; @@ -11331,7 +11356,8 @@ static TypeTableEntry *ir_analyze_array_cat(IrAnalyze *ira, IrInstructionBinOp * out_array_val = out_val; } else if (is_slice(op1_type) || is_slice(op2_type)) { - 
TypeTableEntry *ptr_type = get_pointer_to_type(ira->codegen, child_type, true); + TypeTableEntry *ptr_type = get_pointer_to_type_extra(ira->codegen, child_type, + true, false, PtrLenUnknown, get_abi_alignment(ira->codegen, child_type), 0, 0); result_type = get_slice_type(ira->codegen, ptr_type); out_array_val = create_const_vals(1); out_array_val->special = ConstValSpecialStatic; @@ -11351,7 +11377,9 @@ static TypeTableEntry *ir_analyze_array_cat(IrAnalyze *ira, IrInstructionBinOp * } else { new_len += 1; // null byte - result_type = get_pointer_to_type(ira->codegen, child_type, true); + // TODO make this `[*]null T` instead of `[*]T` + result_type = get_pointer_to_type_extra(ira->codegen, child_type, true, false, + PtrLenUnknown, get_abi_alignment(ira->codegen, child_type), 0, 0); out_array_val = create_const_vals(1); out_array_val->special = ConstValSpecialStatic; @@ -12173,7 +12201,7 @@ no_mem_slot: IrInstruction *var_ptr_instruction = ir_build_var_ptr(&ira->new_irb, instruction->scope, instruction->source_node, var); var_ptr_instruction->value.type = get_pointer_to_type_extra(ira->codegen, var->value->type, - var->src_is_const, is_volatile, var->align_bytes, 0, 0); + var->src_is_const, is_volatile, PtrLenSingle, var->align_bytes, 0, 0); type_ensure_zero_bits_known(ira->codegen, var->value->type); bool in_fn_scope = (scope_fn_entry(var->parent_scope) != nullptr); @@ -12352,7 +12380,9 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal IrInstruction *casted_new_stack = nullptr; if (call_instruction->new_stack != nullptr) { - TypeTableEntry *u8_ptr = get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, false); + TypeTableEntry *u8_ptr = get_pointer_to_type_extra(ira->codegen, ira->codegen->builtin_types.entry_u8, + false, false, PtrLenUnknown, + get_abi_alignment(ira->codegen, ira->codegen->builtin_types.entry_u8), 0, 0); TypeTableEntry *u8_slice = get_slice_type(ira->codegen, u8_ptr); IrInstruction *new_stack = call_instruction->new_stack->other; if (type_is_invalid(new_stack->value.type)) @@ -13112,10 +13142,21 @@ static TypeTableEntry *adjust_ptr_align(CodeGen *g, TypeTableEntry *ptr_type, ui return get_pointer_to_type_extra(g, ptr_type->data.pointer.child_type, ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile, + ptr_type->data.pointer.ptr_len, new_align, ptr_type->data.pointer.bit_offset, ptr_type->data.pointer.unaligned_bit_count); } +static TypeTableEntry *adjust_ptr_len(CodeGen *g, TypeTableEntry *ptr_type, PtrLen ptr_len) { + assert(ptr_type->id == TypeTableEntryIdPointer); + return get_pointer_to_type_extra(g, + ptr_type->data.pointer.child_type, + ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile, + ptr_len, + ptr_type->data.pointer.alignment, + ptr_type->data.pointer.bit_offset, ptr_type->data.pointer.unaligned_bit_count); +} + static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstructionElemPtr *elem_ptr_instruction) { IrInstruction *array_ptr = elem_ptr_instruction->array_ptr->other; if (type_is_invalid(array_ptr->value.type)) @@ -13146,6 +13187,7 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc if (ptr_type->data.pointer.unaligned_bit_count == 0) { return_type = get_pointer_to_type_extra(ira->codegen, child_type, ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile, + elem_ptr_instruction->ptr_len, ptr_type->data.pointer.alignment, 0, 0); } else { uint64_t elem_val_scalar; @@ -13157,12 +13199,19 @@ static 
TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc return_type = get_pointer_to_type_extra(ira->codegen, child_type, ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile, + elem_ptr_instruction->ptr_len, 1, (uint32_t)bit_offset, (uint32_t)bit_width); } } else if (array_type->id == TypeTableEntryIdPointer) { - return_type = array_type; + if (array_type->data.pointer.ptr_len == PtrLenSingle) { + ir_add_error_node(ira, elem_ptr_instruction->base.source_node, + buf_sprintf("indexing not allowed on pointer to single item")); + return ira->codegen->builtin_types.entry_invalid; + } + return_type = adjust_ptr_len(ira->codegen, array_type, elem_ptr_instruction->ptr_len); } else if (is_slice(array_type)) { - return_type = array_type->data.structure.fields[slice_ptr_index].type_entry; + return_type = adjust_ptr_len(ira->codegen, array_type->data.structure.fields[slice_ptr_index].type_entry, + elem_ptr_instruction->ptr_len); } else if (array_type->id == TypeTableEntryIdArgTuple) { ConstExprValue *ptr_val = ir_resolve_const(ira, array_ptr, UndefBad); if (!ptr_val) @@ -13304,8 +13353,10 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc } else if (is_slice(array_type)) { ConstExprValue *ptr_field = &array_ptr_val->data.x_struct.fields[slice_ptr_index]; if (ptr_field->data.x_ptr.special == ConstPtrSpecialHardCodedAddr) { - ir_build_elem_ptr_from(&ira->new_irb, &elem_ptr_instruction->base, array_ptr, - casted_elem_index, false); + IrInstruction *result = ir_build_elem_ptr(&ira->new_irb, elem_ptr_instruction->base.scope, elem_ptr_instruction->base.source_node, + array_ptr, casted_elem_index, false, elem_ptr_instruction->ptr_len); + result->value.type = return_type; + ir_link_new_instruction(result, &elem_ptr_instruction->base); return return_type; } ConstExprValue *len_field = &array_ptr_val->data.x_struct.fields[slice_len_index]; @@ -13373,8 +13424,10 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc } } - ir_build_elem_ptr_from(&ira->new_irb, &elem_ptr_instruction->base, array_ptr, - casted_elem_index, safety_check_on); + IrInstruction *result = ir_build_elem_ptr(&ira->new_irb, elem_ptr_instruction->base.scope, elem_ptr_instruction->base.source_node, + array_ptr, casted_elem_index, safety_check_on, elem_ptr_instruction->ptr_len); + result->value.type = return_type; + ir_link_new_instruction(result, &elem_ptr_instruction->base); return return_type; } @@ -13449,7 +13502,7 @@ static IrInstruction *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_ return ira->codegen->invalid_instruction; ConstExprValue *field_val = &struct_val->data.x_struct.fields[field->src_index]; TypeTableEntry *ptr_type = get_pointer_to_type_extra(ira->codegen, field_val->type, - is_const, is_volatile, align_bytes, + is_const, is_volatile, PtrLenSingle, align_bytes, (uint32_t)(ptr_bit_offset + field->packed_bits_offset), (uint32_t)unaligned_bit_count_for_result_type); IrInstruction *result = ir_get_const(ira, source_instr); @@ -13465,6 +13518,7 @@ static IrInstruction *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_ IrInstruction *result = ir_build_struct_field_ptr(&ira->new_irb, source_instr->scope, source_instr->source_node, container_ptr, field); result->value.type = get_pointer_to_type_extra(ira->codegen, field->type_entry, is_const, is_volatile, + PtrLenSingle, align_bytes, (uint32_t)(ptr_bit_offset + field->packed_bits_offset), (uint32_t)unaligned_bit_count_for_result_type); @@ -13511,7 +13565,9 @@ static 
IrInstruction *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_ payload_val->type = field_type; } - TypeTableEntry *ptr_type = get_pointer_to_type_extra(ira->codegen, field_type, is_const, is_volatile, + TypeTableEntry *ptr_type = get_pointer_to_type_extra(ira->codegen, field_type, + is_const, is_volatile, + PtrLenSingle, get_abi_alignment(ira->codegen, field_type), 0, 0); IrInstruction *result = ir_get_const(ira, source_instr); @@ -13526,7 +13582,7 @@ static IrInstruction *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_ IrInstruction *result = ir_build_union_field_ptr(&ira->new_irb, source_instr->scope, source_instr->source_node, container_ptr, field); result->value.type = get_pointer_to_type_extra(ira->codegen, field->type_entry, is_const, is_volatile, - get_abi_alignment(ira->codegen, field->type_entry), 0, 0); + PtrLenSingle, get_abi_alignment(ira->codegen, field->type_entry), 0, 0); return result; } else { return ir_analyze_container_member_access_inner(ira, bare_type, field_name, @@ -14119,7 +14175,7 @@ static TypeTableEntry *ir_analyze_instruction_to_ptr_type(IrAnalyze *ira, if (type_entry->id == TypeTableEntryIdArray) { ptr_type = get_pointer_to_type(ira->codegen, type_entry->data.array.child_type, false); } else if (is_slice(type_entry)) { - ptr_type = type_entry->data.structure.fields[0].type_entry; + ptr_type = adjust_ptr_len(ira->codegen, type_entry->data.structure.fields[0].type_entry, PtrLenSingle); } else if (type_entry->id == TypeTableEntryIdArgTuple) { ConstExprValue *arg_tuple_val = ir_resolve_const(ira, value, UndefBad); if (!arg_tuple_val) @@ -14367,7 +14423,7 @@ static TypeTableEntry *ir_analyze_instruction_slice_type(IrAnalyze *ira, { type_ensure_zero_bits_known(ira->codegen, child_type); TypeTableEntry *slice_ptr_type = get_pointer_to_type_extra(ira->codegen, child_type, - is_const, is_volatile, align_bytes, 0, 0); + is_const, is_volatile, PtrLenUnknown, align_bytes, 0, 0); TypeTableEntry *result_type = get_slice_type(ira->codegen, slice_ptr_type); ConstExprValue *out_val = ir_build_const_from(ira, &slice_type_instruction->base); out_val->data.x_type = result_type; @@ -14619,6 +14675,7 @@ static TypeTableEntry *ir_analyze_instruction_unwrap_maybe(IrAnalyze *ira, TypeTableEntry *child_type = type_entry->data.maybe.child_type; TypeTableEntry *result_type = get_pointer_to_type_extra(ira->codegen, child_type, ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile, + PtrLenSingle, get_abi_alignment(ira->codegen, child_type), 0, 0); if (instr_is_comptime(value)) { @@ -15566,7 +15623,8 @@ static TypeTableEntry *ir_analyze_instruction_err_name(IrAnalyze *ira, IrInstruc if (type_is_invalid(casted_value->value.type)) return ira->codegen->builtin_types.entry_invalid; - TypeTableEntry *u8_ptr_type = get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, true); + TypeTableEntry *u8_ptr_type = get_pointer_to_type_extra(ira->codegen, ira->codegen->builtin_types.entry_u8, + true, false, PtrLenUnknown, get_abi_alignment(ira->codegen, ira->codegen->builtin_types.entry_u8), 0, 0); TypeTableEntry *str_type = get_slice_type(ira->codegen, u8_ptr_type); if (casted_value->value.special == ConstValSpecialStatic) { ErrorTableEntry *err = casted_value->value.data.x_err_set; @@ -15607,7 +15665,11 @@ static TypeTableEntry *ir_analyze_instruction_enum_tag_name(IrAnalyze *ira, IrIn IrInstruction *result = ir_build_tag_name(&ira->new_irb, instruction->base.scope, instruction->base.source_node, target); ir_link_new_instruction(result, 
&instruction->base); - TypeTableEntry *u8_ptr_type = get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, true); + TypeTableEntry *u8_ptr_type = get_pointer_to_type_extra( + ira->codegen, ira->codegen->builtin_types.entry_u8, + true, false, PtrLenUnknown, + get_abi_alignment(ira->codegen, ira->codegen->builtin_types.entry_u8), + 0, 0); result->value.type = get_slice_type(ira->codegen, u8_ptr_type); return result->value.type; } @@ -15660,6 +15722,7 @@ static TypeTableEntry *ir_analyze_instruction_field_parent_ptr(IrAnalyze *ira, TypeTableEntry *field_ptr_type = get_pointer_to_type_extra(ira->codegen, field->type_entry, field_ptr->value.type->data.pointer.is_const, field_ptr->value.type->data.pointer.is_volatile, + PtrLenSingle, field_ptr_align, 0, 0); IrInstruction *casted_field_ptr = ir_implicit_cast(ira, field_ptr, field_ptr_type); if (type_is_invalid(casted_field_ptr->value.type)) @@ -15668,6 +15731,7 @@ static TypeTableEntry *ir_analyze_instruction_field_parent_ptr(IrAnalyze *ira, TypeTableEntry *result_type = get_pointer_to_type_extra(ira->codegen, container_type, casted_field_ptr->value.type->data.pointer.is_const, casted_field_ptr->value.type->data.pointer.is_volatile, + PtrLenSingle, parent_ptr_align, 0, 0); if (instr_is_comptime(casted_field_ptr)) { @@ -15983,11 +16047,13 @@ static bool ir_make_type_info_defs(IrAnalyze *ira, ConstExprValue *out_val, Scop // lib_name: ?[]const u8 ensure_field_index(fn_def_val->type, "lib_name", 6); fn_def_fields[6].special = ConstValSpecialStatic; - fn_def_fields[6].type = get_maybe_type(ira->codegen, - get_slice_type(ira->codegen, get_pointer_to_type(ira->codegen, - ira->codegen->builtin_types.entry_u8, true))); - if (fn_node->is_extern && buf_len(fn_node->lib_name) > 0) - { + TypeTableEntry *u8_ptr = get_pointer_to_type_extra( + ira->codegen, ira->codegen->builtin_types.entry_u8, + true, false, PtrLenUnknown, + get_abi_alignment(ira->codegen, ira->codegen->builtin_types.entry_u8), + 0, 0); + fn_def_fields[6].type = get_maybe_type(ira->codegen, get_slice_type(ira->codegen, u8_ptr)); + if (fn_node->is_extern && buf_len(fn_node->lib_name) > 0) { fn_def_fields[6].data.x_maybe = create_const_vals(1); ConstExprValue *lib_name = create_const_str_lit(ira->codegen, fn_node->lib_name); init_const_slice(ira->codegen, fn_def_fields[6].data.x_maybe, lib_name, 0, buf_len(fn_node->lib_name), true); @@ -16009,8 +16075,8 @@ static bool ir_make_type_info_defs(IrAnalyze *ira, ConstExprValue *out_val, Scop size_t fn_arg_count = fn_entry->variable_list.length; ConstExprValue *fn_arg_name_array = create_const_vals(1); fn_arg_name_array->special = ConstValSpecialStatic; - fn_arg_name_array->type = get_array_type(ira->codegen, get_slice_type(ira->codegen, - get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, true)), fn_arg_count); + fn_arg_name_array->type = get_array_type(ira->codegen, + get_slice_type(ira->codegen, u8_ptr), fn_arg_count); fn_arg_name_array->data.x_array.special = ConstArraySpecialNone; fn_arg_name_array->data.x_array.s_none.parent.id = ConstParentIdNone; fn_arg_name_array->data.x_array.s_none.elements = create_const_vals(fn_arg_count); @@ -17088,7 +17154,8 @@ static TypeTableEntry *ir_analyze_instruction_memset(IrAnalyze *ira, IrInstructi TypeTableEntry *u8 = ira->codegen->builtin_types.entry_u8; uint32_t dest_align = (dest_uncasted_type->id == TypeTableEntryIdPointer) ? 
dest_uncasted_type->data.pointer.alignment : get_abi_alignment(ira->codegen, u8); - TypeTableEntry *u8_ptr = get_pointer_to_type_extra(ira->codegen, u8, false, dest_is_volatile, dest_align, 0, 0); + TypeTableEntry *u8_ptr = get_pointer_to_type_extra(ira->codegen, u8, false, dest_is_volatile, + PtrLenUnknown, dest_align, 0, 0); IrInstruction *casted_dest_ptr = ir_implicit_cast(ira, dest_ptr, u8_ptr); if (type_is_invalid(casted_dest_ptr->value.type)) @@ -17184,8 +17251,10 @@ static TypeTableEntry *ir_analyze_instruction_memcpy(IrAnalyze *ira, IrInstructi src_uncasted_type->data.pointer.alignment : get_abi_alignment(ira->codegen, u8); TypeTableEntry *usize = ira->codegen->builtin_types.entry_usize; - TypeTableEntry *u8_ptr_mut = get_pointer_to_type_extra(ira->codegen, u8, false, dest_is_volatile, dest_align, 0, 0); - TypeTableEntry *u8_ptr_const = get_pointer_to_type_extra(ira->codegen, u8, true, src_is_volatile, src_align, 0, 0); + TypeTableEntry *u8_ptr_mut = get_pointer_to_type_extra(ira->codegen, u8, false, dest_is_volatile, + PtrLenUnknown, dest_align, 0, 0); + TypeTableEntry *u8_ptr_const = get_pointer_to_type_extra(ira->codegen, u8, true, src_is_volatile, + PtrLenUnknown, src_align, 0, 0); IrInstruction *casted_dest_ptr = ir_implicit_cast(ira, dest_ptr, u8_ptr_mut); if (type_is_invalid(casted_dest_ptr->value.type)) @@ -17333,11 +17402,13 @@ static TypeTableEntry *ir_analyze_instruction_slice(IrAnalyze *ira, IrInstructio TypeTableEntry *slice_ptr_type = get_pointer_to_type_extra(ira->codegen, array_type->data.array.child_type, ptr_type->data.pointer.is_const || is_comptime_const, ptr_type->data.pointer.is_volatile, + PtrLenUnknown, byte_alignment, 0, 0); return_type = get_slice_type(ira->codegen, slice_ptr_type); } else if (array_type->id == TypeTableEntryIdPointer) { TypeTableEntry *slice_ptr_type = get_pointer_to_type_extra(ira->codegen, array_type->data.pointer.child_type, array_type->data.pointer.is_const, array_type->data.pointer.is_volatile, + PtrLenUnknown, array_type->data.pointer.alignment, 0, 0); return_type = get_slice_type(ira->codegen, slice_ptr_type); if (!end) { @@ -17774,6 +17845,7 @@ static TypeTableEntry *ir_analyze_instruction_overflow_op(IrAnalyze *ira, IrInst if (result_ptr->value.type->id == TypeTableEntryIdPointer) { expected_ptr_type = get_pointer_to_type_extra(ira->codegen, dest_type, false, result_ptr->value.type->data.pointer.is_volatile, + PtrLenSingle, result_ptr->value.type->data.pointer.alignment, 0, 0); } else { expected_ptr_type = get_pointer_to_type(ira->codegen, dest_type, false); @@ -17929,6 +18001,7 @@ static TypeTableEntry *ir_analyze_instruction_unwrap_err_payload(IrAnalyze *ira, TypeTableEntry *payload_type = type_entry->data.error_union.payload_type; TypeTableEntry *result_type = get_pointer_to_type_extra(ira->codegen, payload_type, ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile, + PtrLenSingle, get_abi_alignment(ira->codegen, payload_type), 0, 0); if (instr_is_comptime(value)) { ConstExprValue *ptr_val = ir_resolve_const(ira, value, UndefBad); @@ -18270,7 +18343,8 @@ static TypeTableEntry *ir_analyze_instruction_panic(IrAnalyze *ira, IrInstructio return ir_unreach_error(ira); } - TypeTableEntry *u8_ptr_type = get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, true); + TypeTableEntry *u8_ptr_type = get_pointer_to_type_extra(ira->codegen, ira->codegen->builtin_types.entry_u8, + true, false, PtrLenUnknown, get_abi_alignment(ira->codegen, ira->codegen->builtin_types.entry_u8), 0, 0); TypeTableEntry *str_type 
= get_slice_type(ira->codegen, u8_ptr_type); IrInstruction *casted_msg = ir_implicit_cast(ira, msg, str_type); if (type_is_invalid(casted_msg->value.type)) @@ -18801,7 +18875,8 @@ static TypeTableEntry *ir_analyze_instruction_ptr_type(IrAnalyze *ira, IrInstruc ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base); out_val->data.x_type = get_pointer_to_type_extra(ira->codegen, child_type, - instruction->is_const, instruction->is_volatile, align_bytes, + instruction->is_const, instruction->is_volatile, + instruction->ptr_len, align_bytes, instruction->bit_offset_start, instruction->bit_offset_end - instruction->bit_offset_start); return ira->codegen->builtin_types.entry_type; diff --git a/src/parser.cpp b/src/parser.cpp index 6c900c3bfa..3ad2de906b 100644 --- a/src/parser.cpp +++ b/src/parser.cpp @@ -1225,6 +1225,7 @@ static AstNode *ast_parse_prefix_op_expr(ParseContext *pc, size_t *token_index, AstNode *child_node = ast_parse_pointer_type(pc, token_index, token); child_node->column += 1; AstNode *parent_node = ast_create_node(pc, NodeTypePointerType, token); + parent_node->data.pointer_type.star_token = token; parent_node->data.pointer_type.op_expr = child_node; return parent_node; } diff --git a/std/buffer.zig b/std/buffer.zig index 305746e183..3b2936d223 100644 --- a/std/buffer.zig +++ b/std/buffer.zig @@ -122,7 +122,7 @@ pub const Buffer = struct { } /// For passing to C functions. - pub fn ptr(self: *const Buffer) *u8 { + pub fn ptr(self: *const Buffer) [*]u8 { return self.list.items.ptr; } }; diff --git a/std/c/darwin.zig b/std/c/darwin.zig index 69395e6b27..e3b53d9bea 100644 --- a/std/c/darwin.zig +++ b/std/c/darwin.zig @@ -1,7 +1,7 @@ extern "c" fn __error() *c_int; -pub extern "c" fn _NSGetExecutablePath(buf: *u8, bufsize: *u32) c_int; +pub extern "c" fn _NSGetExecutablePath(buf: [*]u8, bufsize: *u32) c_int; -pub extern "c" fn __getdirentries64(fd: c_int, buf_ptr: *u8, buf_len: usize, basep: *i64) usize; +pub extern "c" fn __getdirentries64(fd: c_int, buf_ptr: [*]u8, buf_len: usize, basep: *i64) usize; pub extern "c" fn mach_absolute_time() u64; pub extern "c" fn mach_timebase_info(tinfo: ?*mach_timebase_info_data) void; diff --git a/std/c/index.zig b/std/c/index.zig index 114b79cdae..ade37f36c1 100644 --- a/std/c/index.zig +++ b/std/c/index.zig @@ -9,6 +9,8 @@ pub use switch (builtin.os) { }; const empty_import = @import("../empty.zig"); +// TODO https://github.com/ziglang/zig/issues/265 on this whole file + pub extern "c" fn abort() noreturn; pub extern "c" fn exit(code: c_int) noreturn; pub extern "c" fn isatty(fd: c_int) c_int; @@ -16,45 +18,45 @@ pub extern "c" fn close(fd: c_int) c_int; pub extern "c" fn fstat(fd: c_int, buf: *Stat) c_int; pub extern "c" fn @"fstat$INODE64"(fd: c_int, buf: *Stat) c_int; pub extern "c" fn lseek(fd: c_int, offset: isize, whence: c_int) isize; -pub extern "c" fn open(path: *const u8, oflag: c_int, ...) c_int; +pub extern "c" fn open(path: [*]const u8, oflag: c_int, ...) 
c_int; pub extern "c" fn raise(sig: c_int) c_int; -pub extern "c" fn read(fd: c_int, buf: *c_void, nbyte: usize) isize; -pub extern "c" fn stat(noalias path: *const u8, noalias buf: *Stat) c_int; -pub extern "c" fn write(fd: c_int, buf: *const c_void, nbyte: usize) isize; -pub extern "c" fn mmap(addr: ?*c_void, len: usize, prot: c_int, flags: c_int, fd: c_int, offset: isize) ?*c_void; -pub extern "c" fn munmap(addr: *c_void, len: usize) c_int; -pub extern "c" fn unlink(path: *const u8) c_int; -pub extern "c" fn getcwd(buf: *u8, size: usize) ?*u8; +pub extern "c" fn read(fd: c_int, buf: [*]c_void, nbyte: usize) isize; +pub extern "c" fn stat(noalias path: [*]const u8, noalias buf: *Stat) c_int; +pub extern "c" fn write(fd: c_int, buf: [*]const c_void, nbyte: usize) isize; +pub extern "c" fn mmap(addr: ?[*]c_void, len: usize, prot: c_int, flags: c_int, fd: c_int, offset: isize) ?[*]c_void; +pub extern "c" fn munmap(addr: [*]c_void, len: usize) c_int; +pub extern "c" fn unlink(path: [*]const u8) c_int; +pub extern "c" fn getcwd(buf: [*]u8, size: usize) ?[*]u8; pub extern "c" fn waitpid(pid: c_int, stat_loc: *c_int, options: c_int) c_int; pub extern "c" fn fork() c_int; -pub extern "c" fn access(path: *const u8, mode: c_uint) c_int; -pub extern "c" fn pipe(fds: *c_int) c_int; -pub extern "c" fn mkdir(path: *const u8, mode: c_uint) c_int; -pub extern "c" fn symlink(existing: *const u8, new: *const u8) c_int; -pub extern "c" fn rename(old: *const u8, new: *const u8) c_int; -pub extern "c" fn chdir(path: *const u8) c_int; -pub extern "c" fn execve(path: *const u8, argv: *const ?*const u8, envp: *const ?*const u8) c_int; +pub extern "c" fn access(path: [*]const u8, mode: c_uint) c_int; +pub extern "c" fn pipe(fds: *[2]c_int) c_int; +pub extern "c" fn mkdir(path: [*]const u8, mode: c_uint) c_int; +pub extern "c" fn symlink(existing: [*]const u8, new: [*]const u8) c_int; +pub extern "c" fn rename(old: [*]const u8, new: [*]const u8) c_int; +pub extern "c" fn chdir(path: [*]const u8) c_int; +pub extern "c" fn execve(path: [*]const u8, argv: [*]const ?[*]const u8, envp: [*]const ?[*]const u8) c_int; pub extern "c" fn dup(fd: c_int) c_int; pub extern "c" fn dup2(old_fd: c_int, new_fd: c_int) c_int; -pub extern "c" fn readlink(noalias path: *const u8, noalias buf: *u8, bufsize: usize) isize; -pub extern "c" fn realpath(noalias file_name: *const u8, noalias resolved_name: *u8) ?*u8; +pub extern "c" fn readlink(noalias path: [*]const u8, noalias buf: [*]u8, bufsize: usize) isize; +pub extern "c" fn realpath(noalias file_name: [*]const u8, noalias resolved_name: [*]u8) ?[*]u8; pub extern "c" fn sigprocmask(how: c_int, noalias set: *const sigset_t, noalias oset: ?*sigset_t) c_int; pub extern "c" fn gettimeofday(tv: ?*timeval, tz: ?*timezone) c_int; pub extern "c" fn sigaction(sig: c_int, noalias act: *const Sigaction, noalias oact: ?*Sigaction) c_int; pub extern "c" fn nanosleep(rqtp: *const timespec, rmtp: ?*timespec) c_int; pub extern "c" fn setreuid(ruid: c_uint, euid: c_uint) c_int; pub extern "c" fn setregid(rgid: c_uint, egid: c_uint) c_int; -pub extern "c" fn rmdir(path: *const u8) c_int; +pub extern "c" fn rmdir(path: [*]const u8) c_int; -pub extern "c" fn aligned_alloc(alignment: usize, size: usize) ?*c_void; -pub extern "c" fn malloc(usize) ?*c_void; -pub extern "c" fn realloc(*c_void, usize) ?*c_void; -pub extern "c" fn free(*c_void) void; -pub extern "c" fn posix_memalign(memptr: **c_void, alignment: usize, size: usize) c_int; +pub extern "c" fn aligned_alloc(alignment: usize, size: usize) 
?[*]c_void; +pub extern "c" fn malloc(usize) ?[*]c_void; +pub extern "c" fn realloc([*]c_void, usize) ?[*]c_void; +pub extern "c" fn free([*]c_void) void; +pub extern "c" fn posix_memalign(memptr: *[*]c_void, alignment: usize, size: usize) c_int; pub extern "pthread" fn pthread_create(noalias newthread: *pthread_t, noalias attr: ?*const pthread_attr_t, start_routine: extern fn (?*c_void) ?*c_void, noalias arg: ?*c_void) c_int; pub extern "pthread" fn pthread_attr_init(attr: *pthread_attr_t) c_int; -pub extern "pthread" fn pthread_attr_setstack(attr: *pthread_attr_t, stackaddr: *c_void, stacksize: usize) c_int; +pub extern "pthread" fn pthread_attr_setstack(attr: *pthread_attr_t, stackaddr: [*]c_void, stacksize: usize) c_int; pub extern "pthread" fn pthread_attr_destroy(attr: *pthread_attr_t) c_int; pub extern "pthread" fn pthread_join(thread: pthread_t, arg_return: ?*?*c_void) c_int; diff --git a/std/c/linux.zig b/std/c/linux.zig index 0ab043533e..2699e9bd09 100644 --- a/std/c/linux.zig +++ b/std/c/linux.zig @@ -1,6 +1,6 @@ pub use @import("../os/linux/errno.zig"); -pub extern "c" fn getrandom(buf_ptr: *u8, buf_len: usize, flags: c_uint) c_int; +pub extern "c" fn getrandom(buf_ptr: [*]u8, buf_len: usize, flags: c_uint) c_int; extern "c" fn __errno_location() *c_int; pub const _errno = __errno_location; diff --git a/std/cstr.zig b/std/cstr.zig index d60adf8faa..d9106769c1 100644 --- a/std/cstr.zig +++ b/std/cstr.zig @@ -57,7 +57,7 @@ pub fn addNullByte(allocator: *mem.Allocator, slice: []const u8) ![]u8 { pub const NullTerminated2DArray = struct { allocator: *mem.Allocator, byte_count: usize, - ptr: ?*?*u8, + ptr: ?[*]?[*]u8, /// Takes N lists of strings, concatenates the lists together, and adds a null terminator /// Caller must deinit result @@ -79,12 +79,12 @@ pub const NullTerminated2DArray = struct { errdefer allocator.free(buf); var write_index = index_size; - const index_buf = ([]?*u8)(buf); + const index_buf = ([]?[*]u8)(buf); var i: usize = 0; for (slices) |slice| { for (slice) |inner| { - index_buf[i] = &buf[write_index]; + index_buf[i] = buf.ptr + write_index; i += 1; mem.copy(u8, buf[write_index..], inner); write_index += inner.len; @@ -97,12 +97,12 @@ pub const NullTerminated2DArray = struct { return NullTerminated2DArray{ .allocator = allocator, .byte_count = byte_count, - .ptr = @ptrCast(?*?*u8, buf.ptr), + .ptr = @ptrCast(?[*]?[*]u8, buf.ptr), }; } pub fn deinit(self: *NullTerminated2DArray) void { - const buf = @ptrCast(*u8, self.ptr); + const buf = @ptrCast([*]u8, self.ptr); self.allocator.free(buf[0..self.byte_count]); } }; diff --git a/std/heap.zig b/std/heap.zig index d15a99a757..0b8f4aeb3f 100644 --- a/std/heap.zig +++ b/std/heap.zig @@ -18,11 +18,11 @@ var c_allocator_state = Allocator{ fn cAlloc(self: *Allocator, n: usize, alignment: u29) ![]u8 { assert(alignment <= @alignOf(c_longdouble)); - return if (c.malloc(n)) |buf| @ptrCast(*u8, buf)[0..n] else error.OutOfMemory; + return if (c.malloc(n)) |buf| @ptrCast([*]u8, buf)[0..n] else error.OutOfMemory; } fn cRealloc(self: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 { - const old_ptr = @ptrCast(*c_void, old_mem.ptr); + const old_ptr = @ptrCast([*]c_void, old_mem.ptr); if (c.realloc(old_ptr, new_size)) |buf| { return @ptrCast(*u8, buf)[0..new_size]; } else if (new_size <= old_mem.len) { @@ -33,7 +33,7 @@ fn cRealloc(self: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![ } fn cFree(self: *Allocator, old_mem: []u8) void { - const old_ptr = @ptrCast(*c_void, old_mem.ptr); + const old_ptr 
= @ptrCast([*]c_void, old_mem.ptr); c.free(old_ptr); } @@ -74,7 +74,7 @@ pub const DirectAllocator = struct { const addr = p.mmap(null, alloc_size, p.PROT_READ | p.PROT_WRITE, p.MAP_PRIVATE | p.MAP_ANONYMOUS, -1, 0); if (addr == p.MAP_FAILED) return error.OutOfMemory; - if (alloc_size == n) return @intToPtr(*u8, addr)[0..n]; + if (alloc_size == n) return @intToPtr([*]u8, addr)[0..n]; var aligned_addr = addr & ~usize(alignment - 1); aligned_addr += alignment; @@ -93,7 +93,7 @@ pub const DirectAllocator = struct { //It is impossible that there is an unoccupied page at the top of our // mmap. - return @intToPtr(*u8, aligned_addr)[0..n]; + return @intToPtr([*]u8, aligned_addr)[0..n]; }, Os.windows => { const amt = n + alignment + @sizeOf(usize); @@ -109,7 +109,7 @@ pub const DirectAllocator = struct { const adjusted_addr = root_addr + march_forward_bytes; const record_addr = adjusted_addr + n; @intToPtr(*align(1) usize, record_addr).* = root_addr; - return @intToPtr(*u8, adjusted_addr)[0..n]; + return @intToPtr([*]u8, adjusted_addr)[0..n]; }, else => @compileError("Unsupported OS"), } @@ -140,7 +140,7 @@ pub const DirectAllocator = struct { const old_adjusted_addr = @ptrToInt(old_mem.ptr); const old_record_addr = old_adjusted_addr + old_mem.len; const root_addr = @intToPtr(*align(1) usize, old_record_addr).*; - const old_ptr = @intToPtr(os.windows.LPVOID, root_addr); + const old_ptr = @intToPtr([*]c_void, root_addr); const amt = new_size + alignment + @sizeOf(usize); const new_ptr = os.windows.HeapReAlloc(??self.heap_handle, 0, old_ptr, amt) ?? blk: { if (new_size > old_mem.len) return error.OutOfMemory; @@ -154,7 +154,7 @@ pub const DirectAllocator = struct { assert(new_adjusted_addr % alignment == 0); const new_record_addr = new_adjusted_addr + new_size; @intToPtr(*align(1) usize, new_record_addr).* = new_root_addr; - return @intToPtr(*u8, new_adjusted_addr)[0..new_size]; + return @intToPtr([*]u8, new_adjusted_addr)[0..new_size]; }, else => @compileError("Unsupported OS"), } @@ -170,7 +170,7 @@ pub const DirectAllocator = struct { Os.windows => { const record_addr = @ptrToInt(bytes.ptr) + bytes.len; const root_addr = @intToPtr(*align(1) usize, record_addr).*; - const ptr = @intToPtr(os.windows.LPVOID, root_addr); + const ptr = @intToPtr([*]c_void, root_addr); _ = os.windows.HeapFree(??self.heap_handle, 0, ptr); }, else => @compileError("Unsupported OS"), diff --git a/std/os/child_process.zig b/std/os/child_process.zig index 0e80ae09c1..822ade2eb8 100644 --- a/std/os/child_process.zig +++ b/std/os/child_process.zig @@ -639,7 +639,7 @@ pub const ChildProcess = struct { } }; -fn windowsCreateProcess(app_name: *u8, cmd_line: *u8, envp_ptr: ?*u8, cwd_ptr: ?*u8, lpStartupInfo: *windows.STARTUPINFOA, lpProcessInformation: *windows.PROCESS_INFORMATION) !void { +fn windowsCreateProcess(app_name: [*]u8, cmd_line: [*]u8, envp_ptr: ?[*]u8, cwd_ptr: ?[*]u8, lpStartupInfo: *windows.STARTUPINFOA, lpProcessInformation: *windows.PROCESS_INFORMATION) !void { if (windows.CreateProcessA(app_name, cmd_line, null, null, windows.TRUE, 0, @ptrCast(?*c_void, envp_ptr), cwd_ptr, lpStartupInfo, lpProcessInformation) == 0) { const err = windows.GetLastError(); return switch (err) { diff --git a/std/os/darwin.zig b/std/os/darwin.zig index 77e8b6bb6a..b8e18561cc 100644 --- a/std/os/darwin.zig +++ b/std/os/darwin.zig @@ -317,7 +317,8 @@ pub fn lseek(fd: i32, offset: isize, whence: c_int) usize { return errnoWrap(c.lseek(fd, offset, whence)); } -pub fn open(path: *const u8, flags: u32, mode: usize) usize { +// TODO 
https://github.com/ziglang/zig/issues/265 on the whole file +pub fn open(path: [*]const u8, flags: u32, mode: usize) usize { return errnoWrap(c.open(path, @bitCast(c_int, flags), mode)); } @@ -325,33 +326,33 @@ pub fn raise(sig: i32) usize { return errnoWrap(c.raise(sig)); } -pub fn read(fd: i32, buf: *u8, nbyte: usize) usize { - return errnoWrap(c.read(fd, @ptrCast(*c_void, buf), nbyte)); +pub fn read(fd: i32, buf: [*]u8, nbyte: usize) usize { + return errnoWrap(c.read(fd, @ptrCast([*]c_void, buf), nbyte)); } -pub fn stat(noalias path: *const u8, noalias buf: *stat) usize { +pub fn stat(noalias path: [*]const u8, noalias buf: *stat) usize { return errnoWrap(c.stat(path, buf)); } -pub fn write(fd: i32, buf: *const u8, nbyte: usize) usize { - return errnoWrap(c.write(fd, @ptrCast(*const c_void, buf), nbyte)); +pub fn write(fd: i32, buf: [*]const u8, nbyte: usize) usize { + return errnoWrap(c.write(fd, @ptrCast([*]const c_void, buf), nbyte)); } -pub fn mmap(address: ?*u8, length: usize, prot: usize, flags: u32, fd: i32, offset: isize) usize { - const ptr_result = c.mmap(@ptrCast(*c_void, address), length, @bitCast(c_int, c_uint(prot)), @bitCast(c_int, c_uint(flags)), fd, offset); +pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: u32, fd: i32, offset: isize) usize { + const ptr_result = c.mmap(@ptrCast([*]c_void, address), length, @bitCast(c_int, c_uint(prot)), @bitCast(c_int, c_uint(flags)), fd, offset); const isize_result = @bitCast(isize, @ptrToInt(ptr_result)); return errnoWrap(isize_result); } pub fn munmap(address: usize, length: usize) usize { - return errnoWrap(c.munmap(@intToPtr(*c_void, address), length)); + return errnoWrap(c.munmap(@intToPtr([*]c_void, address), length)); } -pub fn unlink(path: *const u8) usize { +pub fn unlink(path: [*]const u8) usize { return errnoWrap(c.unlink(path)); } -pub fn getcwd(buf: *u8, size: usize) usize { +pub fn getcwd(buf: [*]u8, size: usize) usize { return if (c.getcwd(buf, size) == null) @bitCast(usize, -isize(c._errno().*)) else 0; } @@ -364,40 +365,40 @@ pub fn fork() usize { return errnoWrap(c.fork()); } -pub fn access(path: *const u8, mode: u32) usize { +pub fn access(path: [*]const u8, mode: u32) usize { return errnoWrap(c.access(path, mode)); } pub fn pipe(fds: *[2]i32) usize { comptime assert(i32.bit_count == c_int.bit_count); - return errnoWrap(c.pipe(@ptrCast(*c_int, fds))); + return errnoWrap(c.pipe(@ptrCast(*[2]c_int, fds))); } -pub fn getdirentries64(fd: i32, buf_ptr: *u8, buf_len: usize, basep: *i64) usize { +pub fn getdirentries64(fd: i32, buf_ptr: [*]u8, buf_len: usize, basep: *i64) usize { return errnoWrap(@bitCast(isize, c.__getdirentries64(fd, buf_ptr, buf_len, basep))); } -pub fn mkdir(path: *const u8, mode: u32) usize { +pub fn mkdir(path: [*]const u8, mode: u32) usize { return errnoWrap(c.mkdir(path, mode)); } -pub fn symlink(existing: *const u8, new: *const u8) usize { +pub fn symlink(existing: [*]const u8, new: [*]const u8) usize { return errnoWrap(c.symlink(existing, new)); } -pub fn rename(old: *const u8, new: *const u8) usize { +pub fn rename(old: [*]const u8, new: [*]const u8) usize { return errnoWrap(c.rename(old, new)); } -pub fn rmdir(path: *const u8) usize { +pub fn rmdir(path: [*]const u8) usize { return errnoWrap(c.rmdir(path)); } -pub fn chdir(path: *const u8) usize { +pub fn chdir(path: [*]const u8) usize { return errnoWrap(c.chdir(path)); } -pub fn execve(path: *const u8, argv: *const ?*const u8, envp: *const ?*const u8) usize { +pub fn execve(path: [*]const u8, argv: [*]const ?[*]const u8, envp: 
[*]const ?[*]const u8) usize { return errnoWrap(c.execve(path, argv, envp)); } @@ -405,7 +406,7 @@ pub fn dup2(old: i32, new: i32) usize { return errnoWrap(c.dup2(old, new)); } -pub fn readlink(noalias path: *const u8, noalias buf_ptr: *u8, buf_len: usize) usize { +pub fn readlink(noalias path: [*]const u8, noalias buf_ptr: [*]u8, buf_len: usize) usize { return errnoWrap(c.readlink(path, buf_ptr, buf_len)); } @@ -417,7 +418,7 @@ pub fn nanosleep(req: *const timespec, rem: ?*timespec) usize { return errnoWrap(c.nanosleep(req, rem)); } -pub fn realpath(noalias filename: *const u8, noalias resolved_name: *u8) usize { +pub fn realpath(noalias filename: [*]const u8, noalias resolved_name: [*]u8) usize { return if (c.realpath(filename, resolved_name) == null) @bitCast(usize, -isize(c._errno().*)) else 0; } diff --git a/std/os/file.zig b/std/os/file.zig index d943da30ca..378782507b 100644 --- a/std/os/file.zig +++ b/std/os/file.zig @@ -313,7 +313,7 @@ pub const File = struct { if (is_posix) { var index: usize = 0; while (index < buffer.len) { - const amt_read = posix.read(self.handle, &buffer[index], buffer.len - index); + const amt_read = posix.read(self.handle, buffer.ptr + index, buffer.len - index); const read_err = posix.getErrno(amt_read); if (read_err > 0) { switch (read_err) { @@ -334,7 +334,7 @@ pub const File = struct { while (index < buffer.len) { const want_read_count = windows.DWORD(math.min(windows.DWORD(@maxValue(windows.DWORD)), buffer.len - index)); var amt_read: windows.DWORD = undefined; - if (windows.ReadFile(self.handle, @ptrCast(*c_void, &buffer[index]), want_read_count, &amt_read, null) == 0) { + if (windows.ReadFile(self.handle, @ptrCast([*]c_void, buffer.ptr + index), want_read_count, &amt_read, null) == 0) { const err = windows.GetLastError(); return switch (err) { windows.ERROR.OPERATION_ABORTED => continue, diff --git a/std/os/index.zig b/std/os/index.zig index ff638c670b..7e908af9eb 100644 --- a/std/os/index.zig +++ b/std/os/index.zig @@ -134,20 +134,7 @@ pub fn getRandomBytes(buf: []u8) !void { } }, Os.zen => { - const randomness = []u8{ - 42, - 1, - 7, - 12, - 22, - 17, - 99, - 16, - 26, - 87, - 41, - 45, - }; + const randomness = []u8{ 42, 1, 7, 12, 22, 17, 99, 16, 26, 87, 41, 45 }; var i: usize = 0; while (i < buf.len) : (i += 1) { if (i > randomness.len) return error.Unknown; @@ -238,7 +225,7 @@ pub fn posixRead(fd: i32, buf: []u8) !void { var index: usize = 0; while (index < buf.len) { const want_to_read = math.min(buf.len - index, usize(max_buf_len)); - const rc = posix.read(fd, &buf[index], want_to_read); + const rc = posix.read(fd, buf.ptr + index, want_to_read); const err = posix.getErrno(rc); if (err > 0) { return switch (err) { @@ -278,7 +265,7 @@ pub fn posixWrite(fd: i32, bytes: []const u8) !void { var index: usize = 0; while (index < bytes.len) { const amt_to_write = math.min(bytes.len - index, usize(max_bytes_len)); - const rc = posix.write(fd, &bytes[index], amt_to_write); + const rc = posix.write(fd, bytes.ptr + index, amt_to_write); const write_err = posix.getErrno(rc); if (write_err > 0) { return switch (write_err) { @@ -328,7 +315,8 @@ pub fn posixOpen(allocator: *Allocator, file_path: []const u8, flags: u32, perm: return posixOpenC(path_with_null.ptr, flags, perm); } -pub fn posixOpenC(file_path: *const u8, flags: u32, perm: usize) !i32 { +// TODO https://github.com/ziglang/zig/issues/265 +pub fn posixOpenC(file_path: [*]const u8, flags: u32, perm: usize) !i32 { while (true) { const result = posix.open(file_path, flags, perm); const err = 
posix.getErrno(result); @@ -374,19 +362,19 @@ pub fn posixDup2(old_fd: i32, new_fd: i32) !void { } } -pub fn createNullDelimitedEnvMap(allocator: *Allocator, env_map: *const BufMap) ![]?*u8 { +pub fn createNullDelimitedEnvMap(allocator: *Allocator, env_map: *const BufMap) ![]?[*]u8 { const envp_count = env_map.count(); - const envp_buf = try allocator.alloc(?*u8, envp_count + 1); - mem.set(?*u8, envp_buf, null); + const envp_buf = try allocator.alloc(?[*]u8, envp_count + 1); + mem.set(?[*]u8, envp_buf, null); errdefer freeNullDelimitedEnvMap(allocator, envp_buf); { var it = env_map.iterator(); var i: usize = 0; while (it.next()) |pair| : (i += 1) { const env_buf = try allocator.alloc(u8, pair.key.len + pair.value.len + 2); - @memcpy(&env_buf[0], pair.key.ptr, pair.key.len); + @memcpy(env_buf.ptr, pair.key.ptr, pair.key.len); env_buf[pair.key.len] = '='; - @memcpy(&env_buf[pair.key.len + 1], pair.value.ptr, pair.value.len); + @memcpy(env_buf.ptr + pair.key.len + 1, pair.value.ptr, pair.value.len); env_buf[env_buf.len - 1] = 0; envp_buf[i] = env_buf.ptr; @@ -397,7 +385,7 @@ pub fn createNullDelimitedEnvMap(allocator: *Allocator, env_map: *const BufMap) return envp_buf; } -pub fn freeNullDelimitedEnvMap(allocator: *Allocator, envp_buf: []?*u8) void { +pub fn freeNullDelimitedEnvMap(allocator: *Allocator, envp_buf: []?[*]u8) void { for (envp_buf) |env| { const env_buf = if (env) |ptr| ptr[0 .. cstr.len(ptr) + 1] else break; allocator.free(env_buf); @@ -411,8 +399,8 @@ pub fn freeNullDelimitedEnvMap(allocator: *Allocator, envp_buf: []?*u8) void { /// `argv[0]` is the executable path. /// This function also uses the PATH environment variable to get the full path to the executable. pub fn posixExecve(argv: []const []const u8, env_map: *const BufMap, allocator: *Allocator) !void { - const argv_buf = try allocator.alloc(?*u8, argv.len + 1); - mem.set(?*u8, argv_buf, null); + const argv_buf = try allocator.alloc(?[*]u8, argv.len + 1); + mem.set(?[*]u8, argv_buf, null); defer { for (argv_buf) |arg| { const arg_buf = if (arg) |ptr| cstr.toSlice(ptr) else break; @@ -422,7 +410,7 @@ pub fn posixExecve(argv: []const []const u8, env_map: *const BufMap, allocator: } for (argv) |arg, i| { const arg_buf = try allocator.alloc(u8, arg.len + 1); - @memcpy(&arg_buf[0], arg.ptr, arg.len); + @memcpy(arg_buf.ptr, arg.ptr, arg.len); arg_buf[arg.len] = 0; argv_buf[i] = arg_buf.ptr; @@ -494,7 +482,7 @@ fn posixExecveErrnoToErr(err: usize) PosixExecveError { } pub var linux_aux_raw = []usize{0} ** 38; -pub var posix_environ_raw: []*u8 = undefined; +pub var posix_environ_raw: [][*]u8 = undefined; /// Caller must free result when done. pub fn getEnvMap(allocator: *Allocator) !BufMap { @@ -1311,7 +1299,7 @@ pub const Dir = struct { const next_index = self.index + linux_entry.d_reclen; self.index = next_index; - const name = cstr.toSlice(&linux_entry.d_name); + const name = cstr.toSlice(@ptrCast([*]u8, &linux_entry.d_name)); // skip . and .. entries if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..")) { @@ -1485,12 +1473,12 @@ pub const ArgIteratorPosix = struct { /// This is marked as public but actually it's only meant to be used /// internally by zig's startup code. 
- pub var raw: []*u8 = undefined; + pub var raw: [][*]u8 = undefined; }; pub const ArgIteratorWindows = struct { index: usize, - cmd_line: *const u8, + cmd_line: [*]const u8, in_quote: bool, quote_count: usize, seen_quote_count: usize, @@ -1501,7 +1489,7 @@ pub const ArgIteratorWindows = struct { return initWithCmdLine(windows.GetCommandLineA()); } - pub fn initWithCmdLine(cmd_line: *const u8) ArgIteratorWindows { + pub fn initWithCmdLine(cmd_line: [*]const u8) ArgIteratorWindows { return ArgIteratorWindows{ .index = 0, .cmd_line = cmd_line, @@ -1616,7 +1604,7 @@ pub const ArgIteratorWindows = struct { } } - fn countQuotes(cmd_line: *const u8) usize { + fn countQuotes(cmd_line: [*]const u8) usize { var result: usize = 0; var backslash_count: usize = 0; var index: usize = 0; @@ -1722,39 +1710,12 @@ pub fn argsFree(allocator: *mem.Allocator, args_alloc: []const []u8) void { } test "windows arg parsing" { - testWindowsCmdLine(c"a b\tc d", [][]const u8{ - "a", - "b", - "c", - "d", - }); - testWindowsCmdLine(c"\"abc\" d e", [][]const u8{ - "abc", - "d", - "e", - }); - testWindowsCmdLine(c"a\\\\\\b d\"e f\"g h", [][]const u8{ - "a\\\\\\b", - "de fg", - "h", - }); - testWindowsCmdLine(c"a\\\\\\\"b c d", [][]const u8{ - "a\\\"b", - "c", - "d", - }); - testWindowsCmdLine(c"a\\\\\\\\\"b c\" d e", [][]const u8{ - "a\\\\b c", - "d", - "e", - }); - testWindowsCmdLine(c"a b\tc \"d f", [][]const u8{ - "a", - "b", - "c", - "\"d", - "f", - }); + testWindowsCmdLine(c"a b\tc d", [][]const u8{ "a", "b", "c", "d" }); + testWindowsCmdLine(c"\"abc\" d e", [][]const u8{ "abc", "d", "e" }); + testWindowsCmdLine(c"a\\\\\\b d\"e f\"g h", [][]const u8{ "a\\\\\\b", "de fg", "h" }); + testWindowsCmdLine(c"a\\\\\\\"b c d", [][]const u8{ "a\\\"b", "c", "d" }); + testWindowsCmdLine(c"a\\\\\\\\\"b c\" d e", [][]const u8{ "a\\\\b c", "d", "e" }); + testWindowsCmdLine(c"a b\tc \"d f", [][]const u8{ "a", "b", "c", "\"d", "f" }); testWindowsCmdLine(c"\".\\..\\zig-cache\\build\" \"bin\\zig.exe\" \".\\..\" \".\\..\\zig-cache\" \"--help\"", [][]const u8{ ".\\..\\zig-cache\\build", @@ -1765,7 +1726,7 @@ test "windows arg parsing" { }); } -fn testWindowsCmdLine(input_cmd_line: *const u8, expected_args: []const []const u8) void { +fn testWindowsCmdLine(input_cmd_line: [*]const u8, expected_args: []const []const u8) void { var it = ArgIteratorWindows.initWithCmdLine(input_cmd_line); for (expected_args) |expected_arg| { const arg = ??it.next(debug.global_allocator) catch unreachable; @@ -2350,7 +2311,7 @@ pub fn posixConnectAsync(sockfd: i32, sockaddr: *const posix.sockaddr) PosixConn pub fn posixGetSockOptConnectError(sockfd: i32) PosixConnectError!void { var err_code: i32 = undefined; var size: u32 = @sizeOf(i32); - const rc = posix.getsockopt(sockfd, posix.SOL_SOCKET, posix.SO_ERROR, @ptrCast(*u8, &err_code), &size); + const rc = posix.getsockopt(sockfd, posix.SOL_SOCKET, posix.SO_ERROR, @ptrCast([*]u8, &err_code), &size); assert(size == 4); const err = posix.getErrno(rc); switch (err) { @@ -2401,7 +2362,7 @@ pub const Thread = struct { }, builtin.Os.windows => struct { handle: windows.HANDLE, - alloc_start: *c_void, + alloc_start: [*]c_void, heap_handle: windows.HANDLE, }, else => @compileError("Unsupported OS"), @@ -2500,7 +2461,7 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!*Thread const byte_count = @alignOf(WinThread.OuterContext) + @sizeOf(WinThread.OuterContext); const bytes_ptr = windows.HeapAlloc(heap_handle, 0, byte_count) ?? 
return SpawnThreadError.OutOfMemory; errdefer assert(windows.HeapFree(heap_handle, 0, bytes_ptr) != 0); - const bytes = @ptrCast(*u8, bytes_ptr)[0..byte_count]; + const bytes = @ptrCast([*]u8, bytes_ptr)[0..byte_count]; const outer_context = std.heap.FixedBufferAllocator.init(bytes).allocator.create(WinThread.OuterContext) catch unreachable; outer_context.inner = context; outer_context.thread.data.heap_handle = heap_handle; @@ -2572,7 +2533,7 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!*Thread // align to page stack_end -= stack_end % os.page_size; - assert(c.pthread_attr_setstack(&attr, @intToPtr(*c_void, stack_addr), stack_end - stack_addr) == 0); + assert(c.pthread_attr_setstack(&attr, @intToPtr([*]c_void, stack_addr), stack_end - stack_addr) == 0); const err = c.pthread_create(&thread_ptr.data.handle, &attr, MainFuncs.posixThreadMain, @intToPtr(*c_void, arg)); switch (err) { diff --git a/std/os/linux/index.zig b/std/os/linux/index.zig index 3e7b836ac7..0e77371ec2 100644 --- a/std/os/linux/index.zig +++ b/std/os/linux/index.zig @@ -665,15 +665,18 @@ pub fn dup2(old: i32, new: i32) usize { return syscall2(SYS_dup2, usize(old), usize(new)); } -pub fn chdir(path: *const u8) usize { +// TODO https://github.com/ziglang/zig/issues/265 +pub fn chdir(path: [*]const u8) usize { return syscall1(SYS_chdir, @ptrToInt(path)); } -pub fn chroot(path: *const u8) usize { +// TODO https://github.com/ziglang/zig/issues/265 +pub fn chroot(path: [*]const u8) usize { return syscall1(SYS_chroot, @ptrToInt(path)); } -pub fn execve(path: *const u8, argv: *const ?*const u8, envp: *const ?*const u8) usize { +// TODO https://github.com/ziglang/zig/issues/265 +pub fn execve(path: [*]const u8, argv: [*]const ?[*]const u8, envp: [*]const ?[*]const u8) usize { return syscall3(SYS_execve, @ptrToInt(path), @ptrToInt(argv), @ptrToInt(envp)); } @@ -685,11 +688,11 @@ pub fn futex_wait(uaddr: usize, futex_op: u32, val: i32, timeout: ?*timespec) us return syscall4(SYS_futex, uaddr, futex_op, @bitCast(u32, val), @ptrToInt(timeout)); } -pub fn getcwd(buf: *u8, size: usize) usize { +pub fn getcwd(buf: [*]u8, size: usize) usize { return syscall2(SYS_getcwd, @ptrToInt(buf), size); } -pub fn getdents(fd: i32, dirp: *u8, count: usize) usize { +pub fn getdents(fd: i32, dirp: [*]u8, count: usize) usize { return syscall3(SYS_getdents, usize(fd), @ptrToInt(dirp), count); } @@ -698,27 +701,32 @@ pub fn isatty(fd: i32) bool { return syscall3(SYS_ioctl, usize(fd), TIOCGWINSZ, @ptrToInt(&wsz)) == 0; } -pub fn readlink(noalias path: *const u8, noalias buf_ptr: *u8, buf_len: usize) usize { +// TODO https://github.com/ziglang/zig/issues/265 +pub fn readlink(noalias path: [*]const u8, noalias buf_ptr: [*]u8, buf_len: usize) usize { return syscall3(SYS_readlink, @ptrToInt(path), @ptrToInt(buf_ptr), buf_len); } -pub fn mkdir(path: *const u8, mode: u32) usize { +// TODO https://github.com/ziglang/zig/issues/265 +pub fn mkdir(path: [*]const u8, mode: u32) usize { return syscall2(SYS_mkdir, @ptrToInt(path), mode); } -pub fn mount(special: *const u8, dir: *const u8, fstype: *const u8, flags: usize, data: usize) usize { +// TODO https://github.com/ziglang/zig/issues/265 +pub fn mount(special: [*]const u8, dir: [*]const u8, fstype: [*]const u8, flags: usize, data: usize) usize { return syscall5(SYS_mount, @ptrToInt(special), @ptrToInt(dir), @ptrToInt(fstype), flags, data); } -pub fn umount(special: *const u8) usize { +// TODO https://github.com/ziglang/zig/issues/265 +pub fn umount(special: [*]const u8) usize { return 
syscall2(SYS_umount2, @ptrToInt(special), 0); } -pub fn umount2(special: *const u8, flags: u32) usize { +// TODO https://github.com/ziglang/zig/issues/265 +pub fn umount2(special: [*]const u8, flags: u32) usize { return syscall2(SYS_umount2, @ptrToInt(special), flags); } -pub fn mmap(address: ?*u8, length: usize, prot: usize, flags: u32, fd: i32, offset: isize) usize { +pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: u32, fd: i32, offset: isize) usize { return syscall6(SYS_mmap, @ptrToInt(address), length, prot, flags, usize(fd), @bitCast(usize, offset)); } @@ -726,23 +734,26 @@ pub fn munmap(address: usize, length: usize) usize { return syscall2(SYS_munmap, address, length); } -pub fn read(fd: i32, buf: *u8, count: usize) usize { +pub fn read(fd: i32, buf: [*]u8, count: usize) usize { return syscall3(SYS_read, usize(fd), @ptrToInt(buf), count); } -pub fn rmdir(path: *const u8) usize { +// TODO https://github.com/ziglang/zig/issues/265 +pub fn rmdir(path: [*]const u8) usize { return syscall1(SYS_rmdir, @ptrToInt(path)); } -pub fn symlink(existing: *const u8, new: *const u8) usize { +// TODO https://github.com/ziglang/zig/issues/265 +pub fn symlink(existing: [*]const u8, new: [*]const u8) usize { return syscall2(SYS_symlink, @ptrToInt(existing), @ptrToInt(new)); } -pub fn pread(fd: i32, buf: *u8, count: usize, offset: usize) usize { +pub fn pread(fd: i32, buf: [*]u8, count: usize, offset: usize) usize { return syscall4(SYS_pread, usize(fd), @ptrToInt(buf), count, offset); } -pub fn access(path: *const u8, mode: u32) usize { +// TODO https://github.com/ziglang/zig/issues/265 +pub fn access(path: [*]const u8, mode: u32) usize { return syscall2(SYS_access, @ptrToInt(path), mode); } @@ -754,27 +765,31 @@ pub fn pipe2(fd: *[2]i32, flags: usize) usize { return syscall2(SYS_pipe2, @ptrToInt(fd), flags); } -pub fn write(fd: i32, buf: *const u8, count: usize) usize { +pub fn write(fd: i32, buf: [*]const u8, count: usize) usize { return syscall3(SYS_write, usize(fd), @ptrToInt(buf), count); } -pub fn pwrite(fd: i32, buf: *const u8, count: usize, offset: usize) usize { +pub fn pwrite(fd: i32, buf: [*]const u8, count: usize, offset: usize) usize { return syscall4(SYS_pwrite, usize(fd), @ptrToInt(buf), count, offset); } -pub fn rename(old: *const u8, new: *const u8) usize { +// TODO https://github.com/ziglang/zig/issues/265 +pub fn rename(old: [*]const u8, new: [*]const u8) usize { return syscall2(SYS_rename, @ptrToInt(old), @ptrToInt(new)); } -pub fn open(path: *const u8, flags: u32, perm: usize) usize { +// TODO https://github.com/ziglang/zig/issues/265 +pub fn open(path: [*]const u8, flags: u32, perm: usize) usize { return syscall3(SYS_open, @ptrToInt(path), flags, perm); } -pub fn create(path: *const u8, perm: usize) usize { +// TODO https://github.com/ziglang/zig/issues/265 +pub fn create(path: [*]const u8, perm: usize) usize { return syscall2(SYS_creat, @ptrToInt(path), perm); } -pub fn openat(dirfd: i32, path: *const u8, flags: usize, mode: usize) usize { +// TODO https://github.com/ziglang/zig/issues/265 +pub fn openat(dirfd: i32, path: [*]const u8, flags: usize, mode: usize) usize { return syscall4(SYS_openat, usize(dirfd), @ptrToInt(path), flags, mode); } @@ -801,7 +816,7 @@ pub fn exit(status: i32) noreturn { unreachable; } -pub fn getrandom(buf: *u8, count: usize, flags: u32) usize { +pub fn getrandom(buf: [*]u8, count: usize, flags: u32) usize { return syscall3(SYS_getrandom, @ptrToInt(buf), count, usize(flags)); } @@ -809,7 +824,8 @@ pub fn kill(pid: i32, sig: i32) usize { 
return syscall2(SYS_kill, @bitCast(usize, isize(pid)), usize(sig)); } -pub fn unlink(path: *const u8) usize { +// TODO https://github.com/ziglang/zig/issues/265 +pub fn unlink(path: [*]const u8) usize { return syscall1(SYS_unlink, @ptrToInt(path)); } @@ -942,8 +958,8 @@ pub fn sigaction(sig: u6, noalias act: *const Sigaction, noalias oact: ?*Sigacti .restorer = @ptrCast(extern fn () void, restore_rt), }; var ksa_old: k_sigaction = undefined; - @memcpy(@ptrCast(*u8, *ksa.mask), @ptrCast(*const u8, *act.mask), 8); - const result = syscall4(SYS_rt_sigaction, sig, @ptrToInt(*ksa), @ptrToInt(*ksa_old), @sizeOf(@typeOf(ksa.mask))); + @memcpy(@ptrCast([*]u8, &ksa.mask), @ptrCast([*]const u8, &act.mask), 8); + const result = syscall4(SYS_rt_sigaction, sig, @ptrToInt(&ksa), @ptrToInt(&ksa_old), @sizeOf(@typeOf(ksa.mask))); const err = getErrno(result); if (err != 0) { return result; @@ -951,7 +967,7 @@ pub fn sigaction(sig: u6, noalias act: *const Sigaction, noalias oact: ?*Sigacti if (oact) |old| { old.handler = ksa_old.handler; old.flags = @truncate(u32, ksa_old.flags); - @memcpy(@ptrCast(*u8, *old.mask), @ptrCast(*const u8, *ksa_old.mask), @sizeOf(@typeOf(ksa_old.mask))); + @memcpy(@ptrCast([*]u8, &old.mask), @ptrCast([*]const u8, &ksa_old.mask), @sizeOf(@typeOf(ksa_old.mask))); } return 0; } @@ -1036,7 +1052,7 @@ pub const sockaddr_in6 = extern struct { }; pub const iovec = extern struct { - iov_base: *u8, + iov_base: [*]u8, iov_len: usize, }; @@ -1052,11 +1068,11 @@ pub fn socket(domain: u32, socket_type: u32, protocol: u32) usize { return syscall3(SYS_socket, domain, socket_type, protocol); } -pub fn setsockopt(fd: i32, level: u32, optname: u32, optval: *const u8, optlen: socklen_t) usize { +pub fn setsockopt(fd: i32, level: u32, optname: u32, optval: [*]const u8, optlen: socklen_t) usize { return syscall5(SYS_setsockopt, usize(fd), level, optname, usize(optval), @ptrToInt(optlen)); } -pub fn getsockopt(fd: i32, level: u32, optname: u32, noalias optval: *u8, noalias optlen: *socklen_t) usize { +pub fn getsockopt(fd: i32, level: u32, optname: u32, noalias optval: [*]u8, noalias optlen: *socklen_t) usize { return syscall5(SYS_getsockopt, usize(fd), level, optname, @ptrToInt(optval), @ptrToInt(optlen)); } @@ -1072,7 +1088,7 @@ pub fn recvmsg(fd: i32, msg: *msghdr, flags: u32) usize { return syscall3(SYS_recvmsg, usize(fd), @ptrToInt(msg), flags); } -pub fn recvfrom(fd: i32, noalias buf: *u8, len: usize, flags: u32, noalias addr: ?*sockaddr, noalias alen: ?*socklen_t) usize { +pub fn recvfrom(fd: i32, noalias buf: [*]u8, len: usize, flags: u32, noalias addr: ?*sockaddr, noalias alen: ?*socklen_t) usize { return syscall6(SYS_recvfrom, usize(fd), @ptrToInt(buf), len, flags, @ptrToInt(addr), @ptrToInt(alen)); } @@ -1088,7 +1104,7 @@ pub fn listen(fd: i32, backlog: u32) usize { return syscall2(SYS_listen, usize(fd), backlog); } -pub fn sendto(fd: i32, buf: *const u8, len: usize, flags: u32, addr: ?*const sockaddr, alen: socklen_t) usize { +pub fn sendto(fd: i32, buf: [*]const u8, len: usize, flags: u32, addr: ?*const sockaddr, alen: socklen_t) usize { return syscall6(SYS_sendto, usize(fd), @ptrToInt(buf), len, flags, @ptrToInt(addr), usize(alen)); } @@ -1108,59 +1124,72 @@ pub fn fstat(fd: i32, stat_buf: *Stat) usize { return syscall2(SYS_fstat, usize(fd), @ptrToInt(stat_buf)); } -pub fn stat(pathname: *const u8, statbuf: *Stat) usize { +// TODO https://github.com/ziglang/zig/issues/265 +pub fn stat(pathname: [*]const u8, statbuf: *Stat) usize { return syscall2(SYS_stat, @ptrToInt(pathname), 
@ptrToInt(statbuf)); } -pub fn lstat(pathname: *const u8, statbuf: *Stat) usize { +// TODO https://github.com/ziglang/zig/issues/265 +pub fn lstat(pathname: [*]const u8, statbuf: *Stat) usize { return syscall2(SYS_lstat, @ptrToInt(pathname), @ptrToInt(statbuf)); } -pub fn listxattr(path: *const u8, list: *u8, size: usize) usize { +// TODO https://github.com/ziglang/zig/issues/265 +pub fn listxattr(path: [*]const u8, list: [*]u8, size: usize) usize { return syscall3(SYS_listxattr, @ptrToInt(path), @ptrToInt(list), size); } -pub fn llistxattr(path: *const u8, list: *u8, size: usize) usize { +// TODO https://github.com/ziglang/zig/issues/265 +pub fn llistxattr(path: [*]const u8, list: [*]u8, size: usize) usize { return syscall3(SYS_llistxattr, @ptrToInt(path), @ptrToInt(list), size); } -pub fn flistxattr(fd: usize, list: *u8, size: usize) usize { +pub fn flistxattr(fd: usize, list: [*]u8, size: usize) usize { return syscall3(SYS_flistxattr, fd, @ptrToInt(list), size); } -pub fn getxattr(path: *const u8, name: *const u8, value: *void, size: usize) usize { +// TODO https://github.com/ziglang/zig/issues/265 +pub fn getxattr(path: [*]const u8, name: [*]const u8, value: [*]u8, size: usize) usize { return syscall4(SYS_getxattr, @ptrToInt(path), @ptrToInt(name), @ptrToInt(value), size); } -pub fn lgetxattr(path: *const u8, name: *const u8, value: *void, size: usize) usize { +// TODO https://github.com/ziglang/zig/issues/265 +pub fn lgetxattr(path: [*]const u8, name: [*]const u8, value: [*]u8, size: usize) usize { return syscall4(SYS_lgetxattr, @ptrToInt(path), @ptrToInt(name), @ptrToInt(value), size); } -pub fn fgetxattr(fd: usize, name: *const u8, value: *void, size: usize) usize { +// TODO https://github.com/ziglang/zig/issues/265 +pub fn fgetxattr(fd: usize, name: [*]const u8, value: [*]u8, size: usize) usize { return syscall4(SYS_lgetxattr, fd, @ptrToInt(name), @ptrToInt(value), size); } -pub fn setxattr(path: *const u8, name: *const u8, value: *const void, size: usize, flags: usize) usize { +// TODO https://github.com/ziglang/zig/issues/265 +pub fn setxattr(path: [*]const u8, name: [*]const u8, value: *const void, size: usize, flags: usize) usize { return syscall5(SYS_setxattr, @ptrToInt(path), @ptrToInt(name), @ptrToInt(value), size, flags); } -pub fn lsetxattr(path: *const u8, name: *const u8, value: *const void, size: usize, flags: usize) usize { +// TODO https://github.com/ziglang/zig/issues/265 +pub fn lsetxattr(path: [*]const u8, name: [*]const u8, value: *const void, size: usize, flags: usize) usize { return syscall5(SYS_lsetxattr, @ptrToInt(path), @ptrToInt(name), @ptrToInt(value), size, flags); } -pub fn fsetxattr(fd: usize, name: *const u8, value: *const void, size: usize, flags: usize) usize { +// TODO https://github.com/ziglang/zig/issues/265 +pub fn fsetxattr(fd: usize, name: [*]const u8, value: *const void, size: usize, flags: usize) usize { return syscall5(SYS_fsetxattr, fd, @ptrToInt(name), @ptrToInt(value), size, flags); } -pub fn removexattr(path: *const u8, name: *const u8) usize { +// TODO https://github.com/ziglang/zig/issues/265 +pub fn removexattr(path: [*]const u8, name: [*]const u8) usize { return syscall2(SYS_removexattr, @ptrToInt(path), @ptrToInt(name)); } -pub fn lremovexattr(path: *const u8, name: *const u8) usize { +// TODO https://github.com/ziglang/zig/issues/265 +pub fn lremovexattr(path: [*]const u8, name: [*]const u8) usize { return syscall2(SYS_lremovexattr, @ptrToInt(path), @ptrToInt(name)); } -pub fn fremovexattr(fd: usize, name: *const u8) usize { +// 
TODO https://github.com/ziglang/zig/issues/265 +pub fn fremovexattr(fd: usize, name: [*]const u8) usize { return syscall2(SYS_fremovexattr, fd, @ptrToInt(name)); } @@ -1188,7 +1217,7 @@ pub fn epoll_ctl(epoll_fd: i32, op: u32, fd: i32, ev: *epoll_event) usize { return syscall4(SYS_epoll_ctl, usize(epoll_fd), usize(op), usize(fd), @ptrToInt(ev)); } -pub fn epoll_wait(epoll_fd: i32, events: *epoll_event, maxevents: u32, timeout: i32) usize { +pub fn epoll_wait(epoll_fd: i32, events: [*]epoll_event, maxevents: u32, timeout: i32) usize { return syscall4(SYS_epoll_wait, usize(epoll_fd), @ptrToInt(events), usize(maxevents), usize(timeout)); } diff --git a/std/os/linux/test.zig b/std/os/linux/test.zig index 06aae1968f..948a3ac96b 100644 --- a/std/os/linux/test.zig +++ b/std/os/linux/test.zig @@ -35,5 +35,6 @@ test "timer" { const events_one: linux.epoll_event = undefined; var events = []linux.epoll_event{events_one} ** 8; - err = linux.epoll_wait(i32(epoll_fd), &events[0], 8, -1); + // TODO implicit cast from *[N]T to [*]T + err = linux.epoll_wait(i32(epoll_fd), @ptrCast([*]linux.epoll_event, &events), 8, -1); } diff --git a/std/os/linux/vdso.zig b/std/os/linux/vdso.zig index 1317da6388..2ab4d0cbc1 100644 --- a/std/os/linux/vdso.zig +++ b/std/os/linux/vdso.zig @@ -12,7 +12,7 @@ pub fn lookup(vername: []const u8, name: []const u8) usize { var ph_addr: usize = vdso_addr + eh.e_phoff; const ph = @intToPtr(*elf.Phdr, ph_addr); - var maybe_dynv: ?*usize = null; + var maybe_dynv: ?[*]usize = null; var base: usize = @maxValue(usize); { var i: usize = 0; @@ -23,7 +23,7 @@ pub fn lookup(vername: []const u8, name: []const u8) usize { const this_ph = @intToPtr(*elf.Phdr, ph_addr); switch (this_ph.p_type) { elf.PT_LOAD => base = vdso_addr + this_ph.p_offset - this_ph.p_vaddr, - elf.PT_DYNAMIC => maybe_dynv = @intToPtr(*usize, vdso_addr + this_ph.p_offset), + elf.PT_DYNAMIC => maybe_dynv = @intToPtr([*]usize, vdso_addr + this_ph.p_offset), else => {}, } } @@ -31,10 +31,10 @@ pub fn lookup(vername: []const u8, name: []const u8) usize { const dynv = maybe_dynv ?? 
return 0; if (base == @maxValue(usize)) return 0; - var maybe_strings: ?*u8 = null; - var maybe_syms: ?*elf.Sym = null; - var maybe_hashtab: ?*linux.Elf_Symndx = null; - var maybe_versym: ?*u16 = null; + var maybe_strings: ?[*]u8 = null; + var maybe_syms: ?[*]elf.Sym = null; + var maybe_hashtab: ?[*]linux.Elf_Symndx = null; + var maybe_versym: ?[*]u16 = null; var maybe_verdef: ?*elf.Verdef = null; { @@ -42,10 +42,10 @@ pub fn lookup(vername: []const u8, name: []const u8) usize { while (dynv[i] != 0) : (i += 2) { const p = base + dynv[i + 1]; switch (dynv[i]) { - elf.DT_STRTAB => maybe_strings = @intToPtr(*u8, p), - elf.DT_SYMTAB => maybe_syms = @intToPtr(*elf.Sym, p), - elf.DT_HASH => maybe_hashtab = @intToPtr(*linux.Elf_Symndx, p), - elf.DT_VERSYM => maybe_versym = @intToPtr(*u16, p), + elf.DT_STRTAB => maybe_strings = @intToPtr([*]u8, p), + elf.DT_SYMTAB => maybe_syms = @intToPtr([*]elf.Sym, p), + elf.DT_HASH => maybe_hashtab = @intToPtr([*]linux.Elf_Symndx, p), + elf.DT_VERSYM => maybe_versym = @intToPtr([*]u16, p), elf.DT_VERDEF => maybe_verdef = @intToPtr(*elf.Verdef, p), else => {}, } @@ -65,7 +65,7 @@ pub fn lookup(vername: []const u8, name: []const u8) usize { if (0 == (u32(1) << u5(syms[i].st_info & 0xf) & OK_TYPES)) continue; if (0 == (u32(1) << u5(syms[i].st_info >> 4) & OK_BINDS)) continue; if (0 == syms[i].st_shndx) continue; - if (!mem.eql(u8, name, cstr.toSliceConst(&strings[syms[i].st_name]))) continue; + if (!mem.eql(u8, name, cstr.toSliceConst(strings + syms[i].st_name))) continue; if (maybe_versym) |versym| { if (!checkver(??maybe_verdef, versym[i], vername, strings)) continue; @@ -76,7 +76,7 @@ pub fn lookup(vername: []const u8, name: []const u8) usize { return 0; } -fn checkver(def_arg: *elf.Verdef, vsym_arg: i32, vername: []const u8, strings: *u8) bool { +fn checkver(def_arg: *elf.Verdef, vsym_arg: i32, vername: []const u8, strings: [*]u8) bool { var def = def_arg; const vsym = @bitCast(u32, vsym_arg) & 0x7fff; while (true) { @@ -87,5 +87,5 @@ fn checkver(def_arg: *elf.Verdef, vsym_arg: i32, vername: []const u8, strings: * def = @intToPtr(*elf.Verdef, @ptrToInt(def) + def.vd_next); } const aux = @intToPtr(*elf.Verdaux, @ptrToInt(def) + def.vd_aux); - return mem.eql(u8, vername, cstr.toSliceConst(&strings[aux.vda_name])); + return mem.eql(u8, vername, cstr.toSliceConst(strings + aux.vda_name)); } diff --git a/std/os/windows/index.zig b/std/os/windows/index.zig index 85f69836d5..c491ae6538 100644 --- a/std/os/windows/index.zig +++ b/std/os/windows/index.zig @@ -10,7 +10,7 @@ pub extern "advapi32" stdcallcc fn CryptAcquireContextA( pub extern "advapi32" stdcallcc fn CryptReleaseContext(hProv: HCRYPTPROV, dwFlags: DWORD) BOOL; -pub extern "advapi32" stdcallcc fn CryptGenRandom(hProv: HCRYPTPROV, dwLen: DWORD, pbBuffer: *BYTE) BOOL; +pub extern "advapi32" stdcallcc fn CryptGenRandom(hProv: HCRYPTPROV, dwLen: DWORD, pbBuffer: [*]BYTE) BOOL; pub extern "kernel32" stdcallcc fn CloseHandle(hObject: HANDLE) BOOL; @@ -61,7 +61,7 @@ pub extern "kernel32" stdcallcc fn DeleteFileA(lpFileName: LPCSTR) BOOL; pub extern "kernel32" stdcallcc fn ExitProcess(exit_code: UINT) noreturn; -pub extern "kernel32" stdcallcc fn FreeEnvironmentStringsA(penv: LPCH) BOOL; +pub extern "kernel32" stdcallcc fn FreeEnvironmentStringsA(penv: [*]u8) BOOL; pub extern "kernel32" stdcallcc fn GetCommandLineA() LPSTR; @@ -69,7 +69,7 @@ pub extern "kernel32" stdcallcc fn GetConsoleMode(in_hConsoleHandle: HANDLE, out pub extern "kernel32" stdcallcc fn GetCurrentDirectoryA(nBufferLength: WORD, lpBuffer: ?LPSTR) 
DWORD; -pub extern "kernel32" stdcallcc fn GetEnvironmentStringsA() ?LPCH; +pub extern "kernel32" stdcallcc fn GetEnvironmentStringsA() ?[*]u8; pub extern "kernel32" stdcallcc fn GetEnvironmentVariableA(lpName: LPCSTR, lpBuffer: LPSTR, nSize: DWORD) DWORD; @@ -101,17 +101,17 @@ pub extern "kernel32" stdcallcc fn GetSystemTimeAsFileTime(?*FILETIME) void; pub extern "kernel32" stdcallcc fn HeapCreate(flOptions: DWORD, dwInitialSize: SIZE_T, dwMaximumSize: SIZE_T) ?HANDLE; pub extern "kernel32" stdcallcc fn HeapDestroy(hHeap: HANDLE) BOOL; -pub extern "kernel32" stdcallcc fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: *c_void, dwBytes: SIZE_T) ?*c_void; -pub extern "kernel32" stdcallcc fn HeapSize(hHeap: HANDLE, dwFlags: DWORD, lpMem: *const c_void) SIZE_T; -pub extern "kernel32" stdcallcc fn HeapValidate(hHeap: HANDLE, dwFlags: DWORD, lpMem: *const c_void) BOOL; +pub extern "kernel32" stdcallcc fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: [*]c_void, dwBytes: SIZE_T) ?[*]c_void; +pub extern "kernel32" stdcallcc fn HeapSize(hHeap: HANDLE, dwFlags: DWORD, lpMem: [*]const c_void) SIZE_T; +pub extern "kernel32" stdcallcc fn HeapValidate(hHeap: HANDLE, dwFlags: DWORD, lpMem: [*]const c_void) BOOL; pub extern "kernel32" stdcallcc fn HeapCompact(hHeap: HANDLE, dwFlags: DWORD) SIZE_T; pub extern "kernel32" stdcallcc fn HeapSummary(hHeap: HANDLE, dwFlags: DWORD, lpSummary: LPHEAP_SUMMARY) BOOL; pub extern "kernel32" stdcallcc fn GetStdHandle(in_nStdHandle: DWORD) ?HANDLE; -pub extern "kernel32" stdcallcc fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) ?*c_void; +pub extern "kernel32" stdcallcc fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) ?[*]c_void; -pub extern "kernel32" stdcallcc fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: *c_void) BOOL; +pub extern "kernel32" stdcallcc fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: [*]c_void) BOOL; pub extern "kernel32" stdcallcc fn MoveFileExA( lpExistingFileName: LPCSTR, @@ -127,7 +127,7 @@ pub extern "kernel32" stdcallcc fn PathFileExists(pszPath: ?LPCTSTR) BOOL; pub extern "kernel32" stdcallcc fn ReadFile( in_hFile: HANDLE, - out_lpBuffer: *c_void, + out_lpBuffer: [*]c_void, in_nNumberOfBytesToRead: DWORD, out_lpNumberOfBytesRead: *DWORD, in_out_lpOverlapped: ?*OVERLAPPED, @@ -150,7 +150,7 @@ pub extern "kernel32" stdcallcc fn WaitForSingleObject(hHandle: HANDLE, dwMillis pub extern "kernel32" stdcallcc fn WriteFile( in_hFile: HANDLE, - in_lpBuffer: *const c_void, + in_lpBuffer: [*]const c_void, in_nNumberOfBytesToWrite: DWORD, out_lpNumberOfBytesWritten: ?*DWORD, in_out_lpOverlapped: ?*OVERLAPPED, @@ -178,16 +178,16 @@ pub const HMODULE = *@OpaqueType(); pub const INT = c_int; pub const LPBYTE = *BYTE; pub const LPCH = *CHAR; -pub const LPCSTR = *const CHAR; -pub const LPCTSTR = *const TCHAR; +pub const LPCSTR = [*]const CHAR; +pub const LPCTSTR = [*]const TCHAR; pub const LPCVOID = *const c_void; pub const LPDWORD = *DWORD; -pub const LPSTR = *CHAR; +pub const LPSTR = [*]CHAR; pub const LPTSTR = if (UNICODE) LPWSTR else LPSTR; pub const LPVOID = *c_void; -pub const LPWSTR = *WCHAR; +pub const LPWSTR = [*]WCHAR; pub const PVOID = *c_void; -pub const PWSTR = *WCHAR; +pub const PWSTR = [*]WCHAR; pub const SIZE_T = usize; pub const TCHAR = if (UNICODE) WCHAR else u8; pub const UINT = c_uint; diff --git a/std/os/windows/util.zig b/std/os/windows/util.zig index 7170346108..5a40567310 100644 --- a/std/os/windows/util.zig +++ b/std/os/windows/util.zig @@ -42,7 +42,7 @@ pub const WriteError = error{ }; pub fn 
windowsWrite(handle: windows.HANDLE, bytes: []const u8) WriteError!void { - if (windows.WriteFile(handle, @ptrCast(*const c_void, bytes.ptr), u32(bytes.len), null, null) == 0) { + if (windows.WriteFile(handle, @ptrCast([*]const c_void, bytes.ptr), u32(bytes.len), null, null) == 0) { const err = windows.GetLastError(); return switch (err) { windows.ERROR.INVALID_USER_BUFFER => WriteError.SystemResources, diff --git a/std/segmented_list.zig b/std/segmented_list.zig index be9a2071a0..a2f3607ad8 100644 --- a/std/segmented_list.zig +++ b/std/segmented_list.zig @@ -87,7 +87,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type const ShelfIndex = std.math.Log2Int(usize); prealloc_segment: [prealloc_item_count]T, - dynamic_segments: []*T, + dynamic_segments: [][*]T, allocator: *Allocator, len: usize, @@ -99,7 +99,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type .allocator = allocator, .len = 0, .prealloc_segment = undefined, - .dynamic_segments = []*T{}, + .dynamic_segments = [][*]T{}, }; } @@ -160,11 +160,11 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type const new_cap_shelf_count = shelfCount(new_capacity); const old_shelf_count = ShelfIndex(self.dynamic_segments.len); if (new_cap_shelf_count > old_shelf_count) { - self.dynamic_segments = try self.allocator.realloc(*T, self.dynamic_segments, new_cap_shelf_count); + self.dynamic_segments = try self.allocator.realloc([*]T, self.dynamic_segments, new_cap_shelf_count); var i = old_shelf_count; errdefer { self.freeShelves(i, old_shelf_count); - self.dynamic_segments = self.allocator.shrink(*T, self.dynamic_segments, old_shelf_count); + self.dynamic_segments = self.allocator.shrink([*]T, self.dynamic_segments, old_shelf_count); } while (i < new_cap_shelf_count) : (i += 1) { self.dynamic_segments[i] = (try self.allocator.alloc(T, shelfSize(i))).ptr; @@ -178,7 +178,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type const len = ShelfIndex(self.dynamic_segments.len); self.freeShelves(len, 0); self.allocator.free(self.dynamic_segments); - self.dynamic_segments = []*T{}; + self.dynamic_segments = [][*]T{}; return; } @@ -190,7 +190,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type } self.freeShelves(old_shelf_count, new_cap_shelf_count); - self.dynamic_segments = self.allocator.shrink(*T, self.dynamic_segments, new_cap_shelf_count); + self.dynamic_segments = self.allocator.shrink([*]T, self.dynamic_segments, new_cap_shelf_count); } pub fn uncheckedAt(self: *Self, index: usize) *T { diff --git a/std/special/bootstrap.zig b/std/special/bootstrap.zig index 5ed7874ca5..64eae79ce4 100644 --- a/std/special/bootstrap.zig +++ b/std/special/bootstrap.zig @@ -5,7 +5,7 @@ const root = @import("@root"); const std = @import("std"); const builtin = @import("builtin"); -var argc_ptr: *usize = undefined; +var argc_ptr: [*]usize = undefined; comptime { const strong_linkage = builtin.GlobalLinkage.Strong; @@ -28,12 +28,12 @@ nakedcc fn _start() noreturn { switch (builtin.arch) { builtin.Arch.x86_64 => { argc_ptr = asm ("lea (%%rsp), %[argc]" - : [argc] "=r" (-> *usize) + : [argc] "=r" (-> [*]usize) ); }, builtin.Arch.i386 => { argc_ptr = asm ("lea (%%esp), %[argc]" - : [argc] "=r" (-> *usize) + : [argc] "=r" (-> [*]usize) ); }, else => @compileError("unsupported arch"), @@ -49,15 +49,17 @@ extern fn WinMainCRTStartup() noreturn { std.os.windows.ExitProcess(callMain()); } +// TODO 
https://github.com/ziglang/zig/issues/265 fn posixCallMainAndExit() noreturn { const argc = argc_ptr.*; - const argv = @ptrCast(**u8, &argc_ptr[1]); - const envp_nullable = @ptrCast(*?*u8, &argv[argc + 1]); + const argv = @ptrCast([*][*]u8, argc_ptr + 1); + + const envp_nullable = @ptrCast([*]?[*]u8, argv + argc + 1); var envp_count: usize = 0; while (envp_nullable[envp_count]) |_| : (envp_count += 1) {} - const envp = @ptrCast(**u8, envp_nullable)[0..envp_count]; + const envp = @ptrCast([*][*]u8, envp_nullable)[0..envp_count]; if (builtin.os == builtin.Os.linux) { - const auxv = &@ptrCast(*usize, envp.ptr)[envp_count + 1]; + const auxv = @ptrCast([*]usize, envp.ptr + envp_count + 1); var i: usize = 0; while (auxv[i] != 0) : (i += 2) { if (auxv[i] < std.os.linux_aux_raw.len) std.os.linux_aux_raw[auxv[i]] = auxv[i + 1]; @@ -68,16 +70,16 @@ fn posixCallMainAndExit() noreturn { std.os.posix.exit(callMainWithArgs(argc, argv, envp)); } -fn callMainWithArgs(argc: usize, argv: **u8, envp: []*u8) u8 { +fn callMainWithArgs(argc: usize, argv: [*][*]u8, envp: [][*]u8) u8 { std.os.ArgIteratorPosix.raw = argv[0..argc]; std.os.posix_environ_raw = envp; return callMain(); } -extern fn main(c_argc: i32, c_argv: **u8, c_envp: *?*u8) i32 { +extern fn main(c_argc: i32, c_argv: [*][*]u8, c_envp: [*]?[*]u8) i32 { var env_count: usize = 0; while (c_envp[env_count] != null) : (env_count += 1) {} - const envp = @ptrCast(**u8, c_envp)[0..env_count]; + const envp = @ptrCast([*][*]u8, c_envp)[0..env_count]; return callMainWithArgs(usize(c_argc), c_argv, envp); } diff --git a/std/special/builtin.zig b/std/special/builtin.zig index 9c9cd35103..e537078924 100644 --- a/std/special/builtin.zig +++ b/std/special/builtin.zig @@ -14,7 +14,7 @@ pub fn panic(msg: []const u8, error_return_trace: ?*builtin.StackTrace) noreturn } } -export fn memset(dest: ?*u8, c: u8, n: usize) ?*u8 { +export fn memset(dest: ?[*]u8, c: u8, n: usize) ?[*]u8 { @setRuntimeSafety(false); var index: usize = 0; @@ -24,7 +24,7 @@ export fn memset(dest: ?*u8, c: u8, n: usize) ?*u8 { return dest; } -export fn memcpy(noalias dest: ?*u8, noalias src: ?*const u8, n: usize) ?*u8 { +export fn memcpy(noalias dest: ?[*]u8, noalias src: ?[*]const u8, n: usize) ?[*]u8 { @setRuntimeSafety(false); var index: usize = 0; @@ -34,7 +34,7 @@ export fn memcpy(noalias dest: ?*u8, noalias src: ?*const u8, n: usize) ?*u8 { return dest; } -export fn memmove(dest: ?*u8, src: ?*const u8, n: usize) ?*u8 { +export fn memmove(dest: ?[*]u8, src: ?[*]const u8, n: usize) ?[*]u8 { @setRuntimeSafety(false); if (@ptrToInt(dest) < @ptrToInt(src)) { diff --git a/test/cases/align.zig b/test/cases/align.zig index 99bdcdf940..b80727258e 100644 --- a/test/cases/align.zig +++ b/test/cases/align.zig @@ -167,54 +167,41 @@ test "@ptrCast preserves alignment of bigger source" { assert(@typeOf(ptr) == *align(16) u8); } -test "compile-time known array index has best alignment possible" { +test "runtime known array index has best alignment possible" { // take full advantage of over-alignment - var array align(4) = []u8{ - 1, - 2, - 3, - 4, - }; + var array align(4) = []u8{ 1, 2, 3, 4 }; assert(@typeOf(&array[0]) == *align(4) u8); assert(@typeOf(&array[1]) == *u8); assert(@typeOf(&array[2]) == *align(2) u8); assert(@typeOf(&array[3]) == *u8); // because align is too small but we still figure out to use 2 - var bigger align(2) = []u64{ - 1, - 2, - 3, - 4, - }; + var bigger align(2) = []u64{ 1, 2, 3, 4 }; assert(@typeOf(&bigger[0]) == *align(2) u64); assert(@typeOf(&bigger[1]) == *align(2) u64); 
assert(@typeOf(&bigger[2]) == *align(2) u64); assert(@typeOf(&bigger[3]) == *align(2) u64); // because pointer is align 2 and u32 align % 2 == 0 we can assume align 2 - var smaller align(2) = []u32{ - 1, - 2, - 3, - 4, - }; - testIndex(&smaller[0], 0, *align(2) u32); - testIndex(&smaller[0], 1, *align(2) u32); - testIndex(&smaller[0], 2, *align(2) u32); - testIndex(&smaller[0], 3, *align(2) u32); + var smaller align(2) = []u32{ 1, 2, 3, 4 }; + comptime assert(@typeOf(smaller[0..]) == []align(2) u32); + comptime assert(@typeOf(smaller[0..].ptr) == [*]align(2) u32); + testIndex(smaller[0..].ptr, 0, *align(2) u32); + testIndex(smaller[0..].ptr, 1, *align(2) u32); + testIndex(smaller[0..].ptr, 2, *align(2) u32); + testIndex(smaller[0..].ptr, 3, *align(2) u32); // has to use ABI alignment because index known at runtime only - testIndex2(&array[0], 0, *u8); - testIndex2(&array[0], 1, *u8); - testIndex2(&array[0], 2, *u8); - testIndex2(&array[0], 3, *u8); + testIndex2(array[0..].ptr, 0, *u8); + testIndex2(array[0..].ptr, 1, *u8); + testIndex2(array[0..].ptr, 2, *u8); + testIndex2(array[0..].ptr, 3, *u8); } -fn testIndex(smaller: *align(2) u32, index: usize, comptime T: type) void { - assert(@typeOf(&smaller[index]) == T); +fn testIndex(smaller: [*]align(2) u32, index: usize, comptime T: type) void { + comptime assert(@typeOf(&smaller[index]) == T); } -fn testIndex2(ptr: *align(4) u8, index: usize, comptime T: type) void { - assert(@typeOf(&ptr[index]) == T); +fn testIndex2(ptr: [*]align(4) u8, index: usize, comptime T: type) void { + comptime assert(@typeOf(&ptr[index]) == T); } test "alignstack" { diff --git a/test/cases/const_slice_child.zig b/test/cases/const_slice_child.zig index e012c729a0..07d02d5df0 100644 --- a/test/cases/const_slice_child.zig +++ b/test/cases/const_slice_child.zig @@ -1,15 +1,16 @@ const debug = @import("std").debug; const assert = debug.assert; -var argv: *const *const u8 = undefined; +var argv: [*]const [*]const u8 = undefined; test "const slice child" { - const strs = ([]*const u8){ + const strs = ([][*]const u8){ c"one", c"two", c"three", }; - argv = &strs[0]; + // TODO this should implicitly cast + argv = @ptrCast([*]const [*]const u8, &strs); bar(strs.len); } @@ -29,7 +30,7 @@ fn bar(argc: usize) void { foo(args); } -fn strlen(ptr: *const u8) usize { +fn strlen(ptr: [*]const u8) usize { var count: usize = 0; while (ptr[count] != 0) : (count += 1) {} return count; diff --git a/test/cases/for.zig b/test/cases/for.zig index c624035708..bdbab312f6 100644 --- a/test/cases/for.zig +++ b/test/cases/for.zig @@ -35,34 +35,12 @@ fn mangleString(s: []u8) void { } test "basic for loop" { - const expected_result = []u8{ - 9, - 8, - 7, - 6, - 0, - 1, - 2, - 3, - 9, - 8, - 7, - 6, - 0, - 1, - 2, - 3, - }; + const expected_result = []u8{ 9, 8, 7, 6, 0, 1, 2, 3, 9, 8, 7, 6, 0, 1, 2, 3 }; var buffer: [expected_result.len]u8 = undefined; var buf_index: usize = 0; - const array = []u8{ - 9, - 8, - 7, - 6, - }; + const array = []u8{ 9, 8, 7, 6 }; for (array) |item| { buffer[buf_index] = item; buf_index += 1; diff --git a/test/cases/misc.zig b/test/cases/misc.zig index 919b978f9f..5899f20f9c 100644 --- a/test/cases/misc.zig +++ b/test/cases/misc.zig @@ -171,8 +171,8 @@ test "memcpy and memset intrinsics" { var foo: [20]u8 = undefined; var bar: [20]u8 = undefined; - @memset(&foo[0], 'A', foo.len); - @memcpy(&bar[0], &foo[0], bar.len); + @memset(foo[0..].ptr, 'A', foo.len); + @memcpy(bar[0..].ptr, foo[0..].ptr, bar.len); if (bar[11] != 'A') unreachable; } @@ -194,7 +194,7 @@ test 
"slicing" { if (slice.len != 5) unreachable; const ptr = &slice[0]; - if (ptr[0] != 1234) unreachable; + if (ptr.* != 1234) unreachable; var slice_rest = array[10..]; if (slice_rest.len != 10) unreachable; @@ -464,8 +464,9 @@ test "array 2D const double ptr" { } fn testArray2DConstDoublePtr(ptr: *const f32) void { - assert(ptr[0] == 1.0); - assert(ptr[1] == 2.0); + const ptr2 = @ptrCast([*]const f32, ptr); + assert(ptr2[0] == 1.0); + assert(ptr2[1] == 2.0); } const Tid = builtin.TypeId; diff --git a/test/cases/pointers.zig b/test/cases/pointers.zig index 87b3d25a74..47afb60a2e 100644 --- a/test/cases/pointers.zig +++ b/test/cases/pointers.zig @@ -12,3 +12,33 @@ fn testDerefPtr() void { y.* += 1; assert(x == 1235); } + +test "pointer arithmetic" { + var ptr = c"abcd"; + + assert(ptr[0] == 'a'); + ptr += 1; + assert(ptr[0] == 'b'); + ptr += 1; + assert(ptr[0] == 'c'); + ptr += 1; + assert(ptr[0] == 'd'); + ptr += 1; + assert(ptr[0] == 0); + ptr -= 1; + assert(ptr[0] == 'd'); + ptr -= 1; + assert(ptr[0] == 'c'); + ptr -= 1; + assert(ptr[0] == 'b'); + ptr -= 1; + assert(ptr[0] == 'a'); +} + +test "double pointer parsing" { + comptime assert(PtrOf(PtrOf(i32)) == **i32); +} + +fn PtrOf(comptime T: type) type { + return *T; +} diff --git a/test/cases/struct.zig b/test/cases/struct.zig index 0712e508de..6f7d44e09b 100644 --- a/test/cases/struct.zig +++ b/test/cases/struct.zig @@ -43,7 +43,7 @@ const VoidStructFieldsFoo = struct { test "structs" { var foo: StructFoo = undefined; - @memset(@ptrCast(*u8, &foo), 0, @sizeOf(StructFoo)); + @memset(@ptrCast([*]u8, &foo), 0, @sizeOf(StructFoo)); foo.a += 1; foo.b = foo.a == 1; testFoo(foo); @@ -396,8 +396,8 @@ const Bitfields = packed struct { test "native bit field understands endianness" { var all: u64 = 0x7765443322221111; var bytes: [8]u8 = undefined; - @memcpy(&bytes[0], @ptrCast(*u8, &all), 8); - var bitfields = @ptrCast(*Bitfields, &bytes[0]).*; + @memcpy(bytes[0..].ptr, @ptrCast([*]u8, &all), 8); + var bitfields = @ptrCast(*Bitfields, bytes[0..].ptr).*; assert(bitfields.f1 == 0x1111); assert(bitfields.f2 == 0x2222); diff --git a/test/compare_output.zig b/test/compare_output.zig index 00ad4a709b..8d5dc68d45 100644 --- a/test/compare_output.zig +++ b/test/compare_output.zig @@ -6,7 +6,7 @@ const tests = @import("tests.zig"); pub fn addCases(cases: *tests.CompareOutputContext) void { cases.addC("hello world with libc", \\const c = @cImport(@cInclude("stdio.h")); - \\export fn main(argc: c_int, argv: **u8) c_int { + \\export fn main(argc: c_int, argv: [*][*]u8) c_int { \\ _ = c.puts(c"Hello, world!"); \\ return 0; \\} @@ -139,7 +139,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void { \\ @cInclude("stdio.h"); \\}); \\ - \\export fn main(argc: c_int, argv: **u8) c_int { + \\export fn main(argc: c_int, argv: [*][*]u8) c_int { \\ if (is_windows) { \\ // we want actual \n, not \r\n \\ _ = c._setmode(1, c._O_BINARY); @@ -284,9 +284,9 @@ pub fn addCases(cases: *tests.CompareOutputContext) void { cases.addC("expose function pointer to C land", \\const c = @cImport(@cInclude("stdlib.h")); \\ - \\export fn compare_fn(a: ?*const c_void, b: ?*const c_void) c_int { - \\ const a_int = @ptrCast(*align(1) const i32, a ?? unreachable); - \\ const b_int = @ptrCast(*align(1) const i32, b ?? 
unreachable); + \\export fn compare_fn(a: ?[*]const c_void, b: ?[*]const c_void) c_int { + \\ const a_int = @ptrCast(*const i32, @alignCast(@alignOf(i32), a)); + \\ const b_int = @ptrCast(*const i32, @alignCast(@alignOf(i32), b)); \\ if (a_int.* < b_int.*) { \\ return -1; \\ } else if (a_int.* > b_int.*) { @@ -297,9 +297,9 @@ pub fn addCases(cases: *tests.CompareOutputContext) void { \\} \\ \\export fn main() c_int { - \\ var array = []u32 { 1, 7, 3, 2, 0, 9, 4, 8, 6, 5 }; + \\ var array = []u32{ 1, 7, 3, 2, 0, 9, 4, 8, 6, 5 }; \\ - \\ c.qsort(@ptrCast(*c_void, &array[0]), c_ulong(array.len), @sizeOf(i32), compare_fn); + \\ c.qsort(@ptrCast(?[*]c_void, array[0..].ptr), c_ulong(array.len), @sizeOf(i32), compare_fn); \\ \\ for (array) |item, i| { \\ if (item != i) { @@ -324,7 +324,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void { \\ @cInclude("stdio.h"); \\}); \\ - \\export fn main(argc: c_int, argv: **u8) c_int { + \\export fn main(argc: c_int, argv: [*][*]u8) c_int { \\ if (is_windows) { \\ // we want actual \n, not \r\n \\ _ = c._setmode(1, c._O_BINARY); diff --git a/test/compile_errors.zig b/test/compile_errors.zig index ea1357f5bb..7e9ef82e42 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -1,6 +1,15 @@ const tests = @import("tests.zig"); pub fn addCases(cases: *tests.CompileErrorContext) void { + cases.add( + "indexing single-item pointer", + \\export fn entry(ptr: *i32) i32 { + \\ return ptr[1]; + \\} + , + ".tmp_source.zig:2:15: error: indexing not allowed on pointer to single item", + ); + cases.add( "invalid deref on switch target", \\const NextError = error{NextError}; @@ -1002,7 +1011,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ return a; \\} , - ".tmp_source.zig:3:12: error: expected type 'i32', found '*const u8'", + ".tmp_source.zig:3:12: error: expected type 'i32', found '[*]const u8'", ); cases.add( @@ -2442,13 +2451,13 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\var s_buffer: [10]u8 = undefined; \\pub fn pass(in: []u8) []u8 { \\ var out = &s_buffer; - \\ out[0].* = in[0]; + \\ out.*.* = in[0]; \\ return out.*[0..1]; \\} \\ \\export fn entry() usize { return @sizeOf(@typeOf(pass)); } , - ".tmp_source.zig:4:11: error: attempt to dereference non pointer type '[10]u8'", + ".tmp_source.zig:4:10: error: attempt to dereference non pointer type '[10]u8'", ); cases.add( diff --git a/test/translate_c.zig b/test/translate_c.zig index 9a07bc343d..ac0a98e6cc 100644 --- a/test/translate_c.zig +++ b/test/translate_c.zig @@ -14,11 +14,11 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\}; , \\pub const struct_Foo = extern struct { - \\ a: ?*Foo, + \\ a: ?[*]Foo, \\}; \\pub const Foo = struct_Foo; \\pub const struct_Bar = extern struct { - \\ a: ?*Foo, + \\ a: ?[*]Foo, \\}; ); @@ -99,7 +99,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { cases.add("restrict -> noalias", \\void foo(void *restrict bar, void *restrict); , - \\pub extern fn foo(noalias bar: ?*c_void, noalias arg1: ?*c_void) void; + \\pub extern fn foo(noalias bar: ?[*]c_void, noalias arg1: ?[*]c_void) void; ); cases.add("simple struct", @@ -110,7 +110,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { , \\const struct_Foo = extern struct { \\ x: c_int, - \\ y: ?*u8, + \\ y: ?[*]u8, \\}; , \\pub const Foo = struct_Foo; @@ -141,7 +141,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { , \\pub const BarB = enum_Bar.B; , - \\pub extern fn func(a: ?*struct_Foo, b: ?*(?*enum_Bar)) void; + \\pub extern fn func(a: 
?[*]struct_Foo, b: ?[*](?[*]enum_Bar)) void; , \\pub const Foo = struct_Foo; , @@ -151,7 +151,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { cases.add("constant size array", \\void func(int array[20]); , - \\pub extern fn func(array: ?*c_int) void; + \\pub extern fn func(array: ?[*]c_int) void; ); cases.add("self referential struct with function pointer", @@ -160,7 +160,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\}; , \\pub const struct_Foo = extern struct { - \\ derp: ?extern fn(?*struct_Foo) void, + \\ derp: ?extern fn(?[*]struct_Foo) void, \\}; , \\pub const Foo = struct_Foo; @@ -172,7 +172,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { , \\pub const struct_Foo = @OpaqueType(); , - \\pub extern fn some_func(foo: ?*struct_Foo, x: c_int) ?*struct_Foo; + \\pub extern fn some_func(foo: ?[*]struct_Foo, x: c_int) ?[*]struct_Foo; , \\pub const Foo = struct_Foo; ); @@ -219,11 +219,11 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\}; , \\pub const struct_Bar = extern struct { - \\ next: ?*struct_Foo, + \\ next: ?[*]struct_Foo, \\}; , \\pub const struct_Foo = extern struct { - \\ next: ?*struct_Bar, + \\ next: ?[*]struct_Bar, \\}; ); @@ -233,7 +233,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { , \\pub const Foo = c_void; , - \\pub extern fn fun(a: ?*Foo) Foo; + \\pub extern fn fun(a: ?[*]Foo) Foo; ); cases.add("generate inline func for #define global extern fn", @@ -505,7 +505,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ return 6; \\} , - \\pub export fn and_or_none_bool(a: c_int, b: f32, c: ?*c_void) c_int { + \\pub export fn and_or_none_bool(a: c_int, b: f32, c: ?[*]c_void) c_int { \\ if ((a != 0) and (b != 0)) return 0; \\ if ((b != 0) and (c != null)) return 1; \\ if ((a != 0) and (c != null)) return 2; @@ -607,7 +607,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\pub const struct_Foo = extern struct { \\ field: c_int, \\}; - \\pub export fn read_field(foo: ?*struct_Foo) c_int { + \\pub export fn read_field(foo: ?[*]struct_Foo) c_int { \\ return (??foo).field; \\} ); @@ -653,8 +653,8 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ return x; \\} , - \\pub export fn foo(x: ?*c_ushort) ?*c_void { - \\ return @ptrCast(?*c_void, x); + \\pub export fn foo(x: ?[*]c_ushort) ?[*]c_void { + \\ return @ptrCast(?[*]c_void, x); \\} ); @@ -674,7 +674,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ return 0; \\} , - \\pub export fn foo() ?*c_int { + \\pub export fn foo() ?[*]c_int { \\ return null; \\} ); @@ -983,7 +983,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ *x = 1; \\} , - \\pub export fn foo(x: ?*c_int) void { + \\pub export fn foo(x: ?[*]c_int) void { \\ (??x).* = 1; \\} ); @@ -1011,7 +1011,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { , \\pub fn foo() c_int { \\ var x: c_int = 1234; - \\ var ptr: ?*c_int = &x; + \\ var ptr: ?[*]c_int = &x; \\ return (??ptr).*; \\} ); @@ -1021,7 +1021,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ return "bar"; \\} , - \\pub fn foo() ?*const u8 { + \\pub fn foo() ?[*]const u8 { \\ return c"bar"; \\} ); @@ -1150,8 +1150,8 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ return (float *)a; \\} , - \\fn ptrcast(a: ?*c_int) ?*f32 { - \\ return @ptrCast(?*f32, a); + \\fn ptrcast(a: ?[*]c_int) ?[*]f32 { + \\ return @ptrCast(?[*]f32, a); \\} ); @@ -1173,7 +1173,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ return !c; \\} , - \\pub fn foo(a: c_int, b: 
f32, c: ?*c_void) c_int { + \\pub fn foo(a: c_int, b: f32, c: ?[*]c_void) c_int { \\ return !(a == 0); \\ return !(a != 0); \\ return !(b != 0); @@ -1194,7 +1194,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { cases.add("const ptr initializer", \\static const char *v0 = "0.0.0"; , - \\pub var v0: ?*const u8 = c"0.0.0"; + \\pub var v0: ?[*]const u8 = c"0.0.0"; ); cases.add("static incomplete array inside function", @@ -1203,14 +1203,14 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\} , \\pub fn foo() void { - \\ const v2: *const u8 = c"2.2.2"; + \\ const v2: [*]const u8 = c"2.2.2"; \\} ); cases.add("macro pointer cast", \\#define NRF_GPIO ((NRF_GPIO_Type *) NRF_GPIO_BASE) , - \\pub const NRF_GPIO = if (@typeId(@typeOf(NRF_GPIO_BASE)) == @import("builtin").TypeId.Pointer) @ptrCast(*NRF_GPIO_Type, NRF_GPIO_BASE) else if (@typeId(@typeOf(NRF_GPIO_BASE)) == @import("builtin").TypeId.Int) @intToPtr(*NRF_GPIO_Type, NRF_GPIO_BASE) else (*NRF_GPIO_Type)(NRF_GPIO_BASE); + \\pub const NRF_GPIO = if (@typeId(@typeOf(NRF_GPIO_BASE)) == @import("builtin").TypeId.Pointer) @ptrCast([*]NRF_GPIO_Type, NRF_GPIO_BASE) else if (@typeId(@typeOf(NRF_GPIO_BASE)) == @import("builtin").TypeId.Int) @intToPtr([*]NRF_GPIO_Type, NRF_GPIO_BASE) else ([*]NRF_GPIO_Type)(NRF_GPIO_BASE); ); cases.add("if on none bool", @@ -1231,7 +1231,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ B, \\ C, \\}; - \\pub fn if_none_bool(a: c_int, b: f32, c: ?*c_void, d: enum_SomeEnum) c_int { + \\pub fn if_none_bool(a: c_int, b: f32, c: ?[*]c_void, d: enum_SomeEnum) c_int { \\ if (a != 0) return 0; \\ if (b != 0) return 1; \\ if (c != null) return 2; @@ -1248,7 +1248,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ return 3; \\} , - \\pub fn while_none_bool(a: c_int, b: f32, c: ?*c_void) c_int { + \\pub fn while_none_bool(a: c_int, b: f32, c: ?[*]c_void) c_int { \\ while (a != 0) return 0; \\ while (b != 0) return 1; \\ while (c != null) return 2; @@ -1264,7 +1264,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ return 3; \\} , - \\pub fn for_none_bool(a: c_int, b: f32, c: ?*c_void) c_int { + \\pub fn for_none_bool(a: c_int, b: f32, c: ?[*]c_void) c_int { \\ while (a != 0) return 0; \\ while (b != 0) return 1; \\ while (c != null) return 2; -- cgit v1.2.3 From 32e0dfd4f0dab351a024e7680280343db5d7c43e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 4 Jun 2018 14:09:31 -0400 Subject: never call malloc with size 0 instead we return nullptr. this makes the behavior consistent across all platforms. 
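For illustration, a minimal standalone sketch of the guard this commit adds (the names below are hypothetical stand-ins, not the actual helpers in src/util.hpp): the C standard lets malloc(0)/calloc(0, n) return either a null pointer or a unique non-null pointer, so checking for a zero count up front and returning nullptr ourselves is what makes the result identical on every platform.

    // Illustration only; allocate_or_null is a hypothetical stand-in, not the
    // helper in src/util.hpp. C permits a size-0 allocation to return either a
    // null pointer or a unique non-null pointer, so normalizing a zero count
    // to nullptr before calling into libc gives one behavior everywhere.
    #include <cstdio>
    #include <cstdlib>

    template <typename T>
    static T *allocate_or_null(std::size_t count) {
        // Never ask the C allocator for zero bytes; decide the answer ourselves.
        if (count == 0)
            return nullptr;
        T *ptr = static_cast<T *>(std::calloc(count, sizeof(T)));
        if (ptr == nullptr)
            std::abort(); // stand-in for zig_panic("allocation failed")
        return ptr;
    }

    int main() {
        int *none = allocate_or_null<int>(0); // always nullptr, regardless of libc
        int *four = allocate_or_null<int>(4); // zero-initialized block of 4 ints
        std::printf("none=%p four[0]=%d\n", static_cast<void *>(none), four[0]);
        std::free(four);
        return 0;
    }

With the zero case answered before libc is involved, callers that may legitimately request zero elements only have to handle nullptr, instead of a pointer whose nullness depends on which allocator the platform ships.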
closes #1044 closes #1045 --- src/analyze.cpp | 4 ++-- src/util.hpp | 19 ++++++++++++++++--- 2 files changed, 18 insertions(+), 5 deletions(-) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index 2b9d776e78..3165227033 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -1860,7 +1860,7 @@ static void resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type) { } assert(!struct_type->data.structure.zero_bits_loop_flag); - assert(struct_type->data.structure.fields); + assert(struct_type->data.structure.fields || struct_type->data.structure.src_field_count == 0); assert(decl_node->type == NodeTypeContainerDecl); size_t field_count = struct_type->data.structure.src_field_count; @@ -2677,8 +2677,8 @@ static void resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type) { return; } tag_type = enum_type; + abi_alignment_so_far = get_abi_alignment(g, enum_type); // this populates src_field_count covered_enum_fields = allocate(enum_type->data.enumeration.src_field_count); - abi_alignment_so_far = get_abi_alignment(g, enum_type); } else { tag_type = nullptr; abi_alignment_so_far = 0; diff --git a/src/util.hpp b/src/util.hpp index 25141d8435..52baab7ace 100644 --- a/src/util.hpp +++ b/src/util.hpp @@ -65,6 +65,11 @@ static inline int clzll(unsigned long long mask) { template ATTRIBUTE_RETURNS_NOALIAS static inline T *allocate_nonzero(size_t count) { +#ifndef NDEBUG + // make behavior when size == 0 portable + if (count == 0) + return nullptr; +#endif T *ptr = reinterpret_cast(malloc(count * sizeof(T))); if (!ptr) zig_panic("allocation failed"); @@ -73,6 +78,11 @@ ATTRIBUTE_RETURNS_NOALIAS static inline T *allocate_nonzero(size_t count) { template ATTRIBUTE_RETURNS_NOALIAS static inline T *allocate(size_t count) { +#ifndef NDEBUG + // make behavior when size == 0 portable + if (count == 0) + return nullptr; +#endif T *ptr = reinterpret_cast(calloc(count, sizeof(T))); if (!ptr) zig_panic("allocation failed"); @@ -93,9 +103,7 @@ static inline void safe_memcpy(T *dest, const T *src, size_t count) { template static inline T *reallocate(T *old, size_t old_count, size_t new_count) { - T *ptr = reinterpret_cast(realloc(old, new_count * sizeof(T))); - if (!ptr) - zig_panic("allocation failed"); + T *ptr = reallocate_nonzero(old, old_count, new_count); if (new_count > old_count) { memset(&ptr[old_count], 0, (new_count - old_count) * sizeof(T)); } @@ -104,6 +112,11 @@ static inline T *reallocate(T *old, size_t old_count, size_t new_count) { template static inline T *reallocate_nonzero(T *old, size_t old_count, size_t new_count) { +#ifndef NDEBUG + // make behavior when size == 0 portable + if (new_count == 0 && old == nullptr) + return nullptr; +#endif T *ptr = reinterpret_cast(realloc(old, new_count * sizeof(T))); if (!ptr) zig_panic("allocation failed"); -- cgit v1.2.3 From e53b683bd3958a7b1c517e2391edce42b9d4e48b Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 4 Jun 2018 22:11:14 -0400 Subject: Pointer Reform: proper slicing and indexing (#1053) * enable slicing for single-item ptr to arrays * disable slicing for other single-item pointers * enable indexing for single-item ptr to arrays * disable indexing for other single-item pointers see #770 closes #386 --- doc/langref.html.in | 13 ++-- example/mix_o_files/base64.zig | 2 +- src/all_types.hpp | 5 ++ src/analyze.cpp | 14 ++-- src/analyze.hpp | 5 +- src/codegen.cpp | 27 +++++-- src/ir.cpp | 157 +++++++++++++++++++++++++++++++++++------ std/fmt/errol/index.zig | 2 +- std/fmt/index.zig | 8 +-- std/heap.zig | 2 
+- std/io.zig | 4 +- std/macho.zig | 2 +- std/mem.zig | 34 +++++---- std/net.zig | 2 +- std/os/index.zig | 4 +- test/cases/align.zig | 4 +- test/cases/array.zig | 29 ++++++++ test/cases/eval.zig | 6 +- test/cases/misc.zig | 4 +- test/cases/slice.zig | 2 +- test/compile_errors.zig | 21 ++++-- 21 files changed, 268 insertions(+), 79 deletions(-) (limited to 'src/analyze.cpp') diff --git a/doc/langref.html.in b/doc/langref.html.in index 32481ade50..28fdf4d8b9 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -1565,7 +1565,7 @@ var foo: u8 align(4) = 100; test "global variable alignment" { assert(@typeOf(&foo).alignment == 4); assert(@typeOf(&foo) == *align(4) u8); - const slice = (&foo)[0..1]; + const slice = (*[1]u8)(&foo)[0..]; assert(@typeOf(slice) == []align(4) u8); } @@ -1671,7 +1671,7 @@ test "using slices for strings" { test "slice pointer" { var array: [10]u8 = undefined; - const ptr = &array[0]; + const ptr = &array; // You can use slicing syntax to convert a pointer into a slice: const slice = ptr[0..5]; @@ -6004,9 +6004,12 @@ const c = @cImport({ {#code_begin|syntax#} const base64 = @import("std").base64; -export fn decode_base_64(dest_ptr: *u8, dest_len: usize, - source_ptr: *const u8, source_len: usize) usize -{ +export fn decode_base_64( + dest_ptr: [*]u8, + dest_len: usize, + source_ptr: [*]const u8, + source_len: usize, +) usize { const src = source_ptr[0..source_len]; const dest = dest_ptr[0..dest_len]; const base64_decoder = base64.standard_decoder_unsafe; diff --git a/example/mix_o_files/base64.zig b/example/mix_o_files/base64.zig index 35b090825b..7ded9824a0 100644 --- a/example/mix_o_files/base64.zig +++ b/example/mix_o_files/base64.zig @@ -1,6 +1,6 @@ const base64 = @import("std").base64; -export fn decode_base_64(dest_ptr: *u8, dest_len: usize, source_ptr: *const u8, source_len: usize) usize { +export fn decode_base_64(dest_ptr: [*]u8, dest_len: usize, source_ptr: [*]const u8, source_len: usize) usize { const src = source_ptr[0..source_len]; const dest = dest_ptr[0..dest_len]; const base64_decoder = base64.standard_decoder_unsafe; diff --git a/src/all_types.hpp b/src/all_types.hpp index f1cf96238f..d237eb00bb 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -83,6 +83,7 @@ enum ConstParentId { ConstParentIdStruct, ConstParentIdArray, ConstParentIdUnion, + ConstParentIdScalar, }; struct ConstParent { @@ -100,6 +101,9 @@ struct ConstParent { struct { ConstExprValue *union_val; } p_union; + struct { + ConstExprValue *scalar_val; + } p_scalar; } data; }; @@ -578,6 +582,7 @@ enum CastOp { CastOpBytesToSlice, CastOpNumLitToConcrete, CastOpErrSet, + CastOpBitCast, }; struct AstNodeFnCallExpr { diff --git a/src/analyze.cpp b/src/analyze.cpp index 3165227033..31c0726459 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -5158,7 +5158,8 @@ void init_const_slice(CodeGen *g, ConstExprValue *const_val, ConstExprValue *arr const_val->type = get_slice_type(g, ptr_type); const_val->data.x_struct.fields = create_const_vals(2); - init_const_ptr_array(g, &const_val->data.x_struct.fields[slice_ptr_index], array_val, start, is_const); + init_const_ptr_array(g, &const_val->data.x_struct.fields[slice_ptr_index], array_val, start, is_const, + PtrLenUnknown); init_const_usize(g, &const_val->data.x_struct.fields[slice_len_index], len); } @@ -5169,21 +5170,24 @@ ConstExprValue *create_const_slice(CodeGen *g, ConstExprValue *array_val, size_t } void init_const_ptr_array(CodeGen *g, ConstExprValue *const_val, ConstExprValue *array_val, - size_t elem_index, bool is_const) + 
size_t elem_index, bool is_const, PtrLen ptr_len) { assert(array_val->type->id == TypeTableEntryIdArray); TypeTableEntry *child_type = array_val->type->data.array.child_type; const_val->special = ConstValSpecialStatic; - const_val->type = get_pointer_to_type(g, child_type, is_const); + const_val->type = get_pointer_to_type_extra(g, child_type, is_const, false, + ptr_len, get_abi_alignment(g, child_type), 0, 0); const_val->data.x_ptr.special = ConstPtrSpecialBaseArray; const_val->data.x_ptr.data.base_array.array_val = array_val; const_val->data.x_ptr.data.base_array.elem_index = elem_index; } -ConstExprValue *create_const_ptr_array(CodeGen *g, ConstExprValue *array_val, size_t elem_index, bool is_const) { +ConstExprValue *create_const_ptr_array(CodeGen *g, ConstExprValue *array_val, size_t elem_index, bool is_const, + PtrLen ptr_len) +{ ConstExprValue *const_val = create_const_vals(1); - init_const_ptr_array(g, const_val, array_val, elem_index, is_const); + init_const_ptr_array(g, const_val, array_val, elem_index, is_const, ptr_len); return const_val; } diff --git a/src/analyze.hpp b/src/analyze.hpp index 905bfa86dd..25bda198d6 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -152,8 +152,9 @@ ConstExprValue *create_const_ptr_hard_coded_addr(CodeGen *g, TypeTableEntry *poi size_t addr, bool is_const); void init_const_ptr_array(CodeGen *g, ConstExprValue *const_val, ConstExprValue *array_val, - size_t elem_index, bool is_const); -ConstExprValue *create_const_ptr_array(CodeGen *g, ConstExprValue *array_val, size_t elem_index, bool is_const); + size_t elem_index, bool is_const, PtrLen ptr_len); +ConstExprValue *create_const_ptr_array(CodeGen *g, ConstExprValue *array_val, size_t elem_index, + bool is_const, PtrLen ptr_len); void init_const_slice(CodeGen *g, ConstExprValue *const_val, ConstExprValue *array_val, size_t start, size_t len, bool is_const); diff --git a/src/codegen.cpp b/src/codegen.cpp index 64e29a4da4..49c93feaa5 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -2574,6 +2574,8 @@ static LLVMValueRef ir_render_cast(CodeGen *g, IrExecutable *executable, add_error_range_check(g, wanted_type, g->err_tag_type, expr_val); } return expr_val; + case CastOpBitCast: + return LLVMBuildBitCast(g->builder, expr_val, wanted_type->type_ref, ""); } zig_unreachable(); } @@ -2884,7 +2886,13 @@ static LLVMValueRef ir_render_elem_ptr(CodeGen *g, IrExecutable *executable, IrI bool safety_check_on = ir_want_runtime_safety(g, &instruction->base) && instruction->safety_check_on; - if (array_type->id == TypeTableEntryIdArray) { + if (array_type->id == TypeTableEntryIdArray || + (array_type->id == TypeTableEntryIdPointer && array_type->data.pointer.ptr_len == PtrLenSingle)) + { + if (array_type->id == TypeTableEntryIdPointer) { + assert(array_type->data.pointer.child_type->id == TypeTableEntryIdArray); + array_type = array_type->data.pointer.child_type; + } if (safety_check_on) { LLVMValueRef end = LLVMConstInt(g->builtin_types.entry_usize->type_ref, array_type->data.array.len, false); @@ -3794,7 +3802,12 @@ static LLVMValueRef ir_render_slice(CodeGen *g, IrExecutable *executable, IrInst bool want_runtime_safety = instruction->safety_check_on && ir_want_runtime_safety(g, &instruction->base); - if (array_type->id == TypeTableEntryIdArray) { + if (array_type->id == TypeTableEntryIdArray || + (array_type->id == TypeTableEntryIdPointer && array_type->data.pointer.ptr_len == PtrLenSingle)) + { + if (array_type->id == TypeTableEntryIdPointer) { + array_type = array_type->data.pointer.child_type; + } 
LLVMValueRef start_val = ir_llvm_value(g, instruction->start); LLVMValueRef end_val; if (instruction->end) { @@ -3835,6 +3848,7 @@ static LLVMValueRef ir_render_slice(CodeGen *g, IrExecutable *executable, IrInst return tmp_struct_ptr; } else if (array_type->id == TypeTableEntryIdPointer) { + assert(array_type->data.pointer.ptr_len == PtrLenUnknown); LLVMValueRef start_val = ir_llvm_value(g, instruction->start); LLVMValueRef end_val = ir_llvm_value(g, instruction->end); @@ -4812,7 +4826,7 @@ static void ir_render(CodeGen *g, FnTableEntry *fn_entry) { static LLVMValueRef gen_const_ptr_struct_recursive(CodeGen *g, ConstExprValue *struct_const_val, size_t field_index); static LLVMValueRef gen_const_ptr_array_recursive(CodeGen *g, ConstExprValue *array_const_val, size_t index); -static LLVMValueRef gen_const_ptr_union_recursive(CodeGen *g, ConstExprValue *array_const_val); +static LLVMValueRef gen_const_ptr_union_recursive(CodeGen *g, ConstExprValue *union_const_val); static LLVMValueRef gen_parent_ptr(CodeGen *g, ConstExprValue *val, ConstParent *parent) { switch (parent->id) { @@ -4828,6 +4842,10 @@ static LLVMValueRef gen_parent_ptr(CodeGen *g, ConstExprValue *val, ConstParent parent->data.p_array.elem_index); case ConstParentIdUnion: return gen_const_ptr_union_recursive(g, parent->data.p_union.union_val); + case ConstParentIdScalar: + render_const_val(g, parent->data.p_scalar.scalar_val, ""); + render_const_val_global(g, parent->data.p_scalar.scalar_val, ""); + return parent->data.p_scalar.scalar_val->global_refs->llvm_global; } zig_unreachable(); } @@ -4853,7 +4871,8 @@ static LLVMValueRef gen_const_ptr_array_recursive(CodeGen *g, ConstExprValue *ar }; return LLVMConstInBoundsGEP(base_ptr, indices, 2); } else { - zig_unreachable(); + assert(parent->id == ConstParentIdScalar); + return base_ptr; } } diff --git a/src/ir.cpp b/src/ir.cpp index a230c60456..5cea04ea55 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -107,6 +107,7 @@ static IrInstruction *ir_analyze_container_field_ptr(IrAnalyze *ira, Buf *field_ static IrInstruction *ir_get_var_ptr(IrAnalyze *ira, IrInstruction *instruction, VariableTableEntry *var); static TypeTableEntry *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstruction *op); static IrInstruction *ir_lval_wrap(IrBuilder *irb, Scope *scope, IrInstruction *value, LVal lval); +static TypeTableEntry *adjust_ptr_align(CodeGen *g, TypeTableEntry *ptr_type, uint32_t new_align); ConstExprValue *const_ptr_pointee(CodeGen *g, ConstExprValue *const_val) { assert(const_val->type->id == TypeTableEntryIdPointer); @@ -6849,7 +6850,11 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec IrInstruction *free_fn = ir_build_load_ptr(irb, scope, node, free_fn_ptr); IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0); IrInstruction *coro_mem_ptr_maybe = ir_build_coro_free(irb, scope, node, coro_id, irb->exec->coro_handle); - IrInstruction *coro_mem_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type, coro_mem_ptr_maybe); + IrInstruction *u8_ptr_type_unknown_len = ir_build_const_type(irb, scope, node, + get_pointer_to_type_extra(irb->codegen, irb->codegen->builtin_types.entry_u8, + false, false, PtrLenUnknown, get_abi_alignment(irb->codegen, irb->codegen->builtin_types.entry_u8), + 0, 0)); + IrInstruction *coro_mem_ptr = ir_build_ptr_cast(irb, scope, node, u8_ptr_type_unknown_len, coro_mem_ptr_maybe); IrInstruction *coro_mem_ptr_ref = ir_build_ref(irb, scope, node, coro_mem_ptr, true, false); IrInstruction *coro_size_ptr = ir_build_var_ptr(irb, 
scope, node, coro_size_var); IrInstruction *coro_size = ir_build_load_ptr(irb, scope, node, coro_size_ptr); @@ -8729,6 +8734,7 @@ static void eval_const_expr_implicit_cast(CastOp cast_op, case CastOpNoCast: zig_unreachable(); case CastOpErrSet: + case CastOpBitCast: zig_panic("TODO"); case CastOpNoop: { @@ -9750,6 +9756,49 @@ static IrInstruction *ir_analyze_err_to_int(IrAnalyze *ira, IrInstruction *sourc return result; } +static IrInstruction *ir_analyze_ptr_to_array(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *target, + TypeTableEntry *wanted_type) +{ + assert(wanted_type->id == TypeTableEntryIdPointer); + wanted_type = adjust_ptr_align(ira->codegen, wanted_type, target->value.type->data.pointer.alignment); + TypeTableEntry *array_type = wanted_type->data.pointer.child_type; + assert(array_type->id == TypeTableEntryIdArray); + assert(array_type->data.array.len == 1); + + if (instr_is_comptime(target)) { + ConstExprValue *val = ir_resolve_const(ira, target, UndefBad); + if (!val) + return ira->codegen->invalid_instruction; + + assert(val->type->id == TypeTableEntryIdPointer); + ConstExprValue *pointee = const_ptr_pointee(ira->codegen, val); + if (pointee->special != ConstValSpecialRuntime) { + ConstExprValue *array_val = create_const_vals(1); + array_val->special = ConstValSpecialStatic; + array_val->type = array_type; + array_val->data.x_array.special = ConstArraySpecialNone; + array_val->data.x_array.s_none.elements = pointee; + array_val->data.x_array.s_none.parent.id = ConstParentIdScalar; + array_val->data.x_array.s_none.parent.data.p_scalar.scalar_val = pointee; + + IrInstructionConst *const_instruction = ir_create_instruction(&ira->new_irb, + source_instr->scope, source_instr->source_node); + const_instruction->base.value.type = wanted_type; + const_instruction->base.value.special = ConstValSpecialStatic; + const_instruction->base.value.data.x_ptr.special = ConstPtrSpecialRef; + const_instruction->base.value.data.x_ptr.data.ref.pointee = array_val; + const_instruction->base.value.data.x_ptr.mut = val->data.x_ptr.mut; + return &const_instruction->base; + } + } + + // pointer to array and pointer to single item are represented the same way at runtime + IrInstruction *result = ir_build_cast(&ira->new_irb, target->scope, target->source_node, + wanted_type, target, CastOpBitCast); + result->value.type = wanted_type; + return result; +} + static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_instr, TypeTableEntry *wanted_type, IrInstruction *value) { @@ -10156,6 +10205,30 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst } } + // explicit cast from *T to *[1]T + if (wanted_type->id == TypeTableEntryIdPointer && wanted_type->data.pointer.ptr_len == PtrLenSingle && + actual_type->id == TypeTableEntryIdPointer && actual_type->data.pointer.ptr_len == PtrLenSingle) + { + TypeTableEntry *array_type = wanted_type->data.pointer.child_type; + if (array_type->id == TypeTableEntryIdArray && array_type->data.array.len == 1 && + types_match_const_cast_only(ira, array_type->data.array.child_type, + actual_type->data.pointer.child_type, source_node).id == ConstCastResultIdOk) + { + if (wanted_type->data.pointer.alignment > actual_type->data.pointer.alignment) { + ErrorMsg *msg = ir_add_error(ira, source_instr, buf_sprintf("cast increases pointer alignment")); + add_error_note(ira->codegen, msg, value->source_node, + buf_sprintf("'%s' has alignment %" PRIu32, buf_ptr(&actual_type->name), + actual_type->data.pointer.alignment)); + 
add_error_note(ira->codegen, msg, source_instr->source_node, + buf_sprintf("'%s' has alignment %" PRIu32, buf_ptr(&wanted_type->name), + wanted_type->data.pointer.alignment)); + return ira->codegen->invalid_instruction; + } + return ir_analyze_ptr_to_array(ira, source_instr, value, wanted_type); + } + } + + // explicit cast from undefined to anything if (actual_type->id == TypeTableEntryIdUndefLit) { return ir_analyze_undefined_to_anything(ira, source_instr, value, wanted_type); @@ -13162,11 +13235,13 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc if (type_is_invalid(array_ptr->value.type)) return ira->codegen->builtin_types.entry_invalid; + ConstExprValue *orig_array_ptr_val = &array_ptr->value; + IrInstruction *elem_index = elem_ptr_instruction->elem_index->other; if (type_is_invalid(elem_index->value.type)) return ira->codegen->builtin_types.entry_invalid; - TypeTableEntry *ptr_type = array_ptr->value.type; + TypeTableEntry *ptr_type = orig_array_ptr_val->type; assert(ptr_type->id == TypeTableEntryIdPointer); TypeTableEntry *array_type = ptr_type->data.pointer.child_type; @@ -13177,7 +13252,18 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc if (type_is_invalid(array_type)) { return array_type; - } else if (array_type->id == TypeTableEntryIdArray) { + } else if (array_type->id == TypeTableEntryIdArray || + (array_type->id == TypeTableEntryIdPointer && + array_type->data.pointer.ptr_len == PtrLenSingle && + array_type->data.pointer.child_type->id == TypeTableEntryIdArray)) + { + if (array_type->id == TypeTableEntryIdPointer) { + array_type = array_type->data.pointer.child_type; + ptr_type = ptr_type->data.pointer.child_type; + if (orig_array_ptr_val->special != ConstValSpecialRuntime) { + orig_array_ptr_val = const_ptr_pointee(ira->codegen, orig_array_ptr_val); + } + } if (array_type->data.array.len == 0) { ir_add_error_node(ira, elem_ptr_instruction->base.source_node, buf_sprintf("index 0 outside array of size 0")); @@ -13205,7 +13291,7 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc } else if (array_type->id == TypeTableEntryIdPointer) { if (array_type->data.pointer.ptr_len == PtrLenSingle) { ir_add_error_node(ira, elem_ptr_instruction->base.source_node, - buf_sprintf("indexing not allowed on pointer to single item")); + buf_sprintf("index of single-item pointer")); return ira->codegen->builtin_types.entry_invalid; } return_type = adjust_ptr_len(ira->codegen, array_type, elem_ptr_instruction->ptr_len); @@ -13294,9 +13380,9 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc } ConstExprValue *array_ptr_val; - if (array_ptr->value.special != ConstValSpecialRuntime && - (array_ptr->value.data.x_ptr.mut != ConstPtrMutRuntimeVar || array_type->id == TypeTableEntryIdArray) && - (array_ptr_val = const_ptr_pointee(ira->codegen, &array_ptr->value)) && + if (orig_array_ptr_val->special != ConstValSpecialRuntime && + (orig_array_ptr_val->data.x_ptr.mut != ConstPtrMutRuntimeVar || array_type->id == TypeTableEntryIdArray) && + (array_ptr_val = const_ptr_pointee(ira->codegen, orig_array_ptr_val)) && array_ptr_val->special != ConstValSpecialRuntime && (array_type->id != TypeTableEntryIdPointer || array_ptr_val->data.x_ptr.special != ConstPtrSpecialHardCodedAddr)) @@ -13401,7 +13487,7 @@ static TypeTableEntry *ir_analyze_instruction_elem_ptr(IrAnalyze *ira, IrInstruc } else if (array_type->id == TypeTableEntryIdArray) { ConstExprValue *out_val = 
ir_build_const_from(ira, &elem_ptr_instruction->base); out_val->data.x_ptr.special = ConstPtrSpecialBaseArray; - out_val->data.x_ptr.mut = array_ptr->value.data.x_ptr.mut; + out_val->data.x_ptr.mut = orig_array_ptr_val->data.x_ptr.mut; out_val->data.x_ptr.data.base_array.array_val = array_ptr_val; out_val->data.x_ptr.data.base_array.elem_index = index; return return_type; @@ -17406,14 +17492,29 @@ static TypeTableEntry *ir_analyze_instruction_slice(IrAnalyze *ira, IrInstructio byte_alignment, 0, 0); return_type = get_slice_type(ira->codegen, slice_ptr_type); } else if (array_type->id == TypeTableEntryIdPointer) { - TypeTableEntry *slice_ptr_type = get_pointer_to_type_extra(ira->codegen, array_type->data.pointer.child_type, - array_type->data.pointer.is_const, array_type->data.pointer.is_volatile, - PtrLenUnknown, - array_type->data.pointer.alignment, 0, 0); - return_type = get_slice_type(ira->codegen, slice_ptr_type); - if (!end) { - ir_add_error(ira, &instruction->base, buf_sprintf("slice of pointer must include end value")); - return ira->codegen->builtin_types.entry_invalid; + if (array_type->data.pointer.ptr_len == PtrLenSingle) { + TypeTableEntry *main_type = array_type->data.pointer.child_type; + if (main_type->id == TypeTableEntryIdArray) { + TypeTableEntry *slice_ptr_type = get_pointer_to_type_extra(ira->codegen, + main_type->data.pointer.child_type, + array_type->data.pointer.is_const, array_type->data.pointer.is_volatile, + PtrLenUnknown, + array_type->data.pointer.alignment, 0, 0); + return_type = get_slice_type(ira->codegen, slice_ptr_type); + } else { + ir_add_error(ira, &instruction->base, buf_sprintf("slice of single-item pointer")); + return ira->codegen->builtin_types.entry_invalid; + } + } else { + TypeTableEntry *slice_ptr_type = get_pointer_to_type_extra(ira->codegen, array_type->data.pointer.child_type, + array_type->data.pointer.is_const, array_type->data.pointer.is_volatile, + PtrLenUnknown, + array_type->data.pointer.alignment, 0, 0); + return_type = get_slice_type(ira->codegen, slice_ptr_type); + if (!end) { + ir_add_error(ira, &instruction->base, buf_sprintf("slice of pointer must include end value")); + return ira->codegen->builtin_types.entry_invalid; + } } } else if (is_slice(array_type)) { TypeTableEntry *ptr_type = array_type->data.structure.fields[slice_ptr_index].type_entry; @@ -17433,12 +17534,24 @@ static TypeTableEntry *ir_analyze_instruction_slice(IrAnalyze *ira, IrInstructio size_t abs_offset; size_t rel_end; bool ptr_is_undef = false; - if (array_type->id == TypeTableEntryIdArray) { - array_val = const_ptr_pointee(ira->codegen, &ptr_ptr->value); - abs_offset = 0; - rel_end = array_type->data.array.len; - parent_ptr = nullptr; + if (array_type->id == TypeTableEntryIdArray || + (array_type->id == TypeTableEntryIdPointer && array_type->data.pointer.ptr_len == PtrLenSingle)) + { + if (array_type->id == TypeTableEntryIdPointer) { + TypeTableEntry *child_array_type = array_type->data.pointer.child_type; + assert(child_array_type->id == TypeTableEntryIdArray); + parent_ptr = const_ptr_pointee(ira->codegen, &ptr_ptr->value); + array_val = const_ptr_pointee(ira->codegen, parent_ptr); + rel_end = child_array_type->data.array.len; + abs_offset = 0; + } else { + array_val = const_ptr_pointee(ira->codegen, &ptr_ptr->value); + rel_end = array_type->data.array.len; + parent_ptr = nullptr; + abs_offset = 0; + } } else if (array_type->id == TypeTableEntryIdPointer) { + assert(array_type->data.pointer.ptr_len == PtrLenUnknown); parent_ptr = 
const_ptr_pointee(ira->codegen, &ptr_ptr->value); if (parent_ptr->special == ConstValSpecialUndef) { array_val = nullptr; @@ -17537,7 +17650,7 @@ static TypeTableEntry *ir_analyze_instruction_slice(IrAnalyze *ira, IrInstructio if (array_val) { size_t index = abs_offset + start_scalar; bool is_const = slice_is_const(return_type); - init_const_ptr_array(ira->codegen, ptr_val, array_val, index, is_const); + init_const_ptr_array(ira->codegen, ptr_val, array_val, index, is_const, PtrLenUnknown); if (array_type->id == TypeTableEntryIdArray) { ptr_val->data.x_ptr.mut = ptr_ptr->value.data.x_ptr.mut; } else if (is_slice(array_type)) { diff --git a/std/fmt/errol/index.zig b/std/fmt/errol/index.zig index 933958ac18..a906b714ab 100644 --- a/std/fmt/errol/index.zig +++ b/std/fmt/errol/index.zig @@ -59,7 +59,7 @@ pub fn roundToPrecision(float_decimal: *FloatDecimal, precision: usize, mode: Ro float_decimal.exp += 1; // Re-size the buffer to use the reserved leading byte. - const one_before = @intToPtr(*u8, @ptrToInt(&float_decimal.digits[0]) - 1); + const one_before = @intToPtr([*]u8, @ptrToInt(&float_decimal.digits[0]) - 1); float_decimal.digits = one_before[0 .. float_decimal.digits.len + 1]; float_decimal.digits[0] = '1'; return; diff --git a/std/fmt/index.zig b/std/fmt/index.zig index 21991e9ba3..047a154bb8 100644 --- a/std/fmt/index.zig +++ b/std/fmt/index.zig @@ -278,7 +278,7 @@ pub fn formatAsciiChar( comptime Errors: type, output: fn (@typeOf(context), []const u8) Errors!void, ) Errors!void { - return output(context, (&c)[0..1]); + return output(context, (*[1]u8)(&c)[0..]); } pub fn formatBuf( @@ -603,7 +603,7 @@ fn formatIntSigned( const uint = @IntType(false, @typeOf(value).bit_count); if (value < 0) { const minus_sign: u8 = '-'; - try output(context, (&minus_sign)[0..1]); + try output(context, (*[1]u8)(&minus_sign)[0..]); const new_value = uint(-(value + 1)) + 1; const new_width = if (width == 0) 0 else (width - 1); return formatIntUnsigned(new_value, base, uppercase, new_width, context, Errors, output); @@ -611,7 +611,7 @@ fn formatIntSigned( return formatIntUnsigned(uint(value), base, uppercase, width, context, Errors, output); } else { const plus_sign: u8 = '+'; - try output(context, (&plus_sign)[0..1]); + try output(context, (*[1]u8)(&plus_sign)[0..]); const new_value = uint(value); const new_width = if (width == 0) 0 else (width - 1); return formatIntUnsigned(new_value, base, uppercase, new_width, context, Errors, output); @@ -648,7 +648,7 @@ fn formatIntUnsigned( const zero_byte: u8 = '0'; var leftover_padding = padding - index; while (true) { - try output(context, (&zero_byte)[0..1]); + try output(context, (*[1]u8)(&zero_byte)[0..]); leftover_padding -= 1; if (leftover_padding == 0) break; } diff --git a/std/heap.zig b/std/heap.zig index 0b8f4aeb3f..4444a2307a 100644 --- a/std/heap.zig +++ b/std/heap.zig @@ -24,7 +24,7 @@ fn cAlloc(self: *Allocator, n: usize, alignment: u29) ![]u8 { fn cRealloc(self: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 { const old_ptr = @ptrCast([*]c_void, old_mem.ptr); if (c.realloc(old_ptr, new_size)) |buf| { - return @ptrCast(*u8, buf)[0..new_size]; + return @ptrCast([*]u8, buf)[0..new_size]; } else if (new_size <= old_mem.len) { return old_mem[0..new_size]; } else { diff --git a/std/io.zig b/std/io.zig index e20a284e4e..a603d0cf5e 100644 --- a/std/io.zig +++ b/std/io.zig @@ -219,12 +219,12 @@ pub fn OutStream(comptime WriteError: type) type { } pub fn writeByte(self: *Self, byte: u8) !void { - const slice = (&byte)[0..1]; + const 
slice = (*[1]u8)(&byte)[0..]; return self.writeFn(self, slice); } pub fn writeByteNTimes(self: *Self, byte: u8, n: usize) !void { - const slice = (&byte)[0..1]; + const slice = (*[1]u8)(&byte)[0..]; var i: usize = 0; while (i < n) : (i += 1) { try self.writeFn(self, slice); diff --git a/std/macho.zig b/std/macho.zig index e71ac76b1a..d6eef9a325 100644 --- a/std/macho.zig +++ b/std/macho.zig @@ -164,7 +164,7 @@ fn readNoEof(in: *io.FileInStream, comptime T: type, result: []T) !void { return in.stream.readNoEof(([]u8)(result)); } fn readOneNoEof(in: *io.FileInStream, comptime T: type, result: *T) !void { - return readNoEof(in, T, result[0..1]); + return readNoEof(in, T, (*[1]T)(result)[0..]); } fn isSymbol(sym: *const Nlist64) bool { diff --git a/std/mem.zig b/std/mem.zig index aec24e8491..423460e73b 100644 --- a/std/mem.zig +++ b/std/mem.zig @@ -31,14 +31,16 @@ pub const Allocator = struct { /// Guaranteed: `old_mem.len` is the same as what was returned from `allocFn` or `reallocFn` freeFn: fn (self: *Allocator, old_mem: []u8) void, - fn create(self: *Allocator, comptime T: type) !*T { + /// Call destroy with the result + pub fn create(self: *Allocator, comptime T: type) !*T { if (@sizeOf(T) == 0) return *{}; const slice = try self.alloc(T, 1); return &slice[0]; } - // TODO once #733 is solved, this will replace create - fn construct(self: *Allocator, init: var) t: { + /// Call destroy with the result + /// TODO once #733 is solved, this will replace create + pub fn construct(self: *Allocator, init: var) t: { // TODO this is a workaround for type getting parsed as Error!&const T const T = @typeOf(init).Child; break :t Error!*T; @@ -51,17 +53,19 @@ pub const Allocator = struct { return ptr; } - fn destroy(self: *Allocator, ptr: var) void { - self.free(ptr[0..1]); + /// `ptr` should be the return value of `construct` or `create` + pub fn destroy(self: *Allocator, ptr: var) void { + const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr)); + self.freeFn(self, non_const_ptr[0..@sizeOf(@typeOf(ptr).Child)]); } - fn alloc(self: *Allocator, comptime T: type, n: usize) ![]T { + pub fn alloc(self: *Allocator, comptime T: type, n: usize) ![]T { return self.alignedAlloc(T, @alignOf(T), n); } - fn alignedAlloc(self: *Allocator, comptime T: type, comptime alignment: u29, n: usize) ![]align(alignment) T { + pub fn alignedAlloc(self: *Allocator, comptime T: type, comptime alignment: u29, n: usize) ![]align(alignment) T { if (n == 0) { - return (*align(alignment) T)(undefined)[0..0]; + return ([*]align(alignment) T)(undefined)[0..0]; } const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory; const byte_slice = try self.allocFn(self, byte_count, alignment); @@ -73,17 +77,17 @@ pub const Allocator = struct { return ([]align(alignment) T)(@alignCast(alignment, byte_slice)); } - fn realloc(self: *Allocator, comptime T: type, old_mem: []T, n: usize) ![]T { + pub fn realloc(self: *Allocator, comptime T: type, old_mem: []T, n: usize) ![]T { return self.alignedRealloc(T, @alignOf(T), @alignCast(@alignOf(T), old_mem), n); } - fn alignedRealloc(self: *Allocator, comptime T: type, comptime alignment: u29, old_mem: []align(alignment) T, n: usize) ![]align(alignment) T { + pub fn alignedRealloc(self: *Allocator, comptime T: type, comptime alignment: u29, old_mem: []align(alignment) T, n: usize) ![]align(alignment) T { if (old_mem.len == 0) { return self.alloc(T, n); } if (n == 0) { self.free(old_mem); - return (*align(alignment) T)(undefined)[0..0]; + return ([*]align(alignment) 
T)(undefined)[0..0]; } const old_byte_slice = ([]u8)(old_mem); @@ -102,11 +106,11 @@ pub const Allocator = struct { /// Reallocate, but `n` must be less than or equal to `old_mem.len`. /// Unlike `realloc`, this function cannot fail. /// Shrinking to 0 is the same as calling `free`. - fn shrink(self: *Allocator, comptime T: type, old_mem: []T, n: usize) []T { + pub fn shrink(self: *Allocator, comptime T: type, old_mem: []T, n: usize) []T { return self.alignedShrink(T, @alignOf(T), @alignCast(@alignOf(T), old_mem), n); } - fn alignedShrink(self: *Allocator, comptime T: type, comptime alignment: u29, old_mem: []align(alignment) T, n: usize) []align(alignment) T { + pub fn alignedShrink(self: *Allocator, comptime T: type, comptime alignment: u29, old_mem: []align(alignment) T, n: usize) []align(alignment) T { if (n == 0) { self.free(old_mem); return old_mem[0..0]; @@ -123,10 +127,10 @@ pub const Allocator = struct { return ([]align(alignment) T)(@alignCast(alignment, byte_slice)); } - fn free(self: *Allocator, memory: var) void { + pub fn free(self: *Allocator, memory: var) void { const bytes = ([]const u8)(memory); if (bytes.len == 0) return; - const non_const_ptr = @intToPtr(*u8, @ptrToInt(bytes.ptr)); + const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr)); self.freeFn(self, non_const_ptr[0..bytes.len]); } }; diff --git a/std/net.zig b/std/net.zig index bfe4b1c2a0..f21611ff91 100644 --- a/std/net.zig +++ b/std/net.zig @@ -68,7 +68,7 @@ pub const Address = struct { pub fn parseIp4(buf: []const u8) !u32 { var result: u32 = undefined; - const out_ptr = ([]u8)((&result)[0..1]); + const out_ptr = ([]u8)((*[1]u32)(&result)[0..]); var x: u8 = 0; var index: u8 = 0; diff --git a/std/os/index.zig b/std/os/index.zig index 7e908af9eb..6023929b04 100644 --- a/std/os/index.zig +++ b/std/os/index.zig @@ -1240,7 +1240,7 @@ pub const Dir = struct { const next_index = self.index + darwin_entry.d_reclen; self.index = next_index; - const name = (&darwin_entry.d_name)[0..darwin_entry.d_namlen]; + const name = @ptrCast([*]u8, &darwin_entry.d_name)[0..darwin_entry.d_namlen]; // skip . and .. 
entries if (mem.eql(u8, name, ".") or mem.eql(u8, name, "..")) { @@ -1704,7 +1704,7 @@ pub fn argsFree(allocator: *mem.Allocator, args_alloc: []const []u8) void { for (args_alloc) |arg| { total_bytes += @sizeOf([]u8) + arg.len; } - const unaligned_allocated_buf = @ptrCast(*const u8, args_alloc.ptr)[0..total_bytes]; + const unaligned_allocated_buf = @ptrCast([*]const u8, args_alloc.ptr)[0..total_bytes]; const aligned_allocated_buf = @alignCast(@alignOf([]u8), unaligned_allocated_buf); return allocator.free(aligned_allocated_buf); } diff --git a/test/cases/align.zig b/test/cases/align.zig index b80727258e..682c185e86 100644 --- a/test/cases/align.zig +++ b/test/cases/align.zig @@ -6,7 +6,7 @@ var foo: u8 align(4) = 100; test "global variable alignment" { assert(@typeOf(&foo).alignment == 4); assert(@typeOf(&foo) == *align(4) u8); - const slice = (&foo)[0..1]; + const slice = (*[1]u8)(&foo)[0..]; assert(@typeOf(slice) == []align(4) u8); } @@ -60,7 +60,7 @@ fn addUnaligned(a: *align(1) const u32, b: *align(1) const u32) u32 { test "implicitly decreasing slice alignment" { const a: u32 align(4) = 3; const b: u32 align(8) = 4; - assert(addUnalignedSlice((&a)[0..1], (&b)[0..1]) == 7); + assert(addUnalignedSlice((*[1]u32)(&a)[0..], (*[1]u32)(&b)[0..]) == 7); } fn addUnalignedSlice(a: []align(1) const u32, b: []align(1) const u32) u32 { return a[0] + b[0]; diff --git a/test/cases/array.zig b/test/cases/array.zig index 9a405216d8..ef919b27bd 100644 --- a/test/cases/array.zig +++ b/test/cases/array.zig @@ -115,3 +115,32 @@ test "array len property" { var x: [5]i32 = undefined; assert(@typeOf(x).len == 5); } + +test "single-item pointer to array indexing and slicing" { + testSingleItemPtrArrayIndexSlice(); + comptime testSingleItemPtrArrayIndexSlice(); +} + +fn testSingleItemPtrArrayIndexSlice() void { + var array = "aaaa"; + doSomeMangling(&array); + assert(mem.eql(u8, "azya", array)); +} + +fn doSomeMangling(array: *[4]u8) void { + array[1] = 'z'; + array[2..3][0] = 'y'; +} + +test "implicit cast single-item pointer" { + testImplicitCastSingleItemPtr(); + comptime testImplicitCastSingleItemPtr(); +} + +fn testImplicitCastSingleItemPtr() void { + var byte: u8 = 100; + const slice = (*[1]u8)(&byte)[0..]; + slice[0] += 1; + assert(byte == 101); +} + diff --git a/test/cases/eval.zig b/test/cases/eval.zig index b6d6a4f37b..461408afea 100644 --- a/test/cases/eval.zig +++ b/test/cases/eval.zig @@ -418,9 +418,9 @@ test "string literal used as comptime slice is memoized" { } test "comptime slice of undefined pointer of length 0" { - const slice1 = (*i32)(undefined)[0..0]; + const slice1 = ([*]i32)(undefined)[0..0]; assert(slice1.len == 0); - const slice2 = (*i32)(undefined)[100..100]; + const slice2 = ([*]i32)(undefined)[100..100]; assert(slice2.len == 0); } @@ -508,7 +508,7 @@ test "comptime slice of slice preserves comptime var" { test "comptime slice of pointer preserves comptime var" { comptime { var buff: [10]u8 = undefined; - var a = &buff[0]; + var a = buff[0..].ptr; a[0..1][0] = 1; assert(buff[0..][0..][0] == 1); } diff --git a/test/cases/misc.zig b/test/cases/misc.zig index 5899f20f9c..e007ec4c46 100644 --- a/test/cases/misc.zig +++ b/test/cases/misc.zig @@ -274,7 +274,7 @@ test "generic malloc free" { } var some_mem: [100]u8 = undefined; fn memAlloc(comptime T: type, n: usize) error![]T { - return @ptrCast(*T, &some_mem[0])[0..n]; + return @ptrCast([*]T, &some_mem[0])[0..n]; } fn memFree(comptime T: type, memory: []T) void {} @@ -588,7 +588,7 @@ var global_ptr = &gdt[0]; // can't really run this 
test but we can make sure it has no compile error // and generates code -const vram = @intToPtr(*volatile u8, 0x20000000)[0..0x8000]; +const vram = @intToPtr([*]volatile u8, 0x20000000)[0..0x8000]; export fn writeToVRam() void { vram[0] = 'X'; } diff --git a/test/cases/slice.zig b/test/cases/slice.zig index 24e5239e2d..b4b43bdd19 100644 --- a/test/cases/slice.zig +++ b/test/cases/slice.zig @@ -1,7 +1,7 @@ const assert = @import("std").debug.assert; const mem = @import("std").mem; -const x = @intToPtr(*i32, 0x1000)[0..0x500]; +const x = @intToPtr([*]i32, 0x1000)[0..0x500]; const y = x[0x100..]; test "compile time slice of pointer to hard coded address" { assert(@ptrToInt(x.ptr) == 0x1000); diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 7e9ef82e42..17136e150f 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -1,13 +1,22 @@ const tests = @import("tests.zig"); pub fn addCases(cases: *tests.CompileErrorContext) void { + cases.add( + "slicing single-item pointer", + \\export fn entry(ptr: *i32) void { + \\ const slice = ptr[0..2]; + \\} + , + ".tmp_source.zig:2:22: error: slice of single-item pointer", + ); + cases.add( "indexing single-item pointer", \\export fn entry(ptr: *i32) i32 { \\ return ptr[1]; \\} , - ".tmp_source.zig:2:15: error: indexing not allowed on pointer to single item", + ".tmp_source.zig:2:15: error: index of single-item pointer", ); cases.add( @@ -144,10 +153,10 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { cases.add( "comptime slice of undefined pointer non-zero len", \\export fn entry() void { - \\ const slice = (*i32)(undefined)[0..1]; + \\ const slice = ([*]i32)(undefined)[0..1]; \\} , - ".tmp_source.zig:2:36: error: non-zero length slice of undefined pointer", + ".tmp_source.zig:2:38: error: non-zero length slice of undefined pointer", ); cases.add( @@ -3129,14 +3138,16 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\export fn entry() void { \\ var foo = Foo { .a = 1, .b = 10 }; \\ foo.b += 1; - \\ bar((&foo.b)[0..1]); + \\ bar((*[1]u32)(&foo.b)[0..]); \\} \\ \\fn bar(x: []u32) void { \\ x[0] += 1; \\} , - ".tmp_source.zig:9:17: error: expected type '[]u32', found '[]align(1) u32'", + ".tmp_source.zig:9:18: error: cast increases pointer alignment", + ".tmp_source.zig:9:23: note: '*align(1) u32' has alignment 1", + ".tmp_source.zig:9:18: note: '*[1]u32' has alignment 4", ); cases.add( -- cgit v1.2.3 From 02cb220faf0d527b656a3a87ec96e6738770c8e6 Mon Sep 17 00:00:00 2001 From: Jimmi HC Date: Tue, 5 Jun 2018 11:14:43 +0200 Subject: Renamed "(int/float literal)" to "comptime_int/float" --- doc/langref.html.in | 18 ++-- src/all_types.hpp | 4 +- src/analyze.cpp | 94 ++++++++++---------- src/codegen.cpp | 38 ++++---- src/ir.cpp | 230 ++++++++++++++++++++++++------------------------ std/math/ln.zig | 4 +- std/math/log.zig | 6 +- std/math/log10.zig | 4 +- std/math/log2.zig | 4 +- std/math/sqrt.zig | 4 +- test/cases/math.zig | 24 +++-- test/cases/misc.zig | 4 +- test/compile_errors.zig | 12 +-- 13 files changed, 230 insertions(+), 216 deletions(-) (limited to 'src/analyze.cpp') diff --git a/doc/langref.html.in b/doc/langref.html.in index 28fdf4d8b9..0689baa6f9 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -4893,8 +4893,8 @@ pub const TypeId = enum { Pointer, Array, Struct, - FloatLiteral, - IntLiteral, + ComptimeFloat, + ComptimeInt, UndefinedLiteral, NullLiteral, Nullable, @@ -4927,8 +4927,8 @@ pub const TypeInfo = union(TypeId) { Pointer: Pointer, Array: Array, Struct: Struct, - FloatLiteral: 
void, - IntLiteral: void, + ComptimeFloat: void, + ComptimeInt: void, UndefinedLiteral: void, NullLiteral: void, Nullable: Nullable, @@ -5685,8 +5685,8 @@ pub const TypeId = enum { Pointer, Array, Struct, - FloatLiteral, - IntLiteral, + ComptimeFloat, + ComptimeInt, UndefinedLiteral, NullLiteral, Nullable, @@ -5713,10 +5713,10 @@ pub const TypeInfo = union(TypeId) { Pointer: Pointer, Array: Array, Struct: Struct, - FloatLiteral: void, - IntLiteral: void, + ComptimeFloat: void, + ComptimeInt: void, UndefinedLiteral: void, - NullLiteral: void, + Null: void, Nullable: Nullable, ErrorUnion: ErrorUnion, ErrorSet: ErrorSet, diff --git a/src/all_types.hpp b/src/all_types.hpp index d237eb00bb..bf635eae7c 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1159,8 +1159,8 @@ enum TypeTableEntryId { TypeTableEntryIdPointer, TypeTableEntryIdArray, TypeTableEntryIdStruct, - TypeTableEntryIdNumLitFloat, - TypeTableEntryIdNumLitInt, + TypeTableEntryIdComptimeFloat, + TypeTableEntryIdComptimeInt, TypeTableEntryIdUndefLit, TypeTableEntryIdNullLit, TypeTableEntryIdMaybe, diff --git a/src/analyze.cpp b/src/analyze.cpp index 31c0726459..21841d45b6 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -232,8 +232,8 @@ bool type_is_complete(TypeTableEntry *type_entry) { case TypeTableEntryIdFloat: case TypeTableEntryIdPointer: case TypeTableEntryIdArray: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: case TypeTableEntryIdNullLit: case TypeTableEntryIdMaybe: @@ -268,8 +268,8 @@ bool type_has_zero_bits_known(TypeTableEntry *type_entry) { case TypeTableEntryIdFloat: case TypeTableEntryIdPointer: case TypeTableEntryIdArray: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: case TypeTableEntryIdNullLit: case TypeTableEntryIdMaybe: @@ -1333,8 +1333,8 @@ static bool type_allowed_in_packed_struct(TypeTableEntry *type_entry) { zig_unreachable(); case TypeTableEntryIdMetaType: case TypeTableEntryIdUnreachable: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: case TypeTableEntryIdNullLit: case TypeTableEntryIdErrorUnion: @@ -1374,8 +1374,8 @@ static bool type_allowed_in_extern(CodeGen *g, TypeTableEntry *type_entry) { case TypeTableEntryIdInvalid: zig_unreachable(); case TypeTableEntryIdMetaType: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: case TypeTableEntryIdNullLit: case TypeTableEntryIdErrorUnion: @@ -1518,8 +1518,8 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c add_node_error(g, param_node->data.param_decl.type, buf_sprintf("parameter of type '%s' not allowed", buf_ptr(&type_entry->name))); return g->builtin_types.entry_invalid; - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdNamespace: case TypeTableEntryIdBlock: case TypeTableEntryIdBoundFn: @@ -1607,8 +1607,8 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c buf_sprintf("return type '%s' not allowed", buf_ptr(&fn_type_id.return_type->name))); return 
g->builtin_types.entry_invalid; - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdNamespace: case TypeTableEntryIdBlock: case TypeTableEntryIdBoundFn: @@ -3337,8 +3337,6 @@ TypeTableEntry *validate_var_type(CodeGen *g, AstNode *source_node, TypeTableEnt case TypeTableEntryIdInvalid: return g->builtin_types.entry_invalid; case TypeTableEntryIdUnreachable: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: case TypeTableEntryIdUndefLit: case TypeTableEntryIdNullLit: case TypeTableEntryIdBlock: @@ -3347,6 +3345,8 @@ TypeTableEntry *validate_var_type(CodeGen *g, AstNode *source_node, TypeTableEnt add_node_error(g, source_node, buf_sprintf("variable of type '%s' not allowed", buf_ptr(&type_entry->name))); return g->builtin_types.entry_invalid; + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdNamespace: case TypeTableEntryIdMetaType: case TypeTableEntryIdVoid: @@ -3480,8 +3480,8 @@ static void resolve_decl_var(CodeGen *g, TldVar *tld_var) { add_node_error(g, source_node, buf_sprintf("variable initialization is unreachable")); implicit_type = g->builtin_types.entry_invalid; } else if ((!is_const || linkage == VarLinkageExternal) && - (implicit_type->id == TypeTableEntryIdNumLitFloat || - implicit_type->id == TypeTableEntryIdNumLitInt)) + (implicit_type->id == TypeTableEntryIdComptimeFloat || + implicit_type->id == TypeTableEntryIdComptimeInt)) { add_node_error(g, source_node, buf_sprintf("unable to infer variable type")); implicit_type = g->builtin_types.entry_invalid; @@ -3730,8 +3730,8 @@ static bool is_container(TypeTableEntry *type_entry) { case TypeTableEntryIdInt: case TypeTableEntryIdFloat: case TypeTableEntryIdArray: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: case TypeTableEntryIdNullLit: case TypeTableEntryIdMaybe: @@ -3779,8 +3779,8 @@ void resolve_container_type(CodeGen *g, TypeTableEntry *type_entry) { case TypeTableEntryIdInt: case TypeTableEntryIdFloat: case TypeTableEntryIdArray: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: case TypeTableEntryIdNullLit: case TypeTableEntryIdMaybe: @@ -4283,8 +4283,8 @@ bool handle_is_ptr(TypeTableEntry *type_entry) { switch (type_entry->id) { case TypeTableEntryIdInvalid: case TypeTableEntryIdMetaType: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: case TypeTableEntryIdNullLit: case TypeTableEntryIdNamespace: @@ -4568,7 +4568,7 @@ static uint32_t hash_const_val(ConstExprValue *const_val) { case TypeTableEntryIdVoid: return (uint32_t)4149439618; case TypeTableEntryIdInt: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeInt: { uint32_t result = 1331471175; for (size_t i = 0; i < const_val->data.x_bigint.digit_count; i += 1) { @@ -4609,7 +4609,7 @@ static uint32_t hash_const_val(ConstExprValue *const_val) { default: zig_unreachable(); } - case TypeTableEntryIdNumLitFloat: + case TypeTableEntryIdComptimeFloat: { float128_t f128 = bigfloat_to_f128(&const_val->data.x_bigfloat); uint32_t ints[4]; @@ -4754,8 +4754,8 @@ static bool can_mutate_comptime_var_state(ConstExprValue 
*value) { case TypeTableEntryIdUnreachable: case TypeTableEntryIdInt: case TypeTableEntryIdFloat: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: case TypeTableEntryIdNullLit: case TypeTableEntryIdNamespace: @@ -4819,8 +4819,8 @@ static bool return_type_is_cacheable(TypeTableEntry *return_type) { case TypeTableEntryIdUnreachable: case TypeTableEntryIdInt: case TypeTableEntryIdFloat: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: case TypeTableEntryIdNullLit: case TypeTableEntryIdNamespace: @@ -4930,8 +4930,8 @@ bool type_requires_comptime(TypeTableEntry *type_entry) { case TypeTableEntryIdInvalid: case TypeTableEntryIdOpaque: zig_unreachable(); - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: case TypeTableEntryIdNullLit: case TypeTableEntryIdMetaType: @@ -5070,7 +5070,7 @@ ConstExprValue *create_const_signed(TypeTableEntry *type, int64_t x) { void init_const_float(ConstExprValue *const_val, TypeTableEntry *type, double value) { const_val->special = ConstValSpecialStatic; const_val->type = type; - if (type->id == TypeTableEntryIdNumLitFloat) { + if (type->id == TypeTableEntryIdComptimeFloat) { bigfloat_init_64(&const_val->data.x_bigfloat, value); } else if (type->id == TypeTableEntryIdFloat) { switch (type->data.floating.bit_count) { @@ -5350,10 +5350,10 @@ bool const_values_equal(ConstExprValue *a, ConstExprValue *b) { default: zig_unreachable(); } - case TypeTableEntryIdNumLitFloat: + case TypeTableEntryIdComptimeFloat: return bigfloat_cmp(&a->data.x_bigfloat, &b->data.x_bigfloat) == CmpEQ; case TypeTableEntryIdInt: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeInt: return bigint_cmp(&a->data.x_bigint, &b->data.x_bigint) == CmpEQ; case TypeTableEntryIdPointer: case TypeTableEntryIdFn: @@ -5514,7 +5514,7 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) { case TypeTableEntryIdVoid: buf_appendf(buf, "{}"); return; - case TypeTableEntryIdNumLitFloat: + case TypeTableEntryIdComptimeFloat: bigfloat_append_buf(buf, &const_val->data.x_bigfloat); return; case TypeTableEntryIdFloat: @@ -5542,7 +5542,7 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) { default: zig_unreachable(); } - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdInt: bigint_append_buf(buf, &const_val->data.x_bigint, 10); return; @@ -5761,8 +5761,8 @@ uint32_t type_id_hash(TypeId x) { case TypeTableEntryIdUnreachable: case TypeTableEntryIdFloat: case TypeTableEntryIdStruct: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: case TypeTableEntryIdNullLit: case TypeTableEntryIdMaybe: @@ -5807,8 +5807,8 @@ bool type_id_eql(TypeId a, TypeId b) { case TypeTableEntryIdUnreachable: case TypeTableEntryIdFloat: case TypeTableEntryIdStruct: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: case TypeTableEntryIdNullLit: case TypeTableEntryIdMaybe: @@ -5929,8 +5929,8 @@ static const TypeTableEntryId all_type_ids[] 
= { TypeTableEntryIdPointer, TypeTableEntryIdArray, TypeTableEntryIdStruct, - TypeTableEntryIdNumLitFloat, - TypeTableEntryIdNumLitInt, + TypeTableEntryIdComptimeFloat, + TypeTableEntryIdComptimeInt, TypeTableEntryIdUndefLit, TypeTableEntryIdNullLit, TypeTableEntryIdMaybe, @@ -5980,9 +5980,9 @@ size_t type_id_index(TypeTableEntry *entry) { if (entry->data.structure.is_slice) return 25; return 8; - case TypeTableEntryIdNumLitFloat: + case TypeTableEntryIdComptimeFloat: return 9; - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeInt: return 10; case TypeTableEntryIdUndefLit: return 11; @@ -6038,10 +6038,10 @@ const char *type_id_name(TypeTableEntryId id) { return "Array"; case TypeTableEntryIdStruct: return "Struct"; - case TypeTableEntryIdNumLitFloat: - return "FloatLiteral"; - case TypeTableEntryIdNumLitInt: - return "IntLiteral"; + case TypeTableEntryIdComptimeFloat: + return "ComptimeFloat"; + case TypeTableEntryIdComptimeInt: + return "ComptimeInt"; case TypeTableEntryIdUndefLit: return "UndefinedLiteral"; case TypeTableEntryIdNullLit: diff --git a/src/codegen.cpp b/src/codegen.cpp index 49c93feaa5..dc915e766d 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -4916,8 +4916,8 @@ static LLVMValueRef pack_const_int(CodeGen *g, LLVMTypeRef big_int_type_ref, Con case TypeTableEntryIdInvalid: case TypeTableEntryIdMetaType: case TypeTableEntryIdUnreachable: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: case TypeTableEntryIdNullLit: case TypeTableEntryIdErrorUnion: @@ -5362,8 +5362,8 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c case TypeTableEntryIdInvalid: case TypeTableEntryIdMetaType: case TypeTableEntryIdUnreachable: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: case TypeTableEntryIdNullLit: case TypeTableEntryIdNamespace: @@ -5604,7 +5604,7 @@ static void do_code_gen(CodeGen *g) { TldVar *tld_var = g->global_vars.at(i); VariableTableEntry *var = tld_var->var; - if (var->value->type->id == TypeTableEntryIdNumLitFloat) { + if (var->value->type->id == TypeTableEntryIdComptimeFloat) { // Generate debug info for it but that's it. ConstExprValue *const_val = var->value; assert(const_val->special != ConstValSpecialRuntime); @@ -5618,7 +5618,7 @@ static void do_code_gen(CodeGen *g) { continue; } - if (var->value->type->id == TypeTableEntryIdNumLitInt) { + if (var->value->type->id == TypeTableEntryIdComptimeInt) { // Generate debug info for it but that's it. 
ConstExprValue *const_val = var->value; assert(const_val->special != ConstValSpecialRuntime); @@ -6012,16 +6012,18 @@ static void define_builtin_types(CodeGen *g) { g->builtin_types.entry_block = entry; } { - TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdNumLitFloat); - buf_init_from_str(&entry->name, "(float literal)"); + TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdComptimeFloat); + buf_init_from_str(&entry->name, "comptime_float"); entry->zero_bits = true; g->builtin_types.entry_num_lit_float = entry; + g->primitive_type_table.put(&entry->name, entry); } { - TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdNumLitInt); - buf_init_from_str(&entry->name, "(integer literal)"); + TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdComptimeInt); + buf_init_from_str(&entry->name, "comptime_int"); entry->zero_bits = true; g->builtin_types.entry_num_lit_int = entry; + g->primitive_type_table.put(&entry->name, entry); } { TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdUndefLit); @@ -6495,8 +6497,8 @@ static void define_builtin_compile_vars(CodeGen *g) { " Slice: Slice,\n" " Array: Array,\n" " Struct: Struct,\n" - " FloatLiteral: void,\n" - " IntLiteral: void,\n" + " ComptimeFloat: void,\n" + " ComptimeInt: void,\n" " UndefinedLiteral: void,\n" " NullLiteral: void,\n" " Nullable: Nullable,\n" @@ -7070,8 +7072,8 @@ static void prepend_c_type_to_decl_list(CodeGen *g, GenH *gen_h, TypeTableEntry switch (type_entry->id) { case TypeTableEntryIdInvalid: case TypeTableEntryIdMetaType: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: case TypeTableEntryIdNullLit: case TypeTableEntryIdNamespace: @@ -7255,8 +7257,8 @@ static void get_c_type(CodeGen *g, GenH *gen_h, TypeTableEntry *type_entry, Buf case TypeTableEntryIdBoundFn: case TypeTableEntryIdNamespace: case TypeTableEntryIdBlock: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: case TypeTableEntryIdNullLit: case TypeTableEntryIdArgTuple: @@ -7407,8 +7409,8 @@ static void gen_h_file(CodeGen *g) { case TypeTableEntryIdInt: case TypeTableEntryIdFloat: case TypeTableEntryIdPointer: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdArray: case TypeTableEntryIdUndefLit: case TypeTableEntryIdNullLit: diff --git a/src/ir.cpp b/src/ir.cpp index 5cea04ea55..2819ef5b0e 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -6945,14 +6945,14 @@ static bool ir_emit_global_runtime_side_effect(IrAnalyze *ira, IrInstruction *so } static bool const_val_fits_in_num_lit(ConstExprValue *const_val, TypeTableEntry *num_lit_type) { - return ((num_lit_type->id == TypeTableEntryIdNumLitFloat && - (const_val->type->id == TypeTableEntryIdFloat || const_val->type->id == TypeTableEntryIdNumLitFloat)) || - (num_lit_type->id == TypeTableEntryIdNumLitInt && - (const_val->type->id == TypeTableEntryIdInt || const_val->type->id == TypeTableEntryIdNumLitInt))); + return ((num_lit_type->id == TypeTableEntryIdComptimeFloat && + (const_val->type->id == TypeTableEntryIdFloat || const_val->type->id == TypeTableEntryIdComptimeFloat)) || + (num_lit_type->id == TypeTableEntryIdComptimeInt && + (const_val->type->id == TypeTableEntryIdInt || const_val->type->id == 
TypeTableEntryIdComptimeInt))); } static bool float_has_fraction(ConstExprValue *const_val) { - if (const_val->type->id == TypeTableEntryIdNumLitFloat) { + if (const_val->type->id == TypeTableEntryIdComptimeFloat) { return bigfloat_has_fraction(&const_val->data.x_bigfloat); } else if (const_val->type->id == TypeTableEntryIdFloat) { switch (const_val->type->data.floating.bit_count) { @@ -6975,7 +6975,7 @@ static bool float_has_fraction(ConstExprValue *const_val) { } static void float_append_buf(Buf *buf, ConstExprValue *const_val) { - if (const_val->type->id == TypeTableEntryIdNumLitFloat) { + if (const_val->type->id == TypeTableEntryIdComptimeFloat) { bigfloat_append_buf(buf, &const_val->data.x_bigfloat); } else if (const_val->type->id == TypeTableEntryIdFloat) { switch (const_val->type->data.floating.bit_count) { @@ -7010,7 +7010,7 @@ static void float_append_buf(Buf *buf, ConstExprValue *const_val) { } static void float_init_bigint(BigInt *bigint, ConstExprValue *const_val) { - if (const_val->type->id == TypeTableEntryIdNumLitFloat) { + if (const_val->type->id == TypeTableEntryIdComptimeFloat) { bigint_init_bigfloat(bigint, &const_val->data.x_bigfloat); } else if (const_val->type->id == TypeTableEntryIdFloat) { switch (const_val->type->data.floating.bit_count) { @@ -7046,7 +7046,7 @@ static void float_init_bigint(BigInt *bigint, ConstExprValue *const_val) { } static void float_init_bigfloat(ConstExprValue *dest_val, BigFloat *bigfloat) { - if (dest_val->type->id == TypeTableEntryIdNumLitFloat) { + if (dest_val->type->id == TypeTableEntryIdComptimeFloat) { bigfloat_init_bigfloat(&dest_val->data.x_bigfloat, bigfloat); } else if (dest_val->type->id == TypeTableEntryIdFloat) { switch (dest_val->type->data.floating.bit_count) { @@ -7068,7 +7068,7 @@ static void float_init_bigfloat(ConstExprValue *dest_val, BigFloat *bigfloat) { } static void float_init_f32(ConstExprValue *dest_val, float x) { - if (dest_val->type->id == TypeTableEntryIdNumLitFloat) { + if (dest_val->type->id == TypeTableEntryIdComptimeFloat) { bigfloat_init_32(&dest_val->data.x_bigfloat, x); } else if (dest_val->type->id == TypeTableEntryIdFloat) { switch (dest_val->type->data.floating.bit_count) { @@ -7094,7 +7094,7 @@ static void float_init_f32(ConstExprValue *dest_val, float x) { } static void float_init_f64(ConstExprValue *dest_val, double x) { - if (dest_val->type->id == TypeTableEntryIdNumLitFloat) { + if (dest_val->type->id == TypeTableEntryIdComptimeFloat) { bigfloat_init_64(&dest_val->data.x_bigfloat, x); } else if (dest_val->type->id == TypeTableEntryIdFloat) { switch (dest_val->type->data.floating.bit_count) { @@ -7120,7 +7120,7 @@ static void float_init_f64(ConstExprValue *dest_val, double x) { } static void float_init_f128(ConstExprValue *dest_val, float128_t x) { - if (dest_val->type->id == TypeTableEntryIdNumLitFloat) { + if (dest_val->type->id == TypeTableEntryIdComptimeFloat) { bigfloat_init_128(&dest_val->data.x_bigfloat, x); } else if (dest_val->type->id == TypeTableEntryIdFloat) { switch (dest_val->type->data.floating.bit_count) { @@ -7150,7 +7150,7 @@ static void float_init_f128(ConstExprValue *dest_val, float128_t x) { } static void float_init_float(ConstExprValue *dest_val, ConstExprValue *src_val) { - if (src_val->type->id == TypeTableEntryIdNumLitFloat) { + if (src_val->type->id == TypeTableEntryIdComptimeFloat) { float_init_bigfloat(dest_val, &src_val->data.x_bigfloat); } else if (src_val->type->id == TypeTableEntryIdFloat) { switch (src_val->type->data.floating.bit_count) { @@ -7173,7 +7173,7 @@ 
static void float_init_float(ConstExprValue *dest_val, ConstExprValue *src_val) static Cmp float_cmp(ConstExprValue *op1, ConstExprValue *op2) { assert(op1->type == op2->type); - if (op1->type->id == TypeTableEntryIdNumLitFloat) { + if (op1->type->id == TypeTableEntryIdComptimeFloat) { return bigfloat_cmp(&op1->data.x_bigfloat, &op2->data.x_bigfloat); } else if (op1->type->id == TypeTableEntryIdFloat) { switch (op1->type->data.floating.bit_count) { @@ -7210,7 +7210,7 @@ static Cmp float_cmp(ConstExprValue *op1, ConstExprValue *op2) { } static Cmp float_cmp_zero(ConstExprValue *op) { - if (op->type->id == TypeTableEntryIdNumLitFloat) { + if (op->type->id == TypeTableEntryIdComptimeFloat) { return bigfloat_cmp_zero(&op->data.x_bigfloat); } else if (op->type->id == TypeTableEntryIdFloat) { switch (op->type->data.floating.bit_count) { @@ -7251,7 +7251,7 @@ static Cmp float_cmp_zero(ConstExprValue *op) { static void float_add(ConstExprValue *out_val, ConstExprValue *op1, ConstExprValue *op2) { assert(op1->type == op2->type); out_val->type = op1->type; - if (op1->type->id == TypeTableEntryIdNumLitFloat) { + if (op1->type->id == TypeTableEntryIdComptimeFloat) { bigfloat_add(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat); } else if (op1->type->id == TypeTableEntryIdFloat) { switch (op1->type->data.floating.bit_count) { @@ -7275,7 +7275,7 @@ static void float_add(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal static void float_sub(ConstExprValue *out_val, ConstExprValue *op1, ConstExprValue *op2) { assert(op1->type == op2->type); out_val->type = op1->type; - if (op1->type->id == TypeTableEntryIdNumLitFloat) { + if (op1->type->id == TypeTableEntryIdComptimeFloat) { bigfloat_sub(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat); } else if (op1->type->id == TypeTableEntryIdFloat) { switch (op1->type->data.floating.bit_count) { @@ -7299,7 +7299,7 @@ static void float_sub(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal static void float_mul(ConstExprValue *out_val, ConstExprValue *op1, ConstExprValue *op2) { assert(op1->type == op2->type); out_val->type = op1->type; - if (op1->type->id == TypeTableEntryIdNumLitFloat) { + if (op1->type->id == TypeTableEntryIdComptimeFloat) { bigfloat_mul(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat); } else if (op1->type->id == TypeTableEntryIdFloat) { switch (op1->type->data.floating.bit_count) { @@ -7323,7 +7323,7 @@ static void float_mul(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal static void float_div(ConstExprValue *out_val, ConstExprValue *op1, ConstExprValue *op2) { assert(op1->type == op2->type); out_val->type = op1->type; - if (op1->type->id == TypeTableEntryIdNumLitFloat) { + if (op1->type->id == TypeTableEntryIdComptimeFloat) { bigfloat_div(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat); } else if (op1->type->id == TypeTableEntryIdFloat) { switch (op1->type->data.floating.bit_count) { @@ -7347,7 +7347,7 @@ static void float_div(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal static void float_div_trunc(ConstExprValue *out_val, ConstExprValue *op1, ConstExprValue *op2) { assert(op1->type == op2->type); out_val->type = op1->type; - if (op1->type->id == TypeTableEntryIdNumLitFloat) { + if (op1->type->id == TypeTableEntryIdComptimeFloat) { bigfloat_div_trunc(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat); } else if (op1->type->id == TypeTableEntryIdFloat) { switch 
(op1->type->data.floating.bit_count) { @@ -7382,7 +7382,7 @@ static void float_div_trunc(ConstExprValue *out_val, ConstExprValue *op1, ConstE static void float_div_floor(ConstExprValue *out_val, ConstExprValue *op1, ConstExprValue *op2) { assert(op1->type == op2->type); out_val->type = op1->type; - if (op1->type->id == TypeTableEntryIdNumLitFloat) { + if (op1->type->id == TypeTableEntryIdComptimeFloat) { bigfloat_div_floor(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat); } else if (op1->type->id == TypeTableEntryIdFloat) { switch (op1->type->data.floating.bit_count) { @@ -7407,7 +7407,7 @@ static void float_div_floor(ConstExprValue *out_val, ConstExprValue *op1, ConstE static void float_rem(ConstExprValue *out_val, ConstExprValue *op1, ConstExprValue *op2) { assert(op1->type == op2->type); out_val->type = op1->type; - if (op1->type->id == TypeTableEntryIdNumLitFloat) { + if (op1->type->id == TypeTableEntryIdComptimeFloat) { bigfloat_rem(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat); } else if (op1->type->id == TypeTableEntryIdFloat) { switch (op1->type->data.floating.bit_count) { @@ -7431,7 +7431,7 @@ static void float_rem(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal static void float_mod(ConstExprValue *out_val, ConstExprValue *op1, ConstExprValue *op2) { assert(op1->type == op2->type); out_val->type = op1->type; - if (op1->type->id == TypeTableEntryIdNumLitFloat) { + if (op1->type->id == TypeTableEntryIdComptimeFloat) { bigfloat_mod(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat); } else if (op1->type->id == TypeTableEntryIdFloat) { switch (op1->type->data.floating.bit_count) { @@ -7456,7 +7456,7 @@ static void float_mod(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal static void float_negate(ConstExprValue *out_val, ConstExprValue *op) { out_val->type = op->type; - if (op->type->id == TypeTableEntryIdNumLitFloat) { + if (op->type->id == TypeTableEntryIdComptimeFloat) { bigfloat_negate(&out_val->data.x_bigfloat, &op->data.x_bigfloat); } else if (op->type->id == TypeTableEntryIdFloat) { switch (op->type->data.floating.bit_count) { @@ -7530,9 +7530,9 @@ static bool ir_num_lit_fits_in_other_type(IrAnalyze *ira, IrInstruction *instruc assert(const_val->special != ConstValSpecialRuntime); bool const_val_is_int = (const_val->type->id == TypeTableEntryIdInt || - const_val->type->id == TypeTableEntryIdNumLitInt); + const_val->type->id == TypeTableEntryIdComptimeInt); bool const_val_is_float = (const_val->type->id == TypeTableEntryIdFloat || - const_val->type->id == TypeTableEntryIdNumLitFloat); + const_val->type->id == TypeTableEntryIdComptimeFloat); if (other_type->id == TypeTableEntryIdFloat) { return true; } else if (other_type->id == TypeTableEntryIdInt && const_val_is_int) { @@ -7576,7 +7576,7 @@ static bool ir_num_lit_fits_in_other_type(IrAnalyze *ira, IrInstruction *instruc return true; } } - if (explicit_cast && (other_type->id == TypeTableEntryIdInt || other_type->id == TypeTableEntryIdNumLitInt) && + if (explicit_cast && (other_type->id == TypeTableEntryIdInt || other_type->id == TypeTableEntryIdComptimeInt) && const_val_is_float) { if (float_has_fraction(const_val)) { @@ -7589,7 +7589,7 @@ static bool ir_num_lit_fits_in_other_type(IrAnalyze *ira, IrInstruction *instruc buf_ptr(&other_type->name))); return false; } else { - if (other_type->id == TypeTableEntryIdNumLitInt) { + if (other_type->id == TypeTableEntryIdComptimeInt) { return true; } else { BigInt bigint; @@ -8078,8 +8078,8 @@ 
static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira, // implicit number literal to typed number // implicit number literal to &const integer - if (actual_type->id == TypeTableEntryIdNumLitFloat || - actual_type->id == TypeTableEntryIdNumLitInt) + if (actual_type->id == TypeTableEntryIdComptimeFloat || + actual_type->id == TypeTableEntryIdComptimeInt) { if (expected_type->id == TypeTableEntryIdPointer && expected_type->data.pointer.is_const) @@ -8099,9 +8099,9 @@ static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira, // implicit typed number to integer or float literal. // works when the number is known if (value->value.special == ConstValSpecialStatic) { - if (actual_type->id == TypeTableEntryIdInt && expected_type->id == TypeTableEntryIdNumLitInt) { + if (actual_type->id == TypeTableEntryIdInt && expected_type->id == TypeTableEntryIdComptimeInt) { return ImplicitCastMatchResultYes; - } else if (actual_type->id == TypeTableEntryIdFloat && expected_type->id == TypeTableEntryIdNumLitFloat) { + } else if (actual_type->id == TypeTableEntryIdFloat && expected_type->id == TypeTableEntryIdComptimeFloat) { return ImplicitCastMatchResultYes; } } @@ -8555,8 +8555,8 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod continue; } - if (prev_type->id == TypeTableEntryIdNumLitInt || - prev_type->id == TypeTableEntryIdNumLitFloat) + if (prev_type->id == TypeTableEntryIdComptimeInt || + prev_type->id == TypeTableEntryIdComptimeFloat) { if (ir_num_lit_fits_in_other_type(ira, prev_inst, cur_type, false)) { prev_inst = cur_inst; @@ -8566,8 +8566,8 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod } } - if (cur_type->id == TypeTableEntryIdNumLitInt || - cur_type->id == TypeTableEntryIdNumLitFloat) + if (cur_type->id == TypeTableEntryIdComptimeInt || + cur_type->id == TypeTableEntryIdComptimeFloat) { if (ir_num_lit_fits_in_other_type(ira, cur_inst, prev_type, false)) { continue; @@ -8671,8 +8671,8 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod } else if (expected_type != nullptr && expected_type->id == TypeTableEntryIdErrorUnion) { return get_error_union_type(ira->codegen, err_set_type, expected_type->data.error_union.payload_type); } else { - if (prev_inst->value.type->id == TypeTableEntryIdNumLitInt || - prev_inst->value.type->id == TypeTableEntryIdNumLitFloat) + if (prev_inst->value.type->id == TypeTableEntryIdComptimeInt || + prev_inst->value.type->id == TypeTableEntryIdComptimeFloat) { ir_add_error_node(ira, source_node, buf_sprintf("unable to make error union out of number literal")); @@ -8686,8 +8686,8 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod } } } else if (any_are_null && prev_inst->value.type->id != TypeTableEntryIdNullLit) { - if (prev_inst->value.type->id == TypeTableEntryIdNumLitInt || - prev_inst->value.type->id == TypeTableEntryIdNumLitFloat) + if (prev_inst->value.type->id == TypeTableEntryIdComptimeInt || + prev_inst->value.type->id == TypeTableEntryIdComptimeFloat) { ir_add_error_node(ira, source_node, buf_sprintf("unable to make maybe out of number literal")); @@ -8743,7 +8743,7 @@ static void eval_const_expr_implicit_cast(CastOp cast_op, break; } case CastOpNumLitToConcrete: - if (other_val->type->id == TypeTableEntryIdNumLitFloat) { + if (other_val->type->id == TypeTableEntryIdComptimeFloat) { assert(new_type->id == TypeTableEntryIdFloat); switch (new_type->data.floating.bit_count) { case 32: 
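For context on the hunks above: after the rename, untyped number literals have the types comptime_int and comptime_float, and ir_num_lit_fits_in_other_type / CastOpNumLitToConcrete still let them coerce implicitly to any concrete numeric type that can hold the value. A minimal user-level sketch of that behaviour, written against the 0.2-era test conventions used elsewhere in this series (the test name and the particular values are illustrative, not taken from the patch):

    const assert = @import("std").debug.assert;

    test "comptime literals coerce to concrete types" {
        const a: i32 = 3;    // 3 is a comptime_int; it fits i32, so no explicit cast is needed
        const b: f32 = 1.5;  // 1.5 is a comptime_float; it fits f32, so no explicit cast is needed
        assert(a == 3);
        assert(b == 1.5);
    }
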
@@ -8758,7 +8758,7 @@ static void eval_const_expr_implicit_cast(CastOp cast_op, default: zig_unreachable(); } - } else if (other_val->type->id == TypeTableEntryIdNumLitInt) { + } else if (other_val->type->id == TypeTableEntryIdComptimeInt) { bigint_init_bigint(&const_val->data.x_bigint, &other_val->data.x_bigint); } else { zig_unreachable(); @@ -9601,9 +9601,9 @@ static IrInstruction *ir_analyze_number_to_literal(IrAnalyze *ira, IrInstruction IrInstruction *result = ir_create_const(&ira->new_irb, source_instr->scope, source_instr->source_node, wanted_type); - if (wanted_type->id == TypeTableEntryIdNumLitFloat) { + if (wanted_type->id == TypeTableEntryIdComptimeFloat) { float_init_float(&result->value, val); - } else if (wanted_type->id == TypeTableEntryIdNumLitInt) { + } else if (wanted_type->id == TypeTableEntryIdComptimeInt) { bigint_init_bigint(&result->value.data.x_bigint, &val->data.x_bigint); } else { zig_unreachable(); @@ -9978,8 +9978,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst TypeTableEntry *wanted_child_type = wanted_type->data.maybe.child_type; if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node).id == ConstCastResultIdOk) { return ir_analyze_maybe_wrap(ira, source_instr, value, wanted_type); - } else if (actual_type->id == TypeTableEntryIdNumLitInt || - actual_type->id == TypeTableEntryIdNumLitFloat) + } else if (actual_type->id == TypeTableEntryIdComptimeInt || + actual_type->id == TypeTableEntryIdComptimeFloat) { if (ir_num_lit_fits_in_other_type(ira, value, wanted_child_type, true)) { return ir_analyze_maybe_wrap(ira, source_instr, value, wanted_type); @@ -10013,8 +10013,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst if (wanted_type->id == TypeTableEntryIdErrorUnion) { if (types_match_const_cast_only(ira, wanted_type->data.error_union.payload_type, actual_type, source_node).id == ConstCastResultIdOk) { return ir_analyze_err_wrap_payload(ira, source_instr, value, wanted_type); - } else if (actual_type->id == TypeTableEntryIdNumLitInt || - actual_type->id == TypeTableEntryIdNumLitFloat) + } else if (actual_type->id == TypeTableEntryIdComptimeInt || + actual_type->id == TypeTableEntryIdComptimeFloat) { if (ir_num_lit_fits_in_other_type(ira, value, wanted_type->data.error_union.payload_type, true)) { return ir_analyze_err_wrap_payload(ira, source_instr, value, wanted_type); @@ -10062,8 +10062,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst TypeTableEntry *wanted_child_type = wanted_type->data.error_union.payload_type->data.maybe.child_type; if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node).id == ConstCastResultIdOk || actual_type->id == TypeTableEntryIdNullLit || - actual_type->id == TypeTableEntryIdNumLitInt || - actual_type->id == TypeTableEntryIdNumLitFloat) + actual_type->id == TypeTableEntryIdComptimeInt || + actual_type->id == TypeTableEntryIdComptimeFloat) { IrInstruction *cast1 = ir_analyze_cast(ira, source_instr, wanted_type->data.error_union.payload_type, value); if (type_is_invalid(cast1->value.type)) @@ -10079,8 +10079,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst // explicit cast from number literal to another type // explicit cast from number literal to &const integer - if (actual_type->id == TypeTableEntryIdNumLitFloat || - actual_type->id == TypeTableEntryIdNumLitInt) + if (actual_type->id == TypeTableEntryIdComptimeFloat || + actual_type->id == 
TypeTableEntryIdComptimeInt) { ensure_complete_type(ira->codegen, wanted_type); if (type_is_invalid(wanted_type)) @@ -10109,9 +10109,9 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst return cast2; } else if (ir_num_lit_fits_in_other_type(ira, value, wanted_type, true)) { CastOp op; - if ((actual_type->id == TypeTableEntryIdNumLitFloat && + if ((actual_type->id == TypeTableEntryIdComptimeFloat && wanted_type->id == TypeTableEntryIdFloat) || - (actual_type->id == TypeTableEntryIdNumLitInt && + (actual_type->id == TypeTableEntryIdComptimeInt && wanted_type->id == TypeTableEntryIdInt)) { op = CastOpNumLitToConcrete; @@ -10131,8 +10131,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst // explicit cast from typed number to integer or float literal. // works when the number is known at compile time if (instr_is_comptime(value) && - ((actual_type->id == TypeTableEntryIdInt && wanted_type->id == TypeTableEntryIdNumLitInt) || - (actual_type->id == TypeTableEntryIdFloat && wanted_type->id == TypeTableEntryIdNumLitFloat))) + ((actual_type->id == TypeTableEntryIdInt && wanted_type->id == TypeTableEntryIdComptimeInt) || + (actual_type->id == TypeTableEntryIdFloat && wanted_type->id == TypeTableEntryIdComptimeFloat))) { return ir_analyze_number_to_literal(ira, source_instr, value, wanted_type); } @@ -10759,8 +10759,8 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp case TypeTableEntryIdInvalid: zig_unreachable(); // handled above - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdInt: case TypeTableEntryIdFloat: break; @@ -10818,10 +10818,10 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp bool one_possible_value = !type_requires_comptime(resolved_type) && !type_has_bits(resolved_type); if (one_possible_value || (value_is_comptime(op1_val) && value_is_comptime(op2_val))) { bool answer; - if (resolved_type->id == TypeTableEntryIdNumLitFloat || resolved_type->id == TypeTableEntryIdFloat) { + if (resolved_type->id == TypeTableEntryIdComptimeFloat || resolved_type->id == TypeTableEntryIdFloat) { Cmp cmp_result = float_cmp(op1_val, op2_val); answer = resolve_cmp_op_id(op_id, cmp_result); - } else if (resolved_type->id == TypeTableEntryIdNumLitInt || resolved_type->id == TypeTableEntryIdInt) { + } else if (resolved_type->id == TypeTableEntryIdComptimeInt || resolved_type->id == TypeTableEntryIdInt) { Cmp cmp_result = bigint_cmp(&op1_val->data.x_bigint, &op2_val->data.x_bigint); answer = resolve_cmp_op_id(op_id, cmp_result); } else { @@ -10885,12 +10885,12 @@ static int ir_eval_math_op(TypeTableEntry *type_entry, ConstExprValue *op1_val, bool is_int; bool is_float; Cmp op2_zcmp; - if (type_entry->id == TypeTableEntryIdInt || type_entry->id == TypeTableEntryIdNumLitInt) { + if (type_entry->id == TypeTableEntryIdInt || type_entry->id == TypeTableEntryIdComptimeInt) { is_int = true; is_float = false; op2_zcmp = bigint_cmp_zero(&op2_val->data.x_bigint); } else if (type_entry->id == TypeTableEntryIdFloat || - type_entry->id == TypeTableEntryIdNumLitFloat) + type_entry->id == TypeTableEntryIdComptimeFloat) { is_int = false; is_float = true; @@ -11064,7 +11064,7 @@ static TypeTableEntry *ir_analyze_bit_shift(IrAnalyze *ira, IrInstructionBinOp * if (type_is_invalid(op1->value.type)) return ira->codegen->builtin_types.entry_invalid; - if (op1->value.type->id != 
TypeTableEntryIdInt && op1->value.type->id != TypeTableEntryIdNumLitInt) { + if (op1->value.type->id != TypeTableEntryIdInt && op1->value.type->id != TypeTableEntryIdComptimeInt) { ir_add_error(ira, &bin_op_instruction->base, buf_sprintf("bit shifting operation expected integer type, found '%s'", buf_ptr(&op1->value.type->name))); @@ -11077,7 +11077,7 @@ static TypeTableEntry *ir_analyze_bit_shift(IrAnalyze *ira, IrInstructionBinOp * IrInstruction *casted_op2; IrBinOp op_id = bin_op_instruction->op_id; - if (op1->value.type->id == TypeTableEntryIdNumLitInt) { + if (op1->value.type->id == TypeTableEntryIdComptimeInt) { casted_op2 = op2; if (op_id == IrBinOpBitShiftLeftLossy) { @@ -11122,7 +11122,7 @@ static TypeTableEntry *ir_analyze_bit_shift(IrAnalyze *ira, IrInstructionBinOp * ir_num_lit_fits_in_other_type(ira, result_instruction, op1->value.type, false); return op1->value.type; - } else if (op1->value.type->id == TypeTableEntryIdNumLitInt) { + } else if (op1->value.type->id == TypeTableEntryIdComptimeInt) { ir_add_error(ira, &bin_op_instruction->base, buf_sprintf("LHS of shift must be an integer type, or RHS must be compile-time known")); return ira->codegen->builtin_types.entry_invalid; @@ -11158,15 +11158,15 @@ static TypeTableEntry *ir_analyze_bin_op_math(IrAnalyze *ira, IrInstructionBinOp if (type_is_invalid(resolved_type)) return resolved_type; - bool is_int = resolved_type->id == TypeTableEntryIdInt || resolved_type->id == TypeTableEntryIdNumLitInt; - bool is_float = resolved_type->id == TypeTableEntryIdFloat || resolved_type->id == TypeTableEntryIdNumLitFloat; + bool is_int = resolved_type->id == TypeTableEntryIdInt || resolved_type->id == TypeTableEntryIdComptimeInt; + bool is_float = resolved_type->id == TypeTableEntryIdFloat || resolved_type->id == TypeTableEntryIdComptimeFloat; bool is_signed_div = ( (resolved_type->id == TypeTableEntryIdInt && resolved_type->data.integral.is_signed) || resolved_type->id == TypeTableEntryIdFloat || - (resolved_type->id == TypeTableEntryIdNumLitFloat && + (resolved_type->id == TypeTableEntryIdComptimeFloat && ((bigfloat_cmp_zero(&op1->value.data.x_bigfloat) != CmpGT) != (bigfloat_cmp_zero(&op2->value.data.x_bigfloat) != CmpGT))) || - (resolved_type->id == TypeTableEntryIdNumLitInt && + (resolved_type->id == TypeTableEntryIdComptimeInt && ((bigint_cmp_zero(&op1->value.data.x_bigint) != CmpGT) != (bigint_cmp_zero(&op2->value.data.x_bigint) != CmpGT))) ); @@ -11267,7 +11267,7 @@ static TypeTableEntry *ir_analyze_bin_op_math(IrAnalyze *ira, IrInstructionBinOp return ira->codegen->builtin_types.entry_invalid; } - if (resolved_type->id == TypeTableEntryIdNumLitInt) { + if (resolved_type->id == TypeTableEntryIdComptimeInt) { if (op_id == IrBinOpAddWrap) { op_id = IrBinOpAdd; } else if (op_id == IrBinOpSubWrap) { @@ -11641,8 +11641,8 @@ static VarClassRequired get_var_class_required(TypeTableEntry *type_entry) { case TypeTableEntryIdFn: case TypeTableEntryIdPromise: return VarClassRequiredAny; - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: case TypeTableEntryIdBlock: case TypeTableEntryIdNullLit: @@ -11910,8 +11910,8 @@ static TypeTableEntry *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructi case TypeTableEntryIdMetaType: case TypeTableEntryIdVoid: case TypeTableEntryIdUnreachable: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case 
TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: case TypeTableEntryIdNullLit: case TypeTableEntryIdMaybe: @@ -11934,8 +11934,8 @@ static TypeTableEntry *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructi case TypeTableEntryIdFloat: case TypeTableEntryIdPointer: case TypeTableEntryIdArray: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: case TypeTableEntryIdNullLit: case TypeTableEntryIdMaybe: @@ -12149,7 +12149,7 @@ static bool ir_analyze_fn_call_generic_arg(IrAnalyze *ira, AstNode *fn_proto_nod } bool comptime_arg = param_decl_node->data.param_decl.is_inline || - casted_arg->value.type->id == TypeTableEntryIdNumLitInt || casted_arg->value.type->id == TypeTableEntryIdNumLitFloat; + casted_arg->value.type->id == TypeTableEntryIdComptimeInt || casted_arg->value.type->id == TypeTableEntryIdComptimeFloat; ConstExprValue *arg_val; @@ -12174,8 +12174,8 @@ static bool ir_analyze_fn_call_generic_arg(IrAnalyze *ira, AstNode *fn_proto_nod var->shadowable = !comptime_arg; *next_proto_i += 1; - } else if (casted_arg->value.type->id == TypeTableEntryIdNumLitInt || - casted_arg->value.type->id == TypeTableEntryIdNumLitFloat) + } else if (casted_arg->value.type->id == TypeTableEntryIdComptimeInt || + casted_arg->value.type->id == TypeTableEntryIdComptimeFloat) { ir_add_error(ira, casted_arg, buf_sprintf("compiler bug: integer and float literals in var args function must be casted. https://github.com/ziglang/zig/issues/557")); @@ -12898,8 +12898,8 @@ static TypeTableEntry *ir_analyze_maybe(IrAnalyze *ira, IrInstructionUnOp *un_op case TypeTableEntryIdPointer: case TypeTableEntryIdArray: case TypeTableEntryIdStruct: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: case TypeTableEntryIdNullLit: case TypeTableEntryIdMaybe: @@ -12935,10 +12935,10 @@ static TypeTableEntry *ir_analyze_negation(IrAnalyze *ira, IrInstructionUnOp *un bool is_wrap_op = (un_op_instruction->op_id == IrUnOpNegationWrap); - bool is_float = (expr_type->id == TypeTableEntryIdFloat || expr_type->id == TypeTableEntryIdNumLitFloat); + bool is_float = (expr_type->id == TypeTableEntryIdFloat || expr_type->id == TypeTableEntryIdComptimeFloat); if ((expr_type->id == TypeTableEntryIdInt && expr_type->data.integral.is_signed) || - expr_type->id == TypeTableEntryIdNumLitInt || (is_float && !is_wrap_op)) + expr_type->id == TypeTableEntryIdComptimeInt || (is_float && !is_wrap_op)) { if (instr_is_comptime(value)) { ConstExprValue *target_const_val = ir_resolve_const(ira, value, UndefBad); @@ -12954,7 +12954,7 @@ static TypeTableEntry *ir_analyze_negation(IrAnalyze *ira, IrInstructionUnOp *un } else { bigint_negate(&out_val->data.x_bigint, &target_const_val->data.x_bigint); } - if (is_wrap_op || is_float || expr_type->id == TypeTableEntryIdNumLitInt) { + if (is_wrap_op || is_float || expr_type->id == TypeTableEntryIdComptimeInt) { return expr_type; } @@ -13150,8 +13150,8 @@ static TypeTableEntry *ir_analyze_instruction_phi(IrAnalyze *ira, IrInstructionP if (type_is_invalid(resolved_type)) return resolved_type; - if (resolved_type->id == TypeTableEntryIdNumLitFloat || - resolved_type->id == TypeTableEntryIdNumLitInt || + if (resolved_type->id == TypeTableEntryIdComptimeFloat || + resolved_type->id == TypeTableEntryIdComptimeInt || resolved_type->id == TypeTableEntryIdNullLit || 
resolved_type->id == TypeTableEntryIdUndefLit) { @@ -14213,8 +14213,8 @@ static TypeTableEntry *ir_analyze_instruction_typeof(IrAnalyze *ira, IrInstructi switch (type_entry->id) { case TypeTableEntryIdInvalid: zig_unreachable(); // handled above - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: case TypeTableEntryIdNullLit: case TypeTableEntryIdNamespace: @@ -14495,8 +14495,8 @@ static TypeTableEntry *ir_analyze_instruction_slice_type(IrAnalyze *ira, case TypeTableEntryIdPointer: case TypeTableEntryIdArray: case TypeTableEntryIdStruct: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdMaybe: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: @@ -14603,8 +14603,8 @@ static TypeTableEntry *ir_analyze_instruction_array_type(IrAnalyze *ira, case TypeTableEntryIdPointer: case TypeTableEntryIdArray: case TypeTableEntryIdStruct: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdMaybe: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: @@ -14659,8 +14659,8 @@ static TypeTableEntry *ir_analyze_instruction_size_of(IrAnalyze *ira, case TypeTableEntryIdUndefLit: case TypeTableEntryIdNullLit: case TypeTableEntryIdBlock: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdBoundFn: case TypeTableEntryIdMetaType: case TypeTableEntryIdNamespace: @@ -15020,8 +15020,8 @@ static TypeTableEntry *ir_analyze_instruction_switch_target(IrAnalyze *ira, case TypeTableEntryIdBool: case TypeTableEntryIdInt: case TypeTableEntryIdFloat: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdPointer: case TypeTableEntryIdPromise: case TypeTableEntryIdFn: @@ -15618,8 +15618,8 @@ static TypeTableEntry *ir_analyze_min_max(IrAnalyze *ira, IrInstruction *source_ case TypeTableEntryIdPromise: case TypeTableEntryIdArray: case TypeTableEntryIdStruct: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: case TypeTableEntryIdNullLit: case TypeTableEntryIdMaybe: @@ -16280,8 +16280,8 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t case TypeTableEntryIdVoid: case TypeTableEntryIdBool: case TypeTableEntryIdUnreachable: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: case TypeTableEntryIdNullLit: case TypeTableEntryIdNamespace: @@ -17143,7 +17143,7 @@ static TypeTableEntry *ir_analyze_instruction_truncate(IrAnalyze *ira, IrInstruc return ira->codegen->builtin_types.entry_invalid; if (dest_type->id != TypeTableEntryIdInt && - dest_type->id != TypeTableEntryIdNumLitInt) + dest_type->id != TypeTableEntryIdComptimeInt) { ir_add_error(ira, dest_type_value, buf_sprintf("expected integer type, found '%s'", buf_ptr(&dest_type->name))); return ira->codegen->builtin_types.entry_invalid; @@ -17155,7 +17155,7 @@ static TypeTableEntry *ir_analyze_instruction_truncate(IrAnalyze 
*ira, IrInstruc return ira->codegen->builtin_types.entry_invalid; if (src_type->id != TypeTableEntryIdInt && - src_type->id != TypeTableEntryIdNumLitInt) + src_type->id != TypeTableEntryIdComptimeInt) { ir_add_error(ira, target, buf_sprintf("expected integer type, found '%s'", buf_ptr(&src_type->name))); return ira->codegen->builtin_types.entry_invalid; @@ -17876,8 +17876,8 @@ static TypeTableEntry *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstruc zig_unreachable(); case TypeTableEntryIdMetaType: case TypeTableEntryIdUnreachable: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: case TypeTableEntryIdNullLit: case TypeTableEntryIdNamespace: @@ -18377,8 +18377,8 @@ static TypeTableEntry *ir_analyze_instruction_check_switch_prongs(IrAnalyze *ira if (!end_val) return ira->codegen->builtin_types.entry_invalid; - assert(start_val->type->id == TypeTableEntryIdInt || start_val->type->id == TypeTableEntryIdNumLitInt); - assert(end_val->type->id == TypeTableEntryIdInt || end_val->type->id == TypeTableEntryIdNumLitInt); + assert(start_val->type->id == TypeTableEntryIdInt || start_val->type->id == TypeTableEntryIdComptimeInt); + assert(end_val->type->id == TypeTableEntryIdInt || end_val->type->id == TypeTableEntryIdComptimeInt); AstNode *prev_node = rangeset_add_range(&rs, &start_val->data.x_bigint, &end_val->data.x_bigint, start_value->source_node); if (prev_node != nullptr) { @@ -18610,8 +18610,8 @@ static void buf_write_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue case TypeTableEntryIdNamespace: case TypeTableEntryIdBlock: case TypeTableEntryIdUnreachable: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: case TypeTableEntryIdNullLit: case TypeTableEntryIdPromise: @@ -18677,8 +18677,8 @@ static void buf_read_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue case TypeTableEntryIdNamespace: case TypeTableEntryIdBlock: case TypeTableEntryIdUnreachable: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: case TypeTableEntryIdNullLit: case TypeTableEntryIdPromise: @@ -18758,8 +18758,8 @@ static TypeTableEntry *ir_analyze_instruction_bit_cast(IrAnalyze *ira, IrInstruc case TypeTableEntryIdNamespace: case TypeTableEntryIdBlock: case TypeTableEntryIdUnreachable: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: case TypeTableEntryIdNullLit: ir_add_error(ira, dest_type_value, @@ -18784,8 +18784,8 @@ static TypeTableEntry *ir_analyze_instruction_bit_cast(IrAnalyze *ira, IrInstruc case TypeTableEntryIdNamespace: case TypeTableEntryIdBlock: case TypeTableEntryIdUnreachable: - case TypeTableEntryIdNumLitFloat: - case TypeTableEntryIdNumLitInt: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: case TypeTableEntryIdNullLit: ir_add_error(ira, dest_type_value, @@ -19560,7 +19560,7 @@ static TypeTableEntry *ir_analyze_instruction_sqrt(IrAnalyze *ira, IrInstruction if (type_is_invalid(op->value.type)) return ira->codegen->builtin_types.entry_invalid; - bool ok_type = float_type->id == TypeTableEntryIdNumLitFloat || float_type->id == 
TypeTableEntryIdFloat; + bool ok_type = float_type->id == TypeTableEntryIdComptimeFloat || float_type->id == TypeTableEntryIdFloat; if (!ok_type) { ir_add_error(ira, instruction->type, buf_sprintf("@sqrt does not support type '%s'", buf_ptr(&float_type->name))); return ira->codegen->builtin_types.entry_invalid; @@ -19577,7 +19577,7 @@ static TypeTableEntry *ir_analyze_instruction_sqrt(IrAnalyze *ira, IrInstruction ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base); - if (float_type->id == TypeTableEntryIdNumLitFloat) { + if (float_type->id == TypeTableEntryIdComptimeFloat) { bigfloat_sqrt(&out_val->data.x_bigfloat, &val->data.x_bigfloat); } else if (float_type->id == TypeTableEntryIdFloat) { switch (float_type->data.floating.bit_count) { diff --git a/std/math/ln.zig b/std/math/ln.zig index 263e5955cb..3fd75977b9 100644 --- a/std/math/ln.zig +++ b/std/math/ln.zig @@ -14,7 +14,7 @@ const TypeId = builtin.TypeId; pub fn ln(x: var) @typeOf(x) { const T = @typeOf(x); switch (@typeId(T)) { - TypeId.FloatLiteral => { + TypeId.ComptimeFloat => { return @typeOf(1.0)(ln_64(x)); }, TypeId.Float => { @@ -24,7 +24,7 @@ pub fn ln(x: var) @typeOf(x) { else => @compileError("ln not implemented for " ++ @typeName(T)), }; }, - TypeId.IntLiteral => { + TypeId.ComptimeInt => { return @typeOf(1)(math.floor(ln_64(f64(x)))); }, TypeId.Int => { diff --git a/std/math/log.zig b/std/math/log.zig index 1cba1138db..2c876081d8 100644 --- a/std/math/log.zig +++ b/std/math/log.zig @@ -9,15 +9,15 @@ pub fn log(comptime T: type, base: T, x: T) T { return math.log2(x); } else if (base == 10) { return math.log10(x); - } else if ((@typeId(T) == TypeId.Float or @typeId(T) == TypeId.FloatLiteral) and base == math.e) { + } else if ((@typeId(T) == TypeId.Float or @typeId(T) == TypeId.ComptimeFloat) and base == math.e) { return math.ln(x); } switch (@typeId(T)) { - TypeId.FloatLiteral => { + TypeId.ComptimeFloat => { return @typeOf(1.0)(math.ln(f64(x)) / math.ln(f64(base))); }, - TypeId.IntLiteral => { + TypeId.ComptimeInt => { return @typeOf(1)(math.floor(math.ln(f64(x)) / math.ln(f64(base)))); }, builtin.TypeId.Int => { diff --git a/std/math/log10.zig b/std/math/log10.zig index d9fa1dcb02..c444add9ac 100644 --- a/std/math/log10.zig +++ b/std/math/log10.zig @@ -14,7 +14,7 @@ const TypeId = builtin.TypeId; pub fn log10(x: var) @typeOf(x) { const T = @typeOf(x); switch (@typeId(T)) { - TypeId.FloatLiteral => { + TypeId.ComptimeFloat => { return @typeOf(1.0)(log10_64(x)); }, TypeId.Float => { @@ -24,7 +24,7 @@ pub fn log10(x: var) @typeOf(x) { else => @compileError("log10 not implemented for " ++ @typeName(T)), }; }, - TypeId.IntLiteral => { + TypeId.ComptimeInt => { return @typeOf(1)(math.floor(log10_64(f64(x)))); }, TypeId.Int => { diff --git a/std/math/log2.zig b/std/math/log2.zig index 22cc8082b3..2530519941 100644 --- a/std/math/log2.zig +++ b/std/math/log2.zig @@ -14,7 +14,7 @@ const TypeId = builtin.TypeId; pub fn log2(x: var) @typeOf(x) { const T = @typeOf(x); switch (@typeId(T)) { - TypeId.FloatLiteral => { + TypeId.ComptimeFloat => { return @typeOf(1.0)(log2_64(x)); }, TypeId.Float => { @@ -24,7 +24,7 @@ pub fn log2(x: var) @typeOf(x) { else => @compileError("log2 not implemented for " ++ @typeName(T)), }; }, - TypeId.IntLiteral => comptime { + TypeId.ComptimeInt => comptime { var result = 0; var x_shifted = x; while (b: { diff --git a/std/math/sqrt.zig b/std/math/sqrt.zig index 982bd28b72..7a3ddb3b96 100644 --- a/std/math/sqrt.zig +++ b/std/math/sqrt.zig @@ -14,9 +14,9 @@ const TypeId = builtin.TypeId; 
pub fn sqrt(x: var) (if (@typeId(@typeOf(x)) == TypeId.Int) @IntType(false, @typeOf(x).bit_count / 2) else @typeOf(x)) { const T = @typeOf(x); switch (@typeId(T)) { - TypeId.FloatLiteral => return T(@sqrt(f64, x)), // TODO upgrade to f128 + TypeId.ComptimeFloat => return T(@sqrt(f64, x)), // TODO upgrade to f128 TypeId.Float => return @sqrt(T, x), - TypeId.IntLiteral => comptime { + TypeId.ComptimeInt => comptime { if (x > @maxValue(u128)) { @compileError("sqrt not implemented for comptime_int greater than 128 bits"); } diff --git a/test/cases/math.zig b/test/cases/math.zig index 0c18293dd5..0bf99cff0e 100644 --- a/test/cases/math.zig +++ b/test/cases/math.zig @@ -329,14 +329,14 @@ fn testShrExact(x: u8) void { assert(shifted == 0b00101101); } -test "big number addition" { +test "comptime_int addition" { comptime { assert(35361831660712422535336160538497375248 + 101752735581729509668353361206450473702 == 137114567242441932203689521744947848950); assert(594491908217841670578297176641415611445982232488944558774612 + 390603545391089362063884922208143568023166603618446395589768 == 985095453608931032642182098849559179469148836107390954364380); } } -test "big number multiplication" { +test "comptime_int multiplication" { comptime { assert( 45960427431263824329884196484953148229 * 128339149605334697009938835852565949723 == 5898522172026096622534201617172456926982464453350084962781392314016180490567, @@ -347,13 +347,13 @@ test "big number multiplication" { } } -test "big number shifting" { +test "comptime_int shifting" { comptime { assert((u128(1) << 127) == 0x80000000000000000000000000000000); } } -test "big number multi-limb shift and mask" { +test "comptime_int multi-limb shift and mask" { comptime { var a = 0xefffffffa0000001eeeeeeefaaaaaaab; @@ -370,7 +370,7 @@ test "big number multi-limb shift and mask" { } } -test "big number multi-limb partial shift right" { +test "comptime_int multi-limb partial shift right" { comptime { var a = 0x1ffffffffeeeeeeee; a >>= 16; @@ -391,7 +391,7 @@ fn test_xor() void { assert(0xFF ^ 0xFF == 0x00); } -test "big number xor" { +test "comptime_int xor" { comptime { assert(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF ^ 0x00000000000000000000000000000000 == 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF); assert(0xFFFFFFFFFFFFFFFF0000000000000000 ^ 0x0000000000000000FFFFFFFFFFFFFFFF == 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF); @@ -449,3 +449,15 @@ test "@sqrt" { fn testSqrt(comptime T: type, x: T) void { assert(@sqrt(T, x * x) == x); } + +test "comptime_int param and return" { + const a = comptimeAdd(35361831660712422535336160538497375248, 101752735581729509668353361206450473702); + assert(a == 137114567242441932203689521744947848950); + + const b = comptimeAdd(594491908217841670578297176641415611445982232488944558774612, 390603545391089362063884922208143568023166603618446395589768); + assert(b == 985095453608931032642182098849559179469148836107390954364380); +} + +fn comptimeAdd(comptime a: comptime_int, comptime b: comptime_int) comptime_int { + return a + b; +} diff --git a/test/cases/misc.zig b/test/cases/misc.zig index e007ec4c46..1821e29a20 100644 --- a/test/cases/misc.zig +++ b/test/cases/misc.zig @@ -501,8 +501,8 @@ test "@typeId" { assert(@typeId(*f32) == Tid.Pointer); assert(@typeId([2]u8) == Tid.Array); assert(@typeId(AStruct) == Tid.Struct); - assert(@typeId(@typeOf(1)) == Tid.IntLiteral); - assert(@typeId(@typeOf(1.0)) == Tid.FloatLiteral); + assert(@typeId(@typeOf(1)) == Tid.ComptimeInt); + assert(@typeId(@typeOf(1.0)) == Tid.ComptimeFloat); assert(@typeId(@typeOf(undefined)) 
== Tid.UndefinedLiteral); assert(@typeId(@typeOf(null)) == Tid.NullLiteral); assert(@typeId(?i32) == Tid.Nullable); diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 17136e150f..e264d57b5e 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -1539,7 +1539,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\fn foo() *const i32 { return y; } \\export fn entry() usize { return @sizeOf(@typeOf(foo)); } , - ".tmp_source.zig:3:30: error: expected type '*const i32', found '*const (integer literal)'", + ".tmp_source.zig:3:30: error: expected type '*const i32', found '*const comptime_int'", ); cases.add( @@ -1555,7 +1555,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\const x = 2 == 2.0; \\export fn entry() usize { return @sizeOf(@typeOf(x)); } , - ".tmp_source.zig:1:11: error: integer value 2 cannot be implicitly casted to type '(float literal)'", + ".tmp_source.zig:1:11: error: integer value 2 cannot be implicitly casted to type 'comptime_float'", ); cases.add( @@ -2189,7 +2189,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ \\export fn entry() usize { return @sizeOf(@typeOf(block_aligned_stuff)); } , - ".tmp_source.zig:3:60: error: unable to perform binary not operation on type '(integer literal)'", + ".tmp_source.zig:3:60: error: unable to perform binary not operation on type 'comptime_int'", ); cases.addCase(x: { @@ -3269,10 +3269,10 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ fn bar(self: *const Foo) void {} \\}; , - ".tmp_source.zig:4:4: error: variable of type '*(integer literal)' must be const or comptime", + ".tmp_source.zig:4:4: error: variable of type '*comptime_int' must be const or comptime", ".tmp_source.zig:7:4: error: variable of type '(undefined)' must be const or comptime", - ".tmp_source.zig:8:4: error: variable of type '(integer literal)' must be const or comptime", - ".tmp_source.zig:9:4: error: variable of type '(float literal)' must be const or comptime", + ".tmp_source.zig:8:4: error: variable of type 'comptime_int' must be const or comptime", + ".tmp_source.zig:9:4: error: variable of type 'comptime_float' must be const or comptime", ".tmp_source.zig:10:4: error: variable of type '(block)' must be const or comptime", ".tmp_source.zig:11:4: error: variable of type '(null)' must be const or comptime", ".tmp_source.zig:12:4: error: variable of type 'Opaque' must be const or comptime", -- cgit v1.2.3 From 236c680f6bae490fddab4935892bd75240176d0b Mon Sep 17 00:00:00 2001 From: Jimmi HC Date: Tue, 5 Jun 2018 11:30:01 +0200 Subject: Removed NullLiteral to Null --- doc/langref.html.in | 6 ++--- src/all_types.hpp | 2 +- src/analyze.cpp | 46 ++++++++++++++++++------------------- src/codegen.cpp | 14 ++++++------ src/ir.cpp | 66 ++++++++++++++++++++++++++--------------------------- test/cases/misc.zig | 2 +- 6 files changed, 68 insertions(+), 68 deletions(-) (limited to 'src/analyze.cpp') diff --git a/doc/langref.html.in b/doc/langref.html.in index 0689baa6f9..70f11c0e2b 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -4896,7 +4896,7 @@ pub const TypeId = enum { ComptimeFloat, ComptimeInt, UndefinedLiteral, - NullLiteral, + Null, Nullable, ErrorUnion, Error, @@ -4930,7 +4930,7 @@ pub const TypeInfo = union(TypeId) { ComptimeFloat: void, ComptimeInt: void, UndefinedLiteral: void, - NullLiteral: void, + Null: void, Nullable: Nullable, ErrorUnion: ErrorUnion, ErrorSet: ErrorSet, @@ -5688,7 +5688,7 @@ pub const TypeId = enum { ComptimeFloat, ComptimeInt, UndefinedLiteral, 
- NullLiteral, + Null, Nullable, ErrorUnion, ErrorSet, diff --git a/src/all_types.hpp b/src/all_types.hpp index bf635eae7c..6b30a1155d 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1162,7 +1162,7 @@ enum TypeTableEntryId { TypeTableEntryIdComptimeFloat, TypeTableEntryIdComptimeInt, TypeTableEntryIdUndefLit, - TypeTableEntryIdNullLit, + TypeTableEntryIdNull, TypeTableEntryIdMaybe, TypeTableEntryIdErrorUnion, TypeTableEntryIdErrorSet, diff --git a/src/analyze.cpp b/src/analyze.cpp index 21841d45b6..a605cb3a7f 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -235,7 +235,7 @@ bool type_is_complete(TypeTableEntry *type_entry) { case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdMaybe: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: @@ -271,7 +271,7 @@ bool type_has_zero_bits_known(TypeTableEntry *type_entry) { case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdMaybe: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: @@ -1336,7 +1336,7 @@ static bool type_allowed_in_packed_struct(TypeTableEntry *type_entry) { case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdNamespace: @@ -1377,7 +1377,7 @@ static bool type_allowed_in_extern(CodeGen *g, TypeTableEntry *type_entry) { case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdNamespace: @@ -1512,7 +1512,7 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c return g->builtin_types.entry_invalid; case TypeTableEntryIdUnreachable: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdArgTuple: case TypeTableEntryIdOpaque: add_node_error(g, param_node->data.param_decl.type, @@ -1600,7 +1600,7 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c zig_unreachable(); case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdArgTuple: case TypeTableEntryIdOpaque: add_node_error(g, fn_proto->return_type, @@ -3338,7 +3338,7 @@ TypeTableEntry *validate_var_type(CodeGen *g, AstNode *source_node, TypeTableEnt return g->builtin_types.entry_invalid; case TypeTableEntryIdUnreachable: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdBlock: case TypeTableEntryIdArgTuple: case TypeTableEntryIdOpaque: @@ -3485,7 +3485,7 @@ static void resolve_decl_var(CodeGen *g, TldVar *tld_var) { { add_node_error(g, source_node, buf_sprintf("unable to infer variable type")); implicit_type = g->builtin_types.entry_invalid; - } else if (implicit_type->id == TypeTableEntryIdNullLit) { + } else if (implicit_type->id == TypeTableEntryIdNull) { add_node_error(g, source_node, buf_sprintf("unable to infer variable type")); implicit_type = g->builtin_types.entry_invalid; } else if (implicit_type->id == TypeTableEntryIdMetaType && !is_const) { @@ -3733,7 +3733,7 @@ static 
bool is_container(TypeTableEntry *type_entry) { case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdMaybe: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: @@ -3782,7 +3782,7 @@ void resolve_container_type(CodeGen *g, TypeTableEntry *type_entry) { case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdMaybe: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: @@ -4286,7 +4286,7 @@ bool handle_is_ptr(TypeTableEntry *type_entry) { case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdNamespace: case TypeTableEntryIdBlock: case TypeTableEntryIdBoundFn: @@ -4674,7 +4674,7 @@ static uint32_t hash_const_val(ConstExprValue *const_val) { return 223048345; case TypeTableEntryIdUndefLit: return 162837799; - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: return 844854567; case TypeTableEntryIdArray: // TODO better hashing algorithm @@ -4757,7 +4757,7 @@ static bool can_mutate_comptime_var_state(ConstExprValue *value) { case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdNamespace: case TypeTableEntryIdBoundFn: case TypeTableEntryIdFn: @@ -4822,7 +4822,7 @@ static bool return_type_is_cacheable(TypeTableEntry *return_type) { case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdNamespace: case TypeTableEntryIdBoundFn: case TypeTableEntryIdFn: @@ -4933,7 +4933,7 @@ bool type_requires_comptime(TypeTableEntry *type_entry) { case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdMetaType: case TypeTableEntryIdNamespace: case TypeTableEntryIdBlock: @@ -5412,7 +5412,7 @@ bool const_values_equal(ConstExprValue *a, ConstExprValue *b) { return true; case TypeTableEntryIdUndefLit: zig_panic("TODO"); - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: zig_panic("TODO"); case TypeTableEntryIdMaybe: if (a->data.x_maybe == nullptr || b->data.x_maybe == nullptr) { @@ -5646,7 +5646,7 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) { buf_appendf(buf, "}"); return; } - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: { buf_appendf(buf, "null"); return; @@ -5764,7 +5764,7 @@ uint32_t type_id_hash(TypeId x) { case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdMaybe: case TypeTableEntryIdErrorSet: case TypeTableEntryIdEnum: @@ -5810,7 +5810,7 @@ bool type_id_eql(TypeId a, TypeId b) { case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdMaybe: case TypeTableEntryIdPromise: case TypeTableEntryIdErrorSet: @@ -5932,7 +5932,7 @@ static const TypeTableEntryId all_type_ids[] = { TypeTableEntryIdComptimeFloat, 
TypeTableEntryIdComptimeInt, TypeTableEntryIdUndefLit, - TypeTableEntryIdNullLit, + TypeTableEntryIdNull, TypeTableEntryIdMaybe, TypeTableEntryIdErrorUnion, TypeTableEntryIdErrorSet, @@ -5986,7 +5986,7 @@ size_t type_id_index(TypeTableEntry *entry) { return 10; case TypeTableEntryIdUndefLit: return 11; - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: return 12; case TypeTableEntryIdMaybe: return 13; @@ -6044,8 +6044,8 @@ const char *type_id_name(TypeTableEntryId id) { return "ComptimeInt"; case TypeTableEntryIdUndefLit: return "UndefinedLiteral"; - case TypeTableEntryIdNullLit: - return "NullLiteral"; + case TypeTableEntryIdNull: + return "Null"; case TypeTableEntryIdMaybe: return "Nullable"; case TypeTableEntryIdErrorUnion: diff --git a/src/codegen.cpp b/src/codegen.cpp index dc915e766d..3177a2491f 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -4919,7 +4919,7 @@ static LLVMValueRef pack_const_int(CodeGen *g, LLVMTypeRef big_int_type_ref, Con case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdNamespace: @@ -5365,7 +5365,7 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdNamespace: case TypeTableEntryIdBlock: case TypeTableEntryIdBoundFn: @@ -6032,7 +6032,7 @@ static void define_builtin_types(CodeGen *g) { g->builtin_types.entry_undef = entry; } { - TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdNullLit); + TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdNull); buf_init_from_str(&entry->name, "(null)"); entry->zero_bits = true; g->builtin_types.entry_null = entry; @@ -6500,7 +6500,7 @@ static void define_builtin_compile_vars(CodeGen *g) { " ComptimeFloat: void,\n" " ComptimeInt: void,\n" " UndefinedLiteral: void,\n" - " NullLiteral: void,\n" + " Null: void,\n" " Nullable: Nullable,\n" " ErrorUnion: ErrorUnion,\n" " ErrorSet: ErrorSet,\n" @@ -7075,7 +7075,7 @@ static void prepend_c_type_to_decl_list(CodeGen *g, GenH *gen_h, TypeTableEntry case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdNamespace: case TypeTableEntryIdBlock: case TypeTableEntryIdBoundFn: @@ -7260,7 +7260,7 @@ static void get_c_type(CodeGen *g, GenH *gen_h, TypeTableEntry *type_entry, Buf case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdArgTuple: case TypeTableEntryIdPromise: zig_unreachable(); @@ -7413,7 +7413,7 @@ static void gen_h_file(CodeGen *g) { case TypeTableEntryIdComptimeInt: case TypeTableEntryIdArray: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdNamespace: diff --git a/src/ir.cpp b/src/ir.cpp index 2819ef5b0e..2fbc72309a 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -7960,7 +7960,7 @@ static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira, // implicit conversion from null literal to maybe type if (expected_type->id == TypeTableEntryIdMaybe && - 
actual_type->id == TypeTableEntryIdNullLit) + actual_type->id == TypeTableEntryIdNull) { return ImplicitCastMatchResultYes; } @@ -8190,7 +8190,7 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod } } - bool any_are_null = (prev_inst->value.type->id == TypeTableEntryIdNullLit); + bool any_are_null = (prev_inst->value.type->id == TypeTableEntryIdNull); bool convert_to_const_slice = false; for (size_t i = 1; i < instruction_count; i += 1) { IrInstruction *cur_inst = instructions[i]; @@ -8469,12 +8469,12 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod } } - if (prev_type->id == TypeTableEntryIdNullLit) { + if (prev_type->id == TypeTableEntryIdNull) { prev_inst = cur_inst; continue; } - if (cur_type->id == TypeTableEntryIdNullLit) { + if (cur_type->id == TypeTableEntryIdNull) { any_are_null = true; continue; } @@ -8677,7 +8677,7 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod ir_add_error_node(ira, source_node, buf_sprintf("unable to make error union out of number literal")); return ira->codegen->builtin_types.entry_invalid; - } else if (prev_inst->value.type->id == TypeTableEntryIdNullLit) { + } else if (prev_inst->value.type->id == TypeTableEntryIdNull) { ir_add_error_node(ira, source_node, buf_sprintf("unable to make error union out of null literal")); return ira->codegen->builtin_types.entry_invalid; @@ -8685,7 +8685,7 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod return get_error_union_type(ira->codegen, err_set_type, prev_inst->value.type); } } - } else if (any_are_null && prev_inst->value.type->id != TypeTableEntryIdNullLit) { + } else if (any_are_null && prev_inst->value.type->id != TypeTableEntryIdNull) { if (prev_inst->value.type->id == TypeTableEntryIdComptimeInt || prev_inst->value.type->id == TypeTableEntryIdComptimeFloat) { @@ -10004,7 +10004,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst // explicit cast from null literal to maybe type if (wanted_type->id == TypeTableEntryIdMaybe && - actual_type->id == TypeTableEntryIdNullLit) + actual_type->id == TypeTableEntryIdNull) { return ir_analyze_null_to_maybe(ira, source_instr, value, wanted_type); } @@ -10061,7 +10061,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst { TypeTableEntry *wanted_child_type = wanted_type->data.error_union.payload_type->data.maybe.child_type; if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node).id == ConstCastResultIdOk || - actual_type->id == TypeTableEntryIdNullLit || + actual_type->id == TypeTableEntryIdNull || actual_type->id == TypeTableEntryIdComptimeInt || actual_type->id == TypeTableEntryIdComptimeFloat) { @@ -10627,19 +10627,19 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp IrBinOp op_id = bin_op_instruction->op_id; bool is_equality_cmp = (op_id == IrBinOpCmpEq || op_id == IrBinOpCmpNotEq); if (is_equality_cmp && - ((op1->value.type->id == TypeTableEntryIdNullLit && op2->value.type->id == TypeTableEntryIdMaybe) || - (op2->value.type->id == TypeTableEntryIdNullLit && op1->value.type->id == TypeTableEntryIdMaybe) || - (op1->value.type->id == TypeTableEntryIdNullLit && op2->value.type->id == TypeTableEntryIdNullLit))) + ((op1->value.type->id == TypeTableEntryIdNull && op2->value.type->id == TypeTableEntryIdMaybe) || + (op2->value.type->id == TypeTableEntryIdNull && op1->value.type->id == TypeTableEntryIdMaybe) || + 
(op1->value.type->id == TypeTableEntryIdNull && op2->value.type->id == TypeTableEntryIdNull))) { - if (op1->value.type->id == TypeTableEntryIdNullLit && op2->value.type->id == TypeTableEntryIdNullLit) { + if (op1->value.type->id == TypeTableEntryIdNull && op2->value.type->id == TypeTableEntryIdNull) { ConstExprValue *out_val = ir_build_const_from(ira, &bin_op_instruction->base); out_val->data.x_bool = (op_id == IrBinOpCmpEq); return ira->codegen->builtin_types.entry_bool; } IrInstruction *maybe_op; - if (op1->value.type->id == TypeTableEntryIdNullLit) { + if (op1->value.type->id == TypeTableEntryIdNull) { maybe_op = op2; - } else if (op2->value.type->id == TypeTableEntryIdNullLit) { + } else if (op2->value.type->id == TypeTableEntryIdNull) { maybe_op = op1; } else { zig_unreachable(); @@ -10796,7 +10796,7 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp case TypeTableEntryIdArray: case TypeTableEntryIdStruct: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdMaybe: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdUnion: @@ -11645,7 +11645,7 @@ static VarClassRequired get_var_class_required(TypeTableEntry *type_entry) { case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: case TypeTableEntryIdBlock: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdOpaque: case TypeTableEntryIdMetaType: case TypeTableEntryIdNamespace: @@ -11913,7 +11913,7 @@ static TypeTableEntry *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructi case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdMaybe: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: @@ -11937,7 +11937,7 @@ static TypeTableEntry *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructi case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdMaybe: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: @@ -12901,7 +12901,7 @@ static TypeTableEntry *ir_analyze_maybe(IrAnalyze *ira, IrInstructionUnOp *un_op case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdMaybe: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: @@ -13152,7 +13152,7 @@ static TypeTableEntry *ir_analyze_instruction_phi(IrAnalyze *ira, IrInstructionP if (resolved_type->id == TypeTableEntryIdComptimeFloat || resolved_type->id == TypeTableEntryIdComptimeInt || - resolved_type->id == TypeTableEntryIdNullLit || + resolved_type->id == TypeTableEntryIdNull || resolved_type->id == TypeTableEntryIdUndefLit) { ir_add_error_node(ira, phi_instruction->base.source_node, @@ -14216,7 +14216,7 @@ static TypeTableEntry *ir_analyze_instruction_typeof(IrAnalyze *ira, IrInstructi case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdNamespace: case TypeTableEntryIdBlock: case TypeTableEntryIdBoundFn: @@ -14480,7 +14480,7 @@ static TypeTableEntry *ir_analyze_instruction_slice_type(IrAnalyze *ira, zig_unreachable(); case TypeTableEntryIdUnreachable: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + 
case TypeTableEntryIdNull: case TypeTableEntryIdBlock: case TypeTableEntryIdArgTuple: case TypeTableEntryIdOpaque: @@ -14588,7 +14588,7 @@ static TypeTableEntry *ir_analyze_instruction_array_type(IrAnalyze *ira, zig_unreachable(); case TypeTableEntryIdUnreachable: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdBlock: case TypeTableEntryIdArgTuple: case TypeTableEntryIdOpaque: @@ -14657,7 +14657,7 @@ static TypeTableEntry *ir_analyze_instruction_size_of(IrAnalyze *ira, zig_unreachable(); case TypeTableEntryIdUnreachable: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdBlock: case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: @@ -14713,7 +14713,7 @@ static TypeTableEntry *ir_analyze_instruction_test_non_null(IrAnalyze *ira, IrIn ir_build_test_nonnull_from(&ira->new_irb, &instruction->base, value); return ira->codegen->builtin_types.entry_bool; - } else if (type_entry->id == TypeTableEntryIdNullLit) { + } else if (type_entry->id == TypeTableEntryIdNull) { ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base); out_val->data.x_bool = false; return ira->codegen->builtin_types.entry_bool; @@ -15100,7 +15100,7 @@ static TypeTableEntry *ir_analyze_instruction_switch_target(IrAnalyze *ira, case TypeTableEntryIdArray: case TypeTableEntryIdStruct: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdMaybe: case TypeTableEntryIdBlock: case TypeTableEntryIdBoundFn: @@ -15621,7 +15621,7 @@ static TypeTableEntry *ir_analyze_min_max(IrAnalyze *ira, IrInstruction *source_ case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdMaybe: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: @@ -16283,7 +16283,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdNamespace: case TypeTableEntryIdBlock: case TypeTableEntryIdArgTuple: @@ -17879,7 +17879,7 @@ static TypeTableEntry *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstruc case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdNamespace: case TypeTableEntryIdBlock: case TypeTableEntryIdBoundFn: @@ -18613,7 +18613,7 @@ static void buf_write_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdPromise: zig_unreachable(); case TypeTableEntryIdVoid: @@ -18680,7 +18680,7 @@ static void buf_read_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: case TypeTableEntryIdPromise: zig_unreachable(); case TypeTableEntryIdVoid: @@ -18761,7 +18761,7 @@ static TypeTableEntry *ir_analyze_instruction_bit_cast(IrAnalyze *ira, IrInstruc case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: case 
TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: ir_add_error(ira, dest_type_value, buf_sprintf("unable to @bitCast from type '%s'", buf_ptr(&src_type->name))); return ira->codegen->builtin_types.entry_invalid; @@ -18787,7 +18787,7 @@ static TypeTableEntry *ir_analyze_instruction_bit_cast(IrAnalyze *ira, IrInstruc case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefLit: - case TypeTableEntryIdNullLit: + case TypeTableEntryIdNull: ir_add_error(ira, dest_type_value, buf_sprintf("unable to @bitCast to type '%s'", buf_ptr(&dest_type->name))); return ira->codegen->builtin_types.entry_invalid; diff --git a/test/cases/misc.zig b/test/cases/misc.zig index 1821e29a20..ed14243b39 100644 --- a/test/cases/misc.zig +++ b/test/cases/misc.zig @@ -504,7 +504,7 @@ test "@typeId" { assert(@typeId(@typeOf(1)) == Tid.ComptimeInt); assert(@typeId(@typeOf(1.0)) == Tid.ComptimeFloat); assert(@typeId(@typeOf(undefined)) == Tid.UndefinedLiteral); - assert(@typeId(@typeOf(null)) == Tid.NullLiteral); + assert(@typeId(@typeOf(null)) == Tid.Null); assert(@typeId(?i32) == Tid.Nullable); assert(@typeId(error!i32) == Tid.ErrorUnion); assert(@typeId(error) == Tid.ErrorSet); -- cgit v1.2.3 From a8146ade2a57bea12ea2d16bd273f03578e5d559 Mon Sep 17 00:00:00 2001 From: Jimmi HC Date: Tue, 5 Jun 2018 11:54:11 +0200 Subject: Renamed UndefinedLiteral to Undefined --- doc/langref.html.in | 8 ++++---- src/all_types.hpp | 2 +- src/analyze.cpp | 44 ++++++++++++++++++++++---------------------- src/codegen.cpp | 14 +++++++------- src/ir.cpp | 44 ++++++++++++++++++++++---------------------- test/cases/misc.zig | 2 +- 6 files changed, 57 insertions(+), 57 deletions(-) (limited to 'src/analyze.cpp') diff --git a/doc/langref.html.in b/doc/langref.html.in index 70f11c0e2b..4359cadb58 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -4895,7 +4895,7 @@ pub const TypeId = enum { Struct, ComptimeFloat, ComptimeInt, - UndefinedLiteral, + Undefined, Null, Nullable, ErrorUnion, @@ -4929,7 +4929,7 @@ pub const TypeInfo = union(TypeId) { Struct: Struct, ComptimeFloat: void, ComptimeInt: void, - UndefinedLiteral: void, + Undefined: void, Null: void, Nullable: Nullable, ErrorUnion: ErrorUnion, @@ -5687,7 +5687,7 @@ pub const TypeId = enum { Struct, ComptimeFloat, ComptimeInt, - UndefinedLiteral, + Undefined, Null, Nullable, ErrorUnion, @@ -5715,7 +5715,7 @@ pub const TypeInfo = union(TypeId) { Struct: Struct, ComptimeFloat: void, ComptimeInt: void, - UndefinedLiteral: void, + Undefined: void, Null: void, Nullable: Nullable, ErrorUnion: ErrorUnion, diff --git a/src/all_types.hpp b/src/all_types.hpp index 6b30a1155d..3b2ea02b71 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1161,7 +1161,7 @@ enum TypeTableEntryId { TypeTableEntryIdStruct, TypeTableEntryIdComptimeFloat, TypeTableEntryIdComptimeInt, - TypeTableEntryIdUndefLit, + TypeTableEntryIdUndefined, TypeTableEntryIdNull, TypeTableEntryIdMaybe, TypeTableEntryIdErrorUnion, diff --git a/src/analyze.cpp b/src/analyze.cpp index a605cb3a7f..8008bea68d 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -234,7 +234,7 @@ bool type_is_complete(TypeTableEntry *type_entry) { case TypeTableEntryIdArray: case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: case TypeTableEntryIdMaybe: case TypeTableEntryIdErrorUnion: @@ -270,7 +270,7 @@ bool type_has_zero_bits_known(TypeTableEntry *type_entry) { 
case TypeTableEntryIdArray: case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: case TypeTableEntryIdMaybe: case TypeTableEntryIdErrorUnion: @@ -1335,7 +1335,7 @@ static bool type_allowed_in_packed_struct(TypeTableEntry *type_entry) { case TypeTableEntryIdUnreachable: case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: @@ -1376,7 +1376,7 @@ static bool type_allowed_in_extern(CodeGen *g, TypeTableEntry *type_entry) { case TypeTableEntryIdMetaType: case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: @@ -1511,7 +1511,7 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c case TypeTableEntryIdInvalid: return g->builtin_types.entry_invalid; case TypeTableEntryIdUnreachable: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: case TypeTableEntryIdArgTuple: case TypeTableEntryIdOpaque: @@ -1599,7 +1599,7 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c case TypeTableEntryIdInvalid: zig_unreachable(); - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: case TypeTableEntryIdArgTuple: case TypeTableEntryIdOpaque: @@ -3337,7 +3337,7 @@ TypeTableEntry *validate_var_type(CodeGen *g, AstNode *source_node, TypeTableEnt case TypeTableEntryIdInvalid: return g->builtin_types.entry_invalid; case TypeTableEntryIdUnreachable: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: case TypeTableEntryIdBlock: case TypeTableEntryIdArgTuple: @@ -3732,7 +3732,7 @@ static bool is_container(TypeTableEntry *type_entry) { case TypeTableEntryIdArray: case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: case TypeTableEntryIdMaybe: case TypeTableEntryIdErrorUnion: @@ -3781,7 +3781,7 @@ void resolve_container_type(CodeGen *g, TypeTableEntry *type_entry) { case TypeTableEntryIdArray: case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: case TypeTableEntryIdMaybe: case TypeTableEntryIdErrorUnion: @@ -4285,7 +4285,7 @@ bool handle_is_ptr(TypeTableEntry *type_entry) { case TypeTableEntryIdMetaType: case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: case TypeTableEntryIdNamespace: case TypeTableEntryIdBlock: @@ -4672,7 +4672,7 @@ static uint32_t hash_const_val(ConstExprValue *const_val) { case TypeTableEntryIdPromise: // TODO better hashing algorithm return 223048345; - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: return 162837799; case TypeTableEntryIdNull: return 844854567; @@ -4756,7 +4756,7 @@ static bool can_mutate_comptime_var_state(ConstExprValue *value) { case TypeTableEntryIdFloat: case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case 
TypeTableEntryIdNull: case TypeTableEntryIdNamespace: case TypeTableEntryIdBoundFn: @@ -4821,7 +4821,7 @@ static bool return_type_is_cacheable(TypeTableEntry *return_type) { case TypeTableEntryIdFloat: case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: case TypeTableEntryIdNamespace: case TypeTableEntryIdBoundFn: @@ -4932,7 +4932,7 @@ bool type_requires_comptime(TypeTableEntry *type_entry) { zig_unreachable(); case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: case TypeTableEntryIdMetaType: case TypeTableEntryIdNamespace: @@ -5410,7 +5410,7 @@ bool const_values_equal(ConstExprValue *a, ConstExprValue *b) { return false; } return true; - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: zig_panic("TODO"); case TypeTableEntryIdNull: zig_panic("TODO"); @@ -5651,7 +5651,7 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) { buf_appendf(buf, "null"); return; } - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: { buf_appendf(buf, "undefined"); return; @@ -5763,7 +5763,7 @@ uint32_t type_id_hash(TypeId x) { case TypeTableEntryIdStruct: case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: case TypeTableEntryIdMaybe: case TypeTableEntryIdErrorSet: @@ -5809,7 +5809,7 @@ bool type_id_eql(TypeId a, TypeId b) { case TypeTableEntryIdStruct: case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: case TypeTableEntryIdMaybe: case TypeTableEntryIdPromise: @@ -5931,7 +5931,7 @@ static const TypeTableEntryId all_type_ids[] = { TypeTableEntryIdStruct, TypeTableEntryIdComptimeFloat, TypeTableEntryIdComptimeInt, - TypeTableEntryIdUndefLit, + TypeTableEntryIdUndefined, TypeTableEntryIdNull, TypeTableEntryIdMaybe, TypeTableEntryIdErrorUnion, @@ -5984,7 +5984,7 @@ size_t type_id_index(TypeTableEntry *entry) { return 9; case TypeTableEntryIdComptimeInt: return 10; - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: return 11; case TypeTableEntryIdNull: return 12; @@ -6042,8 +6042,8 @@ const char *type_id_name(TypeTableEntryId id) { return "ComptimeFloat"; case TypeTableEntryIdComptimeInt: return "ComptimeInt"; - case TypeTableEntryIdUndefLit: - return "UndefinedLiteral"; + case TypeTableEntryIdUndefined: + return "Undefined"; case TypeTableEntryIdNull: return "Null"; case TypeTableEntryIdMaybe: diff --git a/src/codegen.cpp b/src/codegen.cpp index 3177a2491f..a977c34daf 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -4918,7 +4918,7 @@ static LLVMValueRef pack_const_int(CodeGen *g, LLVMTypeRef big_int_type_ref, Con case TypeTableEntryIdUnreachable: case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: @@ -5364,7 +5364,7 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c case TypeTableEntryIdUnreachable: case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: case TypeTableEntryIdNamespace: case 
TypeTableEntryIdBlock: @@ -6026,7 +6026,7 @@ static void define_builtin_types(CodeGen *g) { g->primitive_type_table.put(&entry->name, entry); } { - TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdUndefLit); + TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdUndefined); buf_init_from_str(&entry->name, "(undefined)"); entry->zero_bits = true; g->builtin_types.entry_undef = entry; @@ -6499,7 +6499,7 @@ static void define_builtin_compile_vars(CodeGen *g) { " Struct: Struct,\n" " ComptimeFloat: void,\n" " ComptimeInt: void,\n" - " UndefinedLiteral: void,\n" + " Undefined: void,\n" " Null: void,\n" " Nullable: Nullable,\n" " ErrorUnion: ErrorUnion,\n" @@ -7074,7 +7074,7 @@ static void prepend_c_type_to_decl_list(CodeGen *g, GenH *gen_h, TypeTableEntry case TypeTableEntryIdMetaType: case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: case TypeTableEntryIdNamespace: case TypeTableEntryIdBlock: @@ -7259,7 +7259,7 @@ static void get_c_type(CodeGen *g, GenH *gen_h, TypeTableEntry *type_entry, Buf case TypeTableEntryIdBlock: case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: case TypeTableEntryIdArgTuple: case TypeTableEntryIdPromise: @@ -7412,7 +7412,7 @@ static void gen_h_file(CodeGen *g) { case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: case TypeTableEntryIdArray: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: diff --git a/src/ir.cpp b/src/ir.cpp index 2fbc72309a..9578795fcc 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -8142,7 +8142,7 @@ static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira, } // implicit undefined literal to anything - if (actual_type->id == TypeTableEntryIdUndefLit) { + if (actual_type->id == TypeTableEntryIdUndefined) { return ImplicitCastMatchResultYes; } @@ -8546,11 +8546,11 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod continue; } - if (cur_type->id == TypeTableEntryIdUndefLit) { + if (cur_type->id == TypeTableEntryIdUndefined) { continue; } - if (prev_type->id == TypeTableEntryIdUndefLit) { + if (prev_type->id == TypeTableEntryIdUndefined) { prev_inst = cur_inst; continue; } @@ -10230,7 +10230,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst // explicit cast from undefined to anything - if (actual_type->id == TypeTableEntryIdUndefLit) { + if (actual_type->id == TypeTableEntryIdUndefined) { return ir_analyze_undefined_to_anything(ira, source_instr, value, wanted_type); } @@ -10795,7 +10795,7 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp case TypeTableEntryIdUnreachable: case TypeTableEntryIdArray: case TypeTableEntryIdStruct: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: case TypeTableEntryIdMaybe: case TypeTableEntryIdErrorUnion: @@ -11643,7 +11643,7 @@ static VarClassRequired get_var_class_required(TypeTableEntry *type_entry) { return VarClassRequiredAny; case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdBlock: case TypeTableEntryIdNull: case TypeTableEntryIdOpaque: @@ -11912,7 +11912,7 @@ static TypeTableEntry 
*ir_analyze_instruction_export(IrAnalyze *ira, IrInstructi case TypeTableEntryIdUnreachable: case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: case TypeTableEntryIdMaybe: case TypeTableEntryIdErrorUnion: @@ -11936,7 +11936,7 @@ static TypeTableEntry *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructi case TypeTableEntryIdArray: case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: case TypeTableEntryIdMaybe: case TypeTableEntryIdErrorUnion: @@ -12900,7 +12900,7 @@ static TypeTableEntry *ir_analyze_maybe(IrAnalyze *ira, IrInstructionUnOp *un_op case TypeTableEntryIdStruct: case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: case TypeTableEntryIdMaybe: case TypeTableEntryIdErrorUnion: @@ -13153,7 +13153,7 @@ static TypeTableEntry *ir_analyze_instruction_phi(IrAnalyze *ira, IrInstructionP if (resolved_type->id == TypeTableEntryIdComptimeFloat || resolved_type->id == TypeTableEntryIdComptimeInt || resolved_type->id == TypeTableEntryIdNull || - resolved_type->id == TypeTableEntryIdUndefLit) + resolved_type->id == TypeTableEntryIdUndefined) { ir_add_error_node(ira, phi_instruction->base.source_node, buf_sprintf("unable to infer expression type")); @@ -14215,7 +14215,7 @@ static TypeTableEntry *ir_analyze_instruction_typeof(IrAnalyze *ira, IrInstructi zig_unreachable(); // handled above case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: case TypeTableEntryIdNamespace: case TypeTableEntryIdBlock: @@ -14479,7 +14479,7 @@ static TypeTableEntry *ir_analyze_instruction_slice_type(IrAnalyze *ira, case TypeTableEntryIdInvalid: // handled above zig_unreachable(); case TypeTableEntryIdUnreachable: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: case TypeTableEntryIdBlock: case TypeTableEntryIdArgTuple: @@ -14587,7 +14587,7 @@ static TypeTableEntry *ir_analyze_instruction_array_type(IrAnalyze *ira, case TypeTableEntryIdInvalid: // handled above zig_unreachable(); case TypeTableEntryIdUnreachable: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: case TypeTableEntryIdBlock: case TypeTableEntryIdArgTuple: @@ -14656,7 +14656,7 @@ static TypeTableEntry *ir_analyze_instruction_size_of(IrAnalyze *ira, case TypeTableEntryIdInvalid: // handled above zig_unreachable(); case TypeTableEntryIdUnreachable: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: case TypeTableEntryIdBlock: case TypeTableEntryIdComptimeFloat: @@ -15099,7 +15099,7 @@ static TypeTableEntry *ir_analyze_instruction_switch_target(IrAnalyze *ira, case TypeTableEntryIdUnreachable: case TypeTableEntryIdArray: case TypeTableEntryIdStruct: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: case TypeTableEntryIdMaybe: case TypeTableEntryIdBlock: @@ -15620,7 +15620,7 @@ static TypeTableEntry *ir_analyze_min_max(IrAnalyze *ira, IrInstruction *source_ case TypeTableEntryIdStruct: case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case 
TypeTableEntryIdNull: case TypeTableEntryIdMaybe: case TypeTableEntryIdErrorUnion: @@ -16282,7 +16282,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t case TypeTableEntryIdUnreachable: case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: case TypeTableEntryIdNamespace: case TypeTableEntryIdBlock: @@ -17878,7 +17878,7 @@ static TypeTableEntry *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstruc case TypeTableEntryIdUnreachable: case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: case TypeTableEntryIdNamespace: case TypeTableEntryIdBlock: @@ -18612,7 +18612,7 @@ static void buf_write_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue case TypeTableEntryIdUnreachable: case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: case TypeTableEntryIdPromise: zig_unreachable(); @@ -18679,7 +18679,7 @@ static void buf_read_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue case TypeTableEntryIdUnreachable: case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: case TypeTableEntryIdPromise: zig_unreachable(); @@ -18760,7 +18760,7 @@ static TypeTableEntry *ir_analyze_instruction_bit_cast(IrAnalyze *ira, IrInstruc case TypeTableEntryIdUnreachable: case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: ir_add_error(ira, dest_type_value, buf_sprintf("unable to @bitCast from type '%s'", buf_ptr(&src_type->name))); @@ -18786,7 +18786,7 @@ static TypeTableEntry *ir_analyze_instruction_bit_cast(IrAnalyze *ira, IrInstruc case TypeTableEntryIdUnreachable: case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: - case TypeTableEntryIdUndefLit: + case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: ir_add_error(ira, dest_type_value, buf_sprintf("unable to @bitCast to type '%s'", buf_ptr(&dest_type->name))); diff --git a/test/cases/misc.zig b/test/cases/misc.zig index ed14243b39..9450cf5e6e 100644 --- a/test/cases/misc.zig +++ b/test/cases/misc.zig @@ -503,7 +503,7 @@ test "@typeId" { assert(@typeId(AStruct) == Tid.Struct); assert(@typeId(@typeOf(1)) == Tid.ComptimeInt); assert(@typeId(@typeOf(1.0)) == Tid.ComptimeFloat); - assert(@typeId(@typeOf(undefined)) == Tid.UndefinedLiteral); + assert(@typeId(@typeOf(undefined)) == Tid.Undefined); assert(@typeId(@typeOf(null)) == Tid.Null); assert(@typeId(?i32) == Tid.Nullable); assert(@typeId(error!i32) == Tid.ErrorUnion); -- cgit v1.2.3 From 7a0948253636080e5abe59b938761ee7348a7025 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 5 Jun 2018 10:48:53 -0400 Subject: fix crash when evaluating return type has compile error closes #1058 --- src/analyze.cpp | 2 ++ test/compile_errors.zig | 16 ++++++++++++++++ 2 files changed, 18 insertions(+) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index 8008bea68d..b0f0196020 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -1018,6 +1018,8 @@ TypeTableEntry *get_fn_type(CodeGen *g, FnTypeId *fn_type_id) { } if (fn_type_id->return_type != nullptr) { ensure_complete_type(g, 
fn_type_id->return_type); + if (type_is_invalid(fn_type_id->return_type)) + return g->builtin_types.entry_invalid; } else { zig_panic("TODO implement inferred return types https://github.com/ziglang/zig/issues/447"); } diff --git a/test/compile_errors.zig b/test/compile_errors.zig index e264d57b5e..4bd6e9bc24 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -1,6 +1,22 @@ const tests = @import("tests.zig"); pub fn addCases(cases: *tests.CompileErrorContext) void { + cases.add( + "error when evaluating return type", + \\const Foo = struct { + \\ map: i32(i32), + \\ + \\ fn init() Foo { + \\ return undefined; + \\ } + \\}; + \\export fn entry() void { + \\ var rule_set = try Foo.init(); + \\} + , + ".tmp_source.zig:2:13: error: invalid cast from type 'type' to 'i32'", + ); + cases.add( "slicing single-item pointer", \\export fn entry(ptr: *i32) void { -- cgit v1.2.3 From 652f4bdf6242462182005f4c7149f13beaaa3259 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 5 Jun 2018 18:03:21 -0400 Subject: disallow unknown-length pointer to opaque This also means that translate-c has to detect when a pointer to opaque is happening, and use `*` instead of `[*]`. See #1059 --- src/analyze.cpp | 1 + src/ir.cpp | 10 +++++----- src/tokenizer.hpp | 2 ++ src/translate_c.cpp | 37 +++++++++++++++++++++++++++++++++---- std/c/index.zig | 20 ++++++++++---------- std/heap.zig | 8 ++++---- std/os/darwin.zig | 8 ++++---- std/os/file.zig | 2 +- std/os/index.zig | 4 ++-- std/os/windows/index.zig | 14 +++++++------- std/os/windows/util.zig | 2 +- test/compare_output.zig | 4 ++-- test/compile_errors.zig | 7 +++++++ test/translate_c.zig | 20 ++++++++++---------- 14 files changed, 89 insertions(+), 50 deletions(-) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index b0f0196020..0adb992798 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -384,6 +384,7 @@ TypeTableEntry *get_pointer_to_type_extra(CodeGen *g, TypeTableEntry *child_type bool is_volatile, PtrLen ptr_len, uint32_t byte_alignment, uint32_t bit_offset, uint32_t unaligned_bit_count) { assert(!type_is_invalid(child_type)); + assert(ptr_len == PtrLenSingle || child_type->id != TypeTableEntryIdOpaque); TypeId type_id = {}; TypeTableEntry **parent_pointer = nullptr; diff --git a/src/ir.cpp b/src/ir.cpp index 9578795fcc..5c44e7c0ff 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -4620,11 +4620,8 @@ static IrInstruction *ir_lval_wrap(IrBuilder *irb, Scope *scope, IrInstruction * static IrInstruction *ir_gen_pointer_type(IrBuilder *irb, Scope *scope, AstNode *node) { assert(node->type == NodeTypePointerType); - // The null check here is for C imports which don't set a token on the AST node. We could potentially - // update that code to create a fake token and then remove this check. - PtrLen ptr_len = (node->data.pointer_type.star_token != nullptr && - (node->data.pointer_type.star_token->id == TokenIdStar || - node->data.pointer_type.star_token->id == TokenIdStarStar)) ? PtrLenSingle : PtrLenUnknown; + PtrLen ptr_len = (node->data.pointer_type.star_token->id == TokenIdStar || + node->data.pointer_type.star_token->id == TokenIdStarStar) ? 
PtrLenSingle : PtrLenUnknown; bool is_const = node->data.pointer_type.is_const; bool is_volatile = node->data.pointer_type.is_volatile; AstNode *expr_node = node->data.pointer_type.op_expr; @@ -18973,6 +18970,9 @@ static TypeTableEntry *ir_analyze_instruction_ptr_type(IrAnalyze *ira, IrInstruc if (child_type->id == TypeTableEntryIdUnreachable) { ir_add_error(ira, &instruction->base, buf_sprintf("pointer to noreturn not allowed")); return ira->codegen->builtin_types.entry_invalid; + } else if (child_type->id == TypeTableEntryIdOpaque && instruction->ptr_len == PtrLenUnknown) { + ir_add_error(ira, &instruction->base, buf_sprintf("unknown-length pointer to opaque")); + return ira->codegen->builtin_types.entry_invalid; } uint32_t align_bytes; diff --git a/src/tokenizer.hpp b/src/tokenizer.hpp index d659c0a772..d0089909cd 100644 --- a/src/tokenizer.hpp +++ b/src/tokenizer.hpp @@ -170,6 +170,8 @@ struct Token { TokenCharLit char_lit; } data; }; +// work around conflicting name Token which is also found in libclang +typedef Token ZigToken; struct Tokenization { ZigList *tokens; diff --git a/src/translate_c.cpp b/src/translate_c.cpp index db541d34f3..d78bd1fa70 100644 --- a/src/translate_c.cpp +++ b/src/translate_c.cpp @@ -276,8 +276,11 @@ static AstNode *maybe_suppress_result(Context *c, ResultUsed result_used, AstNod node); } -static AstNode *trans_create_node_ptr_type(Context *c, bool is_const, bool is_volatile, AstNode *child_node) { +static AstNode *trans_create_node_ptr_type(Context *c, bool is_const, bool is_volatile, AstNode *child_node, PtrLen ptr_len) { AstNode *node = trans_create_node(c, NodeTypePointerType); + node->data.pointer_type.star_token = allocate(1); + node->data.pointer_type.star_token->id = (ptr_len == PtrLenSingle) ? TokenIdStar: TokenIdBracketStarBracket; + node->data.pointer_type.is_const = is_const; node->data.pointer_type.is_const = is_const; node->data.pointer_type.is_volatile = is_volatile; node->data.pointer_type.op_expr = child_node; @@ -731,6 +734,30 @@ static bool qual_type_has_wrapping_overflow(Context *c, QualType qt) { } } +static bool type_is_opaque(Context *c, const Type *ty, const SourceLocation &source_loc) { + switch (ty->getTypeClass()) { + case Type::Builtin: { + const BuiltinType *builtin_ty = static_cast(ty); + return builtin_ty->getKind() == BuiltinType::Void; + } + case Type::Record: { + const RecordType *record_ty = static_cast(ty); + return record_ty->getDecl()->getDefinition() == nullptr; + } + case Type::Elaborated: { + const ElaboratedType *elaborated_ty = static_cast(ty); + return type_is_opaque(c, elaborated_ty->getNamedType().getTypePtr(), source_loc); + } + case Type::Typedef: { + const TypedefType *typedef_ty = static_cast(ty); + const TypedefNameDecl *typedef_decl = typedef_ty->getDecl(); + return type_is_opaque(c, typedef_decl->getUnderlyingType().getTypePtr(), source_loc); + } + default: + return false; + } +} + static AstNode *trans_type(Context *c, const Type *ty, const SourceLocation &source_loc) { switch (ty->getTypeClass()) { case Type::Builtin: @@ -855,8 +882,10 @@ static AstNode *trans_type(Context *c, const Type *ty, const SourceLocation &sou return trans_create_node_prefix_op(c, PrefixOpMaybe, child_node); } + PtrLen ptr_len = type_is_opaque(c, child_qt.getTypePtr(), source_loc) ? 
PtrLenSingle : PtrLenUnknown; + AstNode *pointer_node = trans_create_node_ptr_type(c, child_qt.isConstQualified(), - child_qt.isVolatileQualified(), child_node); + child_qt.isVolatileQualified(), child_node, ptr_len); return trans_create_node_prefix_op(c, PrefixOpMaybe, pointer_node); } case Type::Typedef: @@ -1041,7 +1070,7 @@ static AstNode *trans_type(Context *c, const Type *ty, const SourceLocation &sou return nullptr; } AstNode *pointer_node = trans_create_node_ptr_type(c, child_qt.isConstQualified(), - child_qt.isVolatileQualified(), child_type_node); + child_qt.isVolatileQualified(), child_type_node, PtrLenUnknown); return pointer_node; } case Type::BlockPointer: @@ -4448,7 +4477,7 @@ static AstNode *parse_ctok_suffix_op_expr(Context *c, CTokenize *ctok, size_t *t } else if (first_tok->id == CTokIdAsterisk) { *tok_i += 1; - node = trans_create_node_ptr_type(c, false, false, node); + node = trans_create_node_ptr_type(c, false, false, node, PtrLenUnknown); } else { return node; } diff --git a/std/c/index.zig b/std/c/index.zig index ade37f36c1..7de8634d07 100644 --- a/std/c/index.zig +++ b/std/c/index.zig @@ -20,11 +20,11 @@ pub extern "c" fn @"fstat$INODE64"(fd: c_int, buf: *Stat) c_int; pub extern "c" fn lseek(fd: c_int, offset: isize, whence: c_int) isize; pub extern "c" fn open(path: [*]const u8, oflag: c_int, ...) c_int; pub extern "c" fn raise(sig: c_int) c_int; -pub extern "c" fn read(fd: c_int, buf: [*]c_void, nbyte: usize) isize; +pub extern "c" fn read(fd: c_int, buf: *c_void, nbyte: usize) isize; pub extern "c" fn stat(noalias path: [*]const u8, noalias buf: *Stat) c_int; -pub extern "c" fn write(fd: c_int, buf: [*]const c_void, nbyte: usize) isize; -pub extern "c" fn mmap(addr: ?[*]c_void, len: usize, prot: c_int, flags: c_int, fd: c_int, offset: isize) ?[*]c_void; -pub extern "c" fn munmap(addr: [*]c_void, len: usize) c_int; +pub extern "c" fn write(fd: c_int, buf: *const c_void, nbyte: usize) isize; +pub extern "c" fn mmap(addr: ?*c_void, len: usize, prot: c_int, flags: c_int, fd: c_int, offset: isize) ?*c_void; +pub extern "c" fn munmap(addr: *c_void, len: usize) c_int; pub extern "c" fn unlink(path: [*]const u8) c_int; pub extern "c" fn getcwd(buf: [*]u8, size: usize) ?[*]u8; pub extern "c" fn waitpid(pid: c_int, stat_loc: *c_int, options: c_int) c_int; @@ -48,15 +48,15 @@ pub extern "c" fn setreuid(ruid: c_uint, euid: c_uint) c_int; pub extern "c" fn setregid(rgid: c_uint, egid: c_uint) c_int; pub extern "c" fn rmdir(path: [*]const u8) c_int; -pub extern "c" fn aligned_alloc(alignment: usize, size: usize) ?[*]c_void; -pub extern "c" fn malloc(usize) ?[*]c_void; -pub extern "c" fn realloc([*]c_void, usize) ?[*]c_void; -pub extern "c" fn free([*]c_void) void; -pub extern "c" fn posix_memalign(memptr: *[*]c_void, alignment: usize, size: usize) c_int; +pub extern "c" fn aligned_alloc(alignment: usize, size: usize) ?*c_void; +pub extern "c" fn malloc(usize) ?*c_void; +pub extern "c" fn realloc(*c_void, usize) ?*c_void; +pub extern "c" fn free(*c_void) void; +pub extern "c" fn posix_memalign(memptr: **c_void, alignment: usize, size: usize) c_int; pub extern "pthread" fn pthread_create(noalias newthread: *pthread_t, noalias attr: ?*const pthread_attr_t, start_routine: extern fn (?*c_void) ?*c_void, noalias arg: ?*c_void) c_int; pub extern "pthread" fn pthread_attr_init(attr: *pthread_attr_t) c_int; -pub extern "pthread" fn pthread_attr_setstack(attr: *pthread_attr_t, stackaddr: [*]c_void, stacksize: usize) c_int; +pub extern "pthread" fn pthread_attr_setstack(attr: 
*pthread_attr_t, stackaddr: *c_void, stacksize: usize) c_int; pub extern "pthread" fn pthread_attr_destroy(attr: *pthread_attr_t) c_int; pub extern "pthread" fn pthread_join(thread: pthread_t, arg_return: ?*?*c_void) c_int; diff --git a/std/heap.zig b/std/heap.zig index 4444a2307a..5d430bc761 100644 --- a/std/heap.zig +++ b/std/heap.zig @@ -22,7 +22,7 @@ fn cAlloc(self: *Allocator, n: usize, alignment: u29) ![]u8 { } fn cRealloc(self: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 { - const old_ptr = @ptrCast([*]c_void, old_mem.ptr); + const old_ptr = @ptrCast(*c_void, old_mem.ptr); if (c.realloc(old_ptr, new_size)) |buf| { return @ptrCast([*]u8, buf)[0..new_size]; } else if (new_size <= old_mem.len) { @@ -33,7 +33,7 @@ fn cRealloc(self: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![ } fn cFree(self: *Allocator, old_mem: []u8) void { - const old_ptr = @ptrCast([*]c_void, old_mem.ptr); + const old_ptr = @ptrCast(*c_void, old_mem.ptr); c.free(old_ptr); } @@ -140,7 +140,7 @@ pub const DirectAllocator = struct { const old_adjusted_addr = @ptrToInt(old_mem.ptr); const old_record_addr = old_adjusted_addr + old_mem.len; const root_addr = @intToPtr(*align(1) usize, old_record_addr).*; - const old_ptr = @intToPtr([*]c_void, root_addr); + const old_ptr = @intToPtr(*c_void, root_addr); const amt = new_size + alignment + @sizeOf(usize); const new_ptr = os.windows.HeapReAlloc(??self.heap_handle, 0, old_ptr, amt) ?? blk: { if (new_size > old_mem.len) return error.OutOfMemory; @@ -170,7 +170,7 @@ pub const DirectAllocator = struct { Os.windows => { const record_addr = @ptrToInt(bytes.ptr) + bytes.len; const root_addr = @intToPtr(*align(1) usize, record_addr).*; - const ptr = @intToPtr([*]c_void, root_addr); + const ptr = @intToPtr(*c_void, root_addr); _ = os.windows.HeapFree(??self.heap_handle, 0, ptr); }, else => @compileError("Unsupported OS"), diff --git a/std/os/darwin.zig b/std/os/darwin.zig index b8e18561cc..a835959103 100644 --- a/std/os/darwin.zig +++ b/std/os/darwin.zig @@ -327,7 +327,7 @@ pub fn raise(sig: i32) usize { } pub fn read(fd: i32, buf: [*]u8, nbyte: usize) usize { - return errnoWrap(c.read(fd, @ptrCast([*]c_void, buf), nbyte)); + return errnoWrap(c.read(fd, @ptrCast(*c_void, buf), nbyte)); } pub fn stat(noalias path: [*]const u8, noalias buf: *stat) usize { @@ -335,17 +335,17 @@ pub fn stat(noalias path: [*]const u8, noalias buf: *stat) usize { } pub fn write(fd: i32, buf: [*]const u8, nbyte: usize) usize { - return errnoWrap(c.write(fd, @ptrCast([*]const c_void, buf), nbyte)); + return errnoWrap(c.write(fd, @ptrCast(*const c_void, buf), nbyte)); } pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: u32, fd: i32, offset: isize) usize { - const ptr_result = c.mmap(@ptrCast([*]c_void, address), length, @bitCast(c_int, c_uint(prot)), @bitCast(c_int, c_uint(flags)), fd, offset); + const ptr_result = c.mmap(@ptrCast(*c_void, address), length, @bitCast(c_int, c_uint(prot)), @bitCast(c_int, c_uint(flags)), fd, offset); const isize_result = @bitCast(isize, @ptrToInt(ptr_result)); return errnoWrap(isize_result); } pub fn munmap(address: usize, length: usize) usize { - return errnoWrap(c.munmap(@intToPtr([*]c_void, address), length)); + return errnoWrap(c.munmap(@intToPtr(*c_void, address), length)); } pub fn unlink(path: [*]const u8) usize { diff --git a/std/os/file.zig b/std/os/file.zig index 378782507b..d5af55b5e4 100644 --- a/std/os/file.zig +++ b/std/os/file.zig @@ -334,7 +334,7 @@ pub const File = struct { while (index < buffer.len) { const 
want_read_count = windows.DWORD(math.min(windows.DWORD(@maxValue(windows.DWORD)), buffer.len - index)); var amt_read: windows.DWORD = undefined; - if (windows.ReadFile(self.handle, @ptrCast([*]c_void, buffer.ptr + index), want_read_count, &amt_read, null) == 0) { + if (windows.ReadFile(self.handle, @ptrCast(*c_void, buffer.ptr + index), want_read_count, &amt_read, null) == 0) { const err = windows.GetLastError(); return switch (err) { windows.ERROR.OPERATION_ABORTED => continue, diff --git a/std/os/index.zig b/std/os/index.zig index 6023929b04..fe5ecc38ba 100644 --- a/std/os/index.zig +++ b/std/os/index.zig @@ -2362,7 +2362,7 @@ pub const Thread = struct { }, builtin.Os.windows => struct { handle: windows.HANDLE, - alloc_start: [*]c_void, + alloc_start: *c_void, heap_handle: windows.HANDLE, }, else => @compileError("Unsupported OS"), @@ -2533,7 +2533,7 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!*Thread // align to page stack_end -= stack_end % os.page_size; - assert(c.pthread_attr_setstack(&attr, @intToPtr([*]c_void, stack_addr), stack_end - stack_addr) == 0); + assert(c.pthread_attr_setstack(&attr, @intToPtr(*c_void, stack_addr), stack_end - stack_addr) == 0); const err = c.pthread_create(&thread_ptr.data.handle, &attr, MainFuncs.posixThreadMain, @intToPtr(*c_void, arg)); switch (err) { diff --git a/std/os/windows/index.zig b/std/os/windows/index.zig index c491ae6538..53e12500e7 100644 --- a/std/os/windows/index.zig +++ b/std/os/windows/index.zig @@ -101,17 +101,17 @@ pub extern "kernel32" stdcallcc fn GetSystemTimeAsFileTime(?*FILETIME) void; pub extern "kernel32" stdcallcc fn HeapCreate(flOptions: DWORD, dwInitialSize: SIZE_T, dwMaximumSize: SIZE_T) ?HANDLE; pub extern "kernel32" stdcallcc fn HeapDestroy(hHeap: HANDLE) BOOL; -pub extern "kernel32" stdcallcc fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: [*]c_void, dwBytes: SIZE_T) ?[*]c_void; -pub extern "kernel32" stdcallcc fn HeapSize(hHeap: HANDLE, dwFlags: DWORD, lpMem: [*]const c_void) SIZE_T; -pub extern "kernel32" stdcallcc fn HeapValidate(hHeap: HANDLE, dwFlags: DWORD, lpMem: [*]const c_void) BOOL; +pub extern "kernel32" stdcallcc fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: *c_void, dwBytes: SIZE_T) ?*c_void; +pub extern "kernel32" stdcallcc fn HeapSize(hHeap: HANDLE, dwFlags: DWORD, lpMem: *const c_void) SIZE_T; +pub extern "kernel32" stdcallcc fn HeapValidate(hHeap: HANDLE, dwFlags: DWORD, lpMem: *const c_void) BOOL; pub extern "kernel32" stdcallcc fn HeapCompact(hHeap: HANDLE, dwFlags: DWORD) SIZE_T; pub extern "kernel32" stdcallcc fn HeapSummary(hHeap: HANDLE, dwFlags: DWORD, lpSummary: LPHEAP_SUMMARY) BOOL; pub extern "kernel32" stdcallcc fn GetStdHandle(in_nStdHandle: DWORD) ?HANDLE; -pub extern "kernel32" stdcallcc fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) ?[*]c_void; +pub extern "kernel32" stdcallcc fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) ?*c_void; -pub extern "kernel32" stdcallcc fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: [*]c_void) BOOL; +pub extern "kernel32" stdcallcc fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: *c_void) BOOL; pub extern "kernel32" stdcallcc fn MoveFileExA( lpExistingFileName: LPCSTR, @@ -127,7 +127,7 @@ pub extern "kernel32" stdcallcc fn PathFileExists(pszPath: ?LPCTSTR) BOOL; pub extern "kernel32" stdcallcc fn ReadFile( in_hFile: HANDLE, - out_lpBuffer: [*]c_void, + out_lpBuffer: *c_void, in_nNumberOfBytesToRead: DWORD, out_lpNumberOfBytesRead: *DWORD, in_out_lpOverlapped: ?*OVERLAPPED, @@ -150,7 
+150,7 @@ pub extern "kernel32" stdcallcc fn WaitForSingleObject(hHandle: HANDLE, dwMillis pub extern "kernel32" stdcallcc fn WriteFile( in_hFile: HANDLE, - in_lpBuffer: [*]const c_void, + in_lpBuffer: *const c_void, in_nNumberOfBytesToWrite: DWORD, out_lpNumberOfBytesWritten: ?*DWORD, in_out_lpOverlapped: ?*OVERLAPPED, diff --git a/std/os/windows/util.zig b/std/os/windows/util.zig index 5a40567310..7170346108 100644 --- a/std/os/windows/util.zig +++ b/std/os/windows/util.zig @@ -42,7 +42,7 @@ pub const WriteError = error{ }; pub fn windowsWrite(handle: windows.HANDLE, bytes: []const u8) WriteError!void { - if (windows.WriteFile(handle, @ptrCast([*]const c_void, bytes.ptr), u32(bytes.len), null, null) == 0) { + if (windows.WriteFile(handle, @ptrCast(*const c_void, bytes.ptr), u32(bytes.len), null, null) == 0) { const err = windows.GetLastError(); return switch (err) { windows.ERROR.INVALID_USER_BUFFER => WriteError.SystemResources, diff --git a/test/compare_output.zig b/test/compare_output.zig index 8d5dc68d45..eec077ef85 100644 --- a/test/compare_output.zig +++ b/test/compare_output.zig @@ -284,7 +284,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void { cases.addC("expose function pointer to C land", \\const c = @cImport(@cInclude("stdlib.h")); \\ - \\export fn compare_fn(a: ?[*]const c_void, b: ?[*]const c_void) c_int { + \\export fn compare_fn(a: ?*const c_void, b: ?*const c_void) c_int { \\ const a_int = @ptrCast(*const i32, @alignCast(@alignOf(i32), a)); \\ const b_int = @ptrCast(*const i32, @alignCast(@alignOf(i32), b)); \\ if (a_int.* < b_int.*) { @@ -299,7 +299,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void { \\export fn main() c_int { \\ var array = []u32{ 1, 7, 3, 2, 0, 9, 4, 8, 6, 5 }; \\ - \\ c.qsort(@ptrCast(?[*]c_void, array[0..].ptr), c_ulong(array.len), @sizeOf(i32), compare_fn); + \\ c.qsort(@ptrCast(?*c_void, array[0..].ptr), c_ulong(array.len), @sizeOf(i32), compare_fn); \\ \\ for (array) |item, i| { \\ if (item != i) { diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 4bd6e9bc24..9cecb859fa 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -1,6 +1,13 @@ const tests = @import("tests.zig"); pub fn addCases(cases: *tests.CompileErrorContext) void { + cases.add( + "unknown length pointer to opaque", + \\export const T = [*]@OpaqueType(); + , + ".tmp_source.zig:1:18: error: unknown-length pointer to opaque", + ); + cases.add( "error when evaluating return type", \\const Foo = struct { diff --git a/test/translate_c.zig b/test/translate_c.zig index ac0a98e6cc..3489f9da21 100644 --- a/test/translate_c.zig +++ b/test/translate_c.zig @@ -99,7 +99,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { cases.add("restrict -> noalias", \\void foo(void *restrict bar, void *restrict); , - \\pub extern fn foo(noalias bar: ?[*]c_void, noalias arg1: ?[*]c_void) void; + \\pub extern fn foo(noalias bar: ?*c_void, noalias arg1: ?*c_void) void; ); cases.add("simple struct", @@ -172,7 +172,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { , \\pub const struct_Foo = @OpaqueType(); , - \\pub extern fn some_func(foo: ?[*]struct_Foo, x: c_int) ?[*]struct_Foo; + \\pub extern fn some_func(foo: ?*struct_Foo, x: c_int) ?*struct_Foo; , \\pub const Foo = struct_Foo; ); @@ -233,7 +233,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { , \\pub const Foo = c_void; , - \\pub extern fn fun(a: ?[*]Foo) Foo; + \\pub extern fn fun(a: ?*Foo) Foo; ); cases.add("generate inline func for #define global extern fn", @@ -505,7 
+505,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ return 6; \\} , - \\pub export fn and_or_none_bool(a: c_int, b: f32, c: ?[*]c_void) c_int { + \\pub export fn and_or_none_bool(a: c_int, b: f32, c: ?*c_void) c_int { \\ if ((a != 0) and (b != 0)) return 0; \\ if ((b != 0) and (c != null)) return 1; \\ if ((a != 0) and (c != null)) return 2; @@ -653,8 +653,8 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ return x; \\} , - \\pub export fn foo(x: ?[*]c_ushort) ?[*]c_void { - \\ return @ptrCast(?[*]c_void, x); + \\pub export fn foo(x: ?[*]c_ushort) ?*c_void { + \\ return @ptrCast(?*c_void, x); \\} ); @@ -1173,7 +1173,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ return !c; \\} , - \\pub fn foo(a: c_int, b: f32, c: ?[*]c_void) c_int { + \\pub fn foo(a: c_int, b: f32, c: ?*c_void) c_int { \\ return !(a == 0); \\ return !(a != 0); \\ return !(b != 0); @@ -1231,7 +1231,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ B, \\ C, \\}; - \\pub fn if_none_bool(a: c_int, b: f32, c: ?[*]c_void, d: enum_SomeEnum) c_int { + \\pub fn if_none_bool(a: c_int, b: f32, c: ?*c_void, d: enum_SomeEnum) c_int { \\ if (a != 0) return 0; \\ if (b != 0) return 1; \\ if (c != null) return 2; @@ -1248,7 +1248,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ return 3; \\} , - \\pub fn while_none_bool(a: c_int, b: f32, c: ?[*]c_void) c_int { + \\pub fn while_none_bool(a: c_int, b: f32, c: ?*c_void) c_int { \\ while (a != 0) return 0; \\ while (b != 0) return 1; \\ while (c != null) return 2; @@ -1264,7 +1264,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ return 3; \\} , - \\pub fn for_none_bool(a: c_int, b: f32, c: ?[*]c_void) c_int { + \\pub fn for_none_bool(a: c_int, b: f32, c: ?*c_void) c_int { \\ while (a != 0) return 0; \\ while (b != 0) return 1; \\ while (c != null) return 2; -- cgit v1.2.3 From 0ccc18686921dce8e7f2feb95eed83b894ca8df4 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 5 Jun 2018 20:24:11 -0400 Subject: disable field access for unknown length pointers See #770 --- src/analyze.cpp | 4 ++-- test/compile_errors.zig | 13 +++++++++++++ 2 files changed, 15 insertions(+), 2 deletions(-) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index 0adb992798..15f08aa3fe 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -3753,13 +3753,13 @@ static bool is_container(TypeTableEntry *type_entry) { } bool is_container_ref(TypeTableEntry *type_entry) { - return (type_entry->id == TypeTableEntryIdPointer) ? + return (type_entry->id == TypeTableEntryIdPointer && type_entry->data.pointer.ptr_len == PtrLenSingle) ? is_container(type_entry->data.pointer.child_type) : is_container(type_entry); } TypeTableEntry *container_ref_type(TypeTableEntry *type_entry) { assert(is_container_ref(type_entry)); - return (type_entry->id == TypeTableEntryIdPointer) ? + return (type_entry->id == TypeTableEntryIdPointer && type_entry->data.pointer.ptr_len == PtrLenSingle) ? 
type_entry->data.pointer.child_type : type_entry; } diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 9cecb859fa..ab539dd94a 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -1,6 +1,19 @@ const tests = @import("tests.zig"); pub fn addCases(cases: *tests.CompileErrorContext) void { + cases.add( + "field access of unknown length pointer", + \\const Foo = extern struct { + \\ a: i32, + \\}; + \\ + \\export fn entry(foo: [*]Foo) void { + \\ foo.a += 1; + \\} + , + ".tmp_source.zig:6:8: error: type '[*]Foo' does not support field access", + ); + cases.add( "unknown length pointer to opaque", \\export const T = [*]@OpaqueType(); -- cgit v1.2.3 From d3693dca73dfc726aed32908691437abe614e5cf Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 6 Jun 2018 00:39:39 -0400 Subject: Pointer Reform: update @typeInfo * add assertion for trying to do @typeInfo on global error set * remove TypeInfo.Slice * add TypeInfo.Pointer.Size with possible values - One - Many - Slice See #770 --- src/analyze.cpp | 2 +- src/codegen.cpp | 11 ++++--- src/ir.cpp | 80 ++++++++++++++++++++++++++++++------------------ std/fmt/index.zig | 31 +++++++++++-------- test/cases/type_info.zig | 33 +++++++++++++++----- 5 files changed, 102 insertions(+), 55 deletions(-) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index 15f08aa3fe..93373f6ec2 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -5981,7 +5981,7 @@ size_t type_id_index(TypeTableEntry *entry) { return 7; case TypeTableEntryIdStruct: if (entry->data.structure.is_slice) - return 25; + return 6; return 8; case TypeTableEntryIdComptimeFloat: return 9; diff --git a/src/codegen.cpp b/src/codegen.cpp index a977c34daf..7f95f335d1 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -6481,7 +6481,6 @@ static void define_builtin_compile_vars(CodeGen *g) { const TypeTableEntryId id = type_id_at_index(i); buf_appendf(contents, " %s,\n", type_id_name(id)); } - buf_appendf(contents, " Slice,\n"); buf_appendf(contents, "};\n\n"); } { @@ -6494,7 +6493,6 @@ static void define_builtin_compile_vars(CodeGen *g) { " Int: Int,\n" " Float: Float,\n" " Pointer: Pointer,\n" - " Slice: Slice,\n" " Array: Array,\n" " Struct: Struct,\n" " ComptimeFloat: void,\n" @@ -6524,13 +6522,18 @@ static void define_builtin_compile_vars(CodeGen *g) { " };\n" "\n" " pub const Pointer = struct {\n" + " size: Size,\n" " is_const: bool,\n" " is_volatile: bool,\n" " alignment: u32,\n" " child: type,\n" - " };\n" "\n" - " pub const Slice = Pointer;\n" + " pub const Size = enum {\n" + " One,\n" + " Many,\n" + " Slice,\n" + " };\n" + " };\n" "\n" " pub const Array = struct {\n" " len: usize,\n" diff --git a/src/ir.cpp b/src/ir.cpp index a6686aae76..3486e8c047 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -16222,8 +16222,7 @@ static bool ir_make_type_info_defs(IrAnalyze *ira, ConstExprValue *out_val, Scop return true; } -static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *type_entry) -{ +static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *type_entry) { assert(type_entry != nullptr); assert(!type_is_invalid(type_entry)); @@ -16248,38 +16247,67 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t enum_field_val->data.x_struct.fields = inner_fields; }; - const auto create_ptr_like_type_info = [ira](const char *name, TypeTableEntry *ptr_type_entry) { + const auto create_ptr_like_type_info = [ira](TypeTableEntry *ptr_type_entry) { + TypeTableEntry *attrs_type; + 
uint32_t size_enum_index; + if (is_slice(ptr_type_entry)) { + attrs_type = ptr_type_entry->data.structure.fields[slice_ptr_index].type_entry; + size_enum_index = 2; + } else if (ptr_type_entry->id == TypeTableEntryIdPointer) { + attrs_type = ptr_type_entry; + size_enum_index = (ptr_type_entry->data.pointer.ptr_len == PtrLenSingle) ? 0 : 1; + } else { + zig_unreachable(); + } + + TypeTableEntry *type_info_pointer_type = ir_type_info_get_type(ira, "Pointer"); + ensure_complete_type(ira->codegen, type_info_pointer_type); + assert(!type_is_invalid(type_info_pointer_type)); + ConstExprValue *result = create_const_vals(1); result->special = ConstValSpecialStatic; - result->type = ir_type_info_get_type(ira, name); + result->type = type_info_pointer_type; - ConstExprValue *fields = create_const_vals(4); + ConstExprValue *fields = create_const_vals(5); result->data.x_struct.fields = fields; - // is_const: bool - ensure_field_index(result->type, "is_const", 0); + // size: Size + ensure_field_index(result->type, "size", 0); + TypeTableEntry *type_info_pointer_size_type = ir_type_info_get_type(ira, "Size", type_info_pointer_type); + ensure_complete_type(ira->codegen, type_info_pointer_size_type); + assert(!type_is_invalid(type_info_pointer_size_type)); fields[0].special = ConstValSpecialStatic; - fields[0].type = ira->codegen->builtin_types.entry_bool; - fields[0].data.x_bool = ptr_type_entry->data.pointer.is_const; - // is_volatile: bool - ensure_field_index(result->type, "is_volatile", 1); + fields[0].type = type_info_pointer_size_type; + bigint_init_unsigned(&fields[0].data.x_enum_tag, size_enum_index); + + // is_const: bool + ensure_field_index(result->type, "is_const", 1); fields[1].special = ConstValSpecialStatic; fields[1].type = ira->codegen->builtin_types.entry_bool; - fields[1].data.x_bool = ptr_type_entry->data.pointer.is_volatile; - // alignment: u32 - ensure_field_index(result->type, "alignment", 2); + fields[1].data.x_bool = attrs_type->data.pointer.is_const; + // is_volatile: bool + ensure_field_index(result->type, "is_volatile", 2); fields[2].special = ConstValSpecialStatic; - fields[2].type = ira->codegen->builtin_types.entry_u32; - bigint_init_unsigned(&fields[2].data.x_bigint, ptr_type_entry->data.pointer.alignment); - // child: type - ensure_field_index(result->type, "child", 3); + fields[2].type = ira->codegen->builtin_types.entry_bool; + fields[2].data.x_bool = attrs_type->data.pointer.is_volatile; + // alignment: u32 + ensure_field_index(result->type, "alignment", 3); fields[3].special = ConstValSpecialStatic; - fields[3].type = ira->codegen->builtin_types.entry_type; - fields[3].data.x_type = ptr_type_entry->data.pointer.child_type; + fields[3].type = ira->codegen->builtin_types.entry_u32; + bigint_init_unsigned(&fields[3].data.x_bigint, attrs_type->data.pointer.alignment); + // child: type + ensure_field_index(result->type, "child", 4); + fields[4].special = ConstValSpecialStatic; + fields[4].type = ira->codegen->builtin_types.entry_type; + fields[4].data.x_type = attrs_type->data.pointer.child_type; return result; }; + if (type_entry == ira->codegen->builtin_types.entry_global_error_set) { + zig_panic("TODO implement @typeInfo for global error set"); + } + ConstExprValue *result = nullptr; switch (type_entry->id) { @@ -16348,7 +16376,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t } case TypeTableEntryIdPointer: { - result = create_ptr_like_type_info("Pointer", type_entry); + result = create_ptr_like_type_info(type_entry); break; } case 
TypeTableEntryIdArray: @@ -16621,15 +16649,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t case TypeTableEntryIdStruct: { if (type_entry->data.structure.is_slice) { - Buf ptr_field_name = BUF_INIT; - buf_init_from_str(&ptr_field_name, "ptr"); - TypeTableEntry *ptr_type = type_entry->data.structure.fields_by_name.get(&ptr_field_name)->type_entry; - ensure_complete_type(ira->codegen, ptr_type); - if (type_is_invalid(ptr_type)) - return nullptr; - buf_deinit(&ptr_field_name); - - result = create_ptr_like_type_info("Slice", ptr_type); + result = create_ptr_like_type_info(type_entry); break; } diff --git a/std/fmt/index.zig b/std/fmt/index.zig index 047a154bb8..bbf48df0cf 100644 --- a/std/fmt/index.zig +++ b/std/fmt/index.zig @@ -97,7 +97,11 @@ pub fn formatType( output: fn (@typeOf(context), []const u8) Errors!void, ) Errors!void { const T = @typeOf(value); - switch (@typeId(T)) { + if (T == error) { + try output(context, "error."); + return output(context, @errorName(value)); + } + switch (@typeInfo(T)) { builtin.TypeId.Int, builtin.TypeId.Float => { return formatValue(value, fmt, context, Errors, output); }, @@ -125,12 +129,13 @@ pub fn formatType( try output(context, "error."); return output(context, @errorName(value)); }, - builtin.TypeId.Pointer => { - switch (@typeId(T.Child)) { - builtin.TypeId.Array => { - if (T.Child.Child == u8) { + builtin.TypeId.Pointer => |ptr_info| switch (ptr_info.size) { + builtin.TypeInfo.Pointer.Size.One => switch (@typeInfo(ptr_info.child)) { + builtin.TypeId.Array => |info| { + if (info.child == u8) { return formatText(value, fmt, context, Errors, output); } + return format(context, Errors, output, "{}@{x}", @typeName(T.Child), @ptrToInt(value)); }, builtin.TypeId.Enum, builtin.TypeId.Union, builtin.TypeId.Struct => { const has_cust_fmt = comptime cf: { @@ -154,14 +159,16 @@ pub fn formatType( return format(context, Errors, output, "{}@{x}", @typeName(T.Child), @ptrToInt(value)); }, else => return format(context, Errors, output, "{}@{x}", @typeName(T.Child), @ptrToInt(value)), - } - }, - else => if (@canImplicitCast([]const u8, value)) { - const casted_value = ([]const u8)(value); - return output(context, casted_value); - } else { - @compileError("Unable to format type '" ++ @typeName(T) ++ "'"); + }, + builtin.TypeInfo.Pointer.Size.Many => { + return format(context, Errors, output, "{}@{x}", @typeName(T.Child), @ptrToInt(value)); + }, + builtin.TypeInfo.Pointer.Size.Slice => { + const casted_value = ([]const u8)(value); + return output(context, casted_value); + }, }, + else => @compileError("Unable to format type '" ++ @typeName(T) ++ "'"), } } diff --git a/test/cases/type_info.zig b/test/cases/type_info.zig index 921ff785a7..b452c8e9f6 100644 --- a/test/cases/type_info.zig +++ b/test/cases/type_info.zig @@ -39,12 +39,28 @@ test "type info: pointer type info" { fn testPointer() void { const u32_ptr_info = @typeInfo(*u32); assert(TypeId(u32_ptr_info) == TypeId.Pointer); + assert(u32_ptr_info.Pointer.size == TypeInfo.Pointer.Size.One); assert(u32_ptr_info.Pointer.is_const == false); assert(u32_ptr_info.Pointer.is_volatile == false); - assert(u32_ptr_info.Pointer.alignment == 4); + assert(u32_ptr_info.Pointer.alignment == @alignOf(u32)); assert(u32_ptr_info.Pointer.child == u32); } +test "type info: unknown length pointer type info" { + testUnknownLenPtr(); + comptime testUnknownLenPtr(); +} + +fn testUnknownLenPtr() void { + const u32_ptr_info = @typeInfo([*]const volatile f64); + assert(TypeId(u32_ptr_info) == 
TypeId.Pointer); + assert(u32_ptr_info.Pointer.size == TypeInfo.Pointer.Size.Many); + assert(u32_ptr_info.Pointer.is_const == true); + assert(u32_ptr_info.Pointer.is_volatile == true); + assert(u32_ptr_info.Pointer.alignment == @alignOf(f64)); + assert(u32_ptr_info.Pointer.child == f64); +} + test "type info: slice type info" { testSlice(); comptime testSlice(); @@ -52,11 +68,12 @@ test "type info: slice type info" { fn testSlice() void { const u32_slice_info = @typeInfo([]u32); - assert(TypeId(u32_slice_info) == TypeId.Slice); - assert(u32_slice_info.Slice.is_const == false); - assert(u32_slice_info.Slice.is_volatile == false); - assert(u32_slice_info.Slice.alignment == 4); - assert(u32_slice_info.Slice.child == u32); + assert(TypeId(u32_slice_info) == TypeId.Pointer); + assert(u32_slice_info.Pointer.size == TypeInfo.Pointer.Size.Slice); + assert(u32_slice_info.Pointer.is_const == false); + assert(u32_slice_info.Pointer.is_volatile == false); + assert(u32_slice_info.Pointer.alignment == 4); + assert(u32_slice_info.Pointer.child == u32); } test "type info: array type info" { @@ -149,11 +166,11 @@ fn testUnion() void { assert(TypeId(typeinfo_info) == TypeId.Union); assert(typeinfo_info.Union.layout == TypeInfo.ContainerLayout.Auto); assert(typeinfo_info.Union.tag_type == TypeId); - assert(typeinfo_info.Union.fields.len == 26); + assert(typeinfo_info.Union.fields.len == 25); assert(typeinfo_info.Union.fields[4].enum_field != null); assert((??typeinfo_info.Union.fields[4].enum_field).value == 4); assert(typeinfo_info.Union.fields[4].field_type == @typeOf(@typeInfo(u8).Int)); - assert(typeinfo_info.Union.defs.len == 21); + assert(typeinfo_info.Union.defs.len == 20); const TestNoTagUnion = union { Foo: void, -- cgit v1.2.3 From 31aefa6a2179dfae752020195fb193c6333bae7e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 7 Jun 2018 17:26:41 -0400 Subject: fix structs that contain types which require comptime Now, if a struct has any fields which require comptime, such as `type`, then the struct is marked as requiring comptime as well. Same goes for unions. This means that a function will implicitly be called at comptime if the return type is a struct which contains a field of type `type`. 
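For example, a minimal sketch of the resulting behavior (mirroring the new
eval.zig test case; the Wrapper/wrap names are illustrative):

    const assert = @import("std").debug.assert;

    const Wrapper = struct {
        T: type,
    };

    // Wrapper contains a field of type `type`, so it now requires comptime;
    // any call returning a Wrapper is therefore implicitly evaluated at comptime.
    fn wrap(comptime T: type) Wrapper {
        return Wrapper{ .T = T };
    }

    test "function returning struct with type field is implicitly comptime" {
        const ty = wrap(i32).T;
        assert(ty == i32);
    }
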
closes #586 --- src/all_types.hpp | 8 ++++ src/analyze.cpp | 23 +++++++++- src/ir.cpp | 112 ++++++++++++++---------------------------------- test/cases/eval.zig | 13 ++++++ test/compile_errors.zig | 2 +- 5 files changed, 77 insertions(+), 81 deletions(-) (limited to 'src/analyze.cpp') diff --git a/src/all_types.hpp b/src/all_types.hpp index 3b2ea02b71..b193fe8ae8 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1037,6 +1037,10 @@ struct TypeTableEntryStruct { // whether we've finished resolving it bool complete; + // whether any of the fields require comptime + // the value is not valid until zero_bits_known == true + bool requires_comptime; + bool zero_bits_loop_flag; bool zero_bits_known; uint32_t abi_alignment; // also figured out with zero_bits pass @@ -1105,6 +1109,10 @@ struct TypeTableEntryUnion { // whether we've finished resolving it bool complete; + // whether any of the fields require comptime + // the value is not valid until zero_bits_known == true + bool requires_comptime; + bool zero_bits_loop_flag; bool zero_bits_known; uint32_t abi_alignment; // also figured out with zero_bits pass diff --git a/src/analyze.cpp b/src/analyze.cpp index 93373f6ec2..e05fb23237 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -2533,6 +2533,10 @@ static void resolve_struct_zero_bits(CodeGen *g, TypeTableEntry *struct_type) { continue; } + if (type_requires_comptime(field_type)) { + struct_type->data.structure.requires_comptime = true; + } + if (!type_has_bits(field_type)) continue; @@ -2724,6 +2728,11 @@ static void resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type) { } union_field->type_entry = field_type; + if (type_requires_comptime(field_type)) { + union_type->data.unionation.requires_comptime = true; + } + + if (field_node->data.struct_field.value != nullptr && !decl_node->data.container_decl.auto_enum) { ErrorMsg *msg = add_node_error(g, field_node->data.struct_field.value, buf_sprintf("non-enum union field assignment")); @@ -4944,17 +4953,29 @@ bool type_requires_comptime(TypeTableEntry *type_entry) { case TypeTableEntryIdArgTuple: return true; case TypeTableEntryIdArray: + return type_requires_comptime(type_entry->data.array.child_type); case TypeTableEntryIdStruct: + assert(type_has_zero_bits_known(type_entry)); + return type_entry->data.structure.requires_comptime; case TypeTableEntryIdUnion: + assert(type_has_zero_bits_known(type_entry)); + return type_entry->data.unionation.requires_comptime; case TypeTableEntryIdMaybe: + return type_requires_comptime(type_entry->data.maybe.child_type); case TypeTableEntryIdErrorUnion: + return type_requires_comptime(type_entry->data.error_union.payload_type); + case TypeTableEntryIdPointer: + if (type_entry->data.pointer.child_type->id == TypeTableEntryIdOpaque) { + return false; + } else { + return type_requires_comptime(type_entry->data.pointer.child_type); + } case TypeTableEntryIdEnum: case TypeTableEntryIdErrorSet: case TypeTableEntryIdFn: case TypeTableEntryIdBool: case TypeTableEntryIdInt: case TypeTableEntryIdFloat: - case TypeTableEntryIdPointer: case TypeTableEntryIdVoid: case TypeTableEntryIdUnreachable: case TypeTableEntryIdPromise: diff --git a/src/ir.cpp b/src/ir.cpp index 3486e8c047..304127b099 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -11624,61 +11624,6 @@ static TypeTableEntry *ir_analyze_instruction_bin_op(IrAnalyze *ira, IrInstructi zig_unreachable(); } -enum VarClassRequired { - VarClassRequiredAny, - VarClassRequiredConst, - VarClassRequiredIllegal, -}; - -static VarClassRequired 
get_var_class_required(TypeTableEntry *type_entry) { - switch (type_entry->id) { - case TypeTableEntryIdInvalid: - zig_unreachable(); - case TypeTableEntryIdUnreachable: - return VarClassRequiredIllegal; - case TypeTableEntryIdBool: - case TypeTableEntryIdInt: - case TypeTableEntryIdFloat: - case TypeTableEntryIdVoid: - case TypeTableEntryIdErrorSet: - case TypeTableEntryIdFn: - case TypeTableEntryIdPromise: - return VarClassRequiredAny; - case TypeTableEntryIdComptimeFloat: - case TypeTableEntryIdComptimeInt: - case TypeTableEntryIdUndefined: - case TypeTableEntryIdBlock: - case TypeTableEntryIdNull: - case TypeTableEntryIdOpaque: - case TypeTableEntryIdMetaType: - case TypeTableEntryIdNamespace: - case TypeTableEntryIdBoundFn: - case TypeTableEntryIdArgTuple: - return VarClassRequiredConst; - - case TypeTableEntryIdPointer: - if (type_entry->data.pointer.child_type->id == TypeTableEntryIdOpaque) { - return VarClassRequiredAny; - } else { - return get_var_class_required(type_entry->data.pointer.child_type); - } - case TypeTableEntryIdArray: - return get_var_class_required(type_entry->data.array.child_type); - case TypeTableEntryIdMaybe: - return get_var_class_required(type_entry->data.maybe.child_type); - case TypeTableEntryIdErrorUnion: - return get_var_class_required(type_entry->data.error_union.payload_type); - - case TypeTableEntryIdStruct: - case TypeTableEntryIdEnum: - case TypeTableEntryIdUnion: - // TODO check the fields of these things and make sure that they don't recursively - // contain any of the other variable classes - return VarClassRequiredAny; - } - zig_unreachable(); -} - static TypeTableEntry *ir_analyze_instruction_decl_var(IrAnalyze *ira, IrInstructionDeclVar *decl_var_instruction) { VariableTableEntry *var = decl_var_instruction->var; @@ -11713,36 +11658,41 @@ static TypeTableEntry *ir_analyze_instruction_decl_var(IrAnalyze *ira, IrInstruc if (type_is_invalid(result_type)) { result_type = ira->codegen->builtin_types.entry_invalid; } else { - switch (get_var_class_required(result_type)) { - case VarClassRequiredIllegal: + type_ensure_zero_bits_known(ira->codegen, result_type); + if (type_is_invalid(result_type)) { + result_type = ira->codegen->builtin_types.entry_invalid; + } + } + + if (!type_is_invalid(result_type)) { + if (result_type->id == TypeTableEntryIdUnreachable || + result_type->id == TypeTableEntryIdOpaque) + { + ir_add_error_node(ira, source_node, + buf_sprintf("variable of type '%s' not allowed", buf_ptr(&result_type->name))); + result_type = ira->codegen->builtin_types.entry_invalid; + } else if (type_requires_comptime(result_type)) { + var_class_requires_const = true; + if (!var->src_is_const && !is_comptime_var) { ir_add_error_node(ira, source_node, - buf_sprintf("variable of type '%s' not allowed", buf_ptr(&result_type->name))); + buf_sprintf("variable of type '%s' must be const or comptime", + buf_ptr(&result_type->name))); result_type = ira->codegen->builtin_types.entry_invalid; - break; - case VarClassRequiredConst: + } + } else { + if (casted_init_value->value.special == ConstValSpecialStatic && + casted_init_value->value.type->id == TypeTableEntryIdFn && + casted_init_value->value.data.x_ptr.data.fn.fn_entry->fn_inline == FnInlineAlways) + { var_class_requires_const = true; if (!var->src_is_const && !is_comptime_var) { - ir_add_error_node(ira, source_node, - buf_sprintf("variable of type '%s' must be const or comptime", - buf_ptr(&result_type->name))); + ErrorMsg *msg = ir_add_error_node(ira, source_node, + buf_sprintf("functions marked inline 
must be stored in const or comptime var")); + AstNode *proto_node = casted_init_value->value.data.x_ptr.data.fn.fn_entry->proto_node; + add_error_note(ira->codegen, msg, proto_node, buf_sprintf("declared here")); result_type = ira->codegen->builtin_types.entry_invalid; } - break; - case VarClassRequiredAny: - if (casted_init_value->value.special == ConstValSpecialStatic && - casted_init_value->value.type->id == TypeTableEntryIdFn && - casted_init_value->value.data.x_ptr.data.fn.fn_entry->fn_inline == FnInlineAlways) - { - var_class_requires_const = true; - if (!var->src_is_const && !is_comptime_var) { - ErrorMsg *msg = ir_add_error_node(ira, source_node, - buf_sprintf("functions marked inline must be stored in const or comptime var")); - AstNode *proto_node = casted_init_value->value.data.x_ptr.data.fn.fn_entry->proto_node; - add_error_note(ira->codegen, msg, proto_node, buf_sprintf("declared here")); - result_type = ira->codegen->builtin_types.entry_invalid; - } - } - break; + } } } @@ -12623,6 +12573,10 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal inst_fn_type_id.return_type = specified_return_type; } + type_ensure_zero_bits_known(ira->codegen, specified_return_type); + if (type_is_invalid(specified_return_type)) + return ira->codegen->builtin_types.entry_invalid; + if (type_requires_comptime(specified_return_type)) { // Throw out our work and call the function as if it were comptime. return ir_analyze_fn_call(ira, call_instruction, fn_entry, fn_type, fn_ref, first_arg_ptr, true, FnInlineAuto); diff --git a/test/cases/eval.zig b/test/cases/eval.zig index 461408afea..9612466a86 100644 --- a/test/cases/eval.zig +++ b/test/cases/eval.zig @@ -610,3 +610,16 @@ test "slice of type" { } } } + +const Wrapper = struct { + T: type, +}; + +fn wrap(comptime T: type) Wrapper { + return Wrapper{ .T = T }; +} + +test "function which returns struct with type field causes implicit comptime" { + const ty = wrap(i32).T; + assert(ty == i32); +} diff --git a/test/compile_errors.zig b/test/compile_errors.zig index c995cd679e..102c4e428d 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -3329,7 +3329,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { ".tmp_source.zig:9:4: error: variable of type 'comptime_float' must be const or comptime", ".tmp_source.zig:10:4: error: variable of type '(block)' must be const or comptime", ".tmp_source.zig:11:4: error: variable of type '(null)' must be const or comptime", - ".tmp_source.zig:12:4: error: variable of type 'Opaque' must be const or comptime", + ".tmp_source.zig:12:4: error: variable of type 'Opaque' not allowed", ".tmp_source.zig:13:4: error: variable of type 'type' must be const or comptime", ".tmp_source.zig:14:4: error: variable of type '(namespace)' must be const or comptime", ".tmp_source.zig:15:4: error: variable of type '(bound fn(*const Foo) void)' must be const or comptime", -- cgit v1.2.3 From bf3d1c1aab336c4a650bb67dcaca132d4a0f6164 Mon Sep 17 00:00:00 2001 From: Jimmi HC Date: Fri, 8 Jun 2018 09:21:31 +0200 Subject: Allow access of array.len through a pointer --- src/analyze.cpp | 14 ++++++++++++-- src/analyze.hpp | 2 ++ src/ir.cpp | 8 ++++++-- test/cases/array.zig | 10 +++++++++- 4 files changed, 29 insertions(+), 5 deletions(-) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index e05fb23237..84f1473ea1 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -3761,14 +3761,24 @@ static bool is_container(TypeTableEntry *type_entry) { zig_unreachable(); 
} +bool is_ref(TypeTableEntry *type_entry) { + return type_entry->id == TypeTableEntryIdPointer && type_entry->data.pointer.ptr_len == PtrLenSingle; +} + +bool is_array_ref(TypeTableEntry *type_entry) { + TypeTableEntry *array = is_ref(type_entry) ? + type_entry->data.pointer.child_type : type_entry; + return array->id == TypeTableEntryIdArray; +} + bool is_container_ref(TypeTableEntry *type_entry) { - return (type_entry->id == TypeTableEntryIdPointer && type_entry->data.pointer.ptr_len == PtrLenSingle) ? + return is_ref(type_entry) ? is_container(type_entry->data.pointer.child_type) : is_container(type_entry); } TypeTableEntry *container_ref_type(TypeTableEntry *type_entry) { assert(is_container_ref(type_entry)); - return (type_entry->id == TypeTableEntryIdPointer && type_entry->data.pointer.ptr_len == PtrLenSingle) ? + return is_ref(type_entry) ? type_entry->data.pointer.child_type : type_entry; } diff --git a/src/analyze.hpp b/src/analyze.hpp index 25bda198d6..88e06b2390 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -70,6 +70,8 @@ TypeUnionField *find_union_type_field(TypeTableEntry *type_entry, Buf *name); TypeEnumField *find_enum_field_by_tag(TypeTableEntry *enum_type, const BigInt *tag); TypeUnionField *find_union_field_by_tag(TypeTableEntry *type_entry, const BigInt *tag); +bool is_ref(TypeTableEntry *type_entry); +bool is_array_ref(TypeTableEntry *type_entry); bool is_container_ref(TypeTableEntry *type_entry); void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node); void scan_import(CodeGen *g, ImportTableEntry *import); diff --git a/src/ir.cpp b/src/ir.cpp index cc4ffb44a9..4766bff5e7 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -13846,10 +13846,14 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru ir_link_new_instruction(result, &field_ptr_instruction->base); return result->value.type; } - } else if (container_type->id == TypeTableEntryIdArray) { + } else if (is_array_ref(container_type)) { if (buf_eql_str(field_name, "len")) { ConstExprValue *len_val = create_const_vals(1); - init_const_usize(ira->codegen, len_val, container_type->data.array.len); + if (container_type->id == TypeTableEntryIdPointer) { + init_const_usize(ira->codegen, len_val, container_type->data.pointer.child_type->data.array.len); + } else { + init_const_usize(ira->codegen, len_val, container_type->data.array.len); + } TypeTableEntry *usize = ira->codegen->builtin_types.entry_usize; bool ptr_is_const = true; diff --git a/test/cases/array.zig b/test/cases/array.zig index ef919b27bd..b481261b4f 100644 --- a/test/cases/array.zig +++ b/test/cases/array.zig @@ -116,6 +116,15 @@ test "array len property" { assert(@typeOf(x).len == 5); } +test "array len field" { + var arr = [4]u8{ 0, 0, 0, 0 }; + var ptr = &arr; + assert(arr.len == 4); + comptime assert(arr.len == 4); + assert(ptr.len == 4); + comptime assert(ptr.len == 4); +} + test "single-item pointer to array indexing and slicing" { testSingleItemPtrArrayIndexSlice(); comptime testSingleItemPtrArrayIndexSlice(); @@ -143,4 +152,3 @@ fn testImplicitCastSingleItemPtr() void { slice[0] += 1; assert(byte == 101); } - -- cgit v1.2.3 From 6edd81109d16178f1dc688dacee4b38964b617c4 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 9 Jun 2018 00:15:23 -0400 Subject: nullable pointers follow const-casting rules any *T -> ?*T cast is allowed implicitly, even when it occurs deep inside the type, and the cast is a no-op at runtime. 
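a minimal sketch of what this allows (illustrative test, not part of this patch):

    const assert = @import("std").debug.assert;

    test "*T implicitly casts to ?*T" {
        // illustrative sketch only
        var x: i32 = 1234;
        const ptr: *i32 = &x;
        // no explicit cast needed; the conversion is a no-op at runtime
        const nullable_ptr: ?*i32 = ptr;
        assert(nullable_ptr != null);
        assert((??nullable_ptr).* == 1234);
    }
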
in order to add this I had to make the comptime value representation of nullable pointers the same as the comptime value representation of normal pointers, so that we don't have to do any recursive transformation of values when doing this kind of cast. --- src/all_types.hpp | 5 +- src/analyze.cpp | 280 ++++++++++++++++++++++++++++------------------------ src/codegen.cpp | 158 +++++++++++++++-------------- src/ir.cpp | 121 +++++++++++++++-------- test/cases/cast.zig | 10 +- 5 files changed, 322 insertions(+), 252 deletions(-) (limited to 'src/analyze.cpp') diff --git a/src/all_types.hpp b/src/all_types.hpp index c671682363..14a44ea768 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -144,6 +144,9 @@ enum ConstPtrSpecial { // understand the value of pointee at compile time. However, we will still // emit a binary with a compile time known address. // In this case index is the numeric address value. + // We also use this for null pointer. We need the data layout for ConstCastOnly == true + // types to be the same, so all nullables of pointer types use x_ptr + // instead of x_nullable ConstPtrSpecialHardCodedAddr, // This means that the pointer represents memory of assigning to _. // That is, storing discards the data, and loading is invalid. @@ -251,7 +254,7 @@ struct ConstExprValue { bool x_bool; ConstBoundFnValue x_bound_fn; TypeTableEntry *x_type; - ConstExprValue *x_maybe; + ConstExprValue *x_nullable; ConstErrValue x_err_union; ErrorTableEntry *x_err_set; BigInt x_enum_tag; diff --git a/src/analyze.cpp b/src/analyze.cpp index 84f1473ea1..16b2cb0590 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -4578,6 +4578,52 @@ bool fn_type_id_eql(FnTypeId *a, FnTypeId *b) { return true; } +static uint32_t hash_const_val_ptr(ConstExprValue *const_val) { + uint32_t hash_val = 0; + switch (const_val->data.x_ptr.mut) { + case ConstPtrMutRuntimeVar: + hash_val += (uint32_t)3500721036; + break; + case ConstPtrMutComptimeConst: + hash_val += (uint32_t)4214318515; + break; + case ConstPtrMutComptimeVar: + hash_val += (uint32_t)1103195694; + break; + } + switch (const_val->data.x_ptr.special) { + case ConstPtrSpecialInvalid: + zig_unreachable(); + case ConstPtrSpecialRef: + hash_val += (uint32_t)2478261866; + hash_val += hash_ptr(const_val->data.x_ptr.data.ref.pointee); + return hash_val; + case ConstPtrSpecialBaseArray: + hash_val += (uint32_t)1764906839; + hash_val += hash_ptr(const_val->data.x_ptr.data.base_array.array_val); + hash_val += hash_size(const_val->data.x_ptr.data.base_array.elem_index); + hash_val += const_val->data.x_ptr.data.base_array.is_cstr ? 
1297263887 : 200363492; + return hash_val; + case ConstPtrSpecialBaseStruct: + hash_val += (uint32_t)3518317043; + hash_val += hash_ptr(const_val->data.x_ptr.data.base_struct.struct_val); + hash_val += hash_size(const_val->data.x_ptr.data.base_struct.field_index); + return hash_val; + case ConstPtrSpecialHardCodedAddr: + hash_val += (uint32_t)4048518294; + hash_val += hash_size(const_val->data.x_ptr.data.hard_coded_addr.addr); + return hash_val; + case ConstPtrSpecialDiscard: + hash_val += 2010123162; + return hash_val; + case ConstPtrSpecialFunction: + hash_val += (uint32_t)2590901619; + hash_val += hash_ptr(const_val->data.x_ptr.data.fn.fn_entry); + return hash_val; + } + zig_unreachable(); +} + static uint32_t hash_const_val(ConstExprValue *const_val) { assert(const_val->special == ConstValSpecialStatic); switch (const_val->type->id) { @@ -4646,51 +4692,7 @@ static uint32_t hash_const_val(ConstExprValue *const_val) { assert(const_val->data.x_ptr.special == ConstPtrSpecialFunction); return 3677364617 ^ hash_ptr(const_val->data.x_ptr.data.fn.fn_entry); case TypeTableEntryIdPointer: - { - uint32_t hash_val = 0; - switch (const_val->data.x_ptr.mut) { - case ConstPtrMutRuntimeVar: - hash_val += (uint32_t)3500721036; - break; - case ConstPtrMutComptimeConst: - hash_val += (uint32_t)4214318515; - break; - case ConstPtrMutComptimeVar: - hash_val += (uint32_t)1103195694; - break; - } - switch (const_val->data.x_ptr.special) { - case ConstPtrSpecialInvalid: - zig_unreachable(); - case ConstPtrSpecialRef: - hash_val += (uint32_t)2478261866; - hash_val += hash_ptr(const_val->data.x_ptr.data.ref.pointee); - return hash_val; - case ConstPtrSpecialBaseArray: - hash_val += (uint32_t)1764906839; - hash_val += hash_ptr(const_val->data.x_ptr.data.base_array.array_val); - hash_val += hash_size(const_val->data.x_ptr.data.base_array.elem_index); - hash_val += const_val->data.x_ptr.data.base_array.is_cstr ? 
1297263887 : 200363492; - return hash_val; - case ConstPtrSpecialBaseStruct: - hash_val += (uint32_t)3518317043; - hash_val += hash_ptr(const_val->data.x_ptr.data.base_struct.struct_val); - hash_val += hash_size(const_val->data.x_ptr.data.base_struct.field_index); - return hash_val; - case ConstPtrSpecialHardCodedAddr: - hash_val += (uint32_t)4048518294; - hash_val += hash_size(const_val->data.x_ptr.data.hard_coded_addr.addr); - return hash_val; - case ConstPtrSpecialDiscard: - hash_val += 2010123162; - return hash_val; - case ConstPtrSpecialFunction: - hash_val += (uint32_t)2590901619; - hash_val += hash_ptr(const_val->data.x_ptr.data.fn.fn_entry); - return hash_val; - } - zig_unreachable(); - } + return hash_const_val_ptr(const_val); case TypeTableEntryIdPromise: // TODO better hashing algorithm return 223048345; @@ -4708,10 +4710,14 @@ static uint32_t hash_const_val(ConstExprValue *const_val) { // TODO better hashing algorithm return 2709806591; case TypeTableEntryIdMaybe: - if (const_val->data.x_maybe) { - return hash_const_val(const_val->data.x_maybe) * 1992916303; + if (get_codegen_ptr_type(const_val->type) != nullptr) { + return hash_const_val(const_val) * 1992916303; } else { - return 4016830364; + if (const_val->data.x_nullable) { + return hash_const_val(const_val->data.x_nullable) * 1992916303; + } else { + return 4016830364; + } } case TypeTableEntryIdErrorUnion: // TODO better hashing algorithm @@ -4812,9 +4818,11 @@ static bool can_mutate_comptime_var_state(ConstExprValue *value) { return false; case TypeTableEntryIdMaybe: - if (value->data.x_maybe == nullptr) + if (get_codegen_ptr_type(value->type) != nullptr) + return value->data.x_ptr.mut == ConstPtrMutComptimeVar; + if (value->data.x_nullable == nullptr) return false; - return can_mutate_comptime_var_state(value->data.x_maybe); + return can_mutate_comptime_var_state(value->data.x_nullable); case TypeTableEntryIdErrorUnion: if (value->data.x_err_union.err != nullptr) @@ -5340,6 +5348,52 @@ bool ir_get_var_is_comptime(VariableTableEntry *var) { return var->is_comptime->value.data.x_bool; } +bool const_values_equal_ptr(ConstExprValue *a, ConstExprValue *b) { + if (a->data.x_ptr.special != b->data.x_ptr.special) + return false; + if (a->data.x_ptr.mut != b->data.x_ptr.mut) + return false; + switch (a->data.x_ptr.special) { + case ConstPtrSpecialInvalid: + zig_unreachable(); + case ConstPtrSpecialRef: + if (a->data.x_ptr.data.ref.pointee != b->data.x_ptr.data.ref.pointee) + return false; + return true; + case ConstPtrSpecialBaseArray: + if (a->data.x_ptr.data.base_array.array_val != b->data.x_ptr.data.base_array.array_val && + a->data.x_ptr.data.base_array.array_val->global_refs != + b->data.x_ptr.data.base_array.array_val->global_refs) + { + return false; + } + if (a->data.x_ptr.data.base_array.elem_index != b->data.x_ptr.data.base_array.elem_index) + return false; + if (a->data.x_ptr.data.base_array.is_cstr != b->data.x_ptr.data.base_array.is_cstr) + return false; + return true; + case ConstPtrSpecialBaseStruct: + if (a->data.x_ptr.data.base_struct.struct_val != b->data.x_ptr.data.base_struct.struct_val && + a->data.x_ptr.data.base_struct.struct_val->global_refs != + b->data.x_ptr.data.base_struct.struct_val->global_refs) + { + return false; + } + if (a->data.x_ptr.data.base_struct.field_index != b->data.x_ptr.data.base_struct.field_index) + return false; + return true; + case ConstPtrSpecialHardCodedAddr: + if (a->data.x_ptr.data.hard_coded_addr.addr != b->data.x_ptr.data.hard_coded_addr.addr) + return false; + return true; 
+ case ConstPtrSpecialDiscard: + return true; + case ConstPtrSpecialFunction: + return a->data.x_ptr.data.fn.fn_entry == b->data.x_ptr.data.fn.fn_entry; + } + zig_unreachable(); +} + bool const_values_equal(ConstExprValue *a, ConstExprValue *b) { assert(a->type->id == b->type->id); assert(a->special == ConstValSpecialStatic); @@ -5391,49 +5445,7 @@ bool const_values_equal(ConstExprValue *a, ConstExprValue *b) { return bigint_cmp(&a->data.x_bigint, &b->data.x_bigint) == CmpEQ; case TypeTableEntryIdPointer: case TypeTableEntryIdFn: - if (a->data.x_ptr.special != b->data.x_ptr.special) - return false; - if (a->data.x_ptr.mut != b->data.x_ptr.mut) - return false; - switch (a->data.x_ptr.special) { - case ConstPtrSpecialInvalid: - zig_unreachable(); - case ConstPtrSpecialRef: - if (a->data.x_ptr.data.ref.pointee != b->data.x_ptr.data.ref.pointee) - return false; - return true; - case ConstPtrSpecialBaseArray: - if (a->data.x_ptr.data.base_array.array_val != b->data.x_ptr.data.base_array.array_val && - a->data.x_ptr.data.base_array.array_val->global_refs != - b->data.x_ptr.data.base_array.array_val->global_refs) - { - return false; - } - if (a->data.x_ptr.data.base_array.elem_index != b->data.x_ptr.data.base_array.elem_index) - return false; - if (a->data.x_ptr.data.base_array.is_cstr != b->data.x_ptr.data.base_array.is_cstr) - return false; - return true; - case ConstPtrSpecialBaseStruct: - if (a->data.x_ptr.data.base_struct.struct_val != b->data.x_ptr.data.base_struct.struct_val && - a->data.x_ptr.data.base_struct.struct_val->global_refs != - b->data.x_ptr.data.base_struct.struct_val->global_refs) - { - return false; - } - if (a->data.x_ptr.data.base_struct.field_index != b->data.x_ptr.data.base_struct.field_index) - return false; - return true; - case ConstPtrSpecialHardCodedAddr: - if (a->data.x_ptr.data.hard_coded_addr.addr != b->data.x_ptr.data.hard_coded_addr.addr) - return false; - return true; - case ConstPtrSpecialDiscard: - return true; - case ConstPtrSpecialFunction: - return a->data.x_ptr.data.fn.fn_entry == b->data.x_ptr.data.fn.fn_entry; - } - zig_unreachable(); + return const_values_equal_ptr(a, b); case TypeTableEntryIdArray: zig_panic("TODO"); case TypeTableEntryIdStruct: @@ -5449,10 +5461,12 @@ bool const_values_equal(ConstExprValue *a, ConstExprValue *b) { case TypeTableEntryIdNull: zig_panic("TODO"); case TypeTableEntryIdMaybe: - if (a->data.x_maybe == nullptr || b->data.x_maybe == nullptr) { - return (a->data.x_maybe == nullptr && b->data.x_maybe == nullptr); + if (get_codegen_ptr_type(a->type) != nullptr) + return const_values_equal_ptr(a, b); + if (a->data.x_nullable == nullptr || b->data.x_nullable == nullptr) { + return (a->data.x_nullable == nullptr && b->data.x_nullable == nullptr); } else { - return const_values_equal(a->data.x_maybe, b->data.x_maybe); + return const_values_equal(a->data.x_nullable, b->data.x_nullable); } case TypeTableEntryIdErrorUnion: zig_panic("TODO"); @@ -5525,6 +5539,41 @@ void eval_min_max_value(CodeGen *g, TypeTableEntry *type_entry, ConstExprValue * } } +void render_const_val_ptr(CodeGen *g, Buf *buf, ConstExprValue *const_val, TypeTableEntry *type_entry) { + switch (const_val->data.x_ptr.special) { + case ConstPtrSpecialInvalid: + zig_unreachable(); + case ConstPtrSpecialRef: + case ConstPtrSpecialBaseStruct: + buf_appendf(buf, "*"); + render_const_value(g, buf, const_ptr_pointee(g, const_val)); + return; + case ConstPtrSpecialBaseArray: + if (const_val->data.x_ptr.data.base_array.is_cstr) { + buf_appendf(buf, "*(c str lit)"); + return; + } 
else { + buf_appendf(buf, "*"); + render_const_value(g, buf, const_ptr_pointee(g, const_val)); + return; + } + case ConstPtrSpecialHardCodedAddr: + buf_appendf(buf, "(*%s)(%" ZIG_PRI_x64 ")", buf_ptr(&type_entry->data.pointer.child_type->name), + const_val->data.x_ptr.data.hard_coded_addr.addr); + return; + case ConstPtrSpecialDiscard: + buf_append_str(buf, "*_"); + return; + case ConstPtrSpecialFunction: + { + FnTableEntry *fn_entry = const_val->data.x_ptr.data.fn.fn_entry; + buf_appendf(buf, "@ptrCast(%s, %s)", buf_ptr(&const_val->type->name), buf_ptr(&fn_entry->symbol_name)); + return; + } + } + zig_unreachable(); +} + void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) { switch (const_val->special) { case ConstValSpecialRuntime: @@ -5601,38 +5650,7 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) { return; } case TypeTableEntryIdPointer: - switch (const_val->data.x_ptr.special) { - case ConstPtrSpecialInvalid: - zig_unreachable(); - case ConstPtrSpecialRef: - case ConstPtrSpecialBaseStruct: - buf_appendf(buf, "&"); - render_const_value(g, buf, const_ptr_pointee(g, const_val)); - return; - case ConstPtrSpecialBaseArray: - if (const_val->data.x_ptr.data.base_array.is_cstr) { - buf_appendf(buf, "&(c str lit)"); - return; - } else { - buf_appendf(buf, "&"); - render_const_value(g, buf, const_ptr_pointee(g, const_val)); - return; - } - case ConstPtrSpecialHardCodedAddr: - buf_appendf(buf, "(&%s)(%" ZIG_PRI_x64 ")", buf_ptr(&type_entry->data.pointer.child_type->name), - const_val->data.x_ptr.data.hard_coded_addr.addr); - return; - case ConstPtrSpecialDiscard: - buf_append_str(buf, "&_"); - return; - case ConstPtrSpecialFunction: - { - FnTableEntry *fn_entry = const_val->data.x_ptr.data.fn.fn_entry; - buf_appendf(buf, "@ptrCast(%s, %s)", buf_ptr(&const_val->type->name), buf_ptr(&fn_entry->symbol_name)); - return; - } - } - zig_unreachable(); + return render_const_val_ptr(g, buf, const_val, type_entry); case TypeTableEntryIdBlock: { AstNode *node = const_val->data.x_block->source_node; @@ -5692,8 +5710,10 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) { } case TypeTableEntryIdMaybe: { - if (const_val->data.x_maybe) { - render_const_value(g, buf, const_val->data.x_maybe); + if (get_codegen_ptr_type(const_val->type) != nullptr) + return render_const_val_ptr(g, buf, const_val, type_entry->data.maybe.child_type); + if (const_val->data.x_nullable) { + render_const_value(g, buf, const_val->data.x_nullable); } else { buf_appendf(buf, "null"); } diff --git a/src/codegen.cpp b/src/codegen.cpp index fab2ad659e..65b465a519 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -5020,6 +5020,79 @@ static bool is_llvm_value_unnamed_type(TypeTableEntry *type_entry, LLVMValueRef return LLVMTypeOf(val) != type_entry->type_ref; } +static LLVMValueRef gen_const_val_ptr(CodeGen *g, ConstExprValue *const_val, const char *name) { + render_const_val_global(g, const_val, name); + switch (const_val->data.x_ptr.special) { + case ConstPtrSpecialInvalid: + case ConstPtrSpecialDiscard: + zig_unreachable(); + case ConstPtrSpecialRef: + { + ConstExprValue *pointee = const_val->data.x_ptr.data.ref.pointee; + render_const_val(g, pointee, ""); + render_const_val_global(g, pointee, ""); + ConstExprValue *other_val = pointee; + const_val->global_refs->llvm_value = LLVMConstBitCast(other_val->global_refs->llvm_global, const_val->type->type_ref); + render_const_val_global(g, const_val, ""); + return const_val->global_refs->llvm_value; + } + case 
ConstPtrSpecialBaseArray: + { + ConstExprValue *array_const_val = const_val->data.x_ptr.data.base_array.array_val; + size_t elem_index = const_val->data.x_ptr.data.base_array.elem_index; + assert(array_const_val->type->id == TypeTableEntryIdArray); + if (array_const_val->type->zero_bits) { + // make this a null pointer + TypeTableEntry *usize = g->builtin_types.entry_usize; + const_val->global_refs->llvm_value = LLVMConstIntToPtr(LLVMConstNull(usize->type_ref), + const_val->type->type_ref); + render_const_val_global(g, const_val, ""); + return const_val->global_refs->llvm_value; + } + LLVMValueRef uncasted_ptr_val = gen_const_ptr_array_recursive(g, array_const_val, + elem_index); + LLVMValueRef ptr_val = LLVMConstBitCast(uncasted_ptr_val, const_val->type->type_ref); + const_val->global_refs->llvm_value = ptr_val; + render_const_val_global(g, const_val, ""); + return ptr_val; + } + case ConstPtrSpecialBaseStruct: + { + ConstExprValue *struct_const_val = const_val->data.x_ptr.data.base_struct.struct_val; + assert(struct_const_val->type->id == TypeTableEntryIdStruct); + if (struct_const_val->type->zero_bits) { + // make this a null pointer + TypeTableEntry *usize = g->builtin_types.entry_usize; + const_val->global_refs->llvm_value = LLVMConstIntToPtr(LLVMConstNull(usize->type_ref), + const_val->type->type_ref); + render_const_val_global(g, const_val, ""); + return const_val->global_refs->llvm_value; + } + size_t src_field_index = const_val->data.x_ptr.data.base_struct.field_index; + size_t gen_field_index = + struct_const_val->type->data.structure.fields[src_field_index].gen_index; + LLVMValueRef uncasted_ptr_val = gen_const_ptr_struct_recursive(g, struct_const_val, + gen_field_index); + LLVMValueRef ptr_val = LLVMConstBitCast(uncasted_ptr_val, const_val->type->type_ref); + const_val->global_refs->llvm_value = ptr_val; + render_const_val_global(g, const_val, ""); + return ptr_val; + } + case ConstPtrSpecialHardCodedAddr: + { + uint64_t addr_value = const_val->data.x_ptr.data.hard_coded_addr.addr; + TypeTableEntry *usize = g->builtin_types.entry_usize; + const_val->global_refs->llvm_value = LLVMConstIntToPtr(LLVMConstInt(usize->type_ref, addr_value, false), + const_val->type->type_ref); + render_const_val_global(g, const_val, ""); + return const_val->global_refs->llvm_value; + } + case ConstPtrSpecialFunction: + return LLVMConstBitCast(fn_llvm_value(g, const_val->data.x_ptr.data.fn.fn_entry), const_val->type->type_ref); + } + zig_unreachable(); +} + static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const char *name) { TypeTableEntry *type_entry = const_val->type; assert(!type_entry->zero_bits); @@ -5068,19 +5141,15 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c { TypeTableEntry *child_type = type_entry->data.maybe.child_type; if (child_type->zero_bits) { - return LLVMConstInt(LLVMInt1Type(), const_val->data.x_maybe ? 1 : 0, false); + return LLVMConstInt(LLVMInt1Type(), const_val->data.x_nullable ? 
1 : 0, false); } else if (type_is_codegen_pointer(child_type)) { - if (const_val->data.x_maybe) { - return gen_const_val(g, const_val->data.x_maybe, ""); - } else { - return LLVMConstNull(child_type->type_ref); - } + return gen_const_val_ptr(g, const_val, name); } else { LLVMValueRef child_val; LLVMValueRef maybe_val; bool make_unnamed_struct; - if (const_val->data.x_maybe) { - child_val = gen_const_val(g, const_val->data.x_maybe, ""); + if (const_val->data.x_nullable) { + child_val = gen_const_val(g, const_val->data.x_nullable, ""); maybe_val = LLVMConstAllOnes(LLVMInt1Type()); make_unnamed_struct = is_llvm_value_unnamed_type(const_val->type, child_val); @@ -5270,78 +5339,7 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c assert(const_val->data.x_ptr.mut == ConstPtrMutComptimeConst); return fn_llvm_value(g, const_val->data.x_ptr.data.fn.fn_entry); case TypeTableEntryIdPointer: - { - render_const_val_global(g, const_val, name); - switch (const_val->data.x_ptr.special) { - case ConstPtrSpecialInvalid: - case ConstPtrSpecialDiscard: - zig_unreachable(); - case ConstPtrSpecialRef: - { - ConstExprValue *pointee = const_val->data.x_ptr.data.ref.pointee; - render_const_val(g, pointee, ""); - render_const_val_global(g, pointee, ""); - ConstExprValue *other_val = pointee; - const_val->global_refs->llvm_value = LLVMConstBitCast(other_val->global_refs->llvm_global, const_val->type->type_ref); - render_const_val_global(g, const_val, ""); - return const_val->global_refs->llvm_value; - } - case ConstPtrSpecialBaseArray: - { - ConstExprValue *array_const_val = const_val->data.x_ptr.data.base_array.array_val; - size_t elem_index = const_val->data.x_ptr.data.base_array.elem_index; - assert(array_const_val->type->id == TypeTableEntryIdArray); - if (array_const_val->type->zero_bits) { - // make this a null pointer - TypeTableEntry *usize = g->builtin_types.entry_usize; - const_val->global_refs->llvm_value = LLVMConstIntToPtr(LLVMConstNull(usize->type_ref), - const_val->type->type_ref); - render_const_val_global(g, const_val, ""); - return const_val->global_refs->llvm_value; - } - LLVMValueRef uncasted_ptr_val = gen_const_ptr_array_recursive(g, array_const_val, - elem_index); - LLVMValueRef ptr_val = LLVMConstBitCast(uncasted_ptr_val, const_val->type->type_ref); - const_val->global_refs->llvm_value = ptr_val; - render_const_val_global(g, const_val, ""); - return ptr_val; - } - case ConstPtrSpecialBaseStruct: - { - ConstExprValue *struct_const_val = const_val->data.x_ptr.data.base_struct.struct_val; - assert(struct_const_val->type->id == TypeTableEntryIdStruct); - if (struct_const_val->type->zero_bits) { - // make this a null pointer - TypeTableEntry *usize = g->builtin_types.entry_usize; - const_val->global_refs->llvm_value = LLVMConstIntToPtr(LLVMConstNull(usize->type_ref), - const_val->type->type_ref); - render_const_val_global(g, const_val, ""); - return const_val->global_refs->llvm_value; - } - size_t src_field_index = const_val->data.x_ptr.data.base_struct.field_index; - size_t gen_field_index = - struct_const_val->type->data.structure.fields[src_field_index].gen_index; - LLVMValueRef uncasted_ptr_val = gen_const_ptr_struct_recursive(g, struct_const_val, - gen_field_index); - LLVMValueRef ptr_val = LLVMConstBitCast(uncasted_ptr_val, const_val->type->type_ref); - const_val->global_refs->llvm_value = ptr_val; - render_const_val_global(g, const_val, ""); - return ptr_val; - } - case ConstPtrSpecialHardCodedAddr: - { - uint64_t addr_value = 
const_val->data.x_ptr.data.hard_coded_addr.addr; - TypeTableEntry *usize = g->builtin_types.entry_usize; - const_val->global_refs->llvm_value = LLVMConstIntToPtr(LLVMConstInt(usize->type_ref, addr_value, false), - const_val->type->type_ref); - render_const_val_global(g, const_val, ""); - return const_val->global_refs->llvm_value; - } - case ConstPtrSpecialFunction: - return LLVMConstBitCast(fn_llvm_value(g, const_val->data.x_ptr.data.fn.fn_entry), const_val->type->type_ref); - } - } - zig_unreachable(); + return gen_const_val_ptr(g, const_val, name); case TypeTableEntryIdErrorUnion: { TypeTableEntry *payload_type = type_entry->data.error_union.payload_type; diff --git a/src/ir.cpp b/src/ir.cpp index e62ec71875..13ecfd4233 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -62,6 +62,7 @@ enum ConstCastResultId { ConstCastResultIdType, ConstCastResultIdUnresolvedInferredErrSet, ConstCastResultIdAsyncAllocatorType, + ConstCastResultIdNullWrapPtr, }; struct ConstCastErrSetMismatch { @@ -90,6 +91,7 @@ struct ConstCastOnly { ConstCastOnly *error_union_error_set; ConstCastOnly *return_type; ConstCastOnly *async_allocator_type; + ConstCastOnly *null_wrap_ptr_child; ConstCastArg fn_arg; ConstCastArgNoAlias arg_no_alias; } data; @@ -7660,6 +7662,21 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry if (expected_type == actual_type) return result; + // * and [*] can do a const-cast-only to ?* and ?[*], respectively + if (expected_type->id == TypeTableEntryIdMaybe && + expected_type->data.maybe.child_type->id == TypeTableEntryIdPointer && + actual_type->id == TypeTableEntryIdPointer) + { + ConstCastOnly child = types_match_const_cast_only(ira, + expected_type->data.maybe.child_type, actual_type, source_node); + if (child.id != ConstCastResultIdOk) { + result.id = ConstCastResultIdNullWrapPtr; + result.data.null_wrap_ptr_child = allocate_nonzero(1); + *result.data.null_wrap_ptr_child = child; + } + return result; + } + // pointer const if (expected_type->id == TypeTableEntryIdPointer && actual_type->id == TypeTableEntryIdPointer && @@ -8741,7 +8758,8 @@ static void eval_const_expr_implicit_cast(CastOp cast_op, zig_panic("TODO"); case CastOpNoop: { - copy_const_val(const_val, other_val, other_val->special == ConstValSpecialStatic); + bool same_global_refs = other_val->special == ConstValSpecialStatic; + copy_const_val(const_val, other_val, same_global_refs); const_val->type = new_type; break; } @@ -9189,9 +9207,13 @@ static IrInstruction *ir_analyze_maybe_wrap(IrAnalyze *ira, IrInstruction *sourc IrInstructionConst *const_instruction = ir_create_instruction(&ira->new_irb, source_instr->scope, source_instr->source_node); - const_instruction->base.value.type = wanted_type; const_instruction->base.value.special = ConstValSpecialStatic; - const_instruction->base.value.data.x_maybe = val; + if (get_codegen_ptr_type(wanted_type) != nullptr) { + copy_const_val(&const_instruction->base.value, val, val->data.x_ptr.mut == ConstPtrMutComptimeConst); + } else { + const_instruction->base.value.data.x_nullable = val; + } + const_instruction->base.value.type = wanted_type; return &const_instruction->base; } @@ -9346,9 +9368,14 @@ static IrInstruction *ir_analyze_null_to_maybe(IrAnalyze *ira, IrInstruction *so assert(val); IrInstructionConst *const_instruction = ir_create_instruction(&ira->new_irb, source_instr->scope, source_instr->source_node); - const_instruction->base.value.type = wanted_type; const_instruction->base.value.special = ConstValSpecialStatic; - 
const_instruction->base.value.data.x_maybe = nullptr; + if (get_codegen_ptr_type(wanted_type) != nullptr) { + const_instruction->base.value.data.x_ptr.special = ConstPtrSpecialHardCodedAddr; + const_instruction->base.value.data.x_ptr.data.hard_coded_addr.addr = 0; + } else { + const_instruction->base.value.data.x_nullable = nullptr; + } + const_instruction->base.value.type = wanted_type; return &const_instruction->base; } @@ -10062,7 +10089,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst } - // explicit cast from child type of maybe type to maybe type + // explicit cast from T to ?T + // note that the *T to ?*T case is handled via the "ConstCastOnly" mechanism if (wanted_type->id == TypeTableEntryIdMaybe) { TypeTableEntry *wanted_child_type = wanted_type->data.maybe.child_type; if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node).id == ConstCastResultIdOk) { @@ -10113,7 +10141,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst } } - // explicit cast from [N]T to %[]const T + // explicit cast from [N]T to E![]const T if (wanted_type->id == TypeTableEntryIdErrorUnion && is_slice(wanted_type->data.error_union.payload_type) && actual_type->id == TypeTableEntryIdArray) @@ -10143,7 +10171,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst return ir_analyze_err_wrap_code(ira, source_instr, value, wanted_type); } - // explicit cast from T to %?T + // explicit cast from T to E!?T if (wanted_type->id == TypeTableEntryIdErrorUnion && wanted_type->data.error_union.payload_type->id == TypeTableEntryIdMaybe && actual_type->id != TypeTableEntryIdMaybe) @@ -10167,7 +10195,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst } // explicit cast from number literal to another type - // explicit cast from number literal to &const integer + // explicit cast from number literal to *const integer if (actual_type->id == TypeTableEntryIdComptimeFloat || actual_type->id == TypeTableEntryIdComptimeInt) { @@ -10391,6 +10419,7 @@ static IrInstruction *ir_get_deref(IrAnalyze *ira, IrInstruction *source_instruc IrInstruction *result = ir_create_const(&ira->new_irb, source_instruction->scope, source_instruction->source_node, child_type); copy_const_val(&result->value, pointee, ptr->value.data.x_ptr.mut == ConstPtrMutComptimeConst); + result->value.type = child_type; return result; } } @@ -10708,6 +10737,16 @@ static bool resolve_cmp_op_id(IrBinOp op_id, Cmp cmp) { } } +static bool nullable_value_is_null(ConstExprValue *val) { + assert(val->special == ConstValSpecialStatic); + if (get_codegen_ptr_type(val->type) != nullptr) { + return val->data.x_ptr.special == ConstPtrSpecialHardCodedAddr && + val->data.x_ptr.data.hard_coded_addr.addr == 0; + } else { + return val->data.x_nullable == nullptr; + } +} + static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp *bin_op_instruction) { IrInstruction *op1 = bin_op_instruction->op1->other; IrInstruction *op2 = bin_op_instruction->op2->other; @@ -10737,7 +10776,7 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp ConstExprValue *maybe_val = ir_resolve_const(ira, maybe_op, UndefBad); if (!maybe_val) return ira->codegen->builtin_types.entry_invalid; - bool is_null = (maybe_val->data.x_maybe == nullptr); + bool is_null = nullable_value_is_null(maybe_val); ConstExprValue *out_val = ir_build_const_from(ira, &bin_op_instruction->base); out_val->data.x_bool = (op_id 
== IrBinOpCmpEq) ? is_null : !is_null; return ira->codegen->builtin_types.entry_bool; @@ -12015,7 +12054,9 @@ static TypeTableEntry *ir_analyze_instruction_error_return_trace(IrAnalyze *ira, TypeTableEntry *nullable_type = get_maybe_type(ira->codegen, ptr_to_stack_trace_type); if (!exec_has_err_ret_trace(ira->codegen, ira->new_irb.exec)) { ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base); - out_val->data.x_maybe = nullptr; + assert(get_codegen_ptr_type(nullable_type) != nullptr); + out_val->data.x_ptr.special = ConstPtrSpecialHardCodedAddr; + out_val->data.x_ptr.data.hard_coded_addr.addr = 0; return nullable_type; } IrInstruction *new_instruction = ir_build_error_return_trace(&ira->new_irb, instruction->base.scope, @@ -14207,6 +14248,9 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru static TypeTableEntry *ir_analyze_instruction_load_ptr(IrAnalyze *ira, IrInstructionLoadPtr *load_ptr_instruction) { IrInstruction *ptr = load_ptr_instruction->ptr->other; + if (type_is_invalid(ptr->value.type)) + return ira->codegen->builtin_types.entry_invalid; + IrInstruction *result = ir_get_deref(ira, &load_ptr_instruction->base, ptr); ir_link_new_instruction(result, &load_ptr_instruction->base); assert(result->value.type); @@ -14773,7 +14817,7 @@ static TypeTableEntry *ir_analyze_instruction_test_non_null(IrAnalyze *ira, IrIn return ira->codegen->builtin_types.entry_invalid; ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base); - out_val->data.x_bool = (maybe_val->data.x_maybe != nullptr); + out_val->data.x_bool = !nullable_value_is_null(maybe_val); return ira->codegen->builtin_types.entry_bool; } @@ -14837,13 +14881,18 @@ static TypeTableEntry *ir_analyze_instruction_unwrap_maybe(IrAnalyze *ira, ConstExprValue *maybe_val = const_ptr_pointee(ira->codegen, val); if (val->data.x_ptr.mut != ConstPtrMutRuntimeVar) { - if (!maybe_val->data.x_maybe) { + if (nullable_value_is_null(maybe_val)) { ir_add_error(ira, &unwrap_maybe_instruction->base, buf_sprintf("unable to unwrap null")); return ira->codegen->builtin_types.entry_invalid; } ConstExprValue *out_val = ir_build_const_from(ira, &unwrap_maybe_instruction->base); out_val->data.x_ptr.special = ConstPtrSpecialRef; - out_val->data.x_ptr.data.ref.pointee = maybe_val->data.x_maybe; + out_val->data.x_ptr.mut = val->data.x_ptr.mut; + if (type_is_codegen_pointer(child_type)) { + out_val->data.x_ptr.data.ref.pointee = maybe_val; + } else { + out_val->data.x_ptr.data.ref.pointee = maybe_val->data.x_nullable; + } return result_type; } } @@ -16206,12 +16255,12 @@ static bool ir_make_type_info_defs(IrAnalyze *ira, ConstExprValue *out_val, Scop 0, 0); fn_def_fields[6].type = get_maybe_type(ira->codegen, get_slice_type(ira->codegen, u8_ptr)); if (fn_node->is_extern && buf_len(fn_node->lib_name) > 0) { - fn_def_fields[6].data.x_maybe = create_const_vals(1); + fn_def_fields[6].data.x_nullable = create_const_vals(1); ConstExprValue *lib_name = create_const_str_lit(ira->codegen, fn_node->lib_name); - init_const_slice(ira->codegen, fn_def_fields[6].data.x_maybe, lib_name, 0, buf_len(fn_node->lib_name), true); + init_const_slice(ira->codegen, fn_def_fields[6].data.x_nullable, lib_name, 0, buf_len(fn_node->lib_name), true); + } else { + fn_def_fields[6].data.x_nullable = nullptr; } - else - fn_def_fields[6].data.x_maybe = nullptr; // return_type: type ensure_field_index(fn_def_val->type, "return_type", 7); fn_def_fields[7].special = ConstValSpecialStatic; @@ -16664,8 +16713,7 @@ static ConstExprValue 
*ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t TypeTableEntry *type_info_enum_field_type = ir_type_info_get_type(ira, "EnumField"); - for (uint32_t union_field_index = 0; union_field_index < union_field_count; union_field_index++) - { + for (uint32_t union_field_index = 0; union_field_index < union_field_count; union_field_index++) { TypeUnionField *union_field = &type_entry->data.unionation.fields[union_field_index]; ConstExprValue *union_field_val = &union_field_array->data.x_array.s_none.elements[union_field_index]; @@ -16676,12 +16724,11 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t inner_fields[1].special = ConstValSpecialStatic; inner_fields[1].type = get_maybe_type(ira->codegen, type_info_enum_field_type); - if (fields[1].data.x_type == ira->codegen->builtin_types.entry_undef) - inner_fields[1].data.x_maybe = nullptr; - else - { - inner_fields[1].data.x_maybe = create_const_vals(1); - make_enum_field_val(inner_fields[1].data.x_maybe, union_field->enum_field, type_info_enum_field_type); + if (fields[1].data.x_type == ira->codegen->builtin_types.entry_undef) { + inner_fields[1].data.x_nullable = nullptr; + } else { + inner_fields[1].data.x_nullable = create_const_vals(1); + make_enum_field_val(inner_fields[1].data.x_nullable, union_field->enum_field, type_info_enum_field_type); } inner_fields[2].special = ConstValSpecialStatic; @@ -16737,8 +16784,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t init_const_slice(ira->codegen, &fields[1], struct_field_array, 0, struct_field_count, false); - for (uint32_t struct_field_index = 0; struct_field_index < struct_field_count; struct_field_index++) - { + for (uint32_t struct_field_index = 0; struct_field_index < struct_field_count; struct_field_index++) { TypeStructField *struct_field = &type_entry->data.structure.fields[struct_field_index]; ConstExprValue *struct_field_val = &struct_field_array->data.x_array.s_none.elements[struct_field_index]; @@ -16749,15 +16795,14 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t inner_fields[1].special = ConstValSpecialStatic; inner_fields[1].type = get_maybe_type(ira->codegen, ira->codegen->builtin_types.entry_usize); - if (!type_has_bits(struct_field->type_entry)) - inner_fields[1].data.x_maybe = nullptr; - else - { + if (!type_has_bits(struct_field->type_entry)) { + inner_fields[1].data.x_nullable = nullptr; + } else { size_t byte_offset = LLVMOffsetOfElement(ira->codegen->target_data_ref, type_entry->type_ref, struct_field->gen_index); - inner_fields[1].data.x_maybe = create_const_vals(1); - inner_fields[1].data.x_maybe->special = ConstValSpecialStatic; - inner_fields[1].data.x_maybe->type = ira->codegen->builtin_types.entry_usize; - bigint_init_unsigned(&inner_fields[1].data.x_maybe->data.x_bigint, byte_offset); + inner_fields[1].data.x_nullable = create_const_vals(1); + inner_fields[1].data.x_nullable->special = ConstValSpecialStatic; + inner_fields[1].data.x_nullable->type = ira->codegen->builtin_types.entry_usize; + bigint_init_unsigned(&inner_fields[1].data.x_nullable->data.x_bigint, byte_offset); } inner_fields[2].special = ConstValSpecialStatic; @@ -19008,9 +19053,6 @@ static TypeTableEntry *ir_analyze_instruction_ptr_to_int(IrAnalyze *ira, IrInstr ConstExprValue *val = ir_resolve_const(ira, target, UndefBad); if (!val) return ira->codegen->builtin_types.entry_invalid; - if (target->value.type->id == TypeTableEntryIdMaybe) { - val = val->data.x_maybe; - } if (val->type->id == 
TypeTableEntryIdPointer && val->data.x_ptr.special == ConstPtrSpecialHardCodedAddr) { IrInstruction *result = ir_create_const(&ira->new_irb, instruction->base.scope, instruction->base.source_node, usize); @@ -19936,6 +19978,7 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi static TypeTableEntry *ir_analyze_instruction(IrAnalyze *ira, IrInstruction *instruction) { TypeTableEntry *instruction_type = ir_analyze_instruction_nocast(ira, instruction); instruction->value.type = instruction_type; + if (instruction->other) { instruction->other->value.type = instruction_type; } else { diff --git a/test/cases/cast.zig b/test/cases/cast.zig index c3ef24cd78..da3cba7d80 100644 --- a/test/cases/cast.zig +++ b/test/cases/cast.zig @@ -1,5 +1,6 @@ -const assert = @import("std").debug.assert; -const mem = @import("std").mem; +const std = @import("std"); +const assert = std.debug.assert; +const mem = std.mem; test "int to ptr cast" { const x = usize(13); @@ -400,3 +401,8 @@ fn testCastPtrOfArrayToSliceAndPtr() void { assert(mem.eql(u8, array[0..], "coeu")); } +test "cast *[1][*]const u8 to [*]const ?[*]const u8" { + const window_name = [1][*]const u8{c"window name"}; + const x: [*]const ?[*]const u8 = &window_name; + assert(mem.eql(u8, std.cstr.toSliceConst(??x[0]), "window name")); +} -- cgit v1.2.3 From ec1b6f66737f8c3cbc0420715c2c502c7e710081 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 9 Jun 2018 23:42:14 -0400 Subject: breaking syntax change: ??x to x.? (#1095) See #1023 This also renames Nullable/Maybe to Optional --- build.zig | 2 +- doc/codegen.md | 8 +- doc/langref.html.in | 122 ++++++++++++++-------------- example/cat/main.zig | 2 +- src-self-hosted/arg.zig | 10 +-- src-self-hosted/llvm.zig | 2 +- src-self-hosted/main.zig | 8 +- src/all_types.hpp | 44 +++++------ src/analyze.cpp | 70 ++++++++-------- src/ast_render.cpp | 6 +- src/codegen.cpp | 60 +++++++------- src/ir.cpp | 198 +++++++++++++++++++++++----------------------- src/ir_print.cpp | 16 ++-- src/parser.cpp | 21 +++-- src/tokenizer.cpp | 10 +-- src/tokenizer.hpp | 3 +- src/translate_c.cpp | 14 ++-- std/array_list.zig | 2 +- std/buf_map.zig | 6 +- std/event.zig | 4 +- std/fmt/index.zig | 6 +- std/hash_map.zig | 8 +- std/heap.zig | 4 +- std/json.zig | 12 +-- std/linked_list.zig | 8 +- std/macho.zig | 2 +- std/mem.zig | 22 +++--- std/os/child_process.zig | 18 ++--- std/os/index.zig | 4 +- std/os/linux/vdso.zig | 2 +- std/os/path.zig | 8 +- std/segmented_list.zig | 8 +- std/special/bootstrap.zig | 6 +- std/special/builtin.zig | 8 +- std/unicode.zig | 24 +++--- std/zig/ast.zig | 12 +-- std/zig/parse.zig | 35 +++++--- std/zig/parser_test.zig | 5 +- std/zig/render.zig | 25 +++--- test/cases/bugs/656.zig | 2 +- test/cases/cast.zig | 50 ++++++------ test/cases/error.zig | 2 +- test/cases/eval.zig | 2 +- test/cases/generics.zig | 2 +- test/cases/misc.zig | 2 +- test/cases/null.zig | 30 +++---- test/cases/reflection.zig | 2 +- test/cases/type_info.zig | 14 ++-- test/cases/while.zig | 12 +-- test/compile_errors.zig | 16 ++-- test/tests.zig | 12 +-- 51 files changed, 489 insertions(+), 482 deletions(-) (limited to 'src/analyze.cpp') diff --git a/build.zig b/build.zig index 08a47570ef..eada37816c 100644 --- a/build.zig +++ b/build.zig @@ -75,7 +75,7 @@ pub fn build(b: *Builder) !void { cxx_compiler, "-print-file-name=libstdc++.a", }); - const libstdcxx_path = ??mem.split(libstdcxx_path_padded, "\r\n").next(); + const libstdcxx_path = mem.split(libstdcxx_path_padded, "\r\n").next().?; if (mem.eql(u8, 
libstdcxx_path, "libstdc++.a")) { warn( \\Unable to determine path to libstdc++.a diff --git a/doc/codegen.md b/doc/codegen.md index 02406fae82..65f12f4875 100644 --- a/doc/codegen.md +++ b/doc/codegen.md @@ -6,7 +6,7 @@ Every type has a "handle". If a type is a simple primitive type such as i32 or f64, the handle is "by value", meaning that we pass around the value itself when we refer to a value of that type. -If a type is a container, error union, maybe type, slice, or array, then its +If a type is a container, error union, optional type, slice, or array, then its handle is a pointer, and everywhere we refer to a value of this type we refer to a pointer. @@ -19,7 +19,7 @@ Error union types are represented as: payload: T, } -Maybe types are represented as: +Optional types are represented as: struct { payload: T, @@ -28,6 +28,6 @@ Maybe types are represented as: ## Data Optimizations -Maybe pointer types are special: the 0x0 pointer value is used to represent a -null pointer. Thus, instead of the struct above, maybe pointer types are +Optional pointer types are special: the 0x0 pointer value is used to represent a +null pointer. Thus, instead of the struct above, optional pointer types are represented as a `usize` in codegen and the handle is by value. diff --git a/doc/langref.html.in b/doc/langref.html.in index 6a1f1c3102..4c4a637095 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -156,18 +156,18 @@ pub fn main() void { true or false, !true); - // nullable - var nullable_value: ?[]const u8 = null; - assert(nullable_value == null); + // optional + var optional_value: ?[]const u8 = null; + assert(optional_value == null); - warn("\nnullable 1\ntype: {}\nvalue: {}\n", - @typeName(@typeOf(nullable_value)), nullable_value); + warn("\noptional 1\ntype: {}\nvalue: {}\n", + @typeName(@typeOf(optional_value)), optional_value); - nullable_value = "hi"; - assert(nullable_value != null); + optional_value = "hi"; + assert(optional_value != null); - warn("\nnullable 2\ntype: {}\nvalue: {}\n", - @typeName(@typeOf(nullable_value)), nullable_value); + warn("\noptional 2\ntype: {}\nvalue: {}\n", + @typeName(@typeOf(optional_value)), optional_value); // error union var number_or_error: error!i32 = error.ArgNotFound; @@ -428,7 +428,7 @@ pub fn main() void { null - used to set a nullable type to null + used to set an optional type to null undefined @@ -440,7 +440,7 @@ pub fn main() void { - {#see_also|Nullables|this#} + {#see_also|Optionals|this#} {#header_close#} {#header_open|String Literals#} {#code_begin|test#} @@ -988,7 +988,7 @@ a ^= b
    a ?? b
      -
    • {#link|Nullables#}
    • +
    • {#link|Optionals#}
    If a is null, @@ -1003,10 +1003,10 @@ unwrapped == 1234 -
    ??a
    +
    a.?
      -
    • {#link|Nullables#}
    • +
    • {#link|Optionals#}
    @@ -1015,7 +1015,7 @@ unwrapped == 1234
    const value: ?u32 = 5678;
    -??value == 5678
+value.? == 5678
@@ -1103,7 +1103,7 @@ unwrapped == 1234
    a == null
      -
    • {#link|Nullables#}
    • +
    • {#link|Optionals#}
    @@ -1267,8 +1267,8 @@ x.* == 1234
    {#header_open|Precedence#}
    x() x[] x.y
     a!b
    -!x -x -%x ~x &x ?x ??x
    -x{} x.*
    +!x -x -%x ~x &x ?x
    +x{} x.* x.?
     ! * / % ** *%
     + - ++ +% -%
     << >>
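Since the precedence table above now lists .? as a suffix operator, a small illustrative snippet (hypothetical, not part of this patch) shows that it chains with the other suffix operators:

    const assert = @import("std").debug.assert;

    const Node = struct {
        data: i32,
    };

    test "x.? chains like the other suffix operators" {
        var node = Node{ .data = 1234 };
        const maybe_node: ?*Node = &node;
        // Unwrap with .?, then keep chaining: field access auto-dereferences the pointer.
        assert(maybe_node.?.data == 1234);
    }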
    @@ -1483,17 +1483,17 @@ test "volatile" {
         assert(@typeOf(mmio_ptr) == *volatile u8);
     }
     
    -test "nullable pointers" {
    -    // Pointers cannot be null. If you want a null pointer, use the nullable
    -    // prefix `?` to make the pointer type nullable.
    +test "optional pointers" {
    +    // Pointers cannot be null. If you want a null pointer, use the optional
    +    // prefix `?` to make the pointer type optional.
         var ptr: ?*i32 = null;
     
         var x: i32 = 1;
         ptr = &x;
     
    -    assert((??ptr).* == 1);
    +    assert(ptr.?.* == 1);
     
    -    // Nullable pointers are the same size as normal pointers, because pointer
    +    // Optional pointers are the same size as normal pointers, because pointer
         // value 0 is used as the null value.
         assert(@sizeOf(?*i32) == @sizeOf(*i32));
     }
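As an illustrative companion to the doc test above (a sketch, not taken from the commit), the same unwrap syntax works on a non-pointer optional, which does carry an explicit null bit:

    const assert = @import("std").debug.assert;

    test "optional of a non-pointer type" {
        var maybe_int: ?i32 = null;
        assert(maybe_int == null);

        maybe_int = 42;
        assert(maybe_int.? == 42);

        // Unlike optional pointers, a non-pointer optional needs a separate
        // null bit, so it is bigger than its child type.
        assert(@sizeOf(?i32) > @sizeOf(i32));
    }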
    @@ -1832,7 +1832,7 @@ test "linked list" {
             .last = &node,
             .len = 1,
         };
    -    assert((??list2.first).data == 1234);
    +    assert(list2.first.?.data == 1234);
     }
           {#code_end#}
           {#see_also|comptime|@fieldParentPtr#}
    @@ -2270,7 +2270,7 @@ fn rangeHasNumber(begin: usize, end: usize, number: usize) bool {
     }
     
     test "while null capture" {
    -    // Just like if expressions, while loops can take a nullable as the
    +    // Just like if expressions, while loops can take an optional as the
         // condition and capture the payload. When null is encountered the loop
         // exits.
         var sum1: u32 = 0;
    @@ -2280,7 +2280,7 @@ test "while null capture" {
         }
         assert(sum1 == 3);
     
    -    // The else branch is allowed on nullable iteration. In this case, it will
    +    // The else branch is allowed on optional iteration. In this case, it will
         // be executed on the first null value encountered.
         var sum2: u32 = 0;
         numbers_left = 3;
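For context, a self-contained sketch of the while-with-optional pattern these comments describe (the helper eventuallyNull is hypothetical, not from the patch): the body captures the unwrapped payload, and the else branch runs on the first null.

    const assert = @import("std").debug.assert;

    var numbers_left: u32 = undefined;

    fn eventuallyNull() ?u32 {
        if (numbers_left == 0) return null;
        numbers_left -= 1;
        return numbers_left;
    }

    test "while with optional capture and else" {
        var sum: u32 = 0;
        numbers_left = 3;
        while (eventuallyNull()) |value| {
            sum += value;
        } else {
            // Runs once, when eventuallyNull() first returns null.
            sum += 100;
        }
        assert(sum == 103);
    }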
    @@ -2340,7 +2340,7 @@ fn typeNameLength(comptime T: type) usize {
         return @typeName(T).len;
     }
           {#code_end#}
    -      {#see_also|if|Nullables|Errors|comptime|unreachable#}
    +      {#see_also|if|Optionals|Errors|comptime|unreachable#}
           {#header_close#}
           {#header_open|for#}
           {#code_begin|test|for#}
    @@ -2400,7 +2400,7 @@ test "for else" {
             if (value == null) {
                 break 9;
             } else {
    -            sum += ??value;
    +            sum += value.?;
             }
         } else blk: {
             assert(sum == 7);
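To make the contrast explicit, a hypothetical example (not in the patch) of the two operators that remain after this rename: binary ?? supplies a default when the operand is null, while suffix .? asserts non-null and unwraps, as in the value.? change just above.

    const assert = @import("std").debug.assert;

    test "binary ?? gives a default, suffix .? asserts non-null" {
        const absent: ?u32 = null;
        const present: ?u32 = 5678;

        // a ?? b evaluates to b when a is null, otherwise to the unwrapped a.
        const fallback = absent ?? 42;
        assert(fallback == 42);

        // a.? asserts that a is non-null and unwraps it.
        assert(present.? == 5678);
    }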
    @@ -2461,7 +2461,7 @@ test "if boolean" {
         assert(result == 47);
     }
     
    -test "if nullable" {
    +test "if optional" {
         // If expressions test for null.
     
         const a: ?u32 = 0;
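A minimal sketch of the if-with-capture form this renamed test exercises (illustrative only, not part of the commit):

    const assert = @import("std").debug.assert;

    test "if with optional capture" {
        const a: ?u32 = 0;
        if (a) |value| {
            // Inside this branch, value is a plain u32, not a ?u32.
            assert(value == 0);
        } else {
            unreachable;
        }

        // Comparing an optional against null needs no unwrapping.
        const b: ?u32 = null;
        assert(b == null);
    }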
    @@ -2544,7 +2544,7 @@ test "if error union" {
         }
     }
           {#code_end#}
    -      {#see_also|Nullables|Errors#}
    +      {#see_also|Optionals|Errors#}
           {#header_close#}
           {#header_open|defer#}
           {#code_begin|test|defer#}
    @@ -3167,24 +3167,24 @@ test "inferred error set" {
           

    TODO

     {#header_close#}
     {#header_close#}
-    {#header_open|Nullables#}
+    {#header_open|Optionals#}

       One area that Zig provides safety without compromising efficiency or
-      readability is with the nullable type.
+      readability is with the optional type.

-      The question mark symbolizes the nullable type. You can convert a type to a nullable
+      The question mark symbolizes the optional type. You can convert a type to an optional
       type by putting a question mark in front of it, like this:

 {#code_begin|syntax#}
 // normal integer
 const normal_int: i32 = 1234;
-// nullable integer
-const nullable_int: ?i32 = 5678;
+// optional integer
+const optional_int: ?i32 = 5678;
 {#code_end#}

-      Now the variable nullable_int could be an i32, or null.
+      Now the variable optional_int could be an i32, or null.

       Instead of integers, let's talk about pointers. Null references are the source of many runtime
@@ -3193,8 +3193,8 @@ const nullable_int: ?i32 = 5678;

    Zig does not have them.

-      Instead, you can use a nullable pointer. This secretly compiles down to a normal pointer,
-      since we know we can use 0 as the null value for the nullable type. But the compiler
+      Instead, you can use an optional pointer. This secretly compiles down to a normal pointer,
+      since we know we can use 0 as the null value for the optional type. But the compiler
       can check your work and make sure you don't assign null to something that can't be null.
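A short hypothetical test (not from the patch) demonstrating the guarantee described in that paragraph: only the optional pointer type accepts null, and it costs no extra space.

    const assert = @import("std").debug.assert;

    test "only an optional pointer may hold null" {
        var x: i32 = 1234;

        var ptr: *i32 = &x;        // a plain pointer can never be null
        assert(ptr.* == 1234);

        var opt_ptr: ?*i32 = null; // an optional pointer can, at no size cost
        // var bad: *i32 = null;   // this line would be a compile error
        opt_ptr = &x;
        assert(opt_ptr.?.* == 1234);
        assert(@sizeOf(?*i32) == @sizeOf(*i32));
    }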

    @@ -3226,7 +3226,7 @@ fn doAThing() ?*Foo {

       Here, Zig is at least as convenient, if not more, than C. And, the type of "ptr" is *u8 not ?*u8. The ?? operator
-      unwrapped the nullable type and therefore ptr is guaranteed to be non-null everywhere
+      unwrapped the optional type and therefore ptr is guaranteed to be non-null everywhere
       it is used in the function.

@@ -3245,10 +3245,10 @@ fn doAThing() ?*Foo {
       In Zig you can accomplish the same thing:

 {#code_begin|syntax#}
-fn doAThing(nullable_foo: ?*Foo) void {
+fn doAThing(optional_foo: ?*Foo) void {
     // do some stuff
-    if (nullable_foo) |foo| {
+    if (optional_foo) |foo| {
         doSomethingWithFoo(foo);
     }
@@ -3257,7 +3257,7 @@ fn doAThing(nullable_foo: ?*Foo) void {
 {#code_end#}

       Once again, the notable thing here is that inside the if block,
-      foo is no longer a nullable pointer, it is a pointer, which
+      foo is no longer an optional pointer, it is a pointer, which
       cannot be null.

@@ -3267,20 +3267,20 @@ fn doAThing(nullable_foo: ?*Foo) void {
       The optimizer can sometimes make better decisions knowing that pointer arguments cannot be null.

    - {#header_open|Nullable Type#} -

    A nullable is created by putting ? in front of a type. You can use compile-time - reflection to access the child type of a nullable:

    + {#header_open|Optional Type#} +

    An optional is created by putting ? in front of a type. You can use compile-time + reflection to access the child type of an optional:

    {#code_begin|test#} const assert = @import("std").debug.assert; -test "nullable type" { - // Declare a nullable and implicitly cast from null: +test "optional type" { + // Declare an optional and implicitly cast from null: var foo: ?i32 = null; - // Implicitly cast from child type of a nullable + // Implicitly cast from child type of an optional foo = 1234; - // Use compile-time reflection to access the child type of the nullable: + // Use compile-time reflection to access the child type of the optional: comptime assert(@typeOf(foo).Child == i32); } {#code_end#} @@ -4888,7 +4888,7 @@ pub const TypeId = enum { ComptimeInt, Undefined, Null, - Nullable, + Optional, ErrorUnion, Error, Enum, @@ -4922,7 +4922,7 @@ pub const TypeInfo = union(TypeId) { ComptimeInt: void, Undefined: void, Null: void, - Nullable: Nullable, + Optional: Optional, ErrorUnion: ErrorUnion, ErrorSet: ErrorSet, Enum: Enum, @@ -4975,7 +4975,7 @@ pub const TypeInfo = union(TypeId) { defs: []Definition, }; - pub const Nullable = struct { + pub const Optional = struct { child: type, }; @@ -5366,8 +5366,8 @@ comptime {

    At compile-time:

 {#code_begin|test_err|unable to unwrap null#}
 comptime {
-    const nullable_number: ?i32 = null;
-    const number = ??nullable_number;
+    const optional_number: ?i32 = null;
+    const number = optional_number.?;
 }
 {#code_end#}

    At runtime crashes with the message attempt to unwrap null and a stack trace.
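A hedged sketch (not part of the patch) of sidestepping that runtime crash by substituting a default with the binary ?? operator instead of unwrapping with .?:

    const warn = @import("std").debug.warn;

    pub fn main() void {
        const optional_number: ?i32 = null;

        // Falling back to a default means nothing is ever unwrapped while null,
        // so there is nothing to crash on at runtime.
        const number = optional_number ?? -1;
        warn("number (or default): {}\n", number);
    }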

    @@ -5376,9 +5376,9 @@ comptime { {#code_begin|exe|test#} const warn = @import("std").debug.warn; pub fn main() void { - const nullable_number: ?i32 = null; + const optional_number: ?i32 = null; - if (nullable_number) |number| { + if (optional_number) |number| { warn("got number: {}\n", number); } else { warn("it's null\n"); @@ -5939,9 +5939,9 @@ AsmInputItem = "[" Symbol "]" String "(" Expression ")" AsmClobbers= ":" list(String, ",") -UnwrapExpression = BoolOrExpression (UnwrapNullable | UnwrapError) | BoolOrExpression +UnwrapExpression = BoolOrExpression (UnwrapOptional | UnwrapError) | BoolOrExpression -UnwrapNullable = "??" Expression +UnwrapOptional = "??" Expression UnwrapError = "catch" option("|" Symbol "|") Expression @@ -6015,12 +6015,10 @@ MultiplyOperator = "||" | "*" | "/" | "%" | "**" | "*%" PrefixOpExpression = PrefixOp TypeExpr | SuffixOpExpression -SuffixOpExpression = ("async" option("<" SuffixOpExpression ">") SuffixOpExpression FnCallExpression) | PrimaryExpression option(FnCallExpression | ArrayAccessExpression | FieldAccessExpression | SliceExpression | PtrDerefExpression) +SuffixOpExpression = ("async" option("<" SuffixOpExpression ">") SuffixOpExpression FnCallExpression) | PrimaryExpression option(FnCallExpression | ArrayAccessExpression | FieldAccessExpression | SliceExpression | ".*" | ".?") FieldAccessExpression = "." Symbol -PtrDerefExpression = ".*" - FnCallExpression = "(" list(Expression, ",") ")" ArrayAccessExpression = "[" Expression "]" @@ -6033,7 +6031,7 @@ ContainerInitBody = list(StructLiteralField, ",") | list(Expression, ",") StructLiteralField = "." Symbol "=" Expression -PrefixOp = "!" | "-" | "~" | (("*" | "[*]") option("align" "(" Expression option(":" Integer ":" Integer) ")" ) option("const") option("volatile")) | "?" | "??" | "-%" | "try" | "await" +PrefixOp = "!" | "-" | "~" | (("*" | "[*]") option("align" "(" Expression option(":" Integer ":" Integer) ")" ) option("const") option("volatile")) | "?" | "-%" | "try" | "await" PrimaryExpression = Integer | Float | String | CharLiteral | KeywordLiteral | GroupedExpression | BlockExpression(BlockOrExpression) | Symbol | ("@" Symbol FnCallExpression) | ArrayType | FnProto | AsmExpression | ContainerDecl | ("continue" option(":" Symbol)) | ErrorSetDecl | PromiseType diff --git a/example/cat/main.zig b/example/cat/main.zig index 1b34cb22eb..27690d2695 100644 --- a/example/cat/main.zig +++ b/example/cat/main.zig @@ -7,7 +7,7 @@ const allocator = std.debug.global_allocator; pub fn main() !void { var args_it = os.args(); - const exe = try unwrapArg(??args_it.next(allocator)); + const exe = try unwrapArg(args_it.next(allocator).?); var catted_anything = false; var stdout_file = try io.getStdOut(); diff --git a/src-self-hosted/arg.zig b/src-self-hosted/arg.zig index df2c04ef1f..dc89483213 100644 --- a/src-self-hosted/arg.zig +++ b/src-self-hosted/arg.zig @@ -99,7 +99,7 @@ pub const Args = struct { error.ArgumentNotInAllowedSet => { std.debug.warn("argument '{}' is invalid for flag '{}'\n", args[i], arg); std.debug.warn("allowed options are "); - for (??flag.allowed_set) |possible| { + for (flag.allowed_set.?) 
|possible| { std.debug.warn("'{}' ", possible); } std.debug.warn("\n"); @@ -276,14 +276,14 @@ test "parse arguments" { debug.assert(!args.present("help2")); debug.assert(!args.present("init")); - debug.assert(mem.eql(u8, ??args.single("build-file"), "build.zig")); - debug.assert(mem.eql(u8, ??args.single("color"), "on")); + debug.assert(mem.eql(u8, args.single("build-file").?, "build.zig")); + debug.assert(mem.eql(u8, args.single("color").?, "on")); - const objects = ??args.many("object"); + const objects = args.many("object").?; debug.assert(mem.eql(u8, objects[0], "obj1")); debug.assert(mem.eql(u8, objects[1], "obj2")); - debug.assert(mem.eql(u8, ??args.single("library"), "lib2")); + debug.assert(mem.eql(u8, args.single("library").?, "lib2")); const pos = args.positionals.toSliceConst(); debug.assert(mem.eql(u8, pos[0], "build")); diff --git a/src-self-hosted/llvm.zig b/src-self-hosted/llvm.zig index 16c359adcf..391a92cd63 100644 --- a/src-self-hosted/llvm.zig +++ b/src-self-hosted/llvm.zig @@ -8,6 +8,6 @@ pub const ContextRef = removeNullability(c.LLVMContextRef); pub const BuilderRef = removeNullability(c.LLVMBuilderRef); fn removeNullability(comptime T: type) type { - comptime assert(@typeId(T) == builtin.TypeId.Nullable); + comptime assert(@typeId(T) == builtin.TypeId.Optional); return T.Child; } diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig index a264b5484a..64734f077a 100644 --- a/src-self-hosted/main.zig +++ b/src-self-hosted/main.zig @@ -490,7 +490,7 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo try stderr.print("encountered --pkg-end with no matching --pkg-begin\n"); os.exit(1); } - cur_pkg = ??cur_pkg.parent; + cur_pkg = cur_pkg.parent.?; } } @@ -514,7 +514,7 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo }, } - const basename = os.path.basename(??in_file); + const basename = os.path.basename(in_file.?); var it = mem.split(basename, "."); const root_name = it.next() ?? { try stderr.write("file name cannot be empty\n"); @@ -523,12 +523,12 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo const asm_a = flags.many("assembly"); const obj_a = flags.many("object"); - if (in_file == null and (obj_a == null or (??obj_a).len == 0) and (asm_a == null or (??asm_a).len == 0)) { + if (in_file == null and (obj_a == null or obj_a.?.len == 0) and (asm_a == null or asm_a.?.len == 0)) { try stderr.write("Expected source file argument or at least one --object or --assembly argument\n"); os.exit(1); } - if (out_type == Module.Kind.Obj and (obj_a != null and (??obj_a).len != 0)) { + if (out_type == Module.Kind.Obj and (obj_a != null and obj_a.?.len != 0)) { try stderr.write("When building an object file, --object arguments are invalid\n"); os.exit(1); } diff --git a/src/all_types.hpp b/src/all_types.hpp index 14a44ea768..2a5a0ad740 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -145,8 +145,8 @@ enum ConstPtrSpecial { // emit a binary with a compile time known address. // In this case index is the numeric address value. // We also use this for null pointer. We need the data layout for ConstCastOnly == true - // types to be the same, so all nullables of pointer types use x_ptr - // instead of x_nullable + // types to be the same, so all optionals of pointer types use x_ptr + // instead of x_optional ConstPtrSpecialHardCodedAddr, // This means that the pointer represents memory of assigning to _. // That is, storing discards the data, and loading is invalid. 
@@ -222,10 +222,10 @@ enum RuntimeHintErrorUnion { RuntimeHintErrorUnionNonError, }; -enum RuntimeHintMaybe { - RuntimeHintMaybeUnknown, - RuntimeHintMaybeNull, // TODO is this value even possible? if this is the case it might mean the const value is compile time known. - RuntimeHintMaybeNonNull, +enum RuntimeHintOptional { + RuntimeHintOptionalUnknown, + RuntimeHintOptionalNull, // TODO is this value even possible? if this is the case it might mean the const value is compile time known. + RuntimeHintOptionalNonNull, }; enum RuntimeHintPtr { @@ -254,7 +254,7 @@ struct ConstExprValue { bool x_bool; ConstBoundFnValue x_bound_fn; TypeTableEntry *x_type; - ConstExprValue *x_nullable; + ConstExprValue *x_optional; ConstErrValue x_err_union; ErrorTableEntry *x_err_set; BigInt x_enum_tag; @@ -268,7 +268,7 @@ struct ConstExprValue { // populated if special == ConstValSpecialRuntime RuntimeHintErrorUnion rh_error_union; - RuntimeHintMaybe rh_maybe; + RuntimeHintOptional rh_maybe; RuntimeHintPtr rh_ptr; } data; }; @@ -556,7 +556,7 @@ enum BinOpType { BinOpTypeMultWrap, BinOpTypeDiv, BinOpTypeMod, - BinOpTypeUnwrapMaybe, + BinOpTypeUnwrapOptional, BinOpTypeArrayCat, BinOpTypeArrayMult, BinOpTypeErrorUnion, @@ -623,8 +623,8 @@ enum PrefixOp { PrefixOpBinNot, PrefixOpNegation, PrefixOpNegationWrap, - PrefixOpMaybe, - PrefixOpUnwrapMaybe, + PrefixOpOptional, + PrefixOpUnwrapOptional, PrefixOpAddrOf, }; @@ -1052,7 +1052,7 @@ struct TypeTableEntryStruct { HashMap fields_by_name; }; -struct TypeTableEntryMaybe { +struct TypeTableEntryOptional { TypeTableEntry *child_type; }; @@ -1175,7 +1175,7 @@ enum TypeTableEntryId { TypeTableEntryIdComptimeInt, TypeTableEntryIdUndefined, TypeTableEntryIdNull, - TypeTableEntryIdMaybe, + TypeTableEntryIdOptional, TypeTableEntryIdErrorUnion, TypeTableEntryIdErrorSet, TypeTableEntryIdEnum, @@ -1206,7 +1206,7 @@ struct TypeTableEntry { TypeTableEntryFloat floating; TypeTableEntryArray array; TypeTableEntryStruct structure; - TypeTableEntryMaybe maybe; + TypeTableEntryOptional maybe; TypeTableEntryErrorUnion error_union; TypeTableEntryErrorSet error_set; TypeTableEntryEnum enumeration; @@ -1402,7 +1402,7 @@ enum PanicMsgId { PanicMsgIdRemainderDivisionByZero, PanicMsgIdExactDivisionRemainder, PanicMsgIdSliceWidenRemainder, - PanicMsgIdUnwrapMaybeFail, + PanicMsgIdUnwrapOptionalFail, PanicMsgIdInvalidErrorCode, PanicMsgIdIncorrectAlignment, PanicMsgIdBadUnionField, @@ -2016,8 +2016,8 @@ enum IrInstructionId { IrInstructionIdAsm, IrInstructionIdSizeOf, IrInstructionIdTestNonNull, - IrInstructionIdUnwrapMaybe, - IrInstructionIdMaybeWrap, + IrInstructionIdUnwrapOptional, + IrInstructionIdOptionalWrap, IrInstructionIdUnionTag, IrInstructionIdClz, IrInstructionIdCtz, @@ -2184,7 +2184,7 @@ enum IrUnOp { IrUnOpNegation, IrUnOpNegationWrap, IrUnOpDereference, - IrUnOpMaybe, + IrUnOpOptional, }; struct IrInstructionUnOp { @@ -2487,7 +2487,7 @@ struct IrInstructionTestNonNull { IrInstruction *value; }; -struct IrInstructionUnwrapMaybe { +struct IrInstructionUnwrapOptional { IrInstruction base; IrInstruction *value; @@ -2745,7 +2745,7 @@ struct IrInstructionUnwrapErrPayload { bool safety_check_on; }; -struct IrInstructionMaybeWrap { +struct IrInstructionOptionalWrap { IrInstruction base; IrInstruction *value; @@ -2954,10 +2954,10 @@ struct IrInstructionExport { struct IrInstructionErrorReturnTrace { IrInstruction base; - enum Nullable { + enum Optional { Null, NonNull, - } nullable; + } optional; }; struct IrInstructionErrorUnion { diff --git a/src/analyze.cpp b/src/analyze.cpp index 
16b2cb0590..ed261148ea 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -236,7 +236,7 @@ bool type_is_complete(TypeTableEntry *type_entry) { case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdFn: @@ -272,7 +272,7 @@ bool type_has_zero_bits_known(TypeTableEntry *type_entry) { case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdFn: @@ -520,7 +520,7 @@ TypeTableEntry *get_maybe_type(CodeGen *g, TypeTableEntry *child_type) { } else { ensure_complete_type(g, child_type); - TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdMaybe); + TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdOptional); assert(child_type->type_ref || child_type->zero_bits); assert(child_type->di_type); entry->is_copyable = type_is_copyable(g, child_type); @@ -1361,7 +1361,7 @@ static bool type_allowed_in_packed_struct(TypeTableEntry *type_entry) { return type_entry->data.structure.layout == ContainerLayoutPacked; case TypeTableEntryIdUnion: return type_entry->data.unionation.layout == ContainerLayoutPacked; - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: { TypeTableEntry *child_type = type_entry->data.maybe.child_type; return type_is_codegen_pointer(child_type); @@ -1415,7 +1415,7 @@ static bool type_allowed_in_extern(CodeGen *g, TypeTableEntry *type_entry) { return type_allowed_in_extern(g, type_entry->data.pointer.child_type); case TypeTableEntryIdStruct: return type_entry->data.structure.layout == ContainerLayoutExtern || type_entry->data.structure.layout == ContainerLayoutPacked; - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: { TypeTableEntry *child_type = type_entry->data.maybe.child_type; return child_type->id == TypeTableEntryIdPointer || child_type->id == TypeTableEntryIdFn; @@ -1538,7 +1538,7 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c case TypeTableEntryIdPointer: case TypeTableEntryIdArray: case TypeTableEntryIdStruct: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdEnum: @@ -1632,7 +1632,7 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c case TypeTableEntryIdPointer: case TypeTableEntryIdArray: case TypeTableEntryIdStruct: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdEnum: @@ -2985,8 +2985,8 @@ static void typecheck_panic_fn(CodeGen *g, FnTableEntry *panic_fn) { return wrong_panic_prototype(g, proto_node, fn_type); } - TypeTableEntry *nullable_ptr_to_stack_trace_type = get_maybe_type(g, get_ptr_to_stack_trace_type(g)); - if (fn_type_id->param_info[1].type != nullable_ptr_to_stack_trace_type) { + TypeTableEntry *optional_ptr_to_stack_trace_type = get_maybe_type(g, get_ptr_to_stack_trace_type(g)); + if (fn_type_id->param_info[1].type != optional_ptr_to_stack_trace_type) { return wrong_panic_prototype(g, proto_node, fn_type); } @@ -3368,7 +3368,7 @@ TypeTableEntry *validate_var_type(CodeGen *g, AstNode *source_node, TypeTableEnt case TypeTableEntryIdPointer: case TypeTableEntryIdArray: case 
TypeTableEntryIdStruct: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdEnum: @@ -3746,7 +3746,7 @@ static bool is_container(TypeTableEntry *type_entry) { case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdFn: @@ -3805,7 +3805,7 @@ void resolve_container_type(CodeGen *g, TypeTableEntry *type_entry) { case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdFn: @@ -3824,7 +3824,7 @@ TypeTableEntry *get_codegen_ptr_type(TypeTableEntry *type) { if (type->id == TypeTableEntryIdPointer) return type; if (type->id == TypeTableEntryIdFn) return type; if (type->id == TypeTableEntryIdPromise) return type; - if (type->id == TypeTableEntryIdMaybe) { + if (type->id == TypeTableEntryIdOptional) { if (type->data.maybe.child_type->id == TypeTableEntryIdPointer) return type->data.maybe.child_type; if (type->data.maybe.child_type->id == TypeTableEntryIdFn) return type->data.maybe.child_type; if (type->data.maybe.child_type->id == TypeTableEntryIdPromise) return type->data.maybe.child_type; @@ -4331,7 +4331,7 @@ bool handle_is_ptr(TypeTableEntry *type_entry) { return type_has_bits(type_entry); case TypeTableEntryIdErrorUnion: return type_has_bits(type_entry->data.error_union.payload_type); - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: return type_has_bits(type_entry->data.maybe.child_type) && !type_is_codegen_pointer(type_entry->data.maybe.child_type); case TypeTableEntryIdUnion: @@ -4709,12 +4709,12 @@ static uint32_t hash_const_val(ConstExprValue *const_val) { case TypeTableEntryIdUnion: // TODO better hashing algorithm return 2709806591; - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: if (get_codegen_ptr_type(const_val->type) != nullptr) { return hash_const_val(const_val) * 1992916303; } else { - if (const_val->data.x_nullable) { - return hash_const_val(const_val->data.x_nullable) * 1992916303; + if (const_val->data.x_optional) { + return hash_const_val(const_val->data.x_optional) * 1992916303; } else { return 4016830364; } @@ -4817,12 +4817,12 @@ static bool can_mutate_comptime_var_state(ConstExprValue *value) { } return false; - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: if (get_codegen_ptr_type(value->type) != nullptr) return value->data.x_ptr.mut == ConstPtrMutComptimeVar; - if (value->data.x_nullable == nullptr) + if (value->data.x_optional == nullptr) return false; - return can_mutate_comptime_var_state(value->data.x_nullable); + return can_mutate_comptime_var_state(value->data.x_optional); case TypeTableEntryIdErrorUnion: if (value->data.x_err_union.err != nullptr) @@ -4869,7 +4869,7 @@ static bool return_type_is_cacheable(TypeTableEntry *return_type) { case TypeTableEntryIdUnion: return false; - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: return return_type_is_cacheable(return_type->data.maybe.child_type); case TypeTableEntryIdErrorUnion: @@ -4978,7 +4978,7 @@ bool type_requires_comptime(TypeTableEntry *type_entry) { case TypeTableEntryIdUnion: assert(type_has_zero_bits_known(type_entry)); return type_entry->data.unionation.requires_comptime; - case 
TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: return type_requires_comptime(type_entry->data.maybe.child_type); case TypeTableEntryIdErrorUnion: return type_requires_comptime(type_entry->data.error_union.payload_type); @@ -5460,13 +5460,13 @@ bool const_values_equal(ConstExprValue *a, ConstExprValue *b) { zig_panic("TODO"); case TypeTableEntryIdNull: zig_panic("TODO"); - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: if (get_codegen_ptr_type(a->type) != nullptr) return const_values_equal_ptr(a, b); - if (a->data.x_nullable == nullptr || b->data.x_nullable == nullptr) { - return (a->data.x_nullable == nullptr && b->data.x_nullable == nullptr); + if (a->data.x_optional == nullptr || b->data.x_optional == nullptr) { + return (a->data.x_optional == nullptr && b->data.x_optional == nullptr); } else { - return const_values_equal(a->data.x_nullable, b->data.x_nullable); + return const_values_equal(a->data.x_optional, b->data.x_optional); } case TypeTableEntryIdErrorUnion: zig_panic("TODO"); @@ -5708,12 +5708,12 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) { buf_appendf(buf, "undefined"); return; } - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: { if (get_codegen_ptr_type(const_val->type) != nullptr) return render_const_val_ptr(g, buf, const_val, type_entry->data.maybe.child_type); - if (const_val->data.x_nullable) { - render_const_value(g, buf, const_val->data.x_nullable); + if (const_val->data.x_optional) { + render_const_value(g, buf, const_val->data.x_optional); } else { buf_appendf(buf, "null"); } @@ -5819,7 +5819,7 @@ uint32_t type_id_hash(TypeId x) { case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorSet: case TypeTableEntryIdEnum: case TypeTableEntryIdUnion: @@ -5865,7 +5865,7 @@ bool type_id_eql(TypeId a, TypeId b) { case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: case TypeTableEntryIdPromise: case TypeTableEntryIdErrorSet: case TypeTableEntryIdEnum: @@ -5987,7 +5987,7 @@ static const TypeTableEntryId all_type_ids[] = { TypeTableEntryIdComptimeInt, TypeTableEntryIdUndefined, TypeTableEntryIdNull, - TypeTableEntryIdMaybe, + TypeTableEntryIdOptional, TypeTableEntryIdErrorUnion, TypeTableEntryIdErrorSet, TypeTableEntryIdEnum, @@ -6042,7 +6042,7 @@ size_t type_id_index(TypeTableEntry *entry) { return 11; case TypeTableEntryIdNull: return 12; - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: return 13; case TypeTableEntryIdErrorUnion: return 14; @@ -6100,8 +6100,8 @@ const char *type_id_name(TypeTableEntryId id) { return "Undefined"; case TypeTableEntryIdNull: return "Null"; - case TypeTableEntryIdMaybe: - return "Nullable"; + case TypeTableEntryIdOptional: + return "Optional"; case TypeTableEntryIdErrorUnion: return "ErrorUnion"; case TypeTableEntryIdErrorSet: diff --git a/src/ast_render.cpp b/src/ast_render.cpp index 3785cb6ca1..2c8c03b226 100644 --- a/src/ast_render.cpp +++ b/src/ast_render.cpp @@ -50,7 +50,7 @@ static const char *bin_op_str(BinOpType bin_op) { case BinOpTypeAssignBitXor: return "^="; case BinOpTypeAssignBitOr: return "|="; case BinOpTypeAssignMergeErrorSets: return "||="; - case BinOpTypeUnwrapMaybe: return "??"; + case BinOpTypeUnwrapOptional: return "??"; case BinOpTypeArrayCat: return "++"; case BinOpTypeArrayMult: return "**"; case BinOpTypeErrorUnion: 
return "!"; @@ -66,8 +66,8 @@ static const char *prefix_op_str(PrefixOp prefix_op) { case PrefixOpNegationWrap: return "-%"; case PrefixOpBoolNot: return "!"; case PrefixOpBinNot: return "~"; - case PrefixOpMaybe: return "?"; - case PrefixOpUnwrapMaybe: return "??"; + case PrefixOpOptional: return "?"; + case PrefixOpUnwrapOptional: return "??"; case PrefixOpAddrOf: return "&"; } zig_unreachable(); diff --git a/src/codegen.cpp b/src/codegen.cpp index 65b465a519..da08ecfc9e 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -865,7 +865,7 @@ static Buf *panic_msg_buf(PanicMsgId msg_id) { return buf_create_from_str("exact division produced remainder"); case PanicMsgIdSliceWidenRemainder: return buf_create_from_str("slice widening size mismatch"); - case PanicMsgIdUnwrapMaybeFail: + case PanicMsgIdUnwrapOptionalFail: return buf_create_from_str("attempt to unwrap null"); case PanicMsgIdUnreachable: return buf_create_from_str("reached unreachable code"); @@ -2734,7 +2734,7 @@ static LLVMValueRef ir_render_un_op(CodeGen *g, IrExecutable *executable, IrInst switch (op_id) { case IrUnOpInvalid: - case IrUnOpMaybe: + case IrUnOpOptional: case IrUnOpDereference: zig_unreachable(); case IrUnOpNegation: @@ -3333,7 +3333,7 @@ static LLVMValueRef ir_render_asm(CodeGen *g, IrExecutable *executable, IrInstru } static LLVMValueRef gen_non_null_bit(CodeGen *g, TypeTableEntry *maybe_type, LLVMValueRef maybe_handle) { - assert(maybe_type->id == TypeTableEntryIdMaybe); + assert(maybe_type->id == TypeTableEntryIdOptional); TypeTableEntry *child_type = maybe_type->data.maybe.child_type; if (child_type->zero_bits) { return maybe_handle; @@ -3355,23 +3355,23 @@ static LLVMValueRef ir_render_test_non_null(CodeGen *g, IrExecutable *executable } static LLVMValueRef ir_render_unwrap_maybe(CodeGen *g, IrExecutable *executable, - IrInstructionUnwrapMaybe *instruction) + IrInstructionUnwrapOptional *instruction) { TypeTableEntry *ptr_type = instruction->value->value.type; assert(ptr_type->id == TypeTableEntryIdPointer); TypeTableEntry *maybe_type = ptr_type->data.pointer.child_type; - assert(maybe_type->id == TypeTableEntryIdMaybe); + assert(maybe_type->id == TypeTableEntryIdOptional); TypeTableEntry *child_type = maybe_type->data.maybe.child_type; LLVMValueRef maybe_ptr = ir_llvm_value(g, instruction->value); LLVMValueRef maybe_handle = get_handle_value(g, maybe_ptr, maybe_type, ptr_type); if (ir_want_runtime_safety(g, &instruction->base) && instruction->safety_check_on) { LLVMValueRef non_null_bit = gen_non_null_bit(g, maybe_type, maybe_handle); - LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(g->cur_fn_val, "UnwrapMaybeOk"); - LLVMBasicBlockRef fail_block = LLVMAppendBasicBlock(g->cur_fn_val, "UnwrapMaybeFail"); + LLVMBasicBlockRef ok_block = LLVMAppendBasicBlock(g->cur_fn_val, "UnwrapOptionalOk"); + LLVMBasicBlockRef fail_block = LLVMAppendBasicBlock(g->cur_fn_val, "UnwrapOptionalFail"); LLVMBuildCondBr(g->builder, non_null_bit, ok_block, fail_block); LLVMPositionBuilderAtEnd(g->builder, fail_block); - gen_safety_crash(g, PanicMsgIdUnwrapMaybeFail); + gen_safety_crash(g, PanicMsgIdUnwrapOptionalFail); LLVMPositionBuilderAtEnd(g->builder, ok_block); } @@ -3593,17 +3593,17 @@ static LLVMValueRef ir_render_align_cast(CodeGen *g, IrExecutable *executable, I } else if (target_type->id == TypeTableEntryIdFn) { align_bytes = target_type->data.fn.fn_type_id.alignment; ptr_val = target_val; - } else if (target_type->id == TypeTableEntryIdMaybe && + } else if (target_type->id == TypeTableEntryIdOptional && 
target_type->data.maybe.child_type->id == TypeTableEntryIdPointer) { align_bytes = target_type->data.maybe.child_type->data.pointer.alignment; ptr_val = target_val; - } else if (target_type->id == TypeTableEntryIdMaybe && + } else if (target_type->id == TypeTableEntryIdOptional && target_type->data.maybe.child_type->id == TypeTableEntryIdFn) { align_bytes = target_type->data.maybe.child_type->data.fn.fn_type_id.alignment; ptr_val = target_val; - } else if (target_type->id == TypeTableEntryIdMaybe && + } else if (target_type->id == TypeTableEntryIdOptional && target_type->data.maybe.child_type->id == TypeTableEntryIdPromise) { zig_panic("TODO audit this function"); @@ -3705,7 +3705,7 @@ static LLVMValueRef ir_render_cmpxchg(CodeGen *g, IrExecutable *executable, IrIn success_order, failure_order, instruction->is_weak); TypeTableEntry *maybe_type = instruction->base.value.type; - assert(maybe_type->id == TypeTableEntryIdMaybe); + assert(maybe_type->id == TypeTableEntryIdOptional); TypeTableEntry *child_type = maybe_type->data.maybe.child_type; if (type_is_codegen_pointer(child_type)) { @@ -4115,10 +4115,10 @@ static LLVMValueRef ir_render_unwrap_err_payload(CodeGen *g, IrExecutable *execu } } -static LLVMValueRef ir_render_maybe_wrap(CodeGen *g, IrExecutable *executable, IrInstructionMaybeWrap *instruction) { +static LLVMValueRef ir_render_maybe_wrap(CodeGen *g, IrExecutable *executable, IrInstructionOptionalWrap *instruction) { TypeTableEntry *wanted_type = instruction->base.value.type; - assert(wanted_type->id == TypeTableEntryIdMaybe); + assert(wanted_type->id == TypeTableEntryIdOptional); TypeTableEntry *child_type = wanted_type->data.maybe.child_type; @@ -4699,8 +4699,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_asm(g, executable, (IrInstructionAsm *)instruction); case IrInstructionIdTestNonNull: return ir_render_test_non_null(g, executable, (IrInstructionTestNonNull *)instruction); - case IrInstructionIdUnwrapMaybe: - return ir_render_unwrap_maybe(g, executable, (IrInstructionUnwrapMaybe *)instruction); + case IrInstructionIdUnwrapOptional: + return ir_render_unwrap_maybe(g, executable, (IrInstructionUnwrapOptional *)instruction); case IrInstructionIdClz: return ir_render_clz(g, executable, (IrInstructionClz *)instruction); case IrInstructionIdCtz: @@ -4741,8 +4741,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_unwrap_err_code(g, executable, (IrInstructionUnwrapErrCode *)instruction); case IrInstructionIdUnwrapErrPayload: return ir_render_unwrap_err_payload(g, executable, (IrInstructionUnwrapErrPayload *)instruction); - case IrInstructionIdMaybeWrap: - return ir_render_maybe_wrap(g, executable, (IrInstructionMaybeWrap *)instruction); + case IrInstructionIdOptionalWrap: + return ir_render_maybe_wrap(g, executable, (IrInstructionOptionalWrap *)instruction); case IrInstructionIdErrWrapCode: return ir_render_err_wrap_code(g, executable, (IrInstructionErrWrapCode *)instruction); case IrInstructionIdErrWrapPayload: @@ -4972,7 +4972,7 @@ static LLVMValueRef pack_const_int(CodeGen *g, LLVMTypeRef big_int_type_ref, Con } case TypeTableEntryIdPointer: case TypeTableEntryIdFn: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: case TypeTableEntryIdPromise: { LLVMValueRef ptr_val = gen_const_val(g, const_val, ""); @@ -5137,19 +5137,19 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c } else { return LLVMConstNull(LLVMInt1Type()); } - case 
TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: { TypeTableEntry *child_type = type_entry->data.maybe.child_type; if (child_type->zero_bits) { - return LLVMConstInt(LLVMInt1Type(), const_val->data.x_nullable ? 1 : 0, false); + return LLVMConstInt(LLVMInt1Type(), const_val->data.x_optional ? 1 : 0, false); } else if (type_is_codegen_pointer(child_type)) { return gen_const_val_ptr(g, const_val, name); } else { LLVMValueRef child_val; LLVMValueRef maybe_val; bool make_unnamed_struct; - if (const_val->data.x_nullable) { - child_val = gen_const_val(g, const_val->data.x_nullable, ""); + if (const_val->data.x_optional) { + child_val = gen_const_val(g, const_val->data.x_optional, ""); maybe_val = LLVMConstAllOnes(LLVMInt1Type()); make_unnamed_struct = is_llvm_value_unnamed_type(const_val->type, child_val); @@ -5755,8 +5755,8 @@ static void do_code_gen(CodeGen *g) { } else if (instruction->id == IrInstructionIdSlice) { IrInstructionSlice *slice_instruction = (IrInstructionSlice *)instruction; slot = &slice_instruction->tmp_ptr; - } else if (instruction->id == IrInstructionIdMaybeWrap) { - IrInstructionMaybeWrap *maybe_wrap_instruction = (IrInstructionMaybeWrap *)instruction; + } else if (instruction->id == IrInstructionIdOptionalWrap) { + IrInstructionOptionalWrap *maybe_wrap_instruction = (IrInstructionOptionalWrap *)instruction; slot = &maybe_wrap_instruction->tmp_ptr; } else if (instruction->id == IrInstructionIdErrWrapPayload) { IrInstructionErrWrapPayload *err_wrap_payload_instruction = (IrInstructionErrWrapPayload *)instruction; @@ -6511,7 +6511,7 @@ Buf *codegen_generate_builtin_source(CodeGen *g) { " ComptimeInt: void,\n" " Undefined: void,\n" " Null: void,\n" - " Nullable: Nullable,\n" + " Optional: Optional,\n" " ErrorUnion: ErrorUnion,\n" " ErrorSet: ErrorSet,\n" " Enum: Enum,\n" @@ -6570,7 +6570,7 @@ Buf *codegen_generate_builtin_source(CodeGen *g) { " defs: []Definition,\n" " };\n" "\n" - " pub const Nullable = struct {\n" + " pub const Optional = struct {\n" " child: type,\n" " };\n" "\n" @@ -7145,7 +7145,7 @@ static void prepend_c_type_to_decl_list(CodeGen *g, GenH *gen_h, TypeTableEntry case TypeTableEntryIdArray: prepend_c_type_to_decl_list(g, gen_h, type_entry->data.array.child_type); return; - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: prepend_c_type_to_decl_list(g, gen_h, type_entry->data.maybe.child_type); return; case TypeTableEntryIdFn: @@ -7234,7 +7234,7 @@ static void get_c_type(CodeGen *g, GenH *gen_h, TypeTableEntry *type_entry, Buf buf_appendf(out_buf, "%s%s *", const_str, buf_ptr(&child_buf)); break; } - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: { TypeTableEntry *child_type = type_entry->data.maybe.child_type; if (child_type->zero_bits) { @@ -7448,7 +7448,7 @@ static void gen_h_file(CodeGen *g) { case TypeTableEntryIdBlock: case TypeTableEntryIdBoundFn: case TypeTableEntryIdArgTuple: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: case TypeTableEntryIdFn: case TypeTableEntryIdPromise: zig_unreachable(); diff --git a/src/ir.cpp b/src/ir.cpp index 10098f3c32..02606fc4aa 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -47,7 +47,7 @@ enum ConstCastResultId { ConstCastResultIdErrSetGlobal, ConstCastResultIdPointerChild, ConstCastResultIdSliceChild, - ConstCastResultIdNullableChild, + ConstCastResultIdOptionalChild, ConstCastResultIdErrorUnionPayload, ConstCastResultIdErrorUnionErrorSet, ConstCastResultIdFnAlign, @@ -86,7 +86,7 @@ struct ConstCastOnly { ConstCastErrSetMismatch error_set; ConstCastOnly *pointer_child; 
ConstCastOnly *slice_child; - ConstCastOnly *nullable_child; + ConstCastOnly *optional_child; ConstCastOnly *error_union_payload; ConstCastOnly *error_union_error_set; ConstCastOnly *return_type; @@ -372,8 +372,8 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionTestNonNull *) { return IrInstructionIdTestNonNull; } -static constexpr IrInstructionId ir_instruction_id(IrInstructionUnwrapMaybe *) { - return IrInstructionIdUnwrapMaybe; +static constexpr IrInstructionId ir_instruction_id(IrInstructionUnwrapOptional *) { + return IrInstructionIdUnwrapOptional; } static constexpr IrInstructionId ir_instruction_id(IrInstructionClz *) { @@ -524,8 +524,8 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionUnwrapErrPayload return IrInstructionIdUnwrapErrPayload; } -static constexpr IrInstructionId ir_instruction_id(IrInstructionMaybeWrap *) { - return IrInstructionIdMaybeWrap; +static constexpr IrInstructionId ir_instruction_id(IrInstructionOptionalWrap *) { + return IrInstructionIdOptionalWrap; } static constexpr IrInstructionId ir_instruction_id(IrInstructionErrWrapPayload *) { @@ -1571,7 +1571,7 @@ static IrInstruction *ir_build_test_nonnull_from(IrBuilder *irb, IrInstruction * static IrInstruction *ir_build_unwrap_maybe(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *value, bool safety_check_on) { - IrInstructionUnwrapMaybe *instruction = ir_build_instruction(irb, scope, source_node); + IrInstructionUnwrapOptional *instruction = ir_build_instruction(irb, scope, source_node); instruction->value = value; instruction->safety_check_on = safety_check_on; @@ -1590,7 +1590,7 @@ static IrInstruction *ir_build_unwrap_maybe_from(IrBuilder *irb, IrInstruction * } static IrInstruction *ir_build_maybe_wrap(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *value) { - IrInstructionMaybeWrap *instruction = ir_build_instruction(irb, scope, source_node); + IrInstructionOptionalWrap *instruction = ir_build_instruction(irb, scope, source_node); instruction->value = value; ir_ref_instruction(value, irb->current_basic_block); @@ -2496,9 +2496,9 @@ static IrInstruction *ir_build_arg_type(IrBuilder *irb, Scope *scope, AstNode *s return &instruction->base; } -static IrInstruction *ir_build_error_return_trace(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstructionErrorReturnTrace::Nullable nullable) { +static IrInstruction *ir_build_error_return_trace(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstructionErrorReturnTrace::Optional optional) { IrInstructionErrorReturnTrace *instruction = ir_build_instruction(irb, scope, source_node); - instruction->nullable = nullable; + instruction->optional = optional; return &instruction->base; } @@ -3295,9 +3295,9 @@ static IrInstruction *ir_gen_maybe_ok_or(IrBuilder *irb, Scope *parent_scope, As is_comptime = ir_build_test_comptime(irb, parent_scope, node, is_non_null); } - IrBasicBlock *ok_block = ir_create_basic_block(irb, parent_scope, "MaybeNonNull"); - IrBasicBlock *null_block = ir_create_basic_block(irb, parent_scope, "MaybeNull"); - IrBasicBlock *end_block = ir_create_basic_block(irb, parent_scope, "MaybeEnd"); + IrBasicBlock *ok_block = ir_create_basic_block(irb, parent_scope, "OptionalNonNull"); + IrBasicBlock *null_block = ir_create_basic_block(irb, parent_scope, "OptionalNull"); + IrBasicBlock *end_block = ir_create_basic_block(irb, parent_scope, "OptionalEnd"); ir_build_cond_br(irb, parent_scope, node, is_non_null, ok_block, null_block, is_comptime); 
ir_set_cursor_at_end_and_append_block(irb, null_block); @@ -3426,7 +3426,7 @@ static IrInstruction *ir_gen_bin_op(IrBuilder *irb, Scope *scope, AstNode *node) return ir_gen_bin_op_id(irb, scope, node, IrBinOpArrayMult); case BinOpTypeMergeErrorSets: return ir_gen_bin_op_id(irb, scope, node, IrBinOpMergeErrorSets); - case BinOpTypeUnwrapMaybe: + case BinOpTypeUnwrapOptional: return ir_gen_maybe_ok_or(irb, scope, node); case BinOpTypeErrorUnion: return ir_gen_error_union(irb, scope, node); @@ -4703,9 +4703,9 @@ static IrInstruction *ir_gen_prefix_op_expr(IrBuilder *irb, Scope *scope, AstNod return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpNegation), lval); case PrefixOpNegationWrap: return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpNegationWrap), lval); - case PrefixOpMaybe: - return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpMaybe), lval); - case PrefixOpUnwrapMaybe: + case PrefixOpOptional: + return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpOptional), lval); + case PrefixOpUnwrapOptional: return ir_gen_maybe_assert_ok(irb, scope, node, lval); case PrefixOpAddrOf: { AstNode *expr_node = node->data.prefix_op_expr.primary_expr; @@ -5370,9 +5370,9 @@ static IrInstruction *ir_gen_test_expr(IrBuilder *irb, Scope *scope, AstNode *no IrInstruction *maybe_val = ir_build_load_ptr(irb, scope, node, maybe_val_ptr); IrInstruction *is_non_null = ir_build_test_nonnull(irb, scope, node, maybe_val); - IrBasicBlock *then_block = ir_create_basic_block(irb, scope, "MaybeThen"); - IrBasicBlock *else_block = ir_create_basic_block(irb, scope, "MaybeElse"); - IrBasicBlock *endif_block = ir_create_basic_block(irb, scope, "MaybeEndIf"); + IrBasicBlock *then_block = ir_create_basic_block(irb, scope, "OptionalThen"); + IrBasicBlock *else_block = ir_create_basic_block(irb, scope, "OptionalElse"); + IrBasicBlock *endif_block = ir_create_basic_block(irb, scope, "OptionalEndIf"); IrInstruction *is_comptime; if (ir_should_inline(irb->exec, scope)) { @@ -7519,7 +7519,7 @@ static bool ir_num_lit_fits_in_other_type(IrAnalyze *ira, IrInstruction *instruc } } else if (const_val_fits_in_num_lit(const_val, other_type)) { return true; - } else if (other_type->id == TypeTableEntryIdMaybe) { + } else if (other_type->id == TypeTableEntryIdOptional) { TypeTableEntry *child_type = other_type->data.maybe.child_type; if (const_val_fits_in_num_lit(const_val, child_type)) { return true; @@ -7663,7 +7663,7 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry return result; // * and [*] can do a const-cast-only to ?* and ?[*], respectively - if (expected_type->id == TypeTableEntryIdMaybe && + if (expected_type->id == TypeTableEntryIdOptional && expected_type->data.maybe.child_type->id == TypeTableEntryIdPointer && actual_type->id == TypeTableEntryIdPointer) { @@ -7718,12 +7718,12 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry } // maybe - if (expected_type->id == TypeTableEntryIdMaybe && actual_type->id == TypeTableEntryIdMaybe) { + if (expected_type->id == TypeTableEntryIdOptional && actual_type->id == TypeTableEntryIdOptional) { ConstCastOnly child = types_match_const_cast_only(ira, expected_type->data.maybe.child_type, actual_type->data.maybe.child_type, source_node); if (child.id != ConstCastResultIdOk) { - result.id = ConstCastResultIdNullableChild; - result.data.nullable_child = allocate_nonzero(1); - *result.data.nullable_child = child; + result.id = 
ConstCastResultIdOptionalChild; + result.data.optional_child = allocate_nonzero(1); + *result.data.optional_child = child; } return result; } @@ -7925,7 +7925,7 @@ static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira, } // implicit conversion from ?T to ?U - if (expected_type->id == TypeTableEntryIdMaybe && actual_type->id == TypeTableEntryIdMaybe) { + if (expected_type->id == TypeTableEntryIdOptional && actual_type->id == TypeTableEntryIdOptional) { ImplicitCastMatchResult res = ir_types_match_with_implicit_cast(ira, expected_type->data.maybe.child_type, actual_type->data.maybe.child_type, value); if (res != ImplicitCastMatchResultNo) @@ -7933,7 +7933,7 @@ static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira, } // implicit conversion from non maybe type to maybe type - if (expected_type->id == TypeTableEntryIdMaybe) { + if (expected_type->id == TypeTableEntryIdOptional) { ImplicitCastMatchResult res = ir_types_match_with_implicit_cast(ira, expected_type->data.maybe.child_type, actual_type, value); if (res != ImplicitCastMatchResultNo) @@ -7941,7 +7941,7 @@ static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira, } // implicit conversion from null literal to maybe type - if (expected_type->id == TypeTableEntryIdMaybe && + if (expected_type->id == TypeTableEntryIdOptional && actual_type->id == TypeTableEntryIdNull) { return ImplicitCastMatchResultYes; @@ -7963,7 +7963,7 @@ static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira, // implicit conversion from T to U!?T if (expected_type->id == TypeTableEntryIdErrorUnion && - expected_type->data.error_union.payload_type->id == TypeTableEntryIdMaybe && + expected_type->data.error_union.payload_type->id == TypeTableEntryIdOptional && ir_types_match_with_implicit_cast(ira, expected_type->data.error_union.payload_type->data.maybe.child_type, actual_type, value)) @@ -8072,7 +8072,7 @@ static ImplicitCastMatchResult ir_types_match_with_implicit_cast(IrAnalyze *ira, } // implicit [N]T to ?[]const T - if (expected_type->id == TypeTableEntryIdMaybe && + if (expected_type->id == TypeTableEntryIdOptional && is_slice(expected_type->data.maybe.child_type) && actual_type->id == TypeTableEntryIdArray) { @@ -8552,13 +8552,13 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod continue; } - if (prev_type->id == TypeTableEntryIdMaybe && + if (prev_type->id == TypeTableEntryIdOptional && types_match_const_cast_only(ira, prev_type->data.maybe.child_type, cur_type, source_node).id == ConstCastResultIdOk) { continue; } - if (cur_type->id == TypeTableEntryIdMaybe && + if (cur_type->id == TypeTableEntryIdOptional && types_match_const_cast_only(ira, cur_type->data.maybe.child_type, prev_type, source_node).id == ConstCastResultIdOk) { prev_inst = cur_inst; @@ -8711,7 +8711,7 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod ir_add_error_node(ira, source_node, buf_sprintf("unable to make maybe out of number literal")); return ira->codegen->builtin_types.entry_invalid; - } else if (prev_inst->value.type->id == TypeTableEntryIdMaybe) { + } else if (prev_inst->value.type->id == TypeTableEntryIdOptional) { return prev_inst->value.type; } else { return get_maybe_type(ira->codegen, prev_inst->value.type); @@ -9193,7 +9193,7 @@ static FnTableEntry *ir_resolve_fn(IrAnalyze *ira, IrInstruction *fn_value) { } static IrInstruction *ir_analyze_maybe_wrap(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction 
*value, TypeTableEntry *wanted_type) { - assert(wanted_type->id == TypeTableEntryIdMaybe); + assert(wanted_type->id == TypeTableEntryIdOptional); if (instr_is_comptime(value)) { TypeTableEntry *payload_type = wanted_type->data.maybe.child_type; @@ -9211,7 +9211,7 @@ static IrInstruction *ir_analyze_maybe_wrap(IrAnalyze *ira, IrInstruction *sourc if (get_codegen_ptr_type(wanted_type) != nullptr) { copy_const_val(&const_instruction->base.value, val, val->data.x_ptr.mut == ConstPtrMutComptimeConst); } else { - const_instruction->base.value.data.x_nullable = val; + const_instruction->base.value.data.x_optional = val; } const_instruction->base.value.type = wanted_type; return &const_instruction->base; @@ -9219,7 +9219,7 @@ static IrInstruction *ir_analyze_maybe_wrap(IrAnalyze *ira, IrInstruction *sourc IrInstruction *result = ir_build_maybe_wrap(&ira->new_irb, source_instr->scope, source_instr->source_node, value); result->value.type = wanted_type; - result->value.data.rh_maybe = RuntimeHintMaybeNonNull; + result->value.data.rh_maybe = RuntimeHintOptionalNonNull; ir_add_alloca(ira, result, wanted_type); return result; } @@ -9361,7 +9361,7 @@ static IrInstruction *ir_analyze_cast_ref(IrAnalyze *ira, IrInstruction *source_ } static IrInstruction *ir_analyze_null_to_maybe(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *value, TypeTableEntry *wanted_type) { - assert(wanted_type->id == TypeTableEntryIdMaybe); + assert(wanted_type->id == TypeTableEntryIdOptional); assert(instr_is_comptime(value)); ConstExprValue *val = ir_resolve_const(ira, value, UndefBad); @@ -9373,7 +9373,7 @@ static IrInstruction *ir_analyze_null_to_maybe(IrAnalyze *ira, IrInstruction *so const_instruction->base.value.data.x_ptr.special = ConstPtrSpecialHardCodedAddr; const_instruction->base.value.data.x_ptr.data.hard_coded_addr.addr = 0; } else { - const_instruction->base.value.data.x_nullable = nullptr; + const_instruction->base.value.data.x_optional = nullptr; } const_instruction->base.value.type = wanted_type; return &const_instruction->base; @@ -9992,7 +9992,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst } // explicit cast from [N]T to ?[]const N - if (wanted_type->id == TypeTableEntryIdMaybe && + if (wanted_type->id == TypeTableEntryIdOptional && is_slice(wanted_type->data.maybe.child_type) && actual_type->id == TypeTableEntryIdArray) { @@ -10091,7 +10091,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst // explicit cast from T to ?T // note that the *T to ?*T case is handled via the "ConstCastOnly" mechanism - if (wanted_type->id == TypeTableEntryIdMaybe) { + if (wanted_type->id == TypeTableEntryIdOptional) { TypeTableEntry *wanted_child_type = wanted_type->data.maybe.child_type; if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node).id == ConstCastResultIdOk) { return ir_analyze_maybe_wrap(ira, source_instr, value, wanted_type); @@ -10120,7 +10120,7 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst } // explicit cast from null literal to maybe type - if (wanted_type->id == TypeTableEntryIdMaybe && + if (wanted_type->id == TypeTableEntryIdOptional && actual_type->id == TypeTableEntryIdNull) { return ir_analyze_null_to_maybe(ira, source_instr, value, wanted_type); @@ -10173,8 +10173,8 @@ static IrInstruction *ir_analyze_cast(IrAnalyze *ira, IrInstruction *source_inst // explicit cast from T to E!?T if (wanted_type->id == TypeTableEntryIdErrorUnion && - 
wanted_type->data.error_union.payload_type->id == TypeTableEntryIdMaybe && - actual_type->id != TypeTableEntryIdMaybe) + wanted_type->data.error_union.payload_type->id == TypeTableEntryIdOptional && + actual_type->id != TypeTableEntryIdOptional) { TypeTableEntry *wanted_child_type = wanted_type->data.error_union.payload_type->data.maybe.child_type; if (types_match_const_cast_only(ira, wanted_child_type, actual_type, source_node).id == ConstCastResultIdOk || @@ -10737,13 +10737,13 @@ static bool resolve_cmp_op_id(IrBinOp op_id, Cmp cmp) { } } -static bool nullable_value_is_null(ConstExprValue *val) { +static bool optional_value_is_null(ConstExprValue *val) { assert(val->special == ConstValSpecialStatic); if (get_codegen_ptr_type(val->type) != nullptr) { return val->data.x_ptr.special == ConstPtrSpecialHardCodedAddr && val->data.x_ptr.data.hard_coded_addr.addr == 0; } else { - return val->data.x_nullable == nullptr; + return val->data.x_optional == nullptr; } } @@ -10755,8 +10755,8 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp IrBinOp op_id = bin_op_instruction->op_id; bool is_equality_cmp = (op_id == IrBinOpCmpEq || op_id == IrBinOpCmpNotEq); if (is_equality_cmp && - ((op1->value.type->id == TypeTableEntryIdNull && op2->value.type->id == TypeTableEntryIdMaybe) || - (op2->value.type->id == TypeTableEntryIdNull && op1->value.type->id == TypeTableEntryIdMaybe) || + ((op1->value.type->id == TypeTableEntryIdNull && op2->value.type->id == TypeTableEntryIdOptional) || + (op2->value.type->id == TypeTableEntryIdNull && op1->value.type->id == TypeTableEntryIdOptional) || (op1->value.type->id == TypeTableEntryIdNull && op2->value.type->id == TypeTableEntryIdNull))) { if (op1->value.type->id == TypeTableEntryIdNull && op2->value.type->id == TypeTableEntryIdNull) { @@ -10776,7 +10776,7 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp ConstExprValue *maybe_val = ir_resolve_const(ira, maybe_op, UndefBad); if (!maybe_val) return ira->codegen->builtin_types.entry_invalid; - bool is_null = nullable_value_is_null(maybe_val); + bool is_null = optional_value_is_null(maybe_val); ConstExprValue *out_val = ir_build_const_from(ira, &bin_op_instruction->base); out_val->data.x_bool = (op_id == IrBinOpCmpEq) ? 
is_null : !is_null; return ira->codegen->builtin_types.entry_bool; @@ -10925,7 +10925,7 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp case TypeTableEntryIdStruct: case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdUnion: ir_add_error_node(ira, source_node, @@ -11998,7 +11998,7 @@ static TypeTableEntry *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructi case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdNamespace: @@ -12022,7 +12022,7 @@ static TypeTableEntry *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructi case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: zig_panic("TODO export const value of type %s", buf_ptr(&target->value.type->name)); @@ -12049,24 +12049,24 @@ static bool exec_has_err_ret_trace(CodeGen *g, IrExecutable *exec) { static TypeTableEntry *ir_analyze_instruction_error_return_trace(IrAnalyze *ira, IrInstructionErrorReturnTrace *instruction) { - if (instruction->nullable == IrInstructionErrorReturnTrace::Null) { + if (instruction->optional == IrInstructionErrorReturnTrace::Null) { TypeTableEntry *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(ira->codegen); - TypeTableEntry *nullable_type = get_maybe_type(ira->codegen, ptr_to_stack_trace_type); + TypeTableEntry *optional_type = get_maybe_type(ira->codegen, ptr_to_stack_trace_type); if (!exec_has_err_ret_trace(ira->codegen, ira->new_irb.exec)) { ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base); - assert(get_codegen_ptr_type(nullable_type) != nullptr); + assert(get_codegen_ptr_type(optional_type) != nullptr); out_val->data.x_ptr.special = ConstPtrSpecialHardCodedAddr; out_val->data.x_ptr.data.hard_coded_addr.addr = 0; - return nullable_type; + return optional_type; } IrInstruction *new_instruction = ir_build_error_return_trace(&ira->new_irb, instruction->base.scope, - instruction->base.source_node, instruction->nullable); + instruction->base.source_node, instruction->optional); ir_link_new_instruction(new_instruction, &instruction->base); - return nullable_type; + return optional_type; } else { assert(ira->codegen->have_err_ret_tracing); IrInstruction *new_instruction = ir_build_error_return_trace(&ira->new_irb, instruction->base.scope, - instruction->base.source_node, instruction->nullable); + instruction->base.source_node, instruction->optional); ir_link_new_instruction(new_instruction, &instruction->base); return get_ptr_to_stack_trace_type(ira->codegen); } @@ -12998,7 +12998,7 @@ static TypeTableEntry *ir_analyze_maybe(IrAnalyze *ira, IrInstructionUnOp *un_op case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdEnum: @@ -13017,7 +13017,7 @@ static TypeTableEntry *ir_analyze_maybe(IrAnalyze *ira, IrInstructionUnOp *un_op case TypeTableEntryIdUnreachable: case TypeTableEntryIdOpaque: ir_add_error_node(ira, un_op_instruction->base.source_node, - buf_sprintf("type '%s' not nullable", 
buf_ptr(&type_entry->name))); + buf_sprintf("type '%s' not optional", buf_ptr(&type_entry->name))); return ira->codegen->builtin_types.entry_invalid; } zig_unreachable(); @@ -13109,7 +13109,7 @@ static TypeTableEntry *ir_analyze_instruction_un_op(IrAnalyze *ira, IrInstructio return ir_analyze_negation(ira, un_op_instruction); case IrUnOpDereference: return ir_analyze_dereference(ira, un_op_instruction); - case IrUnOpMaybe: + case IrUnOpOptional: return ir_analyze_maybe(ira, un_op_instruction); } zig_unreachable(); @@ -14155,7 +14155,7 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru buf_ptr(&child_type->name), buf_ptr(field_name))); return ira->codegen->builtin_types.entry_invalid; } - } else if (child_type->id == TypeTableEntryIdMaybe) { + } else if (child_type->id == TypeTableEntryIdOptional) { if (buf_eql_str(field_name, "Child")) { bool ptr_is_const = true; bool ptr_is_volatile = false; @@ -14339,7 +14339,7 @@ static TypeTableEntry *ir_analyze_instruction_typeof(IrAnalyze *ira, IrInstructi case TypeTableEntryIdPointer: case TypeTableEntryIdArray: case TypeTableEntryIdStruct: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdEnum: @@ -14607,7 +14607,7 @@ static TypeTableEntry *ir_analyze_instruction_slice_type(IrAnalyze *ira, case TypeTableEntryIdStruct: case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdEnum: @@ -14715,7 +14715,7 @@ static TypeTableEntry *ir_analyze_instruction_array_type(IrAnalyze *ira, case TypeTableEntryIdStruct: case TypeTableEntryIdComptimeFloat: case TypeTableEntryIdComptimeInt: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdEnum: @@ -14786,7 +14786,7 @@ static TypeTableEntry *ir_analyze_instruction_size_of(IrAnalyze *ira, case TypeTableEntryIdPointer: case TypeTableEntryIdArray: case TypeTableEntryIdStruct: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdEnum: @@ -14810,14 +14810,14 @@ static TypeTableEntry *ir_analyze_instruction_test_non_null(IrAnalyze *ira, IrIn TypeTableEntry *type_entry = value->value.type; - if (type_entry->id == TypeTableEntryIdMaybe) { + if (type_entry->id == TypeTableEntryIdOptional) { if (instr_is_comptime(value)) { ConstExprValue *maybe_val = ir_resolve_const(ira, value, UndefBad); if (!maybe_val) return ira->codegen->builtin_types.entry_invalid; ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base); - out_val->data.x_bool = !nullable_value_is_null(maybe_val); + out_val->data.x_bool = !optional_value_is_null(maybe_val); return ira->codegen->builtin_types.entry_bool; } @@ -14835,7 +14835,7 @@ static TypeTableEntry *ir_analyze_instruction_test_non_null(IrAnalyze *ira, IrIn } static TypeTableEntry *ir_analyze_instruction_unwrap_maybe(IrAnalyze *ira, - IrInstructionUnwrapMaybe *unwrap_maybe_instruction) + IrInstructionUnwrapOptional *unwrap_maybe_instruction) { IrInstruction *value = unwrap_maybe_instruction->value->other; if (type_is_invalid(value->value.type)) @@ -14863,9 +14863,9 @@ static TypeTableEntry *ir_analyze_instruction_unwrap_maybe(IrAnalyze *ira, ptr_type->data.pointer.is_const, ptr_type->data.pointer.is_volatile); 
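
The unwrap analysis being renamed here (ir_analyze_instruction_unwrap_maybe, now operating on IrInstructionUnwrapOptional) keeps its behaviour: a comptime-known operand is unwrapped during analysis, a null operand is rejected with "unable to unwrap null", and a non-optional operand with "expected optional type, found '...'". A minimal Zig sketch of that behaviour, assuming a compiler from around this change; it is an illustration, not part of the patch:

    const assert = @import("std").debug.assert;

    test "comptime unwrap of an optional" {
        comptime {
            const x: ?u32 = 7;
            // A comptime-known, non-null optional unwraps to its payload during analysis.
            assert(x.? == 7);
            // A null value here would be rejected with "unable to unwrap null",
            // and applying the unwrap to a non-optional value reports
            // "expected optional type, found '...'".
        }
    }
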
ir_link_new_instruction(result_instr, &unwrap_maybe_instruction->base); return result_instr->value.type; - } else if (type_entry->id != TypeTableEntryIdMaybe) { + } else if (type_entry->id != TypeTableEntryIdOptional) { ir_add_error_node(ira, unwrap_maybe_instruction->value->source_node, - buf_sprintf("expected nullable type, found '%s'", buf_ptr(&type_entry->name))); + buf_sprintf("expected optional type, found '%s'", buf_ptr(&type_entry->name))); return ira->codegen->builtin_types.entry_invalid; } TypeTableEntry *child_type = type_entry->data.maybe.child_type; @@ -14881,7 +14881,7 @@ static TypeTableEntry *ir_analyze_instruction_unwrap_maybe(IrAnalyze *ira, ConstExprValue *maybe_val = const_ptr_pointee(ira->codegen, val); if (val->data.x_ptr.mut != ConstPtrMutRuntimeVar) { - if (nullable_value_is_null(maybe_val)) { + if (optional_value_is_null(maybe_val)) { ir_add_error(ira, &unwrap_maybe_instruction->base, buf_sprintf("unable to unwrap null")); return ira->codegen->builtin_types.entry_invalid; } @@ -14891,7 +14891,7 @@ static TypeTableEntry *ir_analyze_instruction_unwrap_maybe(IrAnalyze *ira, if (type_is_codegen_pointer(child_type)) { out_val->data.x_ptr.data.ref.pointee = maybe_val; } else { - out_val->data.x_ptr.data.ref.pointee = maybe_val->data.x_nullable; + out_val->data.x_ptr.data.ref.pointee = maybe_val->data.x_optional; } return result_type; } @@ -15216,7 +15216,7 @@ static TypeTableEntry *ir_analyze_instruction_switch_target(IrAnalyze *ira, case TypeTableEntryIdStruct: case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: case TypeTableEntryIdBlock: case TypeTableEntryIdBoundFn: case TypeTableEntryIdArgTuple: @@ -15737,7 +15737,7 @@ static TypeTableEntry *ir_analyze_min_max(IrAnalyze *ira, IrInstruction *source_ case TypeTableEntryIdComptimeInt: case TypeTableEntryIdUndefined: case TypeTableEntryIdNull: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdUnion: @@ -16255,11 +16255,11 @@ static bool ir_make_type_info_defs(IrAnalyze *ira, ConstExprValue *out_val, Scop 0, 0); fn_def_fields[6].type = get_maybe_type(ira->codegen, get_slice_type(ira->codegen, u8_ptr)); if (fn_node->is_extern && buf_len(fn_node->lib_name) > 0) { - fn_def_fields[6].data.x_nullable = create_const_vals(1); + fn_def_fields[6].data.x_optional = create_const_vals(1); ConstExprValue *lib_name = create_const_str_lit(ira->codegen, fn_node->lib_name); - init_const_slice(ira->codegen, fn_def_fields[6].data.x_nullable, lib_name, 0, buf_len(fn_node->lib_name), true); + init_const_slice(ira->codegen, fn_def_fields[6].data.x_optional, lib_name, 0, buf_len(fn_node->lib_name), true); } else { - fn_def_fields[6].data.x_nullable = nullptr; + fn_def_fields[6].data.x_optional = nullptr; } // return_type: type ensure_field_index(fn_def_val->type, "return_type", 7); @@ -16507,11 +16507,11 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t break; } - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: { result = create_const_vals(1); result->special = ConstValSpecialStatic; - result->type = ir_type_info_get_type(ira, "Nullable"); + result->type = ir_type_info_get_type(ira, "Optional"); ConstExprValue *fields = create_const_vals(1); result->data.x_struct.fields = fields; @@ -16725,10 +16725,10 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t inner_fields[1].type = 
get_maybe_type(ira->codegen, type_info_enum_field_type); if (fields[1].data.x_type == ira->codegen->builtin_types.entry_undef) { - inner_fields[1].data.x_nullable = nullptr; + inner_fields[1].data.x_optional = nullptr; } else { - inner_fields[1].data.x_nullable = create_const_vals(1); - make_enum_field_val(inner_fields[1].data.x_nullable, union_field->enum_field, type_info_enum_field_type); + inner_fields[1].data.x_optional = create_const_vals(1); + make_enum_field_val(inner_fields[1].data.x_optional, union_field->enum_field, type_info_enum_field_type); } inner_fields[2].special = ConstValSpecialStatic; @@ -16796,13 +16796,13 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t inner_fields[1].type = get_maybe_type(ira->codegen, ira->codegen->builtin_types.entry_usize); if (!type_has_bits(struct_field->type_entry)) { - inner_fields[1].data.x_nullable = nullptr; + inner_fields[1].data.x_optional = nullptr; } else { size_t byte_offset = LLVMOffsetOfElement(ira->codegen->target_data_ref, type_entry->type_ref, struct_field->gen_index); - inner_fields[1].data.x_nullable = create_const_vals(1); - inner_fields[1].data.x_nullable->special = ConstValSpecialStatic; - inner_fields[1].data.x_nullable->type = ira->codegen->builtin_types.entry_usize; - bigint_init_unsigned(&inner_fields[1].data.x_nullable->data.x_bigint, byte_offset); + inner_fields[1].data.x_optional = create_const_vals(1); + inner_fields[1].data.x_optional->special = ConstValSpecialStatic; + inner_fields[1].data.x_optional->type = ira->codegen->builtin_types.entry_usize; + bigint_init_unsigned(&inner_fields[1].data.x_optional->data.x_bigint, byte_offset); } inner_fields[2].special = ConstValSpecialStatic; @@ -18027,7 +18027,7 @@ static TypeTableEntry *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstruc case TypeTableEntryIdPromise: case TypeTableEntryIdArray: case TypeTableEntryIdStruct: - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: case TypeTableEntryIdErrorUnion: case TypeTableEntryIdErrorSet: case TypeTableEntryIdEnum: @@ -18591,7 +18591,7 @@ static IrInstruction *ir_align_cast(IrAnalyze *ira, IrInstruction *target, uint3 old_align_bytes = fn_type_id.alignment; fn_type_id.alignment = align_bytes; result_type = get_fn_type(ira->codegen, &fn_type_id); - } else if (target_type->id == TypeTableEntryIdMaybe && + } else if (target_type->id == TypeTableEntryIdOptional && target_type->data.maybe.child_type->id == TypeTableEntryIdPointer) { TypeTableEntry *ptr_type = target_type->data.maybe.child_type; @@ -18599,7 +18599,7 @@ static IrInstruction *ir_align_cast(IrAnalyze *ira, IrInstruction *target, uint3 TypeTableEntry *better_ptr_type = adjust_ptr_align(ira->codegen, ptr_type, align_bytes); result_type = get_maybe_type(ira->codegen, better_ptr_type); - } else if (target_type->id == TypeTableEntryIdMaybe && + } else if (target_type->id == TypeTableEntryIdOptional && target_type->data.maybe.child_type->id == TypeTableEntryIdFn) { FnTypeId fn_type_id = target_type->data.maybe.child_type->data.fn.fn_type_id; @@ -18757,7 +18757,7 @@ static void buf_write_value_bytes(CodeGen *codegen, uint8_t *buf, ConstExprValue return; case TypeTableEntryIdStruct: zig_panic("TODO buf_write_value_bytes struct type"); - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: zig_panic("TODO buf_write_value_bytes maybe type"); case TypeTableEntryIdErrorUnion: zig_panic("TODO buf_write_value_bytes error union"); @@ -18815,7 +18815,7 @@ static void buf_read_value_bytes(CodeGen *codegen, uint8_t *buf, 
ConstExprValue zig_panic("TODO buf_read_value_bytes array type"); case TypeTableEntryIdStruct: zig_panic("TODO buf_read_value_bytes struct type"); - case TypeTableEntryIdMaybe: + case TypeTableEntryIdOptional: zig_panic("TODO buf_read_value_bytes maybe type"); case TypeTableEntryIdErrorUnion: zig_panic("TODO buf_read_value_bytes error union"); @@ -19731,7 +19731,7 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi case IrInstructionIdUnionInit: case IrInstructionIdStructFieldPtr: case IrInstructionIdUnionFieldPtr: - case IrInstructionIdMaybeWrap: + case IrInstructionIdOptionalWrap: case IrInstructionIdErrWrapCode: case IrInstructionIdErrWrapPayload: case IrInstructionIdCast: @@ -19791,8 +19791,8 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi return ir_analyze_instruction_size_of(ira, (IrInstructionSizeOf *)instruction); case IrInstructionIdTestNonNull: return ir_analyze_instruction_test_non_null(ira, (IrInstructionTestNonNull *)instruction); - case IrInstructionIdUnwrapMaybe: - return ir_analyze_instruction_unwrap_maybe(ira, (IrInstructionUnwrapMaybe *)instruction); + case IrInstructionIdUnwrapOptional: + return ir_analyze_instruction_unwrap_maybe(ira, (IrInstructionUnwrapOptional *)instruction); case IrInstructionIdClz: return ir_analyze_instruction_clz(ira, (IrInstructionClz *)instruction); case IrInstructionIdCtz: @@ -20128,7 +20128,7 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdSliceType: case IrInstructionIdSizeOf: case IrInstructionIdTestNonNull: - case IrInstructionIdUnwrapMaybe: + case IrInstructionIdUnwrapOptional: case IrInstructionIdClz: case IrInstructionIdCtz: case IrInstructionIdSwitchVar: @@ -20150,7 +20150,7 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdFrameAddress: case IrInstructionIdTestErr: case IrInstructionIdUnwrapErrCode: - case IrInstructionIdMaybeWrap: + case IrInstructionIdOptionalWrap: case IrInstructionIdErrWrapCode: case IrInstructionIdErrWrapPayload: case IrInstructionIdFnProto: diff --git a/src/ir_print.cpp b/src/ir_print.cpp index 776ef64566..43907fa9d4 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -148,7 +148,7 @@ static const char *ir_un_op_id_str(IrUnOp op_id) { return "-%"; case IrUnOpDereference: return "*"; - case IrUnOpMaybe: + case IrUnOpOptional: return "?"; } zig_unreachable(); @@ -481,7 +481,7 @@ static void ir_print_test_null(IrPrint *irp, IrInstructionTestNonNull *instructi fprintf(irp->f, " != null"); } -static void ir_print_unwrap_maybe(IrPrint *irp, IrInstructionUnwrapMaybe *instruction) { +static void ir_print_unwrap_maybe(IrPrint *irp, IrInstructionUnwrapOptional *instruction) { fprintf(irp->f, "&??*"); ir_print_other_instruction(irp, instruction->value); if (!instruction->safety_check_on) { @@ -777,7 +777,7 @@ static void ir_print_unwrap_err_payload(IrPrint *irp, IrInstructionUnwrapErrPayl } } -static void ir_print_maybe_wrap(IrPrint *irp, IrInstructionMaybeWrap *instruction) { +static void ir_print_maybe_wrap(IrPrint *irp, IrInstructionOptionalWrap *instruction) { fprintf(irp->f, "@maybeWrap("); ir_print_other_instruction(irp, instruction->value); fprintf(irp->f, ")"); @@ -1032,7 +1032,7 @@ static void ir_print_export(IrPrint *irp, IrInstructionExport *instruction) { static void ir_print_error_return_trace(IrPrint *irp, IrInstructionErrorReturnTrace *instruction) { fprintf(irp->f, "@errorReturnTrace("); - switch (instruction->nullable) { + switch (instruction->optional) { case 
IrInstructionErrorReturnTrace::Null: fprintf(irp->f, "Null"); break; @@ -1348,8 +1348,8 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdTestNonNull: ir_print_test_null(irp, (IrInstructionTestNonNull *)instruction); break; - case IrInstructionIdUnwrapMaybe: - ir_print_unwrap_maybe(irp, (IrInstructionUnwrapMaybe *)instruction); + case IrInstructionIdUnwrapOptional: + ir_print_unwrap_maybe(irp, (IrInstructionUnwrapOptional *)instruction); break; case IrInstructionIdCtz: ir_print_ctz(irp, (IrInstructionCtz *)instruction); @@ -1465,8 +1465,8 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdUnwrapErrPayload: ir_print_unwrap_err_payload(irp, (IrInstructionUnwrapErrPayload *)instruction); break; - case IrInstructionIdMaybeWrap: - ir_print_maybe_wrap(irp, (IrInstructionMaybeWrap *)instruction); + case IrInstructionIdOptionalWrap: + ir_print_maybe_wrap(irp, (IrInstructionOptionalWrap *)instruction); break; case IrInstructionIdErrWrapCode: ir_print_err_wrap_code(irp, (IrInstructionErrWrapCode *)instruction); diff --git a/src/parser.cpp b/src/parser.cpp index 3ad2de906b..2ee69f81ab 100644 --- a/src/parser.cpp +++ b/src/parser.cpp @@ -1046,12 +1046,11 @@ static AstNode *ast_parse_fn_proto_partial(ParseContext *pc, size_t *token_index } /* -SuffixOpExpression = ("async" option("<" SuffixOpExpression ">") SuffixOpExpression FnCallExpression) | PrimaryExpression option(FnCallExpression | ArrayAccessExpression | FieldAccessExpression | PtrDerefExpression | SliceExpression) +SuffixOpExpression = ("async" option("<" SuffixOpExpression ">") SuffixOpExpression FnCallExpression) | PrimaryExpression option(FnCallExpression | ArrayAccessExpression | FieldAccessExpression | SliceExpression | ".*" | ".?") FnCallExpression : token(LParen) list(Expression, token(Comma)) token(RParen) ArrayAccessExpression : token(LBracket) Expression token(RBracket) SliceExpression = "[" Expression ".." option(Expression) "]" FieldAccessExpression : token(Dot) token(Symbol) -PtrDerefExpression = ".*" StructLiteralField : token(Dot) token(Symbol) token(Eq) Expression */ static AstNode *ast_parse_suffix_op_expr(ParseContext *pc, size_t *token_index, bool mandatory) { @@ -1148,6 +1147,14 @@ static AstNode *ast_parse_suffix_op_expr(ParseContext *pc, size_t *token_index, AstNode *node = ast_create_node(pc, NodeTypePtrDeref, first_token); node->data.ptr_deref_expr.target = primary_expr; + primary_expr = node; + } else if (token->id == TokenIdQuestion) { + *token_index += 1; + + AstNode *node = ast_create_node(pc, NodeTypePrefixOpExpr, first_token); + node->data.prefix_op_expr.prefix_op = PrefixOpUnwrapOptional; + node->data.prefix_op_expr.primary_expr = primary_expr; + primary_expr = node; } else { ast_invalid_token_error(pc, token); @@ -1165,8 +1172,8 @@ static PrefixOp tok_to_prefix_op(Token *token) { case TokenIdDash: return PrefixOpNegation; case TokenIdMinusPercent: return PrefixOpNegationWrap; case TokenIdTilde: return PrefixOpBinNot; - case TokenIdMaybe: return PrefixOpMaybe; - case TokenIdDoubleQuestion: return PrefixOpUnwrapMaybe; + case TokenIdQuestion: return PrefixOpOptional; + case TokenIdDoubleQuestion: return PrefixOpUnwrapOptional; case TokenIdAmpersand: return PrefixOpAddrOf; default: return PrefixOpInvalid; } @@ -2304,8 +2311,8 @@ static BinOpType ast_parse_ass_op(ParseContext *pc, size_t *token_index, bool ma } /* -UnwrapExpression : BoolOrExpression (UnwrapMaybe | UnwrapError) | BoolOrExpression -UnwrapMaybe : "??" 
BoolOrExpression +UnwrapExpression : BoolOrExpression (UnwrapOptional | UnwrapError) | BoolOrExpression +UnwrapOptional : "??" BoolOrExpression UnwrapError = "catch" option("|" Symbol "|") Expression */ static AstNode *ast_parse_unwrap_expr(ParseContext *pc, size_t *token_index, bool mandatory) { @@ -2322,7 +2329,7 @@ static AstNode *ast_parse_unwrap_expr(ParseContext *pc, size_t *token_index, boo AstNode *node = ast_create_node(pc, NodeTypeBinOpExpr, token); node->data.bin_op_expr.op1 = lhs; - node->data.bin_op_expr.bin_op = BinOpTypeUnwrapMaybe; + node->data.bin_op_expr.bin_op = BinOpTypeUnwrapOptional; node->data.bin_op_expr.op2 = rhs; return node; diff --git a/src/tokenizer.cpp b/src/tokenizer.cpp index badbd695ec..cfabdf11ad 100644 --- a/src/tokenizer.cpp +++ b/src/tokenizer.cpp @@ -625,7 +625,7 @@ void tokenize(Buf *buf, Tokenization *out) { t.state = TokenizeStateSawDot; break; case '?': - begin_token(&t, TokenIdMaybe); + begin_token(&t, TokenIdQuestion); t.state = TokenizeStateSawQuestionMark; break; default: @@ -639,11 +639,6 @@ void tokenize(Buf *buf, Tokenization *out) { end_token(&t); t.state = TokenizeStateStart; break; - case '=': - set_token_id(&t, t.cur_tok, TokenIdMaybeAssign); - end_token(&t); - t.state = TokenizeStateStart; - break; default: t.pos -= 1; end_token(&t); @@ -1609,8 +1604,7 @@ const char * token_name(TokenId id) { case TokenIdLBrace: return "{"; case TokenIdLBracket: return "["; case TokenIdLParen: return "("; - case TokenIdMaybe: return "?"; - case TokenIdMaybeAssign: return "?="; + case TokenIdQuestion: return "?"; case TokenIdMinusEq: return "-="; case TokenIdMinusPercent: return "-%"; case TokenIdMinusPercentEq: return "-%="; diff --git a/src/tokenizer.hpp b/src/tokenizer.hpp index d0089909cd..7c617f85c6 100644 --- a/src/tokenizer.hpp +++ b/src/tokenizer.hpp @@ -100,8 +100,7 @@ enum TokenId { TokenIdLBrace, TokenIdLBracket, TokenIdLParen, - TokenIdMaybe, - TokenIdMaybeAssign, + TokenIdQuestion, TokenIdMinusEq, TokenIdMinusPercent, TokenIdMinusPercentEq, diff --git a/src/translate_c.cpp b/src/translate_c.cpp index d78bd1fa70..aaaf5a1edb 100644 --- a/src/translate_c.cpp +++ b/src/translate_c.cpp @@ -382,7 +382,7 @@ static AstNode *trans_create_node_inline_fn(Context *c, Buf *fn_name, AstNode *r fn_def->data.fn_def.fn_proto = fn_proto; fn_proto->data.fn_proto.fn_def_node = fn_def; - AstNode *unwrap_node = trans_create_node_prefix_op(c, PrefixOpUnwrapMaybe, ref_node); + AstNode *unwrap_node = trans_create_node_prefix_op(c, PrefixOpUnwrapOptional, ref_node); AstNode *fn_call_node = trans_create_node(c, NodeTypeFnCallExpr); fn_call_node->data.fn_call_expr.fn_ref_expr = unwrap_node; @@ -410,7 +410,7 @@ static AstNode *trans_create_node_inline_fn(Context *c, Buf *fn_name, AstNode *r } static AstNode *trans_create_node_unwrap_null(Context *c, AstNode *child) { - return trans_create_node_prefix_op(c, PrefixOpUnwrapMaybe, child); + return trans_create_node_prefix_op(c, PrefixOpUnwrapOptional, child); } static AstNode *get_global(Context *c, Buf *name) { @@ -879,14 +879,14 @@ static AstNode *trans_type(Context *c, const Type *ty, const SourceLocation &sou } if (qual_type_child_is_fn_proto(child_qt)) { - return trans_create_node_prefix_op(c, PrefixOpMaybe, child_node); + return trans_create_node_prefix_op(c, PrefixOpOptional, child_node); } PtrLen ptr_len = type_is_opaque(c, child_qt.getTypePtr(), source_loc) ? 
PtrLenSingle : PtrLenUnknown; AstNode *pointer_node = trans_create_node_ptr_type(c, child_qt.isConstQualified(), child_qt.isVolatileQualified(), child_node, ptr_len); - return trans_create_node_prefix_op(c, PrefixOpMaybe, pointer_node); + return trans_create_node_prefix_op(c, PrefixOpOptional, pointer_node); } case Type::Typedef: { @@ -1963,7 +1963,7 @@ static AstNode *trans_unary_operator(Context *c, ResultUsed result_used, TransSc bool is_fn_ptr = qual_type_is_fn_ptr(stmt->getSubExpr()->getType()); if (is_fn_ptr) return value_node; - AstNode *unwrapped = trans_create_node_prefix_op(c, PrefixOpUnwrapMaybe, value_node); + AstNode *unwrapped = trans_create_node_prefix_op(c, PrefixOpUnwrapOptional, value_node); return trans_create_node_ptr_deref(c, unwrapped); } case UO_Plus: @@ -2587,7 +2587,7 @@ static AstNode *trans_call_expr(Context *c, ResultUsed result_used, TransScope * } } if (callee_node == nullptr) { - callee_node = trans_create_node_prefix_op(c, PrefixOpUnwrapMaybe, callee_raw_node); + callee_node = trans_create_node_prefix_op(c, PrefixOpUnwrapOptional, callee_raw_node); } } else { callee_node = callee_raw_node; @@ -4301,7 +4301,7 @@ static AstNode *trans_lookup_ast_maybe_fn(Context *c, AstNode *ref_node) { return nullptr; if (prefix_node->type != NodeTypePrefixOpExpr) return nullptr; - if (prefix_node->data.prefix_op_expr.prefix_op != PrefixOpMaybe) + if (prefix_node->data.prefix_op_expr.prefix_op != PrefixOpOptional) return nullptr; AstNode *fn_proto_node = prefix_node->data.prefix_op_expr.primary_expr; diff --git a/std/array_list.zig b/std/array_list.zig index 30715f4d6f..1a235d28a3 100644 --- a/std/array_list.zig +++ b/std/array_list.zig @@ -258,7 +258,7 @@ test "iterator ArrayList test" { } it.reset(); - assert(??it.next() == 1); + assert(it.next().? 
== 1); } test "insert ArrayList test" { diff --git a/std/buf_map.zig b/std/buf_map.zig index 22d821ae7b..0d4f3a6d5e 100644 --- a/std/buf_map.zig +++ b/std/buf_map.zig @@ -72,15 +72,15 @@ test "BufMap" { defer bufmap.deinit(); try bufmap.set("x", "1"); - assert(mem.eql(u8, ??bufmap.get("x"), "1")); + assert(mem.eql(u8, bufmap.get("x").?, "1")); assert(1 == bufmap.count()); try bufmap.set("x", "2"); - assert(mem.eql(u8, ??bufmap.get("x"), "2")); + assert(mem.eql(u8, bufmap.get("x").?, "2")); assert(1 == bufmap.count()); try bufmap.set("x", "3"); - assert(mem.eql(u8, ??bufmap.get("x"), "3")); + assert(mem.eql(u8, bufmap.get("x").?, "3")); assert(1 == bufmap.count()); bufmap.delete("x"); diff --git a/std/event.zig b/std/event.zig index 89ab816bb6..0821c789b7 100644 --- a/std/event.zig +++ b/std/event.zig @@ -40,9 +40,9 @@ pub const TcpServer = struct { self.listen_address = std.net.Address.initPosix(try std.os.posixGetSockName(self.sockfd)); self.accept_coro = try async TcpServer.handler(self); - errdefer cancel ??self.accept_coro; + errdefer cancel self.accept_coro.?; - try self.loop.addFd(self.sockfd, ??self.accept_coro); + try self.loop.addFd(self.sockfd, self.accept_coro.?); errdefer self.loop.removeFd(self.sockfd); } diff --git a/std/fmt/index.zig b/std/fmt/index.zig index 3844fbb10a..b52625e26e 100644 --- a/std/fmt/index.zig +++ b/std/fmt/index.zig @@ -111,7 +111,7 @@ pub fn formatType( builtin.TypeId.Bool => { return output(context, if (value) "true" else "false"); }, - builtin.TypeId.Nullable => { + builtin.TypeId.Optional => { if (value) |payload| { return formatType(payload, fmt, context, Errors, output); } else { @@ -819,11 +819,11 @@ test "parse unsigned comptime" { test "fmt.format" { { const value: ?i32 = 1234; - try testFmt("nullable: 1234\n", "nullable: {}\n", value); + try testFmt("optional: 1234\n", "optional: {}\n", value); } { const value: ?i32 = null; - try testFmt("nullable: null\n", "nullable: {}\n", value); + try testFmt("optional: null\n", "optional: {}\n", value); } { const value: error!i32 = 1234; diff --git a/std/hash_map.zig b/std/hash_map.zig index a323cdc197..3bd03d4f28 100644 --- a/std/hash_map.zig +++ b/std/hash_map.zig @@ -265,11 +265,11 @@ test "basic hash map usage" { assert((map.put(4, 44) catch unreachable) == null); assert((map.put(5, 55) catch unreachable) == null); - assert(??(map.put(5, 66) catch unreachable) == 55); - assert(??(map.put(5, 55) catch unreachable) == 66); + assert((map.put(5, 66) catch unreachable).? == 55); + assert((map.put(5, 55) catch unreachable).? == 66); assert(map.contains(2)); - assert((??map.get(2)).value == 22); + assert(map.get(2).?.value == 22); _ = map.remove(2); assert(map.remove(2) == null); assert(map.get(2) == null); @@ -317,7 +317,7 @@ test "iterator hash map" { } it.reset(); - var entry = ??it.next(); + var entry = it.next().?; assert(entry.key == keys[0]); assert(entry.value == values[0]); } diff --git a/std/heap.zig b/std/heap.zig index 5d430bc761..d1fbf9ca0a 100644 --- a/std/heap.zig +++ b/std/heap.zig @@ -142,7 +142,7 @@ pub const DirectAllocator = struct { const root_addr = @intToPtr(*align(1) usize, old_record_addr).*; const old_ptr = @intToPtr(*c_void, root_addr); const amt = new_size + alignment + @sizeOf(usize); - const new_ptr = os.windows.HeapReAlloc(??self.heap_handle, 0, old_ptr, amt) ?? blk: { + const new_ptr = os.windows.HeapReAlloc(self.heap_handle.?, 0, old_ptr, amt) ?? 
blk: { if (new_size > old_mem.len) return error.OutOfMemory; const new_record_addr = old_record_addr - new_size + old_mem.len; @intToPtr(*align(1) usize, new_record_addr).* = root_addr; @@ -171,7 +171,7 @@ pub const DirectAllocator = struct { const record_addr = @ptrToInt(bytes.ptr) + bytes.len; const root_addr = @intToPtr(*align(1) usize, record_addr).*; const ptr = @intToPtr(*c_void, root_addr); - _ = os.windows.HeapFree(??self.heap_handle, 0, ptr); + _ = os.windows.HeapFree(self.heap_handle.?, 0, ptr); }, else => @compileError("Unsupported OS"), } diff --git a/std/json.zig b/std/json.zig index 03b19a7fa4..75ea2eee1c 100644 --- a/std/json.zig +++ b/std/json.zig @@ -908,7 +908,7 @@ pub const TokenStream = struct { }; fn checkNext(p: *TokenStream, id: Token.Id) void { - const token = ??(p.next() catch unreachable); + const token = (p.next() catch unreachable).?; debug.assert(token.id == id); } @@ -1376,17 +1376,17 @@ test "json parser dynamic" { var root = tree.root; - var image = (??root.Object.get("Image")).value; + var image = root.Object.get("Image").?.value; - const width = (??image.Object.get("Width")).value; + const width = image.Object.get("Width").?.value; debug.assert(width.Integer == 800); - const height = (??image.Object.get("Height")).value; + const height = image.Object.get("Height").?.value; debug.assert(height.Integer == 600); - const title = (??image.Object.get("Title")).value; + const title = image.Object.get("Title").?.value; debug.assert(mem.eql(u8, title.String, "View from 15th Floor")); - const animated = (??image.Object.get("Animated")).value; + const animated = image.Object.get("Animated").?.value; debug.assert(animated.Bool == false); } diff --git a/std/linked_list.zig b/std/linked_list.zig index fbc0a0c42a..536c6d24d0 100644 --- a/std/linked_list.zig +++ b/std/linked_list.zig @@ -270,8 +270,8 @@ test "basic linked list test" { var last = list.pop(); // {2, 3, 4} list.remove(three); // {2, 4} - assert((??list.first).data == 2); - assert((??list.last).data == 4); + assert(list.first.?.data == 2); + assert(list.last.?.data == 4); assert(list.len == 2); } @@ -336,7 +336,7 @@ test "basic intrusive linked list test" { var last = list.pop(); // {2, 3, 4} list.remove(&three.link); // {2, 4} - assert((??list.first).toData().value == 2); - assert((??list.last).toData().value == 4); + assert(list.first.?.toData().value == 2); + assert(list.last.?.toData().value == 4); assert(list.len == 2); } diff --git a/std/macho.zig b/std/macho.zig index d6eef9a325..64f78ae4a3 100644 --- a/std/macho.zig +++ b/std/macho.zig @@ -130,7 +130,7 @@ pub fn loadSymbols(allocator: *mem.Allocator, in: *io.FileInStream) !SymbolTable for (syms) |sym| { if (!isSymbol(sym)) continue; const start = sym.n_strx; - const end = ??mem.indexOfScalarPos(u8, strings, start, 0); + const end = mem.indexOfScalarPos(u8, strings, start, 0).?; const name = strings[start..end]; const address = sym.n_value; symbols[nsym] = Symbol{ .name = name, .address = address }; diff --git a/std/mem.zig b/std/mem.zig index 423460e73b..f961c7862b 100644 --- a/std/mem.zig +++ b/std/mem.zig @@ -304,20 +304,20 @@ pub fn indexOfPos(comptime T: type, haystack: []const T, start_index: usize, nee } test "mem.indexOf" { - assert(??indexOf(u8, "one two three four", "four") == 14); - assert(??lastIndexOf(u8, "one two three two four", "two") == 14); + assert(indexOf(u8, "one two three four", "four").? == 14); + assert(lastIndexOf(u8, "one two three two four", "two").? 
== 14); assert(indexOf(u8, "one two three four", "gour") == null); assert(lastIndexOf(u8, "one two three four", "gour") == null); - assert(??indexOf(u8, "foo", "foo") == 0); - assert(??lastIndexOf(u8, "foo", "foo") == 0); + assert(indexOf(u8, "foo", "foo").? == 0); + assert(lastIndexOf(u8, "foo", "foo").? == 0); assert(indexOf(u8, "foo", "fool") == null); assert(lastIndexOf(u8, "foo", "lfoo") == null); assert(lastIndexOf(u8, "foo", "fool") == null); - assert(??indexOf(u8, "foo foo", "foo") == 0); - assert(??lastIndexOf(u8, "foo foo", "foo") == 4); - assert(??lastIndexOfAny(u8, "boo, cat", "abo") == 6); - assert(??lastIndexOfScalar(u8, "boo", 'o') == 2); + assert(indexOf(u8, "foo foo", "foo").? == 0); + assert(lastIndexOf(u8, "foo foo", "foo").? == 4); + assert(lastIndexOfAny(u8, "boo, cat", "abo").? == 6); + assert(lastIndexOfScalar(u8, "boo", 'o').? == 2); } /// Reads an integer from memory with size equal to bytes.len. @@ -432,9 +432,9 @@ pub fn split(buffer: []const u8, split_bytes: []const u8) SplitIterator { test "mem.split" { var it = split(" abc def ghi ", " "); - assert(eql(u8, ??it.next(), "abc")); - assert(eql(u8, ??it.next(), "def")); - assert(eql(u8, ??it.next(), "ghi")); + assert(eql(u8, it.next().?, "abc")); + assert(eql(u8, it.next().?, "def")); + assert(eql(u8, it.next().?, "ghi")); assert(it.next() == null); } diff --git a/std/os/child_process.zig b/std/os/child_process.zig index 822ade2eb8..1e3a732498 100644 --- a/std/os/child_process.zig +++ b/std/os/child_process.zig @@ -156,7 +156,7 @@ pub const ChildProcess = struct { }; } try self.waitUnwrappedWindows(); - return ??self.term; + return self.term.?; } pub fn killPosix(self: *ChildProcess) !Term { @@ -175,7 +175,7 @@ pub const ChildProcess = struct { }; } self.waitUnwrapped(); - return ??self.term; + return self.term.?; } /// Blocks until child process terminates and then cleans up all resources. 
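
The standard-library updates in this part of the patch migrate call sites from the prefix unwrap `??x` to the suffix `x.?` added by this change; the suffix form chains through field and index access without the parentheses the prefix form needed, e.g. `(??map.get(2)).value` becomes `map.get(2).?.value`. A small sketch of the new spelling, assuming a compiler that accepts it; it is an illustration, not part of the patch:

    const std = @import("std");
    const assert = std.debug.assert;

    const Pair = struct {
        value: i32,
    };

    test "suffix unwrap chains through field access" {
        const maybe_pair: ?Pair = Pair{ .value = 22 };
        // Before this change: assert((??maybe_pair).value == 22);
        assert(maybe_pair.?.value == 22);
    }
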
@@ -212,8 +212,8 @@ pub const ChildProcess = struct { defer Buffer.deinit(&stdout); defer Buffer.deinit(&stderr); - var stdout_file_in_stream = io.FileInStream.init(&??child.stdout); - var stderr_file_in_stream = io.FileInStream.init(&??child.stderr); + var stdout_file_in_stream = io.FileInStream.init(&child.stdout.?); + var stderr_file_in_stream = io.FileInStream.init(&child.stderr.?); try stdout_file_in_stream.stream.readAllBuffer(&stdout, max_output_size); try stderr_file_in_stream.stream.readAllBuffer(&stderr, max_output_size); @@ -232,7 +232,7 @@ pub const ChildProcess = struct { } try self.waitUnwrappedWindows(); - return ??self.term; + return self.term.?; } fn waitPosix(self: *ChildProcess) !Term { @@ -242,7 +242,7 @@ pub const ChildProcess = struct { } self.waitUnwrapped(); - return ??self.term; + return self.term.?; } pub fn deinit(self: *ChildProcess) void { @@ -619,13 +619,13 @@ pub const ChildProcess = struct { self.term = null; if (self.stdin_behavior == StdIo.Pipe) { - os.close(??g_hChildStd_IN_Rd); + os.close(g_hChildStd_IN_Rd.?); } if (self.stderr_behavior == StdIo.Pipe) { - os.close(??g_hChildStd_ERR_Wr); + os.close(g_hChildStd_ERR_Wr.?); } if (self.stdout_behavior == StdIo.Pipe) { - os.close(??g_hChildStd_OUT_Wr); + os.close(g_hChildStd_OUT_Wr.?); } } diff --git a/std/os/index.zig b/std/os/index.zig index fe5ecc38ba..807b2c398b 100644 --- a/std/os/index.zig +++ b/std/os/index.zig @@ -422,7 +422,7 @@ pub fn posixExecve(argv: []const []const u8, env_map: *const BufMap, allocator: const exe_path = argv[0]; if (mem.indexOfScalar(u8, exe_path, '/') != null) { - return posixExecveErrnoToErr(posix.getErrno(posix.execve(??argv_buf[0], argv_buf.ptr, envp_buf.ptr))); + return posixExecveErrnoToErr(posix.getErrno(posix.execve(argv_buf[0].?, argv_buf.ptr, envp_buf.ptr))); } const PATH = getEnvPosix("PATH") ?? "/usr/local/bin:/bin/:/usr/bin"; @@ -1729,7 +1729,7 @@ test "windows arg parsing" { fn testWindowsCmdLine(input_cmd_line: [*]const u8, expected_args: []const []const u8) void { var it = ArgIteratorWindows.initWithCmdLine(input_cmd_line); for (expected_args) |expected_arg| { - const arg = ??it.next(debug.global_allocator) catch unreachable; + const arg = it.next(debug.global_allocator).? catch unreachable; assert(mem.eql(u8, arg, expected_arg)); } assert(it.next(debug.global_allocator) == null); diff --git a/std/os/linux/vdso.zig b/std/os/linux/vdso.zig index 2ab4d0cbc1..1414b8185b 100644 --- a/std/os/linux/vdso.zig +++ b/std/os/linux/vdso.zig @@ -67,7 +67,7 @@ pub fn lookup(vername: []const u8, name: []const u8) usize { if (0 == syms[i].st_shndx) continue; if (!mem.eql(u8, name, cstr.toSliceConst(strings + syms[i].st_name))) continue; if (maybe_versym) |versym| { - if (!checkver(??maybe_verdef, versym[i], vername, strings)) + if (!checkver(maybe_verdef.?, versym[i], vername, strings)) continue; } return base + syms[i].st_value; diff --git a/std/os/path.zig b/std/os/path.zig index 4df6179bf5..430dda2934 100644 --- a/std/os/path.zig +++ b/std/os/path.zig @@ -265,7 +265,7 @@ fn networkShareServersEql(ns1: []const u8, ns2: []const u8) bool { var it2 = mem.split(ns2, []u8{sep2}); // TODO ASCII is wrong, we actually need full unicode support to compare paths. 
- return asciiEqlIgnoreCase(??it1.next(), ??it2.next()); + return asciiEqlIgnoreCase(it1.next().?, it2.next().?); } fn compareDiskDesignators(kind: WindowsPath.Kind, p1: []const u8, p2: []const u8) bool { @@ -286,7 +286,7 @@ fn compareDiskDesignators(kind: WindowsPath.Kind, p1: []const u8, p2: []const u8 var it2 = mem.split(p2, []u8{sep2}); // TODO ASCII is wrong, we actually need full unicode support to compare paths. - return asciiEqlIgnoreCase(??it1.next(), ??it2.next()) and asciiEqlIgnoreCase(??it1.next(), ??it2.next()); + return asciiEqlIgnoreCase(it1.next().?, it2.next().?) and asciiEqlIgnoreCase(it1.next().?, it2.next().?); }, } } @@ -414,8 +414,8 @@ pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 { WindowsPath.Kind.NetworkShare => { result = try allocator.alloc(u8, max_size); var it = mem.split(paths[first_index], "/\\"); - const server_name = ??it.next(); - const other_name = ??it.next(); + const server_name = it.next().?; + const other_name = it.next().?; result[result_index] = '\\'; result_index += 1; diff --git a/std/segmented_list.zig b/std/segmented_list.zig index a2f3607ad8..9f10f4d44a 100644 --- a/std/segmented_list.zig +++ b/std/segmented_list.zig @@ -364,7 +364,7 @@ fn testSegmentedList(comptime prealloc: usize, allocator: *Allocator) !void { assert(x == 0); } - assert(??list.pop() == 100); + assert(list.pop().? == 100); assert(list.len == 99); try list.pushMany([]i32{ @@ -373,9 +373,9 @@ fn testSegmentedList(comptime prealloc: usize, allocator: *Allocator) !void { 3, }); assert(list.len == 102); - assert(??list.pop() == 3); - assert(??list.pop() == 2); - assert(??list.pop() == 1); + assert(list.pop().? == 3); + assert(list.pop().? == 2); + assert(list.pop().? == 1); assert(list.len == 99); try list.pushMany([]const i32{}); diff --git a/std/special/bootstrap.zig b/std/special/bootstrap.zig index 8aefe4751f..dd37f1edb6 100644 --- a/std/special/bootstrap.zig +++ b/std/special/bootstrap.zig @@ -54,10 +54,10 @@ fn posixCallMainAndExit() noreturn { const argc = argc_ptr[0]; const argv = @ptrCast([*][*]u8, argc_ptr + 1); - const envp_nullable = @ptrCast([*]?[*]u8, argv + argc + 1); + const envp_optional = @ptrCast([*]?[*]u8, argv + argc + 1); var envp_count: usize = 0; - while (envp_nullable[envp_count]) |_| : (envp_count += 1) {} - const envp = @ptrCast([*][*]u8, envp_nullable)[0..envp_count]; + while (envp_optional[envp_count]) |_| : (envp_count += 1) {} + const envp = @ptrCast([*][*]u8, envp_optional)[0..envp_count]; if (builtin.os == builtin.Os.linux) { const auxv = @ptrCast([*]usize, envp.ptr + envp_count + 1); var i: usize = 0; diff --git a/std/special/builtin.zig b/std/special/builtin.zig index e537078924..e97b0a89e4 100644 --- a/std/special/builtin.zig +++ b/std/special/builtin.zig @@ -19,7 +19,7 @@ export fn memset(dest: ?[*]u8, c: u8, n: usize) ?[*]u8 { var index: usize = 0; while (index != n) : (index += 1) - (??dest)[index] = c; + dest.?[index] = c; return dest; } @@ -29,7 +29,7 @@ export fn memcpy(noalias dest: ?[*]u8, noalias src: ?[*]const u8, n: usize) ?[*] var index: usize = 0; while (index != n) : (index += 1) - (??dest)[index] = (??src)[index]; + dest.?[index] = src.?[index]; return dest; } @@ -40,13 +40,13 @@ export fn memmove(dest: ?[*]u8, src: ?[*]const u8, n: usize) ?[*]u8 { if (@ptrToInt(dest) < @ptrToInt(src)) { var index: usize = 0; while (index != n) : (index += 1) { - (??dest)[index] = (??src)[index]; + dest.?[index] = src.?[index]; } } else { var index = n; while (index != 0) { index -= 1; - (??dest)[index] = 
(??src)[index]; + dest.?[index] = src.?[index]; } } diff --git a/std/unicode.zig b/std/unicode.zig index 3d1bebdb55..21ae12f59c 100644 --- a/std/unicode.zig +++ b/std/unicode.zig @@ -286,15 +286,15 @@ fn testUtf8IteratorOnAscii() void { const s = Utf8View.initComptime("abc"); var it1 = s.iterator(); - debug.assert(std.mem.eql(u8, "a", ??it1.nextCodepointSlice())); - debug.assert(std.mem.eql(u8, "b", ??it1.nextCodepointSlice())); - debug.assert(std.mem.eql(u8, "c", ??it1.nextCodepointSlice())); + debug.assert(std.mem.eql(u8, "a", it1.nextCodepointSlice().?)); + debug.assert(std.mem.eql(u8, "b", it1.nextCodepointSlice().?)); + debug.assert(std.mem.eql(u8, "c", it1.nextCodepointSlice().?)); debug.assert(it1.nextCodepointSlice() == null); var it2 = s.iterator(); - debug.assert(??it2.nextCodepoint() == 'a'); - debug.assert(??it2.nextCodepoint() == 'b'); - debug.assert(??it2.nextCodepoint() == 'c'); + debug.assert(it2.nextCodepoint().? == 'a'); + debug.assert(it2.nextCodepoint().? == 'b'); + debug.assert(it2.nextCodepoint().? == 'c'); debug.assert(it2.nextCodepoint() == null); } @@ -321,15 +321,15 @@ fn testUtf8ViewOk() void { const s = Utf8View.initComptime("東京市"); var it1 = s.iterator(); - debug.assert(std.mem.eql(u8, "東", ??it1.nextCodepointSlice())); - debug.assert(std.mem.eql(u8, "京", ??it1.nextCodepointSlice())); - debug.assert(std.mem.eql(u8, "市", ??it1.nextCodepointSlice())); + debug.assert(std.mem.eql(u8, "東", it1.nextCodepointSlice().?)); + debug.assert(std.mem.eql(u8, "京", it1.nextCodepointSlice().?)); + debug.assert(std.mem.eql(u8, "市", it1.nextCodepointSlice().?)); debug.assert(it1.nextCodepointSlice() == null); var it2 = s.iterator(); - debug.assert(??it2.nextCodepoint() == 0x6771); - debug.assert(??it2.nextCodepoint() == 0x4eac); - debug.assert(??it2.nextCodepoint() == 0x5e02); + debug.assert(it2.nextCodepoint().? == 0x6771); + debug.assert(it2.nextCodepoint().? == 0x4eac); + debug.assert(it2.nextCodepoint().? == 0x5e02); debug.assert(it2.nextCodepoint() == null); } diff --git a/std/zig/ast.zig b/std/zig/ast.zig index a4b64d5db2..defaded78a 100644 --- a/std/zig/ast.zig +++ b/std/zig/ast.zig @@ -1417,7 +1417,7 @@ pub const Node = struct { Range, Sub, SubWrap, - UnwrapMaybe, + UnwrapOptional, }; pub fn iterate(self: *InfixOp, index: usize) ?*Node { @@ -1475,7 +1475,7 @@ pub const Node = struct { Op.Range, Op.Sub, Op.SubWrap, - Op.UnwrapMaybe, + Op.UnwrapOptional, => {}, } @@ -1507,14 +1507,13 @@ pub const Node = struct { BitNot, BoolNot, Cancel, - MaybeType, + OptionalType, Negation, NegationWrap, Resume, PtrType: PtrInfo, SliceType: PtrInfo, Try, - UnwrapMaybe, }; pub const PtrInfo = struct { @@ -1557,12 +1556,12 @@ pub const Node = struct { Op.BitNot, Op.BoolNot, Op.Cancel, - Op.MaybeType, + Op.OptionalType, Op.Negation, Op.NegationWrap, Op.Try, Op.Resume, - Op.UnwrapMaybe, + Op.UnwrapOptional, Op.PointerType, => {}, } @@ -1619,6 +1618,7 @@ pub const Node = struct { ArrayInitializer: InitList, StructInitializer: InitList, Deref, + UnwrapOptional, pub const InitList = SegmentedList(*Node, 2); diff --git a/std/zig/parse.zig b/std/zig/parse.zig index 7faca8e11b..9f8ef3c3d6 100644 --- a/std/zig/parse.zig +++ b/std/zig/parse.zig @@ -711,7 +711,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree { else => { // TODO: this is a special case. 
Remove this when #760 is fixed if (token_ptr.id == Token.Id.Keyword_error) { - if ((??tok_it.peek()).id == Token.Id.LBrace) { + if (tok_it.peek().?.id == Token.Id.LBrace) { const error_type_node = try arena.construct(ast.Node.ErrorType{ .base = ast.Node{ .id = ast.Node.Id.ErrorType }, .token = token_index, @@ -1434,8 +1434,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree { try stack.append(State{ .ExpectTokenSave = ExpectTokenSave{ .id = Token.Id.AngleBracketRight, - .ptr = &??async_node.rangle_bracket, - }, + .ptr = &async_node.rangle_bracket.? }, }); try stack.append(State{ .TypeExprBegin = OptionalCtx{ .RequiredNull = &async_node.allocator_type } }); continue; @@ -1567,7 +1566,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree { .bit_range = null, }; // TODO https://github.com/ziglang/zig/issues/1022 - const align_info = &??addr_of_info.align_info; + const align_info = &addr_of_info.align_info.?; try stack.append(State{ .AlignBitRange = align_info }); try stack.append(State{ .Expression = OptionalCtx{ .Required = &align_info.node } }); @@ -1604,7 +1603,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree { switch (token.ptr.id) { Token.Id.Colon => { align_info.bit_range = ast.Node.PrefixOp.PtrInfo.Align.BitRange(undefined); - const bit_range = &??align_info.bit_range; + const bit_range = &align_info.bit_range.?; try stack.append(State{ .ExpectToken = Token.Id.RParen }); try stack.append(State{ .Expression = OptionalCtx{ .Required = &bit_range.end } }); @@ -2144,7 +2143,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree { State.CurlySuffixExpressionEnd => |opt_ctx| { const lhs = opt_ctx.get() ?? continue; - if ((??tok_it.peek()).id == Token.Id.Period) { + if (tok_it.peek().?.id == Token.Id.Period) { const node = try arena.construct(ast.Node.SuffixOp{ .base = ast.Node{ .id = ast.Node.Id.SuffixOp }, .lhs = lhs, @@ -2326,6 +2325,17 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree { stack.append(State{ .SuffixOpExpressionEnd = opt_ctx.toRequired() }) catch unreachable; continue; } + if (eatToken(&tok_it, &tree, Token.Id.QuestionMark)) |question_token| { + const node = try arena.construct(ast.Node.SuffixOp{ + .base = ast.Node{ .id = ast.Node.Id.SuffixOp }, + .lhs = lhs, + .op = ast.Node.SuffixOp.Op.UnwrapOptional, + .rtoken = question_token, + }); + opt_ctx.store(&node.base); + stack.append(State{ .SuffixOpExpressionEnd = opt_ctx.toRequired() }) catch unreachable; + continue; + } const node = try arena.construct(ast.Node.InfixOp{ .base = ast.Node{ .id = ast.Node.Id.InfixOp }, .lhs = lhs, @@ -2403,7 +2413,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree { .arrow_token = next_token_index, .return_type = undefined, }; - const return_type_ptr = &((??node.result).return_type); + const return_type_ptr = &node.result.?.return_type; try stack.append(State{ .Expression = OptionalCtx{ .Required = return_type_ptr } }); continue; }, @@ -2875,7 +2885,7 @@ const OptionalCtx = union(enum) { pub fn get(self: *const OptionalCtx) ?*ast.Node { switch (self.*) { OptionalCtx.Optional => |ptr| return ptr.*, - OptionalCtx.RequiredNull => |ptr| return ??ptr.*, + OptionalCtx.RequiredNull => |ptr| return ptr.*.?, OptionalCtx.Required => |ptr| return ptr.*, } } @@ -3237,7 +3247,7 @@ fn tokenIdToAssignment(id: *const Token.Id) ?ast.Node.InfixOp.Op { fn tokenIdToUnwrapExpr(id: @TagType(Token.Id)) ?ast.Node.InfixOp.Op { return switch (id) { Token.Id.Keyword_catch => 
ast.Node.InfixOp.Op{ .Catch = null }, - Token.Id.QuestionMarkQuestionMark => ast.Node.InfixOp.Op{ .UnwrapMaybe = void{} }, + Token.Id.QuestionMarkQuestionMark => ast.Node.InfixOp.Op{ .UnwrapOptional = void{} }, else => null, }; } @@ -3299,8 +3309,7 @@ fn tokenIdToPrefixOp(id: @TagType(Token.Id)) ?ast.Node.PrefixOp.Op { .volatile_token = null, }, }, - Token.Id.QuestionMark => ast.Node.PrefixOp.Op{ .MaybeType = void{} }, - Token.Id.QuestionMarkQuestionMark => ast.Node.PrefixOp.Op{ .UnwrapMaybe = void{} }, + Token.Id.QuestionMark => ast.Node.PrefixOp.Op{ .OptionalType = void{} }, Token.Id.Keyword_await => ast.Node.PrefixOp.Op{ .Await = void{} }, Token.Id.Keyword_try => ast.Node.PrefixOp.Op{ .Try = void{} }, else => null, @@ -3322,7 +3331,7 @@ fn createToCtxLiteral(arena: *mem.Allocator, opt_ctx: *const OptionalCtx, compti } fn eatToken(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree, id: @TagType(Token.Id)) ?TokenIndex { - const token = ??tok_it.peek(); + const token = tok_it.peek().?; if (token.id == id) { return nextToken(tok_it, tree).index; @@ -3334,7 +3343,7 @@ fn eatToken(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree, id: @TagType( fn nextToken(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree) AnnotatedToken { const result = AnnotatedToken{ .index = tok_it.index, - .ptr = ??tok_it.next(), + .ptr = tok_it.next().?, }; assert(result.ptr.id != Token.Id.LineComment); diff --git a/std/zig/parser_test.zig b/std/zig/parser_test.zig index 91a56de827..ea3a4858b0 100644 --- a/std/zig/parser_test.zig +++ b/std/zig/parser_test.zig @@ -650,9 +650,10 @@ test "zig fmt: statements with empty line between" { ); } -test "zig fmt: ptr deref operator" { +test "zig fmt: ptr deref operator and unwrap optional operator" { try testCanonical( \\const a = b.*; + \\const a = b.?; \\ ); } @@ -1209,7 +1210,7 @@ test "zig fmt: precedence" { test "zig fmt: prefix operators" { try testCanonical( \\test "prefix operators" { - \\ try return --%~??!*&0; + \\ try return --%~!*&0; \\} \\ ); diff --git a/std/zig/render.zig b/std/zig/render.zig index 7c9b53b77a..0b8e4d1453 100644 --- a/std/zig/render.zig +++ b/std/zig/render.zig @@ -222,7 +222,7 @@ fn renderTopLevelDecl(allocator: *mem.Allocator, stream: var, tree: *ast.Tree, i } } - const value_expr = ??tag.value_expr; + const value_expr = tag.value_expr.?; try renderToken(tree, stream, tree.prevToken(value_expr.firstToken()), indent, start_col, Space.Space); // = try renderExpression(allocator, stream, tree, indent, start_col, value_expr, Space.Comma); // value, }, @@ -465,8 +465,7 @@ fn renderExpression( ast.Node.PrefixOp.Op.BoolNot, ast.Node.PrefixOp.Op.Negation, ast.Node.PrefixOp.Op.NegationWrap, - ast.Node.PrefixOp.Op.UnwrapMaybe, - ast.Node.PrefixOp.Op.MaybeType, + ast.Node.PrefixOp.Op.OptionalType, ast.Node.PrefixOp.Op.AddressOf, => { try renderToken(tree, stream, prefix_op_node.op_token, indent, start_col, Space.None); @@ -513,7 +512,7 @@ fn renderExpression( var it = call_info.params.iterator(0); while (true) { - const param_node = ??it.next(); + const param_node = it.next().?; const param_node_new_indent = if (param_node.*.id == ast.Node.Id.MultilineStringLiteral) blk: { break :blk indent; @@ -559,10 +558,10 @@ fn renderExpression( return renderToken(tree, stream, rbracket, indent, start_col, space); // ] }, - ast.Node.SuffixOp.Op.Deref => { + ast.Node.SuffixOp.Op.Deref, ast.Node.SuffixOp.Op.UnwrapOptional => { try renderExpression(allocator, stream, tree, indent, start_col, suffix_op.lhs, Space.None); try renderToken(tree, stream, 
tree.prevToken(suffix_op.rtoken), indent, start_col, Space.None); // . - return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space); // * + return renderToken(tree, stream, suffix_op.rtoken, indent, start_col, space); // * or ? }, @TagType(ast.Node.SuffixOp.Op).Slice => |range| { @@ -595,7 +594,7 @@ fn renderExpression( } if (field_inits.len == 1) blk: { - const field_init = ??field_inits.at(0).*.cast(ast.Node.FieldInitializer); + const field_init = field_inits.at(0).*.cast(ast.Node.FieldInitializer).?; if (field_init.expr.cast(ast.Node.SuffixOp)) |nested_suffix_op| { if (nested_suffix_op.op == ast.Node.SuffixOp.Op.StructInitializer) { @@ -688,7 +687,7 @@ fn renderExpression( var count: usize = 1; var it = exprs.iterator(0); while (true) { - const expr = (??it.next()).*; + const expr = it.next().?.*; if (it.peek()) |next_expr| { const expr_last_token = expr.*.lastToken() + 1; const loc = tree.tokenLocation(tree.tokens.at(expr_last_token).end, next_expr.*.firstToken()); @@ -806,7 +805,7 @@ fn renderExpression( }, } - return renderExpression(allocator, stream, tree, indent, start_col, ??flow_expr.rhs, space); + return renderExpression(allocator, stream, tree, indent, start_col, flow_expr.rhs.?, space); }, ast.Node.Id.Payload => { @@ -1245,7 +1244,7 @@ fn renderExpression( } else { var it = switch_case.items.iterator(0); while (true) { - const node = ??it.next(); + const node = it.next().?; if (it.peek()) |next_node| { try renderExpression(allocator, stream, tree, indent, start_col, node.*, Space.None); @@ -1550,7 +1549,7 @@ fn renderExpression( var it = asm_node.outputs.iterator(0); while (true) { - const asm_output = ??it.next(); + const asm_output = it.next().?; const node = &(asm_output.*).base; if (it.peek()) |next_asm_output| { @@ -1588,7 +1587,7 @@ fn renderExpression( var it = asm_node.inputs.iterator(0); while (true) { - const asm_input = ??it.next(); + const asm_input = it.next().?; const node = &(asm_input.*).base; if (it.peek()) |next_asm_input| { @@ -1620,7 +1619,7 @@ fn renderExpression( var it = asm_node.clobbers.iterator(0); while (true) { - const clobber_token = ??it.next(); + const clobber_token = it.next().?; if (it.peek() == null) { try renderToken(tree, stream, clobber_token.*, indent_once, start_col, Space.Newline); diff --git a/test/cases/bugs/656.zig b/test/cases/bugs/656.zig index a6035d51bb..f93f0ac4d5 100644 --- a/test/cases/bugs/656.zig +++ b/test/cases/bugs/656.zig @@ -9,7 +9,7 @@ const Value = struct { align_expr: ?u32, }; -test "nullable if after an if in a switch prong of a switch with 2 prongs in an else" { +test "optional if after an if in a switch prong of a switch with 2 prongs in an else" { foo(false, true); } diff --git a/test/cases/cast.zig b/test/cases/cast.zig index da3cba7d80..a56c470408 100644 --- a/test/cases/cast.zig +++ b/test/cases/cast.zig @@ -109,16 +109,16 @@ test "implicitly cast indirect pointer to maybe-indirect pointer" { const Self = this; x: u8, fn constConst(p: *const *const Self) u8 { - return (p.*).x; + return p.*.x; } fn maybeConstConst(p: ?*const *const Self) u8 { - return ((??p).*).x; + return p.?.*.x; } fn constConstConst(p: *const *const *const Self) u8 { - return (p.*.*).x; + return p.*.*.x; } fn maybeConstConstConst(p: ?*const *const *const Self) u8 { - return ((??p).*.*).x; + return p.?.*.*.x; } }; const s = S{ .x = 42 }; @@ -177,56 +177,56 @@ test "string literal to &const []const u8" { } test "implicitly cast from T to error!?T" { - castToMaybeTypeError(1); - comptime castToMaybeTypeError(1); + 
castToOptionalTypeError(1); + comptime castToOptionalTypeError(1); } const A = struct { a: i32, }; -fn castToMaybeTypeError(z: i32) void { +fn castToOptionalTypeError(z: i32) void { const x = i32(1); const y: error!?i32 = x; - assert(??(try y) == 1); + assert((try y).? == 1); const f = z; const g: error!?i32 = f; const a = A{ .a = z }; const b: error!?A = a; - assert((??(b catch unreachable)).a == 1); + assert((b catch unreachable).?.a == 1); } test "implicitly cast from int to error!?T" { - implicitIntLitToMaybe(); - comptime implicitIntLitToMaybe(); + implicitIntLitToOptional(); + comptime implicitIntLitToOptional(); } -fn implicitIntLitToMaybe() void { +fn implicitIntLitToOptional() void { const f: ?i32 = 1; const g: error!?i32 = 1; } test "return null from fn() error!?&T" { - const a = returnNullFromMaybeTypeErrorRef(); - const b = returnNullLitFromMaybeTypeErrorRef(); + const a = returnNullFromOptionalTypeErrorRef(); + const b = returnNullLitFromOptionalTypeErrorRef(); assert((try a) == null and (try b) == null); } -fn returnNullFromMaybeTypeErrorRef() error!?*A { +fn returnNullFromOptionalTypeErrorRef() error!?*A { const a: ?*A = null; return a; } -fn returnNullLitFromMaybeTypeErrorRef() error!?*A { +fn returnNullLitFromOptionalTypeErrorRef() error!?*A { return null; } test "peer type resolution: ?T and T" { - assert(??peerTypeTAndMaybeT(true, false) == 0); - assert(??peerTypeTAndMaybeT(false, false) == 3); + assert(peerTypeTAndOptionalT(true, false).? == 0); + assert(peerTypeTAndOptionalT(false, false).? == 3); comptime { - assert(??peerTypeTAndMaybeT(true, false) == 0); - assert(??peerTypeTAndMaybeT(false, false) == 3); + assert(peerTypeTAndOptionalT(true, false).? == 0); + assert(peerTypeTAndOptionalT(false, false).? == 3); } } -fn peerTypeTAndMaybeT(c: bool, b: bool) ?usize { +fn peerTypeTAndOptionalT(c: bool, b: bool) ?usize { if (c) { return if (b) null else usize(0); } @@ -251,11 +251,11 @@ fn peerTypeEmptyArrayAndSlice(a: bool, slice: []const u8) []const u8 { } test "implicitly cast from [N]T to ?[]const T" { - assert(mem.eql(u8, ??castToMaybeSlice(), "hi")); - comptime assert(mem.eql(u8, ??castToMaybeSlice(), "hi")); + assert(mem.eql(u8, castToOptionalSlice().?, "hi")); + comptime assert(mem.eql(u8, castToOptionalSlice().?, "hi")); } -fn castToMaybeSlice() ?[]const u8 { +fn castToOptionalSlice() ?[]const u8 { return "hi"; } @@ -404,5 +404,5 @@ fn testCastPtrOfArrayToSliceAndPtr() void { test "cast *[1][*]const u8 to [*]const ?[*]const u8" { const window_name = [1][*]const u8{c"window name"}; const x: [*]const ?[*]const u8 = &window_name; - assert(mem.eql(u8, std.cstr.toSliceConst(??x[0]), "window name")); + assert(mem.eql(u8, std.cstr.toSliceConst(x[0].?), "window name")); } diff --git a/test/cases/error.zig b/test/cases/error.zig index ced49419d5..693631fe2d 100644 --- a/test/cases/error.zig +++ b/test/cases/error.zig @@ -140,7 +140,7 @@ fn testComptimeTestErrorEmptySet(x: EmptyErrorSet!i32) void { if (x) |v| assert(v == 1234) else |err| @compileError("bad"); } -test "syntax: nullable operator in front of error union operator" { +test "syntax: optional operator in front of error union operator" { comptime { assert(?error!i32 == ?(error!i32)); } diff --git a/test/cases/eval.zig b/test/cases/eval.zig index 9612466a86..08d3f3a841 100644 --- a/test/cases/eval.zig +++ b/test/cases/eval.zig @@ -12,7 +12,7 @@ fn fibonacci(x: i32) i32 { } fn unwrapAndAddOne(blah: ?i32) i32 { - return ??blah + 1; + return blah.? 
+ 1; } const should_be_1235 = unwrapAndAddOne(1234); test "static add one" { diff --git a/test/cases/generics.zig b/test/cases/generics.zig index a76990e2a1..52aa013989 100644 --- a/test/cases/generics.zig +++ b/test/cases/generics.zig @@ -127,7 +127,7 @@ test "generic fn with implicit cast" { }) == 0); } fn getByte(ptr: ?*const u8) u8 { - return (??ptr).*; + return ptr.?.*; } fn getFirstByte(comptime T: type, mem: []const T) u8 { return getByte(@ptrCast(*const u8, &mem[0])); diff --git a/test/cases/misc.zig b/test/cases/misc.zig index 369d8e5cf3..beb0d6d456 100644 --- a/test/cases/misc.zig +++ b/test/cases/misc.zig @@ -505,7 +505,7 @@ test "@typeId" { assert(@typeId(@typeOf(1.0)) == Tid.ComptimeFloat); assert(@typeId(@typeOf(undefined)) == Tid.Undefined); assert(@typeId(@typeOf(null)) == Tid.Null); - assert(@typeId(?i32) == Tid.Nullable); + assert(@typeId(?i32) == Tid.Optional); assert(@typeId(error!i32) == Tid.ErrorUnion); assert(@typeId(error) == Tid.ErrorSet); assert(@typeId(AnEnum) == Tid.Enum); diff --git a/test/cases/null.zig b/test/cases/null.zig index bd78990ff4..62565784ac 100644 --- a/test/cases/null.zig +++ b/test/cases/null.zig @@ -1,6 +1,6 @@ const assert = @import("std").debug.assert; -test "nullable type" { +test "optional type" { const x: ?bool = true; if (x) |y| { @@ -33,7 +33,7 @@ test "test maybe object and get a pointer to the inner value" { b.* = false; } - assert(??maybe_bool == false); + assert(maybe_bool.? == false); } test "rhs maybe unwrap return" { @@ -47,9 +47,9 @@ test "maybe return" { } fn maybeReturnImpl() void { - assert(??foo(1235)); + assert(foo(1235).?); if (foo(null) != null) unreachable; - assert(!??foo(1234)); + assert(!foo(1234).?); } fn foo(x: ?i32) ?bool { @@ -102,12 +102,12 @@ fn testTestNullRuntime(x: ?i32) void { assert(!(x != null)); } -test "nullable void" { - nullableVoidImpl(); - comptime nullableVoidImpl(); +test "optional void" { + optionalVoidImpl(); + comptime optionalVoidImpl(); } -fn nullableVoidImpl() void { +fn optionalVoidImpl() void { assert(bar(null) == null); assert(bar({}) != null); } @@ -120,19 +120,19 @@ fn bar(x: ?void) ?void { } } -const StructWithNullable = struct { +const StructWithOptional = struct { field: ?i32, }; -var struct_with_nullable: StructWithNullable = undefined; +var struct_with_optional: StructWithOptional = undefined; -test "unwrap nullable which is field of global var" { - struct_with_nullable.field = null; - if (struct_with_nullable.field) |payload| { +test "unwrap optional which is field of global var" { + struct_with_optional.field = null; + if (struct_with_optional.field) |payload| { unreachable; } - struct_with_nullable.field = 1234; - if (struct_with_nullable.field) |payload| { + struct_with_optional.field = 1234; + if (struct_with_optional.field) |payload| { assert(payload == 1234); } else { unreachable; diff --git a/test/cases/reflection.zig b/test/cases/reflection.zig index 48fcc9ef03..3d3af3c889 100644 --- a/test/cases/reflection.zig +++ b/test/cases/reflection.zig @@ -2,7 +2,7 @@ const assert = @import("std").debug.assert; const mem = @import("std").mem; const reflection = this; -test "reflection: array, pointer, nullable, error union type child" { +test "reflection: array, pointer, optional, error union type child" { comptime { assert(([10]u8).Child == u8); assert((*u8).Child == u8); diff --git a/test/cases/type_info.zig b/test/cases/type_info.zig index b452c8e9f6..1bc58b14e1 100644 --- a/test/cases/type_info.zig +++ b/test/cases/type_info.zig @@ -88,15 +88,15 @@ fn testArray() void { 
assert(arr_info.Array.child == bool); } -test "type info: nullable type info" { - testNullable(); - comptime testNullable(); +test "type info: optional type info" { + testOptional(); + comptime testOptional(); } -fn testNullable() void { +fn testOptional() void { const null_info = @typeInfo(?void); - assert(TypeId(null_info) == TypeId.Nullable); - assert(null_info.Nullable.child == void); + assert(TypeId(null_info) == TypeId.Optional); + assert(null_info.Optional.child == void); } test "type info: promise info" { @@ -168,7 +168,7 @@ fn testUnion() void { assert(typeinfo_info.Union.tag_type == TypeId); assert(typeinfo_info.Union.fields.len == 25); assert(typeinfo_info.Union.fields[4].enum_field != null); - assert((??typeinfo_info.Union.fields[4].enum_field).value == 4); + assert(typeinfo_info.Union.fields[4].enum_field.?.value == 4); assert(typeinfo_info.Union.fields[4].field_type == @typeOf(@typeInfo(u8).Int)); assert(typeinfo_info.Union.defs.len == 20); diff --git a/test/cases/while.zig b/test/cases/while.zig index a95481668d..fe53522ea6 100644 --- a/test/cases/while.zig +++ b/test/cases/while.zig @@ -81,7 +81,7 @@ test "while with else" { assert(got_else == 1); } -test "while with nullable as condition" { +test "while with optional as condition" { numbers_left = 10; var sum: i32 = 0; while (getNumberOrNull()) |value| { @@ -90,7 +90,7 @@ test "while with nullable as condition" { assert(sum == 45); } -test "while with nullable as condition with else" { +test "while with optional as condition with else" { numbers_left = 10; var sum: i32 = 0; var got_else: i32 = 0; @@ -132,7 +132,7 @@ fn getNumberOrNull() ?i32 { }; } -test "while on nullable with else result follow else prong" { +test "while on optional with else result follow else prong" { const result = while (returnNull()) |value| { break value; } else @@ -140,8 +140,8 @@ test "while on nullable with else result follow else prong" { assert(result == 2); } -test "while on nullable with else result follow break prong" { - const result = while (returnMaybe(10)) |value| { +test "while on optional with else result follow break prong" { + const result = while (returnOptional(10)) |value| { break value; } else i32(2); @@ -210,7 +210,7 @@ fn testContinueOuter() void { fn returnNull() ?i32 { return null; } -fn returnMaybe(x: i32) ?i32 { +fn returnOptional(x: i32) ?i32 { return x; } fn returnError() error!i32 { diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 102c4e428d..1c737a59e7 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -1341,7 +1341,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ if (true) |x| { } \\} , - ".tmp_source.zig:2:9: error: expected nullable type, found 'bool'", + ".tmp_source.zig:2:9: error: expected optional type, found 'bool'", ); cases.add( @@ -1780,7 +1780,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { ); cases.add( - "assign null to non-nullable pointer", + "assign null to non-optional pointer", \\const a: *u8 = null; \\ \\export fn entry() usize { return @sizeOf(@typeOf(a)); } @@ -2817,7 +2817,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { ); cases.add( - "while expected bool, got nullable", + "while expected bool, got optional", \\export fn foo() void { \\ while (bar()) {} \\} @@ -2837,23 +2837,23 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { ); cases.add( - "while expected nullable, got bool", + "while expected optional, got bool", \\export fn foo() void { \\ while (bar()) |x| {} \\} \\fn bar() bool { return true; 
} , - ".tmp_source.zig:2:15: error: expected nullable type, found 'bool'", + ".tmp_source.zig:2:15: error: expected optional type, found 'bool'", ); cases.add( - "while expected nullable, got error union", + "while expected optional, got error union", \\export fn foo() void { \\ while (bar()) |x| {} \\} \\fn bar() error!i32 { return 1; } , - ".tmp_source.zig:2:15: error: expected nullable type, found 'error!i32'", + ".tmp_source.zig:2:15: error: expected optional type, found 'error!i32'", ); cases.add( @@ -2867,7 +2867,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { ); cases.add( - "while expected error union, got nullable", + "while expected error union, got optional", \\export fn foo() void { \\ while (bar()) |x| {} else |err| {} \\} diff --git a/test/tests.zig b/test/tests.zig index cc562331fe..b66441f628 100644 --- a/test/tests.zig +++ b/test/tests.zig @@ -282,8 +282,8 @@ pub const CompareOutputContext = struct { var stdout = Buffer.initNull(b.allocator); var stderr = Buffer.initNull(b.allocator); - var stdout_file_in_stream = io.FileInStream.init(&??child.stdout); - var stderr_file_in_stream = io.FileInStream.init(&??child.stderr); + var stdout_file_in_stream = io.FileInStream.init(&child.stdout.?); + var stderr_file_in_stream = io.FileInStream.init(&child.stderr.?); stdout_file_in_stream.stream.readAllBuffer(&stdout, max_stdout_size) catch unreachable; stderr_file_in_stream.stream.readAllBuffer(&stderr, max_stdout_size) catch unreachable; @@ -601,8 +601,8 @@ pub const CompileErrorContext = struct { var stdout_buf = Buffer.initNull(b.allocator); var stderr_buf = Buffer.initNull(b.allocator); - var stdout_file_in_stream = io.FileInStream.init(&??child.stdout); - var stderr_file_in_stream = io.FileInStream.init(&??child.stderr); + var stdout_file_in_stream = io.FileInStream.init(&child.stdout.?); + var stderr_file_in_stream = io.FileInStream.init(&child.stderr.?); stdout_file_in_stream.stream.readAllBuffer(&stdout_buf, max_stdout_size) catch unreachable; stderr_file_in_stream.stream.readAllBuffer(&stderr_buf, max_stdout_size) catch unreachable; @@ -872,8 +872,8 @@ pub const TranslateCContext = struct { var stdout_buf = Buffer.initNull(b.allocator); var stderr_buf = Buffer.initNull(b.allocator); - var stdout_file_in_stream = io.FileInStream.init(&??child.stdout); - var stderr_file_in_stream = io.FileInStream.init(&??child.stderr); + var stdout_file_in_stream = io.FileInStream.init(&child.stdout.?); + var stderr_file_in_stream = io.FileInStream.init(&child.stderr.?); stdout_file_in_stream.stream.readAllBuffer(&stdout_buf, max_stdout_size) catch unreachable; stderr_file_in_stream.stream.readAllBuffer(&stderr_buf, max_stdout_size) catch unreachable; -- cgit v1.2.3 From 77678b2cbc7ac9ba2d5d4725241f6a9f7ac64fa4 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 10 Jun 2018 01:13:51 -0400 Subject: breaking syntax change: orelse keyword instead of ?? (#1096) use the `zig-fmt-optional-default` branch to have zig fmt automatically do the changes. 
closes #1023 --- build.zig | 6 +++--- doc/docgen.zig | 6 +++--- doc/langref.html.in | 16 +++++++-------- src-self-hosted/main.zig | 14 ++++++------- src-self-hosted/module.zig | 8 ++++---- src/all_types.hpp | 7 ++++++- src/analyze.cpp | 1 + src/ast_render.cpp | 12 +++++++++-- src/ir.cpp | 31 ++++++++++++----------------- src/parser.cpp | 13 ++++++------ src/tokenizer.cpp | 27 ++++++------------------- src/tokenizer.hpp | 2 +- src/translate_c.cpp | 16 ++++++++------- std/atomic/queue.zig | 4 ++-- std/atomic/stack.zig | 4 ++-- std/buf_map.zig | 6 +++--- std/buf_set.zig | 4 ++-- std/build.zig | 24 +++++++++++----------- std/debug/index.zig | 20 +++++++++---------- std/heap.zig | 10 +++++----- std/linked_list.zig | 4 ++-- std/os/index.zig | 14 ++++++------- std/os/linux/vdso.zig | 8 ++++---- std/os/path.zig | 12 +++++------ std/os/windows/util.zig | 2 +- std/special/build_runner.zig | 10 +++++----- std/unicode.zig | 2 +- std/zig/parse.zig | 47 ++++++++++++++++++++++---------------------- std/zig/render.zig | 8 ++++---- test/cases/cast.zig | 6 +++--- test/cases/null.zig | 10 +++++----- test/compile_errors.zig | 2 +- test/translate_c.zig | 20 +++++++++---------- 33 files changed, 187 insertions(+), 189 deletions(-) (limited to 'src/analyze.cpp') diff --git a/build.zig b/build.zig index eada37816c..fd154c7504 100644 --- a/build.zig +++ b/build.zig @@ -102,11 +102,11 @@ pub fn build(b: *Builder) !void { b.default_step.dependOn(&exe.step); - const skip_self_hosted = b.option(bool, "skip-self-hosted", "Main test suite skips building self hosted compiler") ?? false; + const skip_self_hosted = b.option(bool, "skip-self-hosted", "Main test suite skips building self hosted compiler") orelse false; if (!skip_self_hosted) { test_step.dependOn(&exe.step); } - const verbose_link_exe = b.option(bool, "verbose-link", "Print link command for self hosted compiler") ?? false; + const verbose_link_exe = b.option(bool, "verbose-link", "Print link command for self hosted compiler") orelse false; exe.setVerboseLink(verbose_link_exe); b.installArtifact(exe); @@ -114,7 +114,7 @@ pub fn build(b: *Builder) !void { installCHeaders(b, c_header_files); const test_filter = b.option([]const u8, "test-filter", "Skip tests that do not match filter"); - const with_lldb = b.option(bool, "with-lldb", "Run tests in LLDB to get a backtrace if one fails") ?? false; + const with_lldb = b.option(bool, "with-lldb", "Run tests in LLDB to get a backtrace if one fails") orelse false; test_step.dependOn(docs_step); diff --git a/doc/docgen.zig b/doc/docgen.zig index ed0e1be273..3283d146b0 100644 --- a/doc/docgen.zig +++ b/doc/docgen.zig @@ -25,13 +25,13 @@ pub fn main() !void { if (!args_it.skip()) @panic("expected self arg"); - const zig_exe = try (args_it.next(allocator) ?? @panic("expected zig exe arg")); + const zig_exe = try (args_it.next(allocator) orelse @panic("expected zig exe arg")); defer allocator.free(zig_exe); - const in_file_name = try (args_it.next(allocator) ?? @panic("expected input arg")); + const in_file_name = try (args_it.next(allocator) orelse @panic("expected input arg")); defer allocator.free(in_file_name); - const out_file_name = try (args_it.next(allocator) ?? 
@panic("expected output arg")); + const out_file_name = try (args_it.next(allocator) orelse @panic("expected output arg")); defer allocator.free(out_file_name); var in_file = try os.File.openRead(allocator, in_file_name); diff --git a/doc/langref.html.in b/doc/langref.html.in index 4c4a637095..0ada8a5196 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -985,7 +985,7 @@ a ^= b
-a ?? b
+a orelse b
 • {#link|Optionals#}
 const value: ?u32 = null;
-const unwrapped = value ?? 1234;
+const unwrapped = value orelse 1234;
 unwrapped == 1234
@@ -1011,7 +1011,7 @@ unwrapped == 1234
 Equivalent to:
-a ?? unreachable
+a orelse unreachable
 const value: ?u32 = 5678;
@@ -1278,7 +1278,7 @@ x{} x.* x.?
 == != < > <= >=
 and
 or
-?? catch
+orelse catch
 = *= /= %= += -= <<= >>= &= ^= |=
      {#header_close#} {#header_close#} @@ -3062,7 +3062,7 @@ fn createFoo(param: i32) !Foo { // but we want to return it if the function succeeds. errdefer deallocateFoo(foo); - const tmp_buf = allocateTmpBuffer() ?? return error.OutOfMemory; + const tmp_buf = allocateTmpBuffer() orelse return error.OutOfMemory; // tmp_buf is truly a temporary resource, and we for sure want to clean it up // before this block leaves scope defer deallocateTmpBuffer(tmp_buf); @@ -3219,13 +3219,13 @@ struct Foo *do_a_thing(void) { extern fn malloc(size: size_t) ?*u8; fn doAThing() ?*Foo { - const ptr = malloc(1234) ?? return null; + const ptr = malloc(1234) orelse return null; // ... } {#code_end#}

      Here, Zig is at least as convenient, if not more, than C. And, the type of "ptr" - is *u8 not ?*u8. The ?? operator + is *u8 not ?*u8. The orelse keyword unwrapped the optional type and therefore ptr is guaranteed to be non-null everywhere it is used in the function.

      @@ -5941,7 +5941,7 @@ AsmClobbers= ":" list(String, ",") UnwrapExpression = BoolOrExpression (UnwrapOptional | UnwrapError) | BoolOrExpression -UnwrapOptional = "??" Expression +UnwrapOptional = "orelse" Expression UnwrapError = "catch" option("|" Symbol "|") Expression diff --git a/src-self-hosted/main.zig b/src-self-hosted/main.zig index 64734f077a..1c91ab9cbe 100644 --- a/src-self-hosted/main.zig +++ b/src-self-hosted/main.zig @@ -212,7 +212,7 @@ fn cmdBuild(allocator: *Allocator, args: []const []const u8) !void { const build_runner_path = try os.path.join(allocator, special_dir, "build_runner.zig"); defer allocator.free(build_runner_path); - const build_file = flags.single("build-file") ?? "build.zig"; + const build_file = flags.single("build-file") orelse "build.zig"; const build_file_abs = try os.path.resolve(allocator, ".", build_file); defer allocator.free(build_file_abs); @@ -516,7 +516,7 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo const basename = os.path.basename(in_file.?); var it = mem.split(basename, "."); - const root_name = it.next() ?? { + const root_name = it.next() orelse { try stderr.write("file name cannot be empty\n"); os.exit(1); }; @@ -535,7 +535,7 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo const zig_root_source_file = in_file; - const full_cache_dir = os.path.resolve(allocator, ".", flags.single("cache-dir") ?? "zig-cache"[0..]) catch { + const full_cache_dir = os.path.resolve(allocator, ".", flags.single("cache-dir") orelse "zig-cache"[0..]) catch { os.exit(1); }; defer allocator.free(full_cache_dir); @@ -555,9 +555,9 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo ); defer module.destroy(); - module.version_major = try std.fmt.parseUnsigned(u32, flags.single("ver-major") ?? "0", 10); - module.version_minor = try std.fmt.parseUnsigned(u32, flags.single("ver-minor") ?? "0", 10); - module.version_patch = try std.fmt.parseUnsigned(u32, flags.single("ver-patch") ?? "0", 10); + module.version_major = try std.fmt.parseUnsigned(u32, flags.single("ver-major") orelse "0", 10); + module.version_minor = try std.fmt.parseUnsigned(u32, flags.single("ver-minor") orelse "0", 10); + module.version_patch = try std.fmt.parseUnsigned(u32, flags.single("ver-patch") orelse "0", 10); module.is_test = false; @@ -652,7 +652,7 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Mo } try module.build(); - try module.link(flags.single("out-file") ?? null); + try module.link(flags.single("out-file") orelse null); if (flags.present("print-timing-info")) { // codegen_print_timing_info(g, stderr); diff --git a/src-self-hosted/module.zig b/src-self-hosted/module.zig index a7ddf3f9e9..575105f25f 100644 --- a/src-self-hosted/module.zig +++ b/src-self-hosted/module.zig @@ -130,13 +130,13 @@ pub const Module = struct { var name_buffer = try Buffer.init(allocator, name); errdefer name_buffer.deinit(); - const context = c.LLVMContextCreate() ?? return error.OutOfMemory; + const context = c.LLVMContextCreate() orelse return error.OutOfMemory; errdefer c.LLVMContextDispose(context); - const module = c.LLVMModuleCreateWithNameInContext(name_buffer.ptr(), context) ?? return error.OutOfMemory; + const module = c.LLVMModuleCreateWithNameInContext(name_buffer.ptr(), context) orelse return error.OutOfMemory; errdefer c.LLVMDisposeModule(module); - const builder = c.LLVMCreateBuilderInContext(context) ?? 
return error.OutOfMemory; + const builder = c.LLVMCreateBuilderInContext(context) orelse return error.OutOfMemory; errdefer c.LLVMDisposeBuilder(builder); const module_ptr = try allocator.create(Module); @@ -223,7 +223,7 @@ pub const Module = struct { c.ZigLLVMParseCommandLineOptions(self.llvm_argv.len + 1, c_compatible_args.ptr); } - const root_src_path = self.root_src_path ?? @panic("TODO handle null root src path"); + const root_src_path = self.root_src_path orelse @panic("TODO handle null root src path"); const root_src_real_path = os.path.real(self.allocator, root_src_path) catch |err| { try printError("unable to get real path '{}': {}", root_src_path, err); return err; diff --git a/src/all_types.hpp b/src/all_types.hpp index 2a5a0ad740..ab219e4e56 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -387,6 +387,7 @@ enum NodeType { NodeTypeSliceExpr, NodeTypeFieldAccessExpr, NodeTypePtrDeref, + NodeTypeUnwrapOptional, NodeTypeUse, NodeTypeBoolLiteral, NodeTypeNullLiteral, @@ -575,6 +576,10 @@ struct AstNodeCatchExpr { AstNode *op2; }; +struct AstNodeUnwrapOptional { + AstNode *expr; +}; + enum CastOp { CastOpNoCast, // signifies the function call expression is not a cast CastOpNoop, // fn call expr is a cast, but does nothing @@ -624,7 +629,6 @@ enum PrefixOp { PrefixOpNegation, PrefixOpNegationWrap, PrefixOpOptional, - PrefixOpUnwrapOptional, PrefixOpAddrOf, }; @@ -909,6 +913,7 @@ struct AstNode { AstNodeTestDecl test_decl; AstNodeBinOpExpr bin_op_expr; AstNodeCatchExpr unwrap_err_expr; + AstNodeUnwrapOptional unwrap_optional; AstNodePrefixOpExpr prefix_op_expr; AstNodePointerType pointer_type; AstNodeFnCallExpr fn_call_expr; diff --git a/src/analyze.cpp b/src/analyze.cpp index ed261148ea..0aa5ea5dcb 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -3308,6 +3308,7 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) { case NodeTypeAsmExpr: case NodeTypeFieldAccessExpr: case NodeTypePtrDeref: + case NodeTypeUnwrapOptional: case NodeTypeStructField: case NodeTypeContainerInitExpr: case NodeTypeStructValueField: diff --git a/src/ast_render.cpp b/src/ast_render.cpp index 2c8c03b226..2ace00885d 100644 --- a/src/ast_render.cpp +++ b/src/ast_render.cpp @@ -50,7 +50,7 @@ static const char *bin_op_str(BinOpType bin_op) { case BinOpTypeAssignBitXor: return "^="; case BinOpTypeAssignBitOr: return "|="; case BinOpTypeAssignMergeErrorSets: return "||="; - case BinOpTypeUnwrapOptional: return "??"; + case BinOpTypeUnwrapOptional: return "orelse"; case BinOpTypeArrayCat: return "++"; case BinOpTypeArrayMult: return "**"; case BinOpTypeErrorUnion: return "!"; @@ -67,7 +67,6 @@ static const char *prefix_op_str(PrefixOp prefix_op) { case PrefixOpBoolNot: return "!"; case PrefixOpBinNot: return "~"; case PrefixOpOptional: return "?"; - case PrefixOpUnwrapOptional: return "??"; case PrefixOpAddrOf: return "&"; } zig_unreachable(); @@ -222,6 +221,8 @@ static const char *node_type_str(NodeType node_type) { return "FieldAccessExpr"; case NodeTypePtrDeref: return "PtrDerefExpr"; + case NodeTypeUnwrapOptional: + return "UnwrapOptional"; case NodeTypeContainerDecl: return "ContainerDecl"; case NodeTypeStructField: @@ -711,6 +712,13 @@ static void render_node_extra(AstRender *ar, AstNode *node, bool grouped) { fprintf(ar->f, ".*"); break; } + case NodeTypeUnwrapOptional: + { + AstNode *lhs = node->data.unwrap_optional.expr; + render_node_ungrouped(ar, lhs); + fprintf(ar->f, ".?"); + break; + } case NodeTypeUndefinedLiteral: fprintf(ar->f, "undefined"); break; diff --git 
a/src/ir.cpp b/src/ir.cpp index 02606fc4aa..96eb5f7434 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -4661,21 +4661,6 @@ static IrInstruction *ir_gen_err_assert_ok(IrBuilder *irb, Scope *scope, AstNode return ir_build_load_ptr(irb, scope, source_node, payload_ptr); } -static IrInstruction *ir_gen_maybe_assert_ok(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval) { - assert(node->type == NodeTypePrefixOpExpr); - AstNode *expr_node = node->data.prefix_op_expr.primary_expr; - - IrInstruction *maybe_ptr = ir_gen_node_extra(irb, expr_node, scope, LVAL_PTR); - if (maybe_ptr == irb->codegen->invalid_instruction) - return irb->codegen->invalid_instruction; - - IrInstruction *unwrapped_ptr = ir_build_unwrap_maybe(irb, scope, node, maybe_ptr, true); - if (lval.is_ptr) - return unwrapped_ptr; - - return ir_build_load_ptr(irb, scope, node, unwrapped_ptr); -} - static IrInstruction *ir_gen_bool_not(IrBuilder *irb, Scope *scope, AstNode *node) { assert(node->type == NodeTypePrefixOpExpr); AstNode *expr_node = node->data.prefix_op_expr.primary_expr; @@ -4705,8 +4690,6 @@ static IrInstruction *ir_gen_prefix_op_expr(IrBuilder *irb, Scope *scope, AstNod return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpNegationWrap), lval); case PrefixOpOptional: return ir_lval_wrap(irb, scope, ir_gen_prefix_op_id(irb, scope, node, IrUnOpOptional), lval); - case PrefixOpUnwrapOptional: - return ir_gen_maybe_assert_ok(irb, scope, node, lval); case PrefixOpAddrOf: { AstNode *expr_node = node->data.prefix_op_expr.primary_expr; return ir_lval_wrap(irb, scope, ir_gen_node_extra(irb, expr_node, scope, LVAL_PTR), lval); @@ -6541,7 +6524,6 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop return ir_build_load_ptr(irb, scope, node, ptr_instruction); } case NodeTypePtrDeref: { - assert(node->type == NodeTypePtrDeref); AstNode *expr_node = node->data.ptr_deref_expr.target; IrInstruction *value = ir_gen_node_extra(irb, expr_node, scope, lval); if (value == irb->codegen->invalid_instruction) @@ -6549,6 +6531,19 @@ static IrInstruction *ir_gen_node_raw(IrBuilder *irb, AstNode *node, Scope *scop return ir_build_un_op(irb, scope, node, IrUnOpDereference, value); } + case NodeTypeUnwrapOptional: { + AstNode *expr_node = node->data.unwrap_optional.expr; + + IrInstruction *maybe_ptr = ir_gen_node_extra(irb, expr_node, scope, LVAL_PTR); + if (maybe_ptr == irb->codegen->invalid_instruction) + return irb->codegen->invalid_instruction; + + IrInstruction *unwrapped_ptr = ir_build_unwrap_maybe(irb, scope, node, maybe_ptr, true); + if (lval.is_ptr) + return unwrapped_ptr; + + return ir_build_load_ptr(irb, scope, node, unwrapped_ptr); + } case NodeTypeThisLiteral: return ir_lval_wrap(irb, scope, ir_gen_this_literal(irb, scope, node), lval); case NodeTypeBoolLiteral: diff --git a/src/parser.cpp b/src/parser.cpp index 2ee69f81ab..adb1633f5d 100644 --- a/src/parser.cpp +++ b/src/parser.cpp @@ -1151,9 +1151,8 @@ static AstNode *ast_parse_suffix_op_expr(ParseContext *pc, size_t *token_index, } else if (token->id == TokenIdQuestion) { *token_index += 1; - AstNode *node = ast_create_node(pc, NodeTypePrefixOpExpr, first_token); - node->data.prefix_op_expr.prefix_op = PrefixOpUnwrapOptional; - node->data.prefix_op_expr.primary_expr = primary_expr; + AstNode *node = ast_create_node(pc, NodeTypeUnwrapOptional, first_token); + node->data.unwrap_optional.expr = primary_expr; primary_expr = node; } else { @@ -1173,7 +1172,6 @@ static PrefixOp tok_to_prefix_op(Token *token) { case TokenIdMinusPercent: 
return PrefixOpNegationWrap; case TokenIdTilde: return PrefixOpBinNot; case TokenIdQuestion: return PrefixOpOptional; - case TokenIdDoubleQuestion: return PrefixOpUnwrapOptional; case TokenIdAmpersand: return PrefixOpAddrOf; default: return PrefixOpInvalid; } @@ -2312,7 +2310,7 @@ static BinOpType ast_parse_ass_op(ParseContext *pc, size_t *token_index, bool ma /* UnwrapExpression : BoolOrExpression (UnwrapOptional | UnwrapError) | BoolOrExpression -UnwrapOptional : "??" BoolOrExpression +UnwrapOptional = "orelse" Expression UnwrapError = "catch" option("|" Symbol "|") Expression */ static AstNode *ast_parse_unwrap_expr(ParseContext *pc, size_t *token_index, bool mandatory) { @@ -2322,7 +2320,7 @@ static AstNode *ast_parse_unwrap_expr(ParseContext *pc, size_t *token_index, boo Token *token = &pc->tokens->at(*token_index); - if (token->id == TokenIdDoubleQuestion) { + if (token->id == TokenIdKeywordOrElse) { *token_index += 1; AstNode *rhs = ast_parse_expression(pc, token_index, true); @@ -3035,6 +3033,9 @@ void ast_visit_node_children(AstNode *node, void (*visit)(AstNode **, void *cont case NodeTypePtrDeref: visit_field(&node->data.ptr_deref_expr.target, visit, context); break; + case NodeTypeUnwrapOptional: + visit_field(&node->data.unwrap_optional.expr, visit, context); + break; case NodeTypeUse: visit_field(&node->data.use.expr, visit, context); break; diff --git a/src/tokenizer.cpp b/src/tokenizer.cpp index cfabdf11ad..2950b4eb49 100644 --- a/src/tokenizer.cpp +++ b/src/tokenizer.cpp @@ -134,6 +134,7 @@ static const struct ZigKeyword zig_keywords[] = { {"noalias", TokenIdKeywordNoAlias}, {"null", TokenIdKeywordNull}, {"or", TokenIdKeywordOr}, + {"orelse", TokenIdKeywordOrElse}, {"packed", TokenIdKeywordPacked}, {"promise", TokenIdKeywordPromise}, {"pub", TokenIdKeywordPub}, @@ -215,7 +216,6 @@ enum TokenizeState { TokenizeStateSawGreaterThanGreaterThan, TokenizeStateSawDot, TokenizeStateSawDotDot, - TokenizeStateSawQuestionMark, TokenizeStateSawAtSign, TokenizeStateCharCode, TokenizeStateError, @@ -532,6 +532,10 @@ void tokenize(Buf *buf, Tokenization *out) { begin_token(&t, TokenIdComma); end_token(&t); break; + case '?': + begin_token(&t, TokenIdQuestion); + end_token(&t); + break; case '{': begin_token(&t, TokenIdLBrace); end_token(&t); @@ -624,28 +628,10 @@ void tokenize(Buf *buf, Tokenization *out) { begin_token(&t, TokenIdDot); t.state = TokenizeStateSawDot; break; - case '?': - begin_token(&t, TokenIdQuestion); - t.state = TokenizeStateSawQuestionMark; - break; default: invalid_char_error(&t, c); } break; - case TokenizeStateSawQuestionMark: - switch (c) { - case '?': - set_token_id(&t, t.cur_tok, TokenIdDoubleQuestion); - end_token(&t); - t.state = TokenizeStateStart; - break; - default: - t.pos -= 1; - end_token(&t); - t.state = TokenizeStateStart; - continue; - } - break; case TokenizeStateSawDot: switch (c) { case '.': @@ -1480,7 +1466,6 @@ void tokenize(Buf *buf, Tokenization *out) { case TokenizeStateSawGreaterThan: case TokenizeStateSawGreaterThanGreaterThan: case TokenizeStateSawDot: - case TokenizeStateSawQuestionMark: case TokenizeStateSawAtSign: case TokenizeStateSawStarPercent: case TokenizeStateSawPlusPercent: @@ -1545,7 +1530,6 @@ const char * token_name(TokenId id) { case TokenIdDash: return "-"; case TokenIdDivEq: return "/="; case TokenIdDot: return "."; - case TokenIdDoubleQuestion: return "??"; case TokenIdEllipsis2: return ".."; case TokenIdEllipsis3: return "..."; case TokenIdEof: return "EOF"; @@ -1582,6 +1566,7 @@ const char * token_name(TokenId id) { case 
TokenIdKeywordNoAlias: return "noalias"; case TokenIdKeywordNull: return "null"; case TokenIdKeywordOr: return "or"; + case TokenIdKeywordOrElse: return "orelse"; case TokenIdKeywordPacked: return "packed"; case TokenIdKeywordPromise: return "promise"; case TokenIdKeywordPub: return "pub"; diff --git a/src/tokenizer.hpp b/src/tokenizer.hpp index 7c617f85c6..75c7feb476 100644 --- a/src/tokenizer.hpp +++ b/src/tokenizer.hpp @@ -41,7 +41,6 @@ enum TokenId { TokenIdDash, TokenIdDivEq, TokenIdDot, - TokenIdDoubleQuestion, TokenIdEllipsis2, TokenIdEllipsis3, TokenIdEof, @@ -76,6 +75,7 @@ enum TokenId { TokenIdKeywordNoAlias, TokenIdKeywordNull, TokenIdKeywordOr, + TokenIdKeywordOrElse, TokenIdKeywordPacked, TokenIdKeywordPromise, TokenIdKeywordPub, diff --git a/src/translate_c.cpp b/src/translate_c.cpp index aaaf5a1edb..db46d31c5b 100644 --- a/src/translate_c.cpp +++ b/src/translate_c.cpp @@ -260,6 +260,12 @@ static AstNode *trans_create_node_prefix_op(Context *c, PrefixOp op, AstNode *ch return node; } +static AstNode *trans_create_node_unwrap_null(Context *c, AstNode *child_node) { + AstNode *node = trans_create_node(c, NodeTypeUnwrapOptional); + node->data.unwrap_optional.expr = child_node; + return node; +} + static AstNode *trans_create_node_bin_op(Context *c, AstNode *lhs_node, BinOpType op, AstNode *rhs_node) { AstNode *node = trans_create_node(c, NodeTypeBinOpExpr); node->data.bin_op_expr.op1 = lhs_node; @@ -382,7 +388,7 @@ static AstNode *trans_create_node_inline_fn(Context *c, Buf *fn_name, AstNode *r fn_def->data.fn_def.fn_proto = fn_proto; fn_proto->data.fn_proto.fn_def_node = fn_def; - AstNode *unwrap_node = trans_create_node_prefix_op(c, PrefixOpUnwrapOptional, ref_node); + AstNode *unwrap_node = trans_create_node_unwrap_null(c, ref_node); AstNode *fn_call_node = trans_create_node(c, NodeTypeFnCallExpr); fn_call_node->data.fn_call_expr.fn_ref_expr = unwrap_node; @@ -409,10 +415,6 @@ static AstNode *trans_create_node_inline_fn(Context *c, Buf *fn_name, AstNode *r return fn_def; } -static AstNode *trans_create_node_unwrap_null(Context *c, AstNode *child) { - return trans_create_node_prefix_op(c, PrefixOpUnwrapOptional, child); -} - static AstNode *get_global(Context *c, Buf *name) { { auto entry = c->global_table.maybe_get(name); @@ -1963,7 +1965,7 @@ static AstNode *trans_unary_operator(Context *c, ResultUsed result_used, TransSc bool is_fn_ptr = qual_type_is_fn_ptr(stmt->getSubExpr()->getType()); if (is_fn_ptr) return value_node; - AstNode *unwrapped = trans_create_node_prefix_op(c, PrefixOpUnwrapOptional, value_node); + AstNode *unwrapped = trans_create_node_unwrap_null(c, value_node); return trans_create_node_ptr_deref(c, unwrapped); } case UO_Plus: @@ -2587,7 +2589,7 @@ static AstNode *trans_call_expr(Context *c, ResultUsed result_used, TransScope * } } if (callee_node == nullptr) { - callee_node = trans_create_node_prefix_op(c, PrefixOpUnwrapOptional, callee_raw_node); + callee_node = trans_create_node_unwrap_null(c, callee_raw_node); } } else { callee_node = callee_raw_node; diff --git a/std/atomic/queue.zig b/std/atomic/queue.zig index 142c958173..4f856d9e01 100644 --- a/std/atomic/queue.zig +++ b/std/atomic/queue.zig @@ -33,8 +33,8 @@ pub fn Queue(comptime T: type) type { pub fn get(self: *Self) ?*Node { var head = @atomicLoad(*Node, &self.head, AtomicOrder.SeqCst); while (true) { - const node = head.next ?? return null; - head = @cmpxchgWeak(*Node, &self.head, head, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? 
return node; + const node = head.next orelse return null; + head = @cmpxchgWeak(*Node, &self.head, head, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse return node; } } }; diff --git a/std/atomic/stack.zig b/std/atomic/stack.zig index 15611188d2..77fa1a9100 100644 --- a/std/atomic/stack.zig +++ b/std/atomic/stack.zig @@ -28,14 +28,14 @@ pub fn Stack(comptime T: type) type { var root = @atomicLoad(?*Node, &self.root, AtomicOrder.SeqCst); while (true) { node.next = root; - root = @cmpxchgWeak(?*Node, &self.root, root, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? break; + root = @cmpxchgWeak(?*Node, &self.root, root, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse break; } } pub fn pop(self: *Self) ?*Node { var root = @atomicLoad(?*Node, &self.root, AtomicOrder.SeqCst); while (true) { - root = @cmpxchgWeak(?*Node, &self.root, root, (root ?? return null).next, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? return root; + root = @cmpxchgWeak(?*Node, &self.root, root, (root orelse return null).next, AtomicOrder.SeqCst, AtomicOrder.SeqCst) orelse return root; } } diff --git a/std/buf_map.zig b/std/buf_map.zig index 0d4f3a6d5e..a82d1b731a 100644 --- a/std/buf_map.zig +++ b/std/buf_map.zig @@ -19,7 +19,7 @@ pub const BufMap = struct { pub fn deinit(self: *const BufMap) void { var it = self.hash_map.iterator(); while (true) { - const entry = it.next() ?? break; + const entry = it.next() orelse break; self.free(entry.key); self.free(entry.value); } @@ -37,12 +37,12 @@ pub const BufMap = struct { } pub fn get(self: *const BufMap, key: []const u8) ?[]const u8 { - const entry = self.hash_map.get(key) ?? return null; + const entry = self.hash_map.get(key) orelse return null; return entry.value; } pub fn delete(self: *BufMap, key: []const u8) void { - const entry = self.hash_map.remove(key) ?? return; + const entry = self.hash_map.remove(key) orelse return; self.free(entry.key); self.free(entry.value); } diff --git a/std/buf_set.zig b/std/buf_set.zig index 03a050ed8b..ab2d8e7c34 100644 --- a/std/buf_set.zig +++ b/std/buf_set.zig @@ -17,7 +17,7 @@ pub const BufSet = struct { pub fn deinit(self: *const BufSet) void { var it = self.hash_map.iterator(); while (true) { - const entry = it.next() ?? break; + const entry = it.next() orelse break; self.free(entry.key); } @@ -33,7 +33,7 @@ pub const BufSet = struct { } pub fn delete(self: *BufSet, key: []const u8) void { - const entry = self.hash_map.remove(key) ?? return; + const entry = self.hash_map.remove(key) orelse return; self.free(entry.key); } diff --git a/std/build.zig b/std/build.zig index fed02e0815..5733aec17d 100644 --- a/std/build.zig +++ b/std/build.zig @@ -136,7 +136,7 @@ pub const Builder = struct { } pub fn setInstallPrefix(self: *Builder, maybe_prefix: ?[]const u8) void { - self.prefix = maybe_prefix ?? "/usr/local"; // TODO better default + self.prefix = maybe_prefix orelse "/usr/local"; // TODO better default self.lib_dir = os.path.join(self.allocator, self.prefix, "lib") catch unreachable; self.exe_dir = os.path.join(self.allocator, self.prefix, "bin") catch unreachable; } @@ -312,9 +312,9 @@ pub const Builder = struct { if (os.getEnvVarOwned(self.allocator, "NIX_CFLAGS_COMPILE")) |nix_cflags_compile| { var it = mem.split(nix_cflags_compile, " "); while (true) { - const word = it.next() ?? break; + const word = it.next() orelse break; if (mem.eql(u8, word, "-isystem")) { - const include_path = it.next() ?? 
{ + const include_path = it.next() orelse { warn("Expected argument after -isystem in NIX_CFLAGS_COMPILE\n"); break; }; @@ -330,9 +330,9 @@ pub const Builder = struct { if (os.getEnvVarOwned(self.allocator, "NIX_LDFLAGS")) |nix_ldflags| { var it = mem.split(nix_ldflags, " "); while (true) { - const word = it.next() ?? break; + const word = it.next() orelse break; if (mem.eql(u8, word, "-rpath")) { - const rpath = it.next() ?? { + const rpath = it.next() orelse { warn("Expected argument after -rpath in NIX_LDFLAGS\n"); break; }; @@ -362,7 +362,7 @@ pub const Builder = struct { } self.available_options_list.append(available_option) catch unreachable; - const entry = self.user_input_options.get(name) ?? return null; + const entry = self.user_input_options.get(name) orelse return null; entry.value.used = true; switch (type_id) { TypeId.Bool => switch (entry.value.value) { @@ -416,9 +416,9 @@ pub const Builder = struct { pub fn standardReleaseOptions(self: *Builder) builtin.Mode { if (self.release_mode) |mode| return mode; - const release_safe = self.option(bool, "release-safe", "optimizations on and safety on") ?? false; - const release_fast = self.option(bool, "release-fast", "optimizations on and safety off") ?? false; - const release_small = self.option(bool, "release-small", "size optimizations on and safety off") ?? false; + const release_safe = self.option(bool, "release-safe", "optimizations on and safety on") orelse false; + const release_fast = self.option(bool, "release-fast", "optimizations on and safety off") orelse false; + const release_small = self.option(bool, "release-small", "size optimizations on and safety off") orelse false; const mode = if (release_safe and !release_fast and !release_small) builtin.Mode.ReleaseSafe else if (release_fast and !release_safe and !release_small) builtin.Mode.ReleaseFast else if (release_small and !release_fast and !release_safe) builtin.Mode.ReleaseSmall else if (!release_fast and !release_safe and !release_small) builtin.Mode.Debug else x: { warn("Multiple release modes (of -Drelease-safe, -Drelease-fast and -Drelease-small)"); @@ -518,7 +518,7 @@ pub const Builder = struct { // make sure all args are used var it = self.user_input_options.iterator(); while (true) { - const entry = it.next() ?? break; + const entry = it.next() orelse break; if (!entry.value.used) { warn("Invalid option: -D{}\n\n", entry.key); self.markInvalidUserInput(); @@ -1246,7 +1246,7 @@ pub const LibExeObjStep = struct { { var it = self.link_libs.iterator(); while (true) { - const entry = it.next() ?? break; + const entry = it.next() orelse break; zig_args.append("--library") catch unreachable; zig_args.append(entry.key) catch unreachable; } @@ -1696,7 +1696,7 @@ pub const TestStep = struct { { var it = self.link_libs.iterator(); while (true) { - const entry = it.next() ?? break; + const entry = it.next() orelse break; try zig_args.append("--library"); try zig_args.append(entry.key); } diff --git a/std/debug/index.zig b/std/debug/index.zig index be47ab76bc..25f7a58b25 100644 --- a/std/debug/index.zig +++ b/std/debug/index.zig @@ -208,7 +208,7 @@ fn printSourceAtAddress(debug_info: *ElfStackTrace, out_stream: var, address: us .name = "???", .address = address, }; - const symbol = debug_info.symbol_table.search(address) ?? &unknown; + const symbol = debug_info.symbol_table.search(address) orelse &unknown; try out_stream.print(WHITE ++ "{}" ++ RESET ++ ": " ++ DIM ++ ptr_hex ++ " in ??? 
(???)" ++ RESET ++ "\n", symbol.name, address); }, else => { @@ -268,10 +268,10 @@ pub fn openSelfDebugInfo(allocator: *mem.Allocator) !*ElfStackTrace { try st.elf.openFile(allocator, &st.self_exe_file); errdefer st.elf.close(); - st.debug_info = (try st.elf.findSection(".debug_info")) ?? return error.MissingDebugInfo; - st.debug_abbrev = (try st.elf.findSection(".debug_abbrev")) ?? return error.MissingDebugInfo; - st.debug_str = (try st.elf.findSection(".debug_str")) ?? return error.MissingDebugInfo; - st.debug_line = (try st.elf.findSection(".debug_line")) ?? return error.MissingDebugInfo; + st.debug_info = (try st.elf.findSection(".debug_info")) orelse return error.MissingDebugInfo; + st.debug_abbrev = (try st.elf.findSection(".debug_abbrev")) orelse return error.MissingDebugInfo; + st.debug_str = (try st.elf.findSection(".debug_str")) orelse return error.MissingDebugInfo; + st.debug_line = (try st.elf.findSection(".debug_line")) orelse return error.MissingDebugInfo; st.debug_ranges = (try st.elf.findSection(".debug_ranges")); try scanAllCompileUnits(st); return st; @@ -443,7 +443,7 @@ const Die = struct { } fn getAttrAddr(self: *const Die, id: u64) !u64 { - const form_value = self.getAttr(id) ?? return error.MissingDebugInfo; + const form_value = self.getAttr(id) orelse return error.MissingDebugInfo; return switch (form_value.*) { FormValue.Address => |value| value, else => error.InvalidDebugInfo, @@ -451,7 +451,7 @@ const Die = struct { } fn getAttrSecOffset(self: *const Die, id: u64) !u64 { - const form_value = self.getAttr(id) ?? return error.MissingDebugInfo; + const form_value = self.getAttr(id) orelse return error.MissingDebugInfo; return switch (form_value.*) { FormValue.Const => |value| value.asUnsignedLe(), FormValue.SecOffset => |value| value, @@ -460,7 +460,7 @@ const Die = struct { } fn getAttrUnsignedLe(self: *const Die, id: u64) !u64 { - const form_value = self.getAttr(id) ?? return error.MissingDebugInfo; + const form_value = self.getAttr(id) orelse return error.MissingDebugInfo; return switch (form_value.*) { FormValue.Const => |value| value.asUnsignedLe(), else => error.InvalidDebugInfo, @@ -468,7 +468,7 @@ const Die = struct { } fn getAttrString(self: *const Die, st: *ElfStackTrace, id: u64) ![]u8 { - const form_value = self.getAttr(id) ?? return error.MissingDebugInfo; + const form_value = self.getAttr(id) orelse return error.MissingDebugInfo; return switch (form_value.*) { FormValue.String => |value| value, FormValue.StrPtr => |offset| getString(st, offset), @@ -748,7 +748,7 @@ fn parseDie(st: *ElfStackTrace, abbrev_table: *const AbbrevTable, is_64: bool) ! var in_file_stream = io.FileInStream.init(in_file); const in_stream = &in_file_stream.stream; const abbrev_code = try readULeb128(in_stream); - const table_entry = getAbbrevTableEntry(abbrev_table, abbrev_code) ?? return error.InvalidDebugInfo; + const table_entry = getAbbrevTableEntry(abbrev_table, abbrev_code) orelse return error.InvalidDebugInfo; var result = Die{ .tag_id = table_entry.tag_id, diff --git a/std/heap.zig b/std/heap.zig index d1fbf9ca0a..172bc24118 100644 --- a/std/heap.zig +++ b/std/heap.zig @@ -97,12 +97,12 @@ pub const DirectAllocator = struct { }, Os.windows => { const amt = n + alignment + @sizeOf(usize); - const heap_handle = self.heap_handle ?? blk: { - const hh = os.windows.HeapCreate(os.windows.HEAP_NO_SERIALIZE, amt, 0) ?? 
return error.OutOfMemory; + const heap_handle = self.heap_handle orelse blk: { + const hh = os.windows.HeapCreate(os.windows.HEAP_NO_SERIALIZE, amt, 0) orelse return error.OutOfMemory; self.heap_handle = hh; break :blk hh; }; - const ptr = os.windows.HeapAlloc(heap_handle, 0, amt) ?? return error.OutOfMemory; + const ptr = os.windows.HeapAlloc(heap_handle, 0, amt) orelse return error.OutOfMemory; const root_addr = @ptrToInt(ptr); const rem = @rem(root_addr, alignment); const march_forward_bytes = if (rem == 0) 0 else (alignment - rem); @@ -142,7 +142,7 @@ pub const DirectAllocator = struct { const root_addr = @intToPtr(*align(1) usize, old_record_addr).*; const old_ptr = @intToPtr(*c_void, root_addr); const amt = new_size + alignment + @sizeOf(usize); - const new_ptr = os.windows.HeapReAlloc(self.heap_handle.?, 0, old_ptr, amt) ?? blk: { + const new_ptr = os.windows.HeapReAlloc(self.heap_handle.?, 0, old_ptr, amt) orelse blk: { if (new_size > old_mem.len) return error.OutOfMemory; const new_record_addr = old_record_addr - new_size + old_mem.len; @intToPtr(*align(1) usize, new_record_addr).* = root_addr; @@ -343,7 +343,7 @@ pub const ThreadSafeFixedBufferAllocator = struct { if (new_end_index > self.buffer.len) { return error.OutOfMemory; } - end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, builtin.AtomicOrder.SeqCst, builtin.AtomicOrder.SeqCst) ?? return self.buffer[adjusted_index..new_end_index]; + end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, builtin.AtomicOrder.SeqCst, builtin.AtomicOrder.SeqCst) orelse return self.buffer[adjusted_index..new_end_index]; } } diff --git a/std/linked_list.zig b/std/linked_list.zig index 536c6d24d0..9e32b7d9da 100644 --- a/std/linked_list.zig +++ b/std/linked_list.zig @@ -169,7 +169,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na /// Returns: /// A pointer to the last node in the list. pub fn pop(list: *Self) ?*Node { - const last = list.last ?? return null; + const last = list.last orelse return null; list.remove(last); return last; } @@ -179,7 +179,7 @@ fn BaseLinkedList(comptime T: type, comptime ParentType: type, comptime field_na /// Returns: /// A pointer to the first node in the list. pub fn popFirst(list: *Self) ?*Node { - const first = list.first ?? return null; + const first = list.first orelse return null; list.remove(first); return first; } diff --git a/std/os/index.zig b/std/os/index.zig index 807b2c398b..6a13ff94d4 100644 --- a/std/os/index.zig +++ b/std/os/index.zig @@ -425,7 +425,7 @@ pub fn posixExecve(argv: []const []const u8, env_map: *const BufMap, allocator: return posixExecveErrnoToErr(posix.getErrno(posix.execve(argv_buf[0].?, argv_buf.ptr, envp_buf.ptr))); } - const PATH = getEnvPosix("PATH") ?? "/usr/local/bin:/bin/:/usr/bin"; + const PATH = getEnvPosix("PATH") orelse "/usr/local/bin:/bin/:/usr/bin"; // PATH.len because it is >= the largest search_path // +1 for the / to join the search path and exe_path // +1 for the null terminating byte @@ -490,7 +490,7 @@ pub fn getEnvMap(allocator: *Allocator) !BufMap { errdefer result.deinit(); if (is_windows) { - const ptr = windows.GetEnvironmentStringsA() ?? 
return error.OutOfMemory; + const ptr = windows.GetEnvironmentStringsA() orelse return error.OutOfMemory; defer assert(windows.FreeEnvironmentStringsA(ptr) != 0); var i: usize = 0; @@ -573,7 +573,7 @@ pub fn getEnvVarOwned(allocator: *mem.Allocator, key: []const u8) ![]u8 { return allocator.shrink(u8, buf, result); } } else { - const result = getEnvPosix(key) ?? return error.EnvironmentVariableNotFound; + const result = getEnvPosix(key) orelse return error.EnvironmentVariableNotFound; return mem.dupe(allocator, u8, result); } } @@ -1641,7 +1641,7 @@ pub const ArgIterator = struct { if (builtin.os == Os.windows) { return self.inner.next(allocator); } else { - return mem.dupe(allocator, u8, self.inner.next() ?? return null); + return mem.dupe(allocator, u8, self.inner.next() orelse return null); } } @@ -2457,9 +2457,9 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!*Thread } }; - const heap_handle = windows.GetProcessHeap() ?? return SpawnThreadError.OutOfMemory; + const heap_handle = windows.GetProcessHeap() orelse return SpawnThreadError.OutOfMemory; const byte_count = @alignOf(WinThread.OuterContext) + @sizeOf(WinThread.OuterContext); - const bytes_ptr = windows.HeapAlloc(heap_handle, 0, byte_count) ?? return SpawnThreadError.OutOfMemory; + const bytes_ptr = windows.HeapAlloc(heap_handle, 0, byte_count) orelse return SpawnThreadError.OutOfMemory; errdefer assert(windows.HeapFree(heap_handle, 0, bytes_ptr) != 0); const bytes = @ptrCast([*]u8, bytes_ptr)[0..byte_count]; const outer_context = std.heap.FixedBufferAllocator.init(bytes).allocator.create(WinThread.OuterContext) catch unreachable; @@ -2468,7 +2468,7 @@ pub fn spawnThread(context: var, comptime startFn: var) SpawnThreadError!*Thread outer_context.thread.data.alloc_start = bytes_ptr; const parameter = if (@sizeOf(Context) == 0) null else @ptrCast(*c_void, &outer_context.inner); - outer_context.thread.data.handle = windows.CreateThread(null, default_stack_size, WinThread.threadMain, parameter, 0, null) ?? { + outer_context.thread.data.handle = windows.CreateThread(null, default_stack_size, WinThread.threadMain, parameter, 0, null) orelse { const err = windows.GetLastError(); return switch (err) { else => os.unexpectedErrorWindows(err), diff --git a/std/os/linux/vdso.zig b/std/os/linux/vdso.zig index 1414b8185b..cbd0cd1df5 100644 --- a/std/os/linux/vdso.zig +++ b/std/os/linux/vdso.zig @@ -28,7 +28,7 @@ pub fn lookup(vername: []const u8, name: []const u8) usize { } } } - const dynv = maybe_dynv ?? return 0; + const dynv = maybe_dynv orelse return 0; if (base == @maxValue(usize)) return 0; var maybe_strings: ?[*]u8 = null; @@ -52,9 +52,9 @@ pub fn lookup(vername: []const u8, name: []const u8) usize { } } - const strings = maybe_strings ?? return 0; - const syms = maybe_syms ?? return 0; - const hashtab = maybe_hashtab ?? return 0; + const strings = maybe_strings orelse return 0; + const syms = maybe_syms orelse return 0; + const hashtab = maybe_hashtab orelse return 0; if (maybe_verdef == null) maybe_versym = null; const OK_TYPES = (1 << elf.STT_NOTYPE | 1 << elf.STT_OBJECT | 1 << elf.STT_FUNC | 1 << elf.STT_COMMON); diff --git a/std/os/path.zig b/std/os/path.zig index 430dda2934..a3ad23b1a9 100644 --- a/std/os/path.zig +++ b/std/os/path.zig @@ -182,8 +182,8 @@ pub fn windowsParsePath(path: []const u8) WindowsPath { } var it = mem.split(path, []u8{this_sep}); - _ = (it.next() ?? return relative_path); - _ = (it.next() ?? 
return relative_path); + _ = (it.next() orelse return relative_path); + _ = (it.next() orelse return relative_path); return WindowsPath{ .is_abs = isAbsoluteWindows(path), .kind = WindowsPath.Kind.NetworkShare, @@ -200,8 +200,8 @@ pub fn windowsParsePath(path: []const u8) WindowsPath { } var it = mem.split(path, []u8{this_sep}); - _ = (it.next() ?? return relative_path); - _ = (it.next() ?? return relative_path); + _ = (it.next() orelse return relative_path); + _ = (it.next() orelse return relative_path); return WindowsPath{ .is_abs = isAbsoluteWindows(path), .kind = WindowsPath.Kind.NetworkShare, @@ -923,7 +923,7 @@ pub fn relativeWindows(allocator: *Allocator, from: []const u8, to: []const u8) var from_it = mem.split(resolved_from, "/\\"); var to_it = mem.split(resolved_to, "/\\"); while (true) { - const from_component = from_it.next() ?? return mem.dupe(allocator, u8, to_it.rest()); + const from_component = from_it.next() orelse return mem.dupe(allocator, u8, to_it.rest()); const to_rest = to_it.rest(); if (to_it.next()) |to_component| { // TODO ASCII is wrong, we actually need full unicode support to compare paths. @@ -974,7 +974,7 @@ pub fn relativePosix(allocator: *Allocator, from: []const u8, to: []const u8) ![ var from_it = mem.split(resolved_from, "/"); var to_it = mem.split(resolved_to, "/"); while (true) { - const from_component = from_it.next() ?? return mem.dupe(allocator, u8, to_it.rest()); + const from_component = from_it.next() orelse return mem.dupe(allocator, u8, to_it.rest()); const to_rest = to_it.rest(); if (to_it.next()) |to_component| { if (mem.eql(u8, from_component, to_component)) diff --git a/std/os/windows/util.zig b/std/os/windows/util.zig index 7170346108..f93a673be0 100644 --- a/std/os/windows/util.zig +++ b/std/os/windows/util.zig @@ -153,7 +153,7 @@ pub fn createWindowsEnvBlock(allocator: *mem.Allocator, env_map: *const BufMap) pub fn windowsLoadDll(allocator: *mem.Allocator, dll_path: []const u8) !windows.HMODULE { const padded_buff = try cstr.addNullByte(allocator, dll_path); defer allocator.free(padded_buff); - return windows.LoadLibraryA(padded_buff.ptr) ?? error.DllNotFound; + return windows.LoadLibraryA(padded_buff.ptr) orelse error.DllNotFound; } pub fn windowsUnloadDll(hModule: windows.HMODULE) void { diff --git a/std/special/build_runner.zig b/std/special/build_runner.zig index 3471d6ed21..e4f04df6d0 100644 --- a/std/special/build_runner.zig +++ b/std/special/build_runner.zig @@ -27,15 +27,15 @@ pub fn main() !void { // skip my own exe name _ = arg_it.skip(); - const zig_exe = try unwrapArg(arg_it.next(allocator) ?? { + const zig_exe = try unwrapArg(arg_it.next(allocator) orelse { warn("Expected first argument to be path to zig compiler\n"); return error.InvalidArgs; }); - const build_root = try unwrapArg(arg_it.next(allocator) ?? { + const build_root = try unwrapArg(arg_it.next(allocator) orelse { warn("Expected second argument to be build root directory path\n"); return error.InvalidArgs; }); - const cache_root = try unwrapArg(arg_it.next(allocator) ?? { + const cache_root = try unwrapArg(arg_it.next(allocator) orelse { warn("Expected third argument to be cache root directory path\n"); return error.InvalidArgs; }); @@ -84,12 +84,12 @@ pub fn main() !void { } else if (mem.eql(u8, arg, "--help")) { return usage(&builder, false, try stdout_stream); } else if (mem.eql(u8, arg, "--prefix")) { - prefix = try unwrapArg(arg_it.next(allocator) ?? 
{ + prefix = try unwrapArg(arg_it.next(allocator) orelse { warn("Expected argument after --prefix\n\n"); return usageAndErr(&builder, false, try stderr_stream); }); } else if (mem.eql(u8, arg, "--search-prefix")) { - const search_prefix = try unwrapArg(arg_it.next(allocator) ?? { + const search_prefix = try unwrapArg(arg_it.next(allocator) orelse { warn("Expected argument after --search-prefix\n\n"); return usageAndErr(&builder, false, try stderr_stream); }); diff --git a/std/unicode.zig b/std/unicode.zig index 21ae12f59c..ec808ca4fe 100644 --- a/std/unicode.zig +++ b/std/unicode.zig @@ -220,7 +220,7 @@ const Utf8Iterator = struct { } pub fn nextCodepoint(it: *Utf8Iterator) ?u32 { - const slice = it.nextCodepointSlice() ?? return null; + const slice = it.nextCodepointSlice() orelse return null; switch (slice.len) { 1 => return u32(slice[0]), diff --git a/std/zig/parse.zig b/std/zig/parse.zig index 9f8ef3c3d6..5752f69409 100644 --- a/std/zig/parse.zig +++ b/std/zig/parse.zig @@ -43,7 +43,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree { // skip over line comments at the top of the file while (true) { - const next_tok = tok_it.peek() ?? break; + const next_tok = tok_it.peek() orelse break; if (next_tok.id != Token.Id.LineComment) break; _ = tok_it.next(); } @@ -197,7 +197,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree { const lib_name_token = nextToken(&tok_it, &tree); const lib_name_token_index = lib_name_token.index; const lib_name_token_ptr = lib_name_token.ptr; - break :blk (try parseStringLiteral(arena, &tok_it, lib_name_token_ptr, lib_name_token_index, &tree)) ?? { + break :blk (try parseStringLiteral(arena, &tok_it, lib_name_token_ptr, lib_name_token_index, &tree)) orelse { prevToken(&tok_it, &tree); break :blk null; }; @@ -1434,13 +1434,14 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree { try stack.append(State{ .ExpectTokenSave = ExpectTokenSave{ .id = Token.Id.AngleBracketRight, - .ptr = &async_node.rangle_bracket.? }, + .ptr = &async_node.rangle_bracket.?, + }, }); try stack.append(State{ .TypeExprBegin = OptionalCtx{ .RequiredNull = &async_node.allocator_type } }); continue; }, State.AsyncEnd => |ctx| { - const node = ctx.ctx.get() ?? continue; + const node = ctx.ctx.get() orelse continue; switch (node.id) { ast.Node.Id.FnProto => { @@ -1813,7 +1814,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree { continue; }, State.RangeExpressionEnd => |opt_ctx| { - const lhs = opt_ctx.get() ?? continue; + const lhs = opt_ctx.get() orelse continue; if (eatToken(&tok_it, &tree, Token.Id.Ellipsis3)) |ellipsis3| { const node = try arena.construct(ast.Node.InfixOp{ @@ -1835,7 +1836,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree { }, State.AssignmentExpressionEnd => |opt_ctx| { - const lhs = opt_ctx.get() ?? continue; + const lhs = opt_ctx.get() orelse continue; const token = nextToken(&tok_it, &tree); const token_index = token.index; @@ -1865,7 +1866,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree { }, State.UnwrapExpressionEnd => |opt_ctx| { - const lhs = opt_ctx.get() ?? continue; + const lhs = opt_ctx.get() orelse continue; const token = nextToken(&tok_it, &tree); const token_index = token.index; @@ -1900,7 +1901,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree { }, State.BoolOrExpressionEnd => |opt_ctx| { - const lhs = opt_ctx.get() ?? 
continue; + const lhs = opt_ctx.get() orelse continue; if (eatToken(&tok_it, &tree, Token.Id.Keyword_or)) |or_token| { const node = try arena.construct(ast.Node.InfixOp{ @@ -1924,7 +1925,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree { }, State.BoolAndExpressionEnd => |opt_ctx| { - const lhs = opt_ctx.get() ?? continue; + const lhs = opt_ctx.get() orelse continue; if (eatToken(&tok_it, &tree, Token.Id.Keyword_and)) |and_token| { const node = try arena.construct(ast.Node.InfixOp{ @@ -1948,7 +1949,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree { }, State.ComparisonExpressionEnd => |opt_ctx| { - const lhs = opt_ctx.get() ?? continue; + const lhs = opt_ctx.get() orelse continue; const token = nextToken(&tok_it, &tree); const token_index = token.index; @@ -1978,7 +1979,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree { }, State.BinaryOrExpressionEnd => |opt_ctx| { - const lhs = opt_ctx.get() ?? continue; + const lhs = opt_ctx.get() orelse continue; if (eatToken(&tok_it, &tree, Token.Id.Pipe)) |pipe| { const node = try arena.construct(ast.Node.InfixOp{ @@ -2002,7 +2003,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree { }, State.BinaryXorExpressionEnd => |opt_ctx| { - const lhs = opt_ctx.get() ?? continue; + const lhs = opt_ctx.get() orelse continue; if (eatToken(&tok_it, &tree, Token.Id.Caret)) |caret| { const node = try arena.construct(ast.Node.InfixOp{ @@ -2026,7 +2027,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree { }, State.BinaryAndExpressionEnd => |opt_ctx| { - const lhs = opt_ctx.get() ?? continue; + const lhs = opt_ctx.get() orelse continue; if (eatToken(&tok_it, &tree, Token.Id.Ampersand)) |ampersand| { const node = try arena.construct(ast.Node.InfixOp{ @@ -2050,7 +2051,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree { }, State.BitShiftExpressionEnd => |opt_ctx| { - const lhs = opt_ctx.get() ?? continue; + const lhs = opt_ctx.get() orelse continue; const token = nextToken(&tok_it, &tree); const token_index = token.index; @@ -2080,7 +2081,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree { }, State.AdditionExpressionEnd => |opt_ctx| { - const lhs = opt_ctx.get() ?? continue; + const lhs = opt_ctx.get() orelse continue; const token = nextToken(&tok_it, &tree); const token_index = token.index; @@ -2110,7 +2111,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree { }, State.MultiplyExpressionEnd => |opt_ctx| { - const lhs = opt_ctx.get() ?? continue; + const lhs = opt_ctx.get() orelse continue; const token = nextToken(&tok_it, &tree); const token_index = token.index; @@ -2141,7 +2142,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree { }, State.CurlySuffixExpressionEnd => |opt_ctx| { - const lhs = opt_ctx.get() ?? continue; + const lhs = opt_ctx.get() orelse continue; if (tok_it.peek().?.id == Token.Id.Period) { const node = try arena.construct(ast.Node.SuffixOp{ @@ -2189,7 +2190,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree { }, State.TypeExprEnd => |opt_ctx| { - const lhs = opt_ctx.get() ?? continue; + const lhs = opt_ctx.get() orelse continue; if (eatToken(&tok_it, &tree, Token.Id.Bang)) |bang| { const node = try arena.construct(ast.Node.InfixOp{ @@ -2269,7 +2270,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree { }, State.SuffixOpExpressionEnd => |opt_ctx| { - const lhs = opt_ctx.get() ?? 
continue; + const lhs = opt_ctx.get() orelse continue; const token = nextToken(&tok_it, &tree); const token_index = token.index; @@ -2418,7 +2419,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree { continue; }, Token.Id.StringLiteral, Token.Id.MultilineStringLiteralLine => { - opt_ctx.store((try parseStringLiteral(arena, &tok_it, token.ptr, token.index, &tree)) ?? unreachable); + opt_ctx.store((try parseStringLiteral(arena, &tok_it, token.ptr, token.index, &tree)) orelse unreachable); continue; }, Token.Id.LParen => { @@ -2648,7 +2649,7 @@ pub fn parse(allocator: *mem.Allocator, source: []const u8) !ast.Tree { const token = nextToken(&tok_it, &tree); const token_index = token.index; const token_ptr = token.ptr; - opt_ctx.store((try parseStringLiteral(arena, &tok_it, token_ptr, token_index, &tree)) ?? { + opt_ctx.store((try parseStringLiteral(arena, &tok_it, token_ptr, token_index, &tree)) orelse { prevToken(&tok_it, &tree); if (opt_ctx != OptionalCtx.Optional) { ((try tree.errors.addOne())).* = Error{ .ExpectedPrimaryExpr = Error.ExpectedPrimaryExpr{ .token = token_index } }; @@ -3348,7 +3349,7 @@ fn nextToken(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree) AnnotatedTok assert(result.ptr.id != Token.Id.LineComment); while (true) { - const next_tok = tok_it.peek() ?? return result; + const next_tok = tok_it.peek() orelse return result; if (next_tok.id != Token.Id.LineComment) return result; _ = tok_it.next(); } @@ -3356,7 +3357,7 @@ fn nextToken(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree) AnnotatedTok fn prevToken(tok_it: *ast.Tree.TokenList.Iterator, tree: *ast.Tree) void { while (true) { - const prev_tok = tok_it.prev() ?? return; + const prev_tok = tok_it.prev() orelse return; if (prev_tok.id == Token.Id.LineComment) continue; return; } diff --git a/std/zig/render.zig b/std/zig/render.zig index 0b8e4d1453..bc45768fa3 100644 --- a/std/zig/render.zig +++ b/std/zig/render.zig @@ -83,7 +83,7 @@ fn renderRoot( var start_col: usize = 0; var it = tree.root_node.decls.iterator(0); while (true) { - var decl = (it.next() ?? return).*; + var decl = (it.next() orelse return).*; // look for zig fmt: off comment var start_token_index = decl.firstToken(); zig_fmt_loop: while (start_token_index != 0) { @@ -112,7 +112,7 @@ fn renderRoot( const start = tree.tokens.at(start_token_index + 1).start; try stream.print("{}\n", tree.source[start..end_token.end]); while (tree.tokens.at(decl.firstToken()).start < end_token.end) { - decl = (it.next() ?? return).*; + decl = (it.next() orelse return).*; } break :zig_fmt_loop; } @@ -1993,7 +1993,7 @@ fn renderDocComments( indent: usize, start_col: *usize, ) (@typeOf(stream).Child.Error || Error)!void { - const comment = node.doc_comments ?? return; + const comment = node.doc_comments orelse return; var it = comment.lines.iterator(0); const first_token = node.firstToken(); while (it.next()) |line_token_index| { @@ -2021,7 +2021,7 @@ fn nodeIsBlock(base: *const ast.Node) bool { } fn nodeCausesSliceOpSpace(base: *ast.Node) bool { - const infix_op = base.cast(ast.Node.InfixOp) ?? 
return false; + const infix_op = base.cast(ast.Node.InfixOp) orelse return false; return switch (infix_op.op) { ast.Node.InfixOp.Op.Period => false, else => true, diff --git a/test/cases/cast.zig b/test/cases/cast.zig index a56c470408..ade1cf78aa 100644 --- a/test/cases/cast.zig +++ b/test/cases/cast.zig @@ -73,7 +73,7 @@ fn Struct(comptime T: type) type { fn maybePointer(self: ?*const Self) Self { const none = Self{ .x = if (T == void) void{} else 0 }; - return (self ?? &none).*; + return (self orelse &none).*; } }; } @@ -87,7 +87,7 @@ const Union = union { fn maybePointer(self: ?*const Union) Union { const none = Union{ .x = 0 }; - return (self ?? &none).*; + return (self orelse &none).*; } }; @@ -100,7 +100,7 @@ const Enum = enum { } fn maybePointer(self: ?*const Enum) Enum { - return (self ?? &Enum.None).*; + return (self orelse &Enum.None).*; } }; diff --git a/test/cases/null.zig b/test/cases/null.zig index 62565784ac..cdcfd23efb 100644 --- a/test/cases/null.zig +++ b/test/cases/null.zig @@ -15,13 +15,13 @@ test "optional type" { const next_x: ?i32 = null; - const z = next_x ?? 1234; + const z = next_x orelse 1234; assert(z == 1234); const final_x: ?i32 = 13; - const num = final_x ?? unreachable; + const num = final_x orelse unreachable; assert(num == 13); } @@ -38,7 +38,7 @@ test "test maybe object and get a pointer to the inner value" { test "rhs maybe unwrap return" { const x: ?bool = true; - const y = x ?? return; + const y = x orelse return; } test "maybe return" { @@ -53,7 +53,7 @@ fn maybeReturnImpl() void { } fn foo(x: ?i32) ?bool { - const value = x ?? return null; + const value = x orelse return null; return value > 1234; } @@ -140,6 +140,6 @@ test "unwrap optional which is field of global var" { } test "null with default unwrap" { - const x: i32 = null ?? 1; + const x: i32 = null orelse 1; assert(x == 1); } diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 1c737a59e7..5ec2759032 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -2296,7 +2296,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ \\ defer try canFail(); \\ - \\ const a = maybeInt() ?? 
return; + \\ const a = maybeInt() orelse return; \\} \\ \\fn canFail() error!void { } diff --git a/test/translate_c.zig b/test/translate_c.zig index 3489f9da21..417171d2c2 100644 --- a/test/translate_c.zig +++ b/test/translate_c.zig @@ -246,13 +246,13 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\pub extern var fn_ptr: ?extern fn() void; , \\pub inline fn foo() void { - \\ return (??fn_ptr)(); + \\ return fn_ptr.?(); \\} , \\pub extern var fn_ptr2: ?extern fn(c_int, f32) u8; , \\pub inline fn bar(arg0: c_int, arg1: f32) u8 { - \\ return (??fn_ptr2)(arg0, arg1); + \\ return fn_ptr2.?(arg0, arg1); \\} ); @@ -608,7 +608,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\ field: c_int, \\}; \\pub export fn read_field(foo: ?[*]struct_Foo) c_int { - \\ return (??foo).field; + \\ return foo.?.field; \\} ); @@ -969,11 +969,11 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\pub export fn bar() void { \\ var f: ?extern fn() void = foo; \\ var b: ?extern fn() c_int = baz; - \\ (??f)(); - \\ (??f)(); + \\ f.?(); + \\ f.?(); \\ foo(); - \\ _ = (??b)(); - \\ _ = (??b)(); + \\ _ = b.?(); + \\ _ = b.?(); \\ _ = baz(); \\} ); @@ -984,7 +984,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\} , \\pub export fn foo(x: ?[*]c_int) void { - \\ (??x).* = 1; + \\ x.?.* = 1; \\} ); @@ -1012,7 +1012,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\pub fn foo() c_int { \\ var x: c_int = 1234; \\ var ptr: ?[*]c_int = &x; - \\ return (??ptr).*; + \\ return ptr.?.*; \\} ); @@ -1119,7 +1119,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void { \\pub const glClearPFN = PFNGLCLEARPROC; , \\pub inline fn glClearUnion(arg0: GLbitfield) void { - \\ return (??glProcs.gl.Clear)(arg0); + \\ return glProcs.gl.Clear.?(arg0); \\} , \\pub const OpenGLProcs = union_OpenGLProcs; -- cgit v1.2.3 From 4ec09ac243afa0b784669e618ec09e9e444a0275 Mon Sep 17 00:00:00 2001 From: Alexandros Naskos Date: Thu, 14 Jun 2018 17:57:28 +0300 Subject: Enabled optional types of zero bit types with no LLVM DI type. 
(#1110) * Zero bit optional types do not need a LLVM DI type --- src/analyze.cpp | 3 ++- test/cases/null.zig | 11 +++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index 0aa5ea5dcb..cbeac7bc21 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -522,7 +522,6 @@ TypeTableEntry *get_maybe_type(CodeGen *g, TypeTableEntry *child_type) { TypeTableEntry *entry = new_type_table_entry(TypeTableEntryIdOptional); assert(child_type->type_ref || child_type->zero_bits); - assert(child_type->di_type); entry->is_copyable = type_is_copyable(g, child_type); buf_resize(&entry->name, 0); @@ -532,12 +531,14 @@ TypeTableEntry *get_maybe_type(CodeGen *g, TypeTableEntry *child_type) { entry->type_ref = LLVMInt1Type(); entry->di_type = g->builtin_types.entry_bool->di_type; } else if (type_is_codegen_pointer(child_type)) { + assert(child_type->di_type); // this is an optimization but also is necessary for calling C // functions where all pointers are maybe pointers // function types are technically pointers entry->type_ref = child_type->type_ref; entry->di_type = child_type->di_type; } else { + assert(child_type->di_type); // create a struct with a boolean whether this is the null value LLVMTypeRef elem_types[] = { child_type->type_ref, diff --git a/test/cases/null.zig b/test/cases/null.zig index cdcfd23efb..d2a9aaed55 100644 --- a/test/cases/null.zig +++ b/test/cases/null.zig @@ -143,3 +143,14 @@ test "null with default unwrap" { const x: i32 = null orelse 1; assert(x == 1); } + +test "optional types" { + comptime { + const opt_type_struct = StructWithOptionalType { .t=u8, }; + assert(opt_type_struct.t != null and opt_type_struct.t.? == u8); + } +} + +const StructWithOptionalType = struct { + t: ?type, +}; -- cgit v1.2.3 From 59b3dc8907f76b93caa689732e878a5bfa2f65c2 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 13 Jun 2018 22:40:38 -0400 Subject: allow passing by non-copying value closes #733 --- doc/langref.html.in | 37 ++++++++++++++----------------------- src/analyze.cpp | 11 ++++------- test/cases/fn.zig | 13 +++++++++++++ test/compile_errors.zig | 23 ----------------------- 4 files changed, 31 insertions(+), 53 deletions(-) (limited to 'src/analyze.cpp') diff --git a/doc/langref.html.in b/doc/langref.html.in index 814de721a6..b32c8165e2 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -2797,39 +2797,30 @@ fn foo() void { } {#code_end#} {#header_open|Pass-by-value Parameters#}

      - In Zig, structs, unions, and enums with payloads cannot be passed by value - to a function. + In Zig, structs, unions, and enums with payloads can be passed directly to a function:

      - {#code_begin|test_err|not copyable; cannot pass by value#} -const Foo = struct { + {#code_begin|test#} +const Point = struct { x: i32, + y: i32, }; -fn bar(foo: Foo) void {} - -test "pass aggregate type by value to function" { - bar(Foo {.x = 12,}); +fn foo(point: Point) i32 { + return point.x + point.y; } - {#code_end#} -

      - Instead, one must use *const. Zig allows implicitly casting something - to a const pointer to it: -

      - {#code_begin|test#} -const Foo = struct { - x: i32, -}; -fn bar(foo: *const Foo) void {} +const assert = @import("std").debug.assert; -test "implicitly cast to const pointer" { - bar(Foo {.x = 12,}); +test "pass aggregate type by non-copy value to function" { + assert(foo(Point{ .x = 1, .y = 2 }) == 3); } {#code_end#}

      - However, - the C ABI does allow passing structs and unions by value. So functions which - use the C calling convention may pass structs and unions by value. + In this case, the value may be passed by reference, or by value, whichever way + Zig decides will be faster. +

      +

      + For extern functions, Zig follows the C ABI for passing structs and unions by value.

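As a rough, illustrative sketch of the C-ABI note above (this example is not part of the patch; the names Point and addPoint are made up), an exported function uses the C calling convention, so the struct really is passed by value:

const Point = extern struct {
    x: i32,
    y: i32,
};

// Exported functions use the C calling convention, so `Point` is passed by
// value according to the C ABI instead of Zig choosing between reference and
// value as described above.
export fn addPoint(p: Point) i32 {
    return p.x + p.y;
}
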
      {#header_close#} {#header_open|Function Reflection#} diff --git a/src/analyze.cpp b/src/analyze.cpp index cbeac7bc21..758bc1a045 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -1135,7 +1135,10 @@ TypeTableEntry *get_fn_type(CodeGen *g, FnTypeId *fn_type_id) { gen_param_info->src_index = i; gen_param_info->gen_index = SIZE_MAX; - type_ensure_zero_bits_known(g, type_entry); + ensure_complete_type(g, type_entry); + if (type_is_invalid(type_entry)) + return g->builtin_types.entry_invalid; + if (type_has_bits(type_entry)) { TypeTableEntry *gen_type; if (handle_is_ptr(type_entry)) { @@ -1546,12 +1549,6 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c case TypeTableEntryIdUnion: case TypeTableEntryIdFn: case TypeTableEntryIdPromise: - ensure_complete_type(g, type_entry); - if (calling_convention_allows_zig_types(fn_type_id.cc) && !type_is_copyable(g, type_entry)) { - add_node_error(g, param_node->data.param_decl.type, - buf_sprintf("type '%s' is not copyable; cannot pass by value", buf_ptr(&type_entry->name))); - return g->builtin_types.entry_invalid; - } break; } FnTypeParamInfo *param_info = &fn_type_id.param_info[fn_type_id.next_param_index]; diff --git a/test/cases/fn.zig b/test/cases/fn.zig index dfb254c6aa..2426a411df 100644 --- a/test/cases/fn.zig +++ b/test/cases/fn.zig @@ -119,3 +119,16 @@ test "assign inline fn to const variable" { } inline fn inlineFn() void {} + +test "pass by non-copying value" { + assert(bar(Point{ .x = 1, .y = 2 }) == 3); +} + +const Point = struct { + x: i32, + y: i32, +}; + +fn bar(pt: Point) i32 { + return pt.x + pt.y; +} diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 06f17a37ee..60ba255172 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -2573,15 +2573,6 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { break :x tc; }); - cases.add( - "pass non-copyable type by value to function", - \\const Point = struct { x: i32, y: i32, }; - \\fn foo(p: Point) void { } - \\export fn entry() usize { return @sizeOf(@typeOf(foo)); } - , - ".tmp_source.zig:2:11: error: type 'Point' is not copyable; cannot pass by value", - ); - cases.add( "implicit cast from array to mutable slice", \\var global_array: [10]i32 = undefined; @@ -4066,20 +4057,6 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { ".tmp_source.zig:3:5: note: field 'A' has type 'i32'", ); - cases.add( - "self-referencing function pointer field", - \\const S = struct { - \\ f: fn(_: S) void, - \\}; - \\fn f(_: S) void { - \\} - \\export fn entry() void { - \\ var _ = S { .f = f }; - \\} - , - ".tmp_source.zig:4:9: error: type 'S' is not copyable; cannot pass by value", - ); - cases.add( "taking offset of void field in struct", \\const Empty = struct { -- cgit v1.2.3 From 8fd7cc11e167c0b23892d6f22841bb6856d0f499 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 18 Jun 2018 11:12:15 -0400 Subject: disallow opaque as a return type of fn type syntax closes #1115 --- src/analyze.cpp | 1 + src/ir.cpp | 5 +++++ test/compile_errors.zig | 10 ++++++++++ 3 files changed, 16 insertions(+) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index 758bc1a045..10cdb0af6f 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -1022,6 +1022,7 @@ TypeTableEntry *get_fn_type(CodeGen *g, FnTypeId *fn_type_id) { ensure_complete_type(g, fn_type_id->return_type); if (type_is_invalid(fn_type_id->return_type)) return g->builtin_types.entry_invalid; + assert(fn_type_id->return_type->id != 
TypeTableEntryIdOpaque); } else { zig_panic("TODO implement inferred return types https://github.com/ziglang/zig/issues/447"); } diff --git a/src/ir.cpp b/src/ir.cpp index a312b501ab..c75a3ae7c1 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -18676,6 +18676,11 @@ static TypeTableEntry *ir_analyze_instruction_fn_proto(IrAnalyze *ira, IrInstruc fn_type_id.return_type = ir_resolve_type(ira, return_type_value); if (type_is_invalid(fn_type_id.return_type)) return ira->codegen->builtin_types.entry_invalid; + if (fn_type_id.return_type->id == TypeTableEntryIdOpaque) { + ir_add_error(ira, instruction->return_type, + buf_sprintf("return type cannot be opaque")); + return ira->codegen->builtin_types.entry_invalid; + } if (fn_type_id.cc == CallingConventionAsync) { if (instruction->async_allocator_type_value == nullptr) { diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 23337ca479..8c5abaaccc 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -1,6 +1,15 @@ const tests = @import("tests.zig"); pub fn addCases(cases: *tests.CompileErrorContext) void { + cases.add( + "use c_void as return type of fn ptr", + \\export fn entry() void { + \\ const a: fn () c_void = undefined; + \\} + , + ".tmp_source.zig:2:20: error: return type cannot be opaque", + ); + cases.add( "non int passed to @intToFloat", \\export fn entry() void { @@ -9,6 +18,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { , ".tmp_source.zig:2:32: error: expected int type, found 'comptime_float'", ); + cases.add( "use implicit casts to assign null to non-nullable pointer", \\export fn entry() void { -- cgit v1.2.3 From 7c99c30bf406342a45833963ce630bb104aef00e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 19 Jun 2018 19:35:35 -0400 Subject: fix calling method with comptime pass-by-non-copyign-value self arg closes #1124 --- src/analyze.cpp | 11 +++++++++++ test/cases/eval.zig | 14 ++++++++++++++ 2 files changed, 25 insertions(+) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index 10cdb0af6f..479abef16a 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -1470,6 +1470,17 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c calling_convention_name(fn_type_id.cc))); return g->builtin_types.entry_invalid; } + if (param_node->data.param_decl.type != nullptr) { + TypeTableEntry *type_entry = analyze_type_expr(g, child_scope, param_node->data.param_decl.type); + if (type_is_invalid(type_entry)) { + return g->builtin_types.entry_invalid; + } + FnTypeParamInfo *param_info = &fn_type_id.param_info[fn_type_id.next_param_index]; + param_info->type = type_entry; + param_info->is_noalias = param_node->data.param_decl.is_noalias; + fn_type_id.next_param_index += 1; + } + return get_generic_fn_type(g, &fn_type_id); } else if (param_is_var_args) { if (fn_type_id.cc == CallingConventionC) { diff --git a/test/cases/eval.zig b/test/cases/eval.zig index 6c919e17a6..756ffe339a 100644 --- a/test/cases/eval.zig +++ b/test/cases/eval.zig @@ -623,3 +623,17 @@ test "function which returns struct with type field causes implicit comptime" { const ty = wrap(i32).T; assert(ty == i32); } + +test "call method with comptime pass-by-non-copying-value self parameter" { + const S = struct { + a: u8, + + fn b(comptime s: this) u8 { + return s.a; + } + }; + + const s = S{ .a = 2 }; + var b = s.b(); + assert(b == 2); +} -- cgit v1.2.3 From 459d72f8736ebd8372b9050c17e5f3bc00092573 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 21 Jun 2018 17:41:49 -0400 
Subject: fix compiler crash for invalid enum closes #1079 closes #1147 --- src/analyze.cpp | 5 +++-- test/compile_errors.zig | 13 +++++++++++++ 2 files changed, 16 insertions(+), 2 deletions(-) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index 479abef16a..5160a19e81 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -2318,8 +2318,9 @@ static void resolve_enum_zero_bits(CodeGen *g, TypeTableEntry *enum_type) { return; if (enum_type->data.enumeration.zero_bits_loop_flag) { - enum_type->data.enumeration.zero_bits_known = true; - enum_type->data.enumeration.zero_bits_loop_flag = false; + add_node_error(g, enum_type->data.enumeration.decl_node, + buf_sprintf("'%s' depends on itself", buf_ptr(&enum_type->name))); + enum_type->data.enumeration.is_invalid = true; return; } diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 17896f9ab9..2247f0af96 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -1,6 +1,19 @@ const tests = @import("tests.zig"); pub fn addCases(cases: *tests.CompileErrorContext) void { + cases.add( + "enum field value references enum", + \\pub const Foo = extern enum { + \\ A = Foo.B, + \\ C = D, + \\}; + \\export fn entry() void { + \\ var s: Foo = Foo.E; + \\} + , + ".tmp_source.zig:1:17: error: 'Foo' depends on itself", + ); + cases.add( "@floatToInt comptime safety", \\comptime { -- cgit v1.2.3 From af95e1557214df4a1a34a712efc2f8dafb502c82 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 26 Jun 2018 15:10:11 -0400 Subject: rename get_maybe_type to get_optional_type --- src/all_types.hpp | 2 +- src/analyze.cpp | 12 ++++++------ src/analyze.hpp | 2 +- src/ir.cpp | 38 +++++++++++++++++++------------------- 4 files changed, 27 insertions(+), 27 deletions(-) (limited to 'src/analyze.cpp') diff --git a/src/all_types.hpp b/src/all_types.hpp index 12e054cbeb..019dcb182e 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1233,7 +1233,7 @@ struct TypeTableEntry { // use these fields to make sure we don't duplicate type table entries for the same type TypeTableEntry *pointer_parent[2]; // [0 - mut, 1 - const] - TypeTableEntry *maybe_parent; + TypeTableEntry *optional_parent; TypeTableEntry *promise_parent; TypeTableEntry *promise_frame_parent; // If we generate a constant name value for this type, we memoize it here. 
diff --git a/src/analyze.cpp b/src/analyze.cpp index 5160a19e81..c018ee4e92 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -482,7 +482,7 @@ TypeTableEntry *get_promise_frame_type(CodeGen *g, TypeTableEntry *return_type) return return_type->promise_frame_parent; } - TypeTableEntry *awaiter_handle_type = get_maybe_type(g, g->builtin_types.entry_promise); + TypeTableEntry *awaiter_handle_type = get_optional_type(g, g->builtin_types.entry_promise); TypeTableEntry *result_ptr_type = get_pointer_to_type(g, return_type, false); ZigList field_names = {}; @@ -513,9 +513,9 @@ TypeTableEntry *get_promise_frame_type(CodeGen *g, TypeTableEntry *return_type) return entry; } -TypeTableEntry *get_maybe_type(CodeGen *g, TypeTableEntry *child_type) { - if (child_type->maybe_parent) { - TypeTableEntry *entry = child_type->maybe_parent; +TypeTableEntry *get_optional_type(CodeGen *g, TypeTableEntry *child_type) { + if (child_type->optional_parent) { + TypeTableEntry *entry = child_type->optional_parent; return entry; } else { ensure_complete_type(g, child_type); @@ -592,7 +592,7 @@ TypeTableEntry *get_maybe_type(CodeGen *g, TypeTableEntry *child_type) { entry->data.maybe.child_type = child_type; - child_type->maybe_parent = entry; + child_type->optional_parent = entry; return entry; } } @@ -2996,7 +2996,7 @@ static void typecheck_panic_fn(CodeGen *g, FnTableEntry *panic_fn) { return wrong_panic_prototype(g, proto_node, fn_type); } - TypeTableEntry *optional_ptr_to_stack_trace_type = get_maybe_type(g, get_ptr_to_stack_trace_type(g)); + TypeTableEntry *optional_ptr_to_stack_trace_type = get_optional_type(g, get_ptr_to_stack_trace_type(g)); if (fn_type_id->param_info[1].type != optional_ptr_to_stack_trace_type) { return wrong_panic_prototype(g, proto_node, fn_type); } diff --git a/src/analyze.hpp b/src/analyze.hpp index 88e06b2390..c2730197e2 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -24,7 +24,7 @@ TypeTableEntry *get_int_type(CodeGen *g, bool is_signed, uint32_t size_in_bits); TypeTableEntry **get_c_int_type_ptr(CodeGen *g, CIntType c_int_type); TypeTableEntry *get_c_int_type(CodeGen *g, CIntType c_int_type); TypeTableEntry *get_fn_type(CodeGen *g, FnTypeId *fn_type_id); -TypeTableEntry *get_maybe_type(CodeGen *g, TypeTableEntry *child_type); +TypeTableEntry *get_optional_type(CodeGen *g, TypeTableEntry *child_type); TypeTableEntry *get_array_type(CodeGen *g, TypeTableEntry *child_type, uint64_t array_size); TypeTableEntry *get_slice_type(CodeGen *g, TypeTableEntry *ptr_type); TypeTableEntry *get_partial_container_type(CodeGen *g, Scope *scope, ContainerKind kind, diff --git a/src/ir.cpp b/src/ir.cpp index c6078e755d..1930bbb248 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -3044,7 +3044,7 @@ static IrInstruction *ir_gen_async_return(IrBuilder *irb, Scope *scope, AstNode ir_build_store_ptr(irb, scope, node, irb->exec->coro_result_field_ptr, return_value); IrInstruction *promise_type_val = ir_build_const_type(irb, scope, node, - get_maybe_type(irb->codegen, irb->codegen->builtin_types.entry_promise)); + get_optional_type(irb->codegen, irb->codegen->builtin_types.entry_promise)); // TODO replace replacement_value with @intToPtr(?promise, 0x1) when it doesn't crash zig IrInstruction *replacement_value = irb->exec->coro_handle; IrInstruction *maybe_await_handle = ir_build_atomic_rmw(irb, scope, node, @@ -6654,7 +6654,7 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *parent_scope, Ast ir_build_store_ptr(irb, parent_scope, node, result_ptr_field_ptr, my_result_var_ptr); IrInstruction 
*save_token = ir_build_coro_save(irb, parent_scope, node, irb->exec->coro_handle); IrInstruction *promise_type_val = ir_build_const_type(irb, parent_scope, node, - get_maybe_type(irb->codegen, irb->codegen->builtin_types.entry_promise)); + get_optional_type(irb->codegen, irb->codegen->builtin_types.entry_promise)); IrInstruction *maybe_await_handle = ir_build_atomic_rmw(irb, parent_scope, node, promise_type_val, awaiter_field_ptr, nullptr, irb->exec->coro_handle, nullptr, AtomicRmwOp_xchg, AtomicOrderSeqCst); @@ -6988,7 +6988,7 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec VariableTableEntry *await_handle_var = ir_create_var(irb, node, coro_scope, nullptr, false, false, true, const_bool_false); IrInstruction *null_value = ir_build_const_null(irb, coro_scope, node); IrInstruction *await_handle_type_val = ir_build_const_type(irb, coro_scope, node, - get_maybe_type(irb->codegen, irb->codegen->builtin_types.entry_promise)); + get_optional_type(irb->codegen, irb->codegen->builtin_types.entry_promise)); ir_build_var_decl(irb, coro_scope, node, await_handle_var, await_handle_type_val, nullptr, null_value); irb->exec->await_handle_var_ptr = ir_build_var_ptr(irb, coro_scope, node, await_handle_var); @@ -8762,7 +8762,7 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod } else if (prev_inst->value.type->id == TypeTableEntryIdOptional) { return prev_inst->value.type; } else { - return get_maybe_type(ira->codegen, prev_inst->value.type); + return get_optional_type(ira->codegen, prev_inst->value.type); } } else { return prev_inst->value.type; @@ -12127,7 +12127,7 @@ static TypeTableEntry *ir_analyze_instruction_error_return_trace(IrAnalyze *ira, { if (instruction->optional == IrInstructionErrorReturnTrace::Null) { TypeTableEntry *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(ira->codegen); - TypeTableEntry *optional_type = get_maybe_type(ira->codegen, ptr_to_stack_trace_type); + TypeTableEntry *optional_type = get_optional_type(ira->codegen, ptr_to_stack_trace_type); if (!exec_has_err_ret_trace(ira->codegen, ira->new_irb.exec)) { ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base); assert(get_codegen_ptr_type(optional_type) != nullptr); @@ -13105,7 +13105,7 @@ static TypeTableEntry *ir_analyze_maybe(IrAnalyze *ira, IrInstructionUnOp *un_op case TypeTableEntryIdPromise: { ConstExprValue *out_val = ir_build_const_from(ira, &un_op_instruction->base); - out_val->data.x_type = get_maybe_type(ira->codegen, type_entry); + out_val->data.x_type = get_optional_type(ira->codegen, type_entry); return ira->codegen->builtin_types.entry_type; } case TypeTableEntryIdUnreachable: @@ -16326,7 +16326,7 @@ static bool ir_make_type_info_defs(IrAnalyze *ira, ConstExprValue *out_val, Scop true, false, PtrLenUnknown, get_abi_alignment(ira->codegen, ira->codegen->builtin_types.entry_u8), 0, 0); - fn_def_fields[6].type = get_maybe_type(ira->codegen, get_slice_type(ira->codegen, u8_ptr)); + fn_def_fields[6].type = get_optional_type(ira->codegen, get_slice_type(ira->codegen, u8_ptr)); if (fn_node->is_extern && buf_len(fn_node->lib_name) > 0) { fn_def_fields[6].data.x_optional = create_const_vals(1); ConstExprValue *lib_name = create_const_str_lit(ira->codegen, fn_node->lib_name); @@ -16609,7 +16609,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t // child: ?type ensure_field_index(result->type, "child", 0); fields[0].special = ConstValSpecialStatic; - fields[0].type = get_maybe_type(ira->codegen, 
ira->codegen->builtin_types.entry_type); + fields[0].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type); if (type_entry->data.promise.result_type == nullptr) fields[0].data.x_optional = nullptr; @@ -16763,7 +16763,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t // tag_type: ?type ensure_field_index(result->type, "tag_type", 1); fields[1].special = ConstValSpecialStatic; - fields[1].type = get_maybe_type(ira->codegen, ira->codegen->builtin_types.entry_type); + fields[1].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type); AstNode *union_decl_node = type_entry->data.unionation.decl_node; if (union_decl_node->data.container_decl.auto_enum || @@ -16803,7 +16803,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t ConstExprValue *inner_fields = create_const_vals(3); inner_fields[1].special = ConstValSpecialStatic; - inner_fields[1].type = get_maybe_type(ira->codegen, type_info_enum_field_type); + inner_fields[1].type = get_optional_type(ira->codegen, type_info_enum_field_type); if (fields[1].data.x_optional == nullptr) { inner_fields[1].data.x_optional = nullptr; @@ -16874,7 +16874,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t ConstExprValue *inner_fields = create_const_vals(3); inner_fields[1].special = ConstValSpecialStatic; - inner_fields[1].type = get_maybe_type(ira->codegen, ira->codegen->builtin_types.entry_usize); + inner_fields[1].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_usize); if (!type_has_bits(struct_field->type_entry)) { inner_fields[1].data.x_optional = nullptr; @@ -16934,7 +16934,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t // return_type: ?type ensure_field_index(result->type, "return_type", 3); fields[3].special = ConstValSpecialStatic; - fields[3].type = get_maybe_type(ira->codegen, ira->codegen->builtin_types.entry_type); + fields[3].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type); if (type_entry->data.fn.fn_type_id.return_type == nullptr) fields[3].data.x_optional = nullptr; else { @@ -16947,7 +16947,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t // async_allocator_type: type ensure_field_index(result->type, "async_allocator_type", 4); fields[4].special = ConstValSpecialStatic; - fields[4].type = get_maybe_type(ira->codegen, ira->codegen->builtin_types.entry_type); + fields[4].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type); if (type_entry->data.fn.fn_type_id.async_allocator_type == nullptr) fields[4].data.x_optional = nullptr; else { @@ -16990,7 +16990,7 @@ static ConstExprValue *ir_make_type_info_value(IrAnalyze *ira, TypeTableEntry *t inner_fields[1].type = ira->codegen->builtin_types.entry_bool; inner_fields[1].data.x_bool = fn_param_info->is_noalias; inner_fields[2].special = ConstValSpecialStatic; - inner_fields[2].type = get_maybe_type(ira->codegen, ira->codegen->builtin_types.entry_type); + inner_fields[2].type = get_optional_type(ira->codegen, ira->codegen->builtin_types.entry_type); if (arg_is_generic) inner_fields[2].data.x_optional = nullptr; @@ -17342,7 +17342,7 @@ static TypeTableEntry *ir_analyze_instruction_cmpxchg(IrAnalyze *ira, IrInstruct IrInstruction *result = ir_build_cmpxchg(&ira->new_irb, instruction->base.scope, instruction->base.source_node, nullptr, casted_ptr, casted_cmp_value, casted_new_value, nullptr, nullptr, 
instruction->is_weak, operand_type, success_order, failure_order); - result->value.type = get_maybe_type(ira->codegen, operand_type); + result->value.type = get_optional_type(ira->codegen, operand_type); ir_link_new_instruction(result, &instruction->base); ir_add_alloca(ira, result, result->value.type); return result->value.type; @@ -19013,7 +19013,7 @@ static IrInstruction *ir_align_cast(IrAnalyze *ira, IrInstruction *target, uint3 old_align_bytes = ptr_type->data.pointer.alignment; TypeTableEntry *better_ptr_type = adjust_ptr_align(ira->codegen, ptr_type, align_bytes); - result_type = get_maybe_type(ira->codegen, better_ptr_type); + result_type = get_optional_type(ira->codegen, better_ptr_type); } else if (target_type->id == TypeTableEntryIdOptional && target_type->data.maybe.child_type->id == TypeTableEntryIdFn) { @@ -19021,7 +19021,7 @@ static IrInstruction *ir_align_cast(IrAnalyze *ira, IrInstruction *target, uint3 old_align_bytes = fn_type_id.alignment; fn_type_id.alignment = align_bytes; TypeTableEntry *fn_type = get_fn_type(ira->codegen, &fn_type_id); - result_type = get_maybe_type(ira->codegen, fn_type); + result_type = get_optional_type(ira->codegen, fn_type); } else if (is_slice(target_type)) { TypeTableEntry *slice_ptr_type = target_type->data.structure.fields[slice_ptr_index].type_entry; old_align_bytes = slice_ptr_type->data.pointer.alignment; @@ -19782,7 +19782,7 @@ static TypeTableEntry *ir_analyze_instruction_coro_free(IrAnalyze *ira, IrInstru instruction->base.source_node, coro_id, coro_handle); ir_link_new_instruction(result, &instruction->base); TypeTableEntry *ptr_type = get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, false); - result->value.type = get_maybe_type(ira->codegen, ptr_type); + result->value.type = get_optional_type(ira->codegen, ptr_type); return result->value.type; } @@ -19850,7 +19850,7 @@ static TypeTableEntry *ir_analyze_instruction_coro_alloc_helper(IrAnalyze *ira, instruction->base.source_node, alloc_fn, coro_size); ir_link_new_instruction(result, &instruction->base); TypeTableEntry *u8_ptr_type = get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, false); - result->value.type = get_maybe_type(ira->codegen, u8_ptr_type); + result->value.type = get_optional_type(ira->codegen, u8_ptr_type); return result->value.type; } -- cgit v1.2.3 From fd75e73ee9818f12fd81d8fdb3cb949c492d664a Mon Sep 17 00:00:00 2001 From: Ben Noordhuis Date: Wed, 27 Jun 2018 16:20:04 +0200 Subject: add f16 type Add support for half-precision floating point operations. Introduce `__extendhfsf2` and `__truncsfhf2` in std/special/compiler_rt. Add `__gnu_h2f_ieee` and `__gnu_f2h_ieee` as aliases that are used in Windows builds. The logic in std/special/compiler_rt/extendXfYf2.zig has been reworked and can now operate on 16 bits floating point types. `extendXfYf2()` and `truncXfYf2()` are marked `inline` to work around a not entirely understood stack alignment issue on Windows when calling the f16 versions of the builtins. 
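As a rough sketch of what the new f16 support enables (illustrative only, not taken from this patch), ordinary arithmetic on runtime f16 values now goes through the half-precision softfloat/compiler_rt paths described above:

const assert = @import("std").debug.assert;

test "basic f16 arithmetic" {
    // Runtime (non-comptime) values, so the addition below exercises the new
    // half-precision code paths instead of being folded at compile time.
    var a: f16 = 1.5;
    var b: f16 = 2.5;
    assert(a + b == 4.0);
    assert(@sizeOf(f16) == 2);
}
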
closes #1122 --- CMakeLists.txt | 16 +++ src/all_types.hpp | 2 + src/analyze.cpp | 15 +++ src/bigfloat.cpp | 8 ++ src/bigfloat.hpp | 2 + src/codegen.cpp | 4 + src/ir.cpp | 151 ++++++++++++++++++++++++++- src/util.hpp | 19 ++++ std/special/compiler_rt/extendXfYf2.zig | 56 +++++----- std/special/compiler_rt/extendXfYf2_test.zig | 46 ++++++++ std/special/compiler_rt/index.zig | 5 + std/special/compiler_rt/truncXfYf2.zig | 111 ++++++++++++++++++++ std/special/compiler_rt/truncXfYf2_test.zig | 64 ++++++++++++ test/cases/cast.zig | 28 ++++- test/cases/math.zig | 12 ++- test/cases/misc.zig | 1 + 16 files changed, 505 insertions(+), 35 deletions(-) create mode 100644 std/special/compiler_rt/truncXfYf2.zig create mode 100644 std/special/compiler_rt/truncXfYf2_test.zig (limited to 'src/analyze.cpp') diff --git a/CMakeLists.txt b/CMakeLists.txt index 99de2328d2..789da4a8a6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -261,12 +261,15 @@ endif() set(EMBEDDED_SOFTFLOAT_SOURCES "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/f128M_isSignalingNaN.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_commonNaNToF128M.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_commonNaNToF16UI.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_commonNaNToF32UI.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_commonNaNToF64UI.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_f128MToCommonNaN.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_f16UIToCommonNaN.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_f32UIToCommonNaN.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_f64UIToCommonNaN.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_propagateNaNF128M.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/s_propagateNaNF16UI.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/8086/softfloat_raiseFlags.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f128M_add.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f128M_div.c" @@ -293,8 +296,20 @@ set(EMBEDDED_SOFTFLOAT_SOURCES "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f128M_to_ui32_r_minMag.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f128M_to_ui64.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f128M_to_ui64_r_minMag.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_add.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_div.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_eq.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_lt.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_mul.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_rem.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_roundToInt.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_sqrt.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_sub.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_to_f128M.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f16_to_f64.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f32_to_f128M.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f64_to_f128M.c" + "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/f64_to_f16.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_add256M.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_addCarryM.c" "${CMAKE_SOURCE_DIR}/deps/SoftFloat-3e/source/s_addComplCarryM.c" @@ -572,6 +587,7 @@ set(ZIG_STD_FILES "special/compiler_rt/floatuntidf.zig" "special/compiler_rt/muloti4.zig" "special/compiler_rt/index.zig" + "special/compiler_rt/truncXfYf2.zig" "special/compiler_rt/udivmod.zig" "special/compiler_rt/udivmoddi4.zig" 
"special/compiler_rt/udivmodti4.zig" diff --git a/src/all_types.hpp b/src/all_types.hpp index 019dcb182e..5d449491c8 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -258,6 +258,7 @@ struct ConstExprValue { // populated if special == ConstValSpecialStatic BigInt x_bigint; BigFloat x_bigfloat; + float16_t x_f16; float x_f32; double x_f64; float128_t x_f128; @@ -1598,6 +1599,7 @@ struct CodeGen { TypeTableEntry *entry_i128; TypeTableEntry *entry_isize; TypeTableEntry *entry_usize; + TypeTableEntry *entry_f16; TypeTableEntry *entry_f32; TypeTableEntry *entry_f64; TypeTableEntry *entry_f128; diff --git a/src/analyze.cpp b/src/analyze.cpp index c018ee4e92..25cc1c79d0 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -4668,6 +4668,13 @@ static uint32_t hash_const_val(ConstExprValue *const_val) { } case TypeTableEntryIdFloat: switch (const_val->type->data.floating.bit_count) { + case 16: + { + uint16_t result; + static_assert(sizeof(result) == sizeof(const_val->data.x_f16), ""); + memcpy(&result, &const_val->data.x_f16, sizeof(result)); + return result * 65537u; + } case 32: { uint32_t result; @@ -5128,6 +5135,9 @@ void init_const_float(ConstExprValue *const_val, TypeTableEntry *type, double va bigfloat_init_64(&const_val->data.x_bigfloat, value); } else if (type->id == TypeTableEntryIdFloat) { switch (type->data.floating.bit_count) { + case 16: + const_val->data.x_f16 = zig_double_to_f16(value); + break; case 32: const_val->data.x_f32 = value; break; @@ -5441,6 +5451,8 @@ bool const_values_equal(ConstExprValue *a, ConstExprValue *b) { case TypeTableEntryIdFloat: assert(a->type->data.floating.bit_count == b->type->data.floating.bit_count); switch (a->type->data.floating.bit_count) { + case 16: + return f16_eq(a->data.x_f16, b->data.x_f16); case 32: return a->data.x_f32 == b->data.x_f32; case 64: @@ -5614,6 +5626,9 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) { return; case TypeTableEntryIdFloat: switch (type_entry->data.floating.bit_count) { + case 16: + buf_appendf(buf, "%f", zig_f16_to_double(const_val->data.x_f16)); + return; case 32: buf_appendf(buf, "%f", const_val->data.x_f32); return; diff --git a/src/bigfloat.cpp b/src/bigfloat.cpp index dcb6db61db..cc442fa3b7 100644 --- a/src/bigfloat.cpp +++ b/src/bigfloat.cpp @@ -18,6 +18,10 @@ void bigfloat_init_128(BigFloat *dest, float128_t x) { dest->value = x; } +void bigfloat_init_16(BigFloat *dest, float16_t x) { + f16_to_f128M(x, &dest->value); +} + void bigfloat_init_32(BigFloat *dest, float x) { float32_t f32_val; memcpy(&f32_val, &x, sizeof(float)); @@ -146,6 +150,10 @@ Cmp bigfloat_cmp(const BigFloat *op1, const BigFloat *op2) { } } +float16_t bigfloat_to_f16(const BigFloat *bigfloat) { + return f128M_to_f16(&bigfloat->value); +} + float bigfloat_to_f32(const BigFloat *bigfloat) { float32_t f32_value = f128M_to_f32(&bigfloat->value); float result; diff --git a/src/bigfloat.hpp b/src/bigfloat.hpp index e212c30c87..c6ae567945 100644 --- a/src/bigfloat.hpp +++ b/src/bigfloat.hpp @@ -22,6 +22,7 @@ struct BigFloat { struct Buf; +void bigfloat_init_16(BigFloat *dest, float16_t x); void bigfloat_init_32(BigFloat *dest, float x); void bigfloat_init_64(BigFloat *dest, double x); void bigfloat_init_128(BigFloat *dest, float128_t x); @@ -29,6 +30,7 @@ void bigfloat_init_bigfloat(BigFloat *dest, const BigFloat *x); void bigfloat_init_bigint(BigFloat *dest, const BigInt *op); int bigfloat_init_buf_base10(BigFloat *dest, const uint8_t *buf_ptr, size_t buf_len); +float16_t bigfloat_to_f16(const BigFloat 
*bigfloat); float bigfloat_to_f32(const BigFloat *bigfloat); double bigfloat_to_f64(const BigFloat *bigfloat); float128_t bigfloat_to_f128(const BigFloat *bigfloat); diff --git a/src/codegen.cpp b/src/codegen.cpp index abec5a8ec7..4419f4fc84 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -17,6 +17,7 @@ #include "os.hpp" #include "translate_c.hpp" #include "target.hpp" +#include "util.hpp" #include "zig_llvm.h" #include @@ -5211,6 +5212,8 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c const_val->data.x_err_set->value, false); case TypeTableEntryIdFloat: switch (type_entry->data.floating.bit_count) { + case 16: + return LLVMConstReal(type_entry->type_ref, zig_f16_to_double(const_val->data.x_f16)); case 32: return LLVMConstReal(type_entry->type_ref, const_val->data.x_f32); case 64: @@ -6195,6 +6198,7 @@ static void define_builtin_types(CodeGen *g) { *field = entry; g->primitive_type_table.put(&entry->name, entry); }; + add_fp_entry(g, "f16", 16, LLVMHalfType(), &g->builtin_types.entry_f16); add_fp_entry(g, "f32", 32, LLVMFloatType(), &g->builtin_types.entry_f32); add_fp_entry(g, "f64", 64, LLVMDoubleType(), &g->builtin_types.entry_f64); add_fp_entry(g, "f128", 128, LLVMFP128Type(), &g->builtin_types.entry_f128); diff --git a/src/ir.cpp b/src/ir.cpp index 76178f2437..694f912145 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -11,9 +11,10 @@ #include "ir.hpp" #include "ir_print.hpp" #include "os.hpp" -#include "translate_c.hpp" #include "range_set.hpp" #include "softfloat.hpp" +#include "translate_c.hpp" +#include "util.hpp" struct IrExecContext { ConstExprValue *mem_slot_list; @@ -7238,6 +7239,11 @@ static bool float_has_fraction(ConstExprValue *const_val) { return bigfloat_has_fraction(&const_val->data.x_bigfloat); } else if (const_val->type->id == TypeTableEntryIdFloat) { switch (const_val->type->data.floating.bit_count) { + case 16: + { + float16_t floored = f16_roundToInt(const_val->data.x_f16, softfloat_round_minMag, false); + return !f16_eq(floored, const_val->data.x_f16); + } case 32: return floorf(const_val->data.x_f32) != const_val->data.x_f32; case 64: @@ -7261,6 +7267,9 @@ static void float_append_buf(Buf *buf, ConstExprValue *const_val) { bigfloat_append_buf(buf, &const_val->data.x_bigfloat); } else if (const_val->type->id == TypeTableEntryIdFloat) { switch (const_val->type->data.floating.bit_count) { + case 16: + buf_appendf(buf, "%f", zig_f16_to_double(const_val->data.x_f16)); + break; case 32: buf_appendf(buf, "%f", const_val->data.x_f32); break; @@ -7296,6 +7305,17 @@ static void float_init_bigint(BigInt *bigint, ConstExprValue *const_val) { bigint_init_bigfloat(bigint, &const_val->data.x_bigfloat); } else if (const_val->type->id == TypeTableEntryIdFloat) { switch (const_val->type->data.floating.bit_count) { + case 16: + { + double x = zig_f16_to_double(const_val->data.x_f16); + if (x >= 0) { + bigint_init_unsigned(bigint, (uint64_t)x); + } else { + bigint_init_unsigned(bigint, (uint64_t)-x); + bigint->is_negative = true; + } + break; + } case 32: if (const_val->data.x_f32 >= 0) { bigint_init_unsigned(bigint, (uint64_t)(const_val->data.x_f32)); @@ -7332,6 +7352,9 @@ static void float_init_bigfloat(ConstExprValue *dest_val, BigFloat *bigfloat) { bigfloat_init_bigfloat(&dest_val->data.x_bigfloat, bigfloat); } else if (dest_val->type->id == TypeTableEntryIdFloat) { switch (dest_val->type->data.floating.bit_count) { + case 16: + dest_val->data.x_f16 = bigfloat_to_f16(bigfloat); + break; case 32: dest_val->data.x_f32 = 
bigfloat_to_f32(bigfloat); break; @@ -7349,11 +7372,39 @@ static void float_init_bigfloat(ConstExprValue *dest_val, BigFloat *bigfloat) { } } +static void float_init_f16(ConstExprValue *dest_val, float16_t x) { + if (dest_val->type->id == TypeTableEntryIdComptimeFloat) { + bigfloat_init_16(&dest_val->data.x_bigfloat, x); + } else if (dest_val->type->id == TypeTableEntryIdFloat) { + switch (dest_val->type->data.floating.bit_count) { + case 16: + dest_val->data.x_f16 = x; + break; + case 32: + dest_val->data.x_f32 = zig_f16_to_double(x); + break; + case 64: + dest_val->data.x_f64 = zig_f16_to_double(x); + break; + case 128: + f16_to_f128M(x, &dest_val->data.x_f128); + break; + default: + zig_unreachable(); + } + } else { + zig_unreachable(); + } +} + static void float_init_f32(ConstExprValue *dest_val, float x) { if (dest_val->type->id == TypeTableEntryIdComptimeFloat) { bigfloat_init_32(&dest_val->data.x_bigfloat, x); } else if (dest_val->type->id == TypeTableEntryIdFloat) { switch (dest_val->type->data.floating.bit_count) { + case 16: + dest_val->data.x_f16 = zig_double_to_f16(x); + break; case 32: dest_val->data.x_f32 = x; break; @@ -7380,6 +7431,9 @@ static void float_init_f64(ConstExprValue *dest_val, double x) { bigfloat_init_64(&dest_val->data.x_bigfloat, x); } else if (dest_val->type->id == TypeTableEntryIdFloat) { switch (dest_val->type->data.floating.bit_count) { + case 16: + dest_val->data.x_f16 = zig_double_to_f16(x); + break; case 32: dest_val->data.x_f32 = x; break; @@ -7406,6 +7460,9 @@ static void float_init_f128(ConstExprValue *dest_val, float128_t x) { bigfloat_init_128(&dest_val->data.x_bigfloat, x); } else if (dest_val->type->id == TypeTableEntryIdFloat) { switch (dest_val->type->data.floating.bit_count) { + case 16: + dest_val->data.x_f16 = f128M_to_f16(&x); + break; case 32: { float32_t f32_val = f128M_to_f32(&x); @@ -7436,6 +7493,9 @@ static void float_init_float(ConstExprValue *dest_val, ConstExprValue *src_val) float_init_bigfloat(dest_val, &src_val->data.x_bigfloat); } else if (src_val->type->id == TypeTableEntryIdFloat) { switch (src_val->type->data.floating.bit_count) { + case 16: + float_init_f16(dest_val, src_val->data.x_f16); + break; case 32: float_init_f32(dest_val, src_val->data.x_f32); break; @@ -7459,6 +7519,14 @@ static Cmp float_cmp(ConstExprValue *op1, ConstExprValue *op2) { return bigfloat_cmp(&op1->data.x_bigfloat, &op2->data.x_bigfloat); } else if (op1->type->id == TypeTableEntryIdFloat) { switch (op1->type->data.floating.bit_count) { + case 16: + if (f16_lt(op1->data.x_f16, op2->data.x_f16)) { + return CmpLT; + } else if (f16_lt(op2->data.x_f16, op1->data.x_f16)) { + return CmpGT; + } else { + return CmpEQ; + } case 32: if (op1->data.x_f32 > op2->data.x_f32) { return CmpGT; @@ -7496,6 +7564,17 @@ static Cmp float_cmp_zero(ConstExprValue *op) { return bigfloat_cmp_zero(&op->data.x_bigfloat); } else if (op->type->id == TypeTableEntryIdFloat) { switch (op->type->data.floating.bit_count) { + case 16: + { + const float16_t zero = zig_double_to_f16(0); + if (f16_lt(op->data.x_f16, zero)) { + return CmpLT; + } else if (f16_lt(zero, op->data.x_f16)) { + return CmpGT; + } else { + return CmpEQ; + } + } case 32: if (op->data.x_f32 < 0.0) { return CmpLT; @@ -7537,6 +7616,9 @@ static void float_add(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal bigfloat_add(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat); } else if (op1->type->id == TypeTableEntryIdFloat) { switch (op1->type->data.floating.bit_count) { + case 16: + 
out_val->data.x_f16 = f16_add(op1->data.x_f16, op2->data.x_f16); + return; case 32: out_val->data.x_f32 = op1->data.x_f32 + op2->data.x_f32; return; @@ -7561,6 +7643,9 @@ static void float_sub(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal bigfloat_sub(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat); } else if (op1->type->id == TypeTableEntryIdFloat) { switch (op1->type->data.floating.bit_count) { + case 16: + out_val->data.x_f16 = f16_sub(op1->data.x_f16, op2->data.x_f16); + return; case 32: out_val->data.x_f32 = op1->data.x_f32 - op2->data.x_f32; return; @@ -7585,6 +7670,9 @@ static void float_mul(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal bigfloat_mul(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat); } else if (op1->type->id == TypeTableEntryIdFloat) { switch (op1->type->data.floating.bit_count) { + case 16: + out_val->data.x_f16 = f16_mul(op1->data.x_f16, op2->data.x_f16); + return; case 32: out_val->data.x_f32 = op1->data.x_f32 * op2->data.x_f32; return; @@ -7609,6 +7697,9 @@ static void float_div(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal bigfloat_div(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat); } else if (op1->type->id == TypeTableEntryIdFloat) { switch (op1->type->data.floating.bit_count) { + case 16: + out_val->data.x_f16 = f16_div(op1->data.x_f16, op2->data.x_f16); + return; case 32: out_val->data.x_f32 = op1->data.x_f32 / op2->data.x_f32; return; @@ -7633,6 +7724,19 @@ static void float_div_trunc(ConstExprValue *out_val, ConstExprValue *op1, ConstE bigfloat_div_trunc(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat); } else if (op1->type->id == TypeTableEntryIdFloat) { switch (op1->type->data.floating.bit_count) { + case 16: + { + double a = zig_f16_to_double(op1->data.x_f16); + double b = zig_f16_to_double(op2->data.x_f16); + double c = a / b; + if (c >= 0.0) { + c = floor(c); + } else { + c = ceil(c); + } + out_val->data.x_f16 = zig_double_to_f16(c); + return; + } case 32: out_val->data.x_f32 = op1->data.x_f32 / op2->data.x_f32; if (out_val->data.x_f32 >= 0.0) { @@ -7668,6 +7772,10 @@ static void float_div_floor(ConstExprValue *out_val, ConstExprValue *op1, ConstE bigfloat_div_floor(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat); } else if (op1->type->id == TypeTableEntryIdFloat) { switch (op1->type->data.floating.bit_count) { + case 16: + out_val->data.x_f16 = f16_div(op1->data.x_f16, op2->data.x_f16); + out_val->data.x_f16 = f16_roundToInt(out_val->data.x_f16, softfloat_round_min, false); + return; case 32: out_val->data.x_f32 = floorf(op1->data.x_f32 / op2->data.x_f32); return; @@ -7693,6 +7801,9 @@ static void float_rem(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal bigfloat_rem(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat); } else if (op1->type->id == TypeTableEntryIdFloat) { switch (op1->type->data.floating.bit_count) { + case 16: + out_val->data.x_f16 = f16_rem(op1->data.x_f16, op2->data.x_f16); + return; case 32: out_val->data.x_f32 = fmodf(op1->data.x_f32, op2->data.x_f32); return; @@ -7710,6 +7821,16 @@ static void float_rem(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal } } +// c = a - b * trunc(a / b) +static float16_t zig_f16_mod(float16_t a, float16_t b) { + float16_t c; + c = f16_div(a, b); + c = f16_roundToInt(c, softfloat_round_min, true); + c = f16_mul(b, c); + c = f16_sub(a, c); + return c; +} + // c = a - b * trunc(a / b) static void 
zig_f128M_mod(const float128_t* a, const float128_t* b, float128_t* c) { f128M_div(a, b, c); @@ -7725,6 +7846,9 @@ static void float_mod(ConstExprValue *out_val, ConstExprValue *op1, ConstExprVal bigfloat_mod(&out_val->data.x_bigfloat, &op1->data.x_bigfloat, &op2->data.x_bigfloat); } else if (op1->type->id == TypeTableEntryIdFloat) { switch (op1->type->data.floating.bit_count) { + case 16: + out_val->data.x_f16 = zig_f16_mod(op1->data.x_f16, op2->data.x_f16); + return; case 32: out_val->data.x_f32 = fmodf(fmodf(op1->data.x_f32, op2->data.x_f32) + op2->data.x_f32, op2->data.x_f32); return; @@ -7748,6 +7872,12 @@ static void float_negate(ConstExprValue *out_val, ConstExprValue *op) { bigfloat_negate(&out_val->data.x_bigfloat, &op->data.x_bigfloat); } else if (op->type->id == TypeTableEntryIdFloat) { switch (op->type->data.floating.bit_count) { + case 16: + { + const float16_t zero = zig_double_to_f16(0); + out_val->data.x_f16 = f16_sub(zero, op->data.x_f16); + return; + } case 32: out_val->data.x_f32 = -op->data.x_f32; return; @@ -7770,6 +7900,9 @@ static void float_negate(ConstExprValue *out_val, ConstExprValue *op) { void float_write_ieee597(ConstExprValue *op, uint8_t *buf, bool is_big_endian) { if (op->type->id == TypeTableEntryIdFloat) { switch (op->type->data.floating.bit_count) { + case 16: + memcpy(buf, &op->data.x_f16, 2); // TODO wrong when compiler is big endian + return; case 32: memcpy(buf, &op->data.x_f32, 4); // TODO wrong when compiler is big endian return; @@ -7790,6 +7923,9 @@ void float_write_ieee597(ConstExprValue *op, uint8_t *buf, bool is_big_endian) { void float_read_ieee597(ConstExprValue *val, uint8_t *buf, bool is_big_endian) { if (val->type->id == TypeTableEntryIdFloat) { switch (val->type->data.floating.bit_count) { + case 16: + memcpy(&val->data.x_f16, buf, 2); // TODO wrong when compiler is big endian + return; case 32: memcpy(&val->data.x_f32, buf, 4); // TODO wrong when compiler is big endian return; @@ -8817,6 +8953,9 @@ static bool eval_const_expr_implicit_cast(IrAnalyze *ira, IrInstruction *source_ if (other_val->type->id == TypeTableEntryIdComptimeFloat) { assert(new_type->id == TypeTableEntryIdFloat); switch (new_type->data.floating.bit_count) { + case 16: + const_val->data.x_f16 = bigfloat_to_f16(&other_val->data.x_bigfloat); + break; case 32: const_val->data.x_f32 = bigfloat_to_f32(&other_val->data.x_bigfloat); break; @@ -8847,6 +8986,9 @@ static bool eval_const_expr_implicit_cast(IrAnalyze *ira, IrInstruction *source_ BigFloat bigfloat; bigfloat_init_bigint(&bigfloat, &other_val->data.x_bigint); switch (new_type->data.floating.bit_count) { + case 16: + const_val->data.x_f16 = bigfloat_to_f16(&bigfloat); + break; case 32: const_val->data.x_f32 = bigfloat_to_f32(&bigfloat); break; @@ -20104,6 +20246,9 @@ static TypeTableEntry *ir_analyze_instruction_sqrt(IrAnalyze *ira, IrInstruction bigfloat_sqrt(&out_val->data.x_bigfloat, &val->data.x_bigfloat); } else if (float_type->id == TypeTableEntryIdFloat) { switch (float_type->data.floating.bit_count) { + case 16: + out_val->data.x_f16 = f16_sqrt(val->data.x_f16); + break; case 32: out_val->data.x_f32 = sqrtf(val->data.x_f32); break; @@ -20124,7 +20269,9 @@ static TypeTableEntry *ir_analyze_instruction_sqrt(IrAnalyze *ira, IrInstruction } assert(float_type->id == TypeTableEntryIdFloat); - if (float_type->data.floating.bit_count != 32 && float_type->data.floating.bit_count != 64) { + if (float_type->data.floating.bit_count != 16 && + float_type->data.floating.bit_count != 32 && + 
float_type->data.floating.bit_count != 64) { ir_add_error(ira, instruction->type, buf_sprintf("compiler TODO: add implementation of sqrt for '%s'", buf_ptr(&float_type->name))); return ira->codegen->builtin_types.entry_invalid; } diff --git a/src/util.hpp b/src/util.hpp index 52baab7ace..b0402137bd 100644 --- a/src/util.hpp +++ b/src/util.hpp @@ -31,6 +31,8 @@ #endif +#include "softfloat.hpp" + #define BREAKPOINT __asm("int $0x03") ATTRIBUTE_COLD @@ -165,4 +167,21 @@ static inline uint8_t log2_u64(uint64_t x) { return (63 - clzll(x)); } +static inline float16_t zig_double_to_f16(double x) { + float64_t y; + static_assert(sizeof(x) == sizeof(y), ""); + memcpy(&y, &x, sizeof(x)); + return f64_to_f16(y); +} + + +// Return value is safe to coerce to float even when |x| is NaN or Infinity. +static inline double zig_f16_to_double(float16_t x) { + float64_t y = f16_to_f64(x); + double z; + static_assert(sizeof(y) == sizeof(z), ""); + memcpy(&z, &y, sizeof(y)); + return z; +} + #endif diff --git a/std/special/compiler_rt/extendXfYf2.zig b/std/special/compiler_rt/extendXfYf2.zig index 6fa8cf4654..099e27b74a 100644 --- a/std/special/compiler_rt/extendXfYf2.zig +++ b/std/special/compiler_rt/extendXfYf2.zig @@ -10,9 +10,13 @@ pub extern fn __extendsftf2(a: f32) f128 { return extendXfYf2(f128, f32, a); } +pub extern fn __extendhfsf2(a: u16) f32 { + return extendXfYf2(f32, f16, @bitCast(f16, a)); +} + const CHAR_BIT = 8; -pub fn extendXfYf2(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t { +inline fn extendXfYf2(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t { const src_rep_t = @IntType(false, @typeInfo(src_t).Float.bits); const dst_rep_t = @IntType(false, @typeInfo(dst_t).Float.bits); const srcSigBits = std.math.floatMantissaBits(src_t); @@ -22,22 +26,22 @@ pub fn extendXfYf2(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t { // Various constants whose values follow from the type parameters. // Any reasonable optimizer will fold and propagate all of these. 
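An aside, not part of the patch: __extendhfsf2 is the compiler-rt conversion routine used to widen f16 to f32 in software (further down, index.zig also exports it under the alias __gnu_h2f_ieee). Since binary16 carries only 11 bits of precision and a 5-bit exponent, every finite f16 value is exactly representable in f32, so the widening is lossless. A minimal illustrative sketch of the user-visible behavior, assuming the f16 language support added earlier in this patch; the test name is made up:

    const assert = @import("std").debug.assert;

    test "widening an f16 is exact (illustrative)" {
        // 65504 is the largest finite f16; it appears in the extendhfsf2
        // test vectors below as 0x7bff -> 0x477fe000.
        var narrow: f16 = 65504.0;
        var wide: f32 = narrow; // lowered through __extendhfsf2 on targets without native f16
        assert(wide == 65504.0);
    }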
- const srcBits: i32 = @sizeOf(src_t) * CHAR_BIT; - const srcExpBits: i32 = srcBits - srcSigBits - 1; - const srcInfExp: i32 = (1 << srcExpBits) - 1; - const srcExpBias: i32 = srcInfExp >> 1; + const srcBits = @sizeOf(src_t) * CHAR_BIT; + const srcExpBits = srcBits - srcSigBits - 1; + const srcInfExp = (1 << srcExpBits) - 1; + const srcExpBias = srcInfExp >> 1; - const srcMinNormal: src_rep_t = src_rep_t(1) << srcSigBits; - const srcInfinity: src_rep_t = src_rep_t(@bitCast(u32, srcInfExp)) << srcSigBits; - const srcSignMask: src_rep_t = src_rep_t(1) << @intCast(SrcShift, srcSigBits +% srcExpBits); - const srcAbsMask: src_rep_t = srcSignMask -% 1; - const srcQNaN: src_rep_t = src_rep_t(1) << @intCast(SrcShift, srcSigBits -% 1); - const srcNaNCode: src_rep_t = srcQNaN -% 1; + const srcMinNormal = 1 << srcSigBits; + const srcInfinity = srcInfExp << srcSigBits; + const srcSignMask = 1 << (srcSigBits + srcExpBits); + const srcAbsMask = srcSignMask - 1; + const srcQNaN = 1 << (srcSigBits - 1); + const srcNaNCode = srcQNaN - 1; - const dstBits: i32 = @sizeOf(dst_t) * CHAR_BIT; - const dstExpBits: i32 = dstBits - dstSigBits - 1; - const dstInfExp: i32 = (1 << dstExpBits) - 1; - const dstExpBias: i32 = dstInfExp >> 1; + const dstBits = @sizeOf(dst_t) * CHAR_BIT; + const dstExpBits = dstBits - dstSigBits - 1; + const dstInfExp = (1 << dstExpBits) - 1; + const dstExpBias = dstInfExp >> 1; const dstMinNormal: dst_rep_t = dst_rep_t(1) << dstSigBits; @@ -47,38 +51,36 @@ pub fn extendXfYf2(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t { const sign: src_rep_t = aRep & srcSignMask; var absResult: dst_rep_t = undefined; - // If @sizeOf(src_rep_t) < @sizeOf(int), the subtraction result is promoted - // to (signed) int. To avoid that, explicitly cast to src_rep_t. - if ((src_rep_t)(aAbs -% srcMinNormal) < srcInfinity -% srcMinNormal) { + if (aAbs -% srcMinNormal < srcInfinity - srcMinNormal) { // a is a normal number. // Extend to the destination type by shifting the significand and // exponent into the proper position and rebiasing the exponent. - absResult = dst_rep_t(aAbs) << (dstSigBits -% srcSigBits); - absResult += dst_rep_t(@bitCast(u32, dstExpBias -% srcExpBias)) << dstSigBits; + absResult = dst_rep_t(aAbs) << (dstSigBits - srcSigBits); + absResult += (dstExpBias - srcExpBias) << dstSigBits; } else if (aAbs >= srcInfinity) { // a is NaN or infinity. // Conjure the result by beginning with infinity, then setting the qNaN // bit (if needed) and right-aligning the rest of the trailing NaN // payload field. - absResult = dst_rep_t(@bitCast(u32, dstInfExp)) << dstSigBits; - absResult |= (dst_rep_t)(aAbs & srcQNaN) << (dstSigBits - srcSigBits); - absResult |= (dst_rep_t)(aAbs & srcNaNCode) << (dstSigBits - srcSigBits); + absResult = dstInfExp << dstSigBits; + absResult |= dst_rep_t(aAbs & srcQNaN) << (dstSigBits - srcSigBits); + absResult |= dst_rep_t(aAbs & srcNaNCode) << (dstSigBits - srcSigBits); } else if (aAbs != 0) { // a is denormal. // renormalize the significand and clear the leading bit, then insert // the correct adjusted exponent in the destination type. 
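An aside on the denormal branch above, not part of the patch: the smallest f16 denormal, 2**-24 (bit pattern 0x0001), has no implicit leading bit, so it gets renormalized here and lands in f32's normal range as 0x33800000. That is exactly the first denormal vector in the extendhfsf2 tests further down; a hypothetical spot check in the same style:

    const __extendhfsf2 = @import("extendXfYf2.zig").__extendhfsf2;
    const assert = @import("std").debug.assert;

    test "smallest f16 denormal extends to a normal f32 (illustrative)" {
        // 2**-24 as an f32: sign 0, biased exponent 127 - 24 = 103 (0x67),
        // significand 0 => bits 0x33800000.
        assert(@bitCast(u32, __extendhfsf2(0x0001)) == 0x33800000);
    }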
- const scale: i32 = @clz(aAbs) - @clz(srcMinNormal); + const scale: u32 = @clz(aAbs) - @clz(src_rep_t(srcMinNormal)); absResult = dst_rep_t(aAbs) << @intCast(DstShift, dstSigBits - srcSigBits + scale); absResult ^= dstMinNormal; - const resultExponent: i32 = dstExpBias - srcExpBias - scale + 1; - absResult |= dst_rep_t(@bitCast(u32, resultExponent)) << @intCast(DstShift, dstSigBits); + const resultExponent: u32 = dstExpBias - srcExpBias - scale + 1; + absResult |= @intCast(dst_rep_t, resultExponent) << dstSigBits; } else { // a is zero. absResult = 0; } // Apply the signbit to (dst_t)abs(a). - const result: dst_rep_t align(@alignOf(dst_t)) = absResult | dst_rep_t(sign) << @intCast(DstShift, dstBits - srcBits); + const result: dst_rep_t align(@alignOf(dst_t)) = absResult | dst_rep_t(sign) << (dstBits - srcBits); return @bitCast(dst_t, result); } diff --git a/std/special/compiler_rt/extendXfYf2_test.zig b/std/special/compiler_rt/extendXfYf2_test.zig index 84fb410fbb..0168de12a5 100644 --- a/std/special/compiler_rt/extendXfYf2_test.zig +++ b/std/special/compiler_rt/extendXfYf2_test.zig @@ -1,4 +1,5 @@ const __extenddftf2 = @import("extendXfYf2.zig").__extenddftf2; +const __extendhfsf2 = @import("extendXfYf2.zig").__extendhfsf2; const __extendsftf2 = @import("extendXfYf2.zig").__extendsftf2; const assert = @import("std").debug.assert; @@ -24,6 +25,22 @@ fn test__extenddftf2(a: f64, expectedHi: u64, expectedLo: u64) void { @panic("__extenddftf2 test failure"); } +fn test__extendhfsf2(a: u16, expected: u32) void { + const x = __extendhfsf2(a); + const rep = @bitCast(u32, x); + + if (rep == expected) { + if (rep & 0x7fffffff > 0x7f800000) { + return; // NaN is always unequal. + } + if (x == @bitCast(f32, expected)) { + return; + } + } + + @panic("__extendhfsf2 test failure"); +} + fn test__extendsftf2(a: f32, expectedHi: u64, expectedLo: u64) void { const x = __extendsftf2(a); @@ -68,6 +85,35 @@ test "extenddftf2" { test__extenddftf2(0x1.edcba987654321fp-45, 0x3fd2edcba9876543, 0x2000000000000000); } +test "extendhfsf2" { + test__extendhfsf2(0x7e00, 0x7fc00000); // qNaN + test__extendhfsf2(0x7f00, 0x7fe00000); // sNaN + + test__extendhfsf2(0, 0); // 0 + test__extendhfsf2(0x8000, 0x80000000); // -0 + + test__extendhfsf2(0x7c00, 0x7f800000); // inf + test__extendhfsf2(0xfc00, 0xff800000); // -inf + + test__extendhfsf2(0x0001, 0x33800000); // denormal (min), 2**-24 + test__extendhfsf2(0x8001, 0xb3800000); // denormal (min), -2**-24 + + test__extendhfsf2(0x03ff, 0x387fc000); // denormal (max), 2**-14 - 2**-24 + test__extendhfsf2(0x83ff, 0xb87fc000); // denormal (max), -2**-14 + 2**-24 + + test__extendhfsf2(0x0400, 0x38800000); // normal (min), 2**-14 + test__extendhfsf2(0x8400, 0xb8800000); // normal (min), -2**-14 + + test__extendhfsf2(0x7bff, 0x477fe000); // normal (max), 65504 + test__extendhfsf2(0xfbff, 0xc77fe000); // normal (max), -65504 + + test__extendhfsf2(0x3c01, 0x3f802000); // normal, 1 + 2**-10 + test__extendhfsf2(0xbc01, 0xbf802000); // normal, -1 - 2**-10 + + test__extendhfsf2(0x3555, 0x3eaaa000); // normal, approx. 1/3 + test__extendhfsf2(0xb555, 0xbeaaa000); // normal, approx. 
-1/3 +} + test "extendsftf2" { // qNaN test__extendsftf2(makeQNaN32(), 0x7fff800000000000, 0x0); diff --git a/std/special/compiler_rt/index.zig b/std/special/compiler_rt/index.zig index c96e1587f8..fda8d9d8af 100644 --- a/std/special/compiler_rt/index.zig +++ b/std/special/compiler_rt/index.zig @@ -15,6 +15,8 @@ comptime { @export("__lttf2", @import("comparetf2.zig").__letf2, linkage); @export("__netf2", @import("comparetf2.zig").__letf2, linkage); @export("__gttf2", @import("comparetf2.zig").__getf2, linkage); + @export("__gnu_h2f_ieee", @import("extendXfYf2.zig").__extendhfsf2, linkage); + @export("__gnu_f2h_ieee", @import("truncXfYf2.zig").__truncsfhf2, linkage); } @export("__unordtf2", @import("comparetf2.zig").__unordtf2, linkage); @@ -22,6 +24,9 @@ comptime { @export("__floatuntidf", @import("floatuntidf.zig").__floatuntidf, linkage); @export("__extenddftf2", @import("extendXfYf2.zig").__extenddftf2, linkage); @export("__extendsftf2", @import("extendXfYf2.zig").__extendsftf2, linkage); + @export("__extendhfsf2", @import("extendXfYf2.zig").__extendhfsf2, linkage); + + @export("__truncsfhf2", @import("truncXfYf2.zig").__truncsfhf2, linkage); @export("__fixunssfsi", @import("fixunssfsi.zig").__fixunssfsi, linkage); @export("__fixunssfdi", @import("fixunssfdi.zig").__fixunssfdi, linkage); diff --git a/std/special/compiler_rt/truncXfYf2.zig b/std/special/compiler_rt/truncXfYf2.zig new file mode 100644 index 0000000000..f08c6ae34f --- /dev/null +++ b/std/special/compiler_rt/truncXfYf2.zig @@ -0,0 +1,111 @@ +const std = @import("std"); + +pub extern fn __truncsfhf2(a: f32) u16 { + return @bitCast(u16, truncXfYf2(f16, f32, a)); +} + +const CHAR_BIT = 8; + +inline fn truncXfYf2(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t { + const src_rep_t = @IntType(false, @typeInfo(src_t).Float.bits); + const dst_rep_t = @IntType(false, @typeInfo(dst_t).Float.bits); + const srcSigBits = std.math.floatMantissaBits(src_t); + const dstSigBits = std.math.floatMantissaBits(dst_t); + const SrcShift = std.math.Log2Int(src_rep_t); + const DstShift = std.math.Log2Int(dst_rep_t); + + // Various constants whose values follow from the type parameters. + // Any reasonable optimizer will fold and propagate all of these. 
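An aside, not part of the patch: for the single instantiation this file gets (f32 -> f16), the generic constants below specialize to the usual binary16 layout: 1 sign bit, 5 exponent bits, 10 significand bits, exponent bias 15 (versus 8/23/127 on the f32 side). An illustrative sketch that re-derives those numbers with the same formulas the routine uses; the test name is made up:

    const std = @import("std");
    const assert = std.debug.assert;

    test "derived binary16 constants (illustrative)" {
        const dstSigBits = std.math.floatMantissaBits(f16); // 10
        const dstBits = @sizeOf(f16) * 8; // 16 bits total
        const dstExpBits = dstBits - dstSigBits - 1; // 5
        const dstInfExp = (1 << dstExpBits) - 1; // 31, the all-ones exponent
        const dstExpBias = dstInfExp >> 1; // 15
        assert(dstSigBits == 10);
        assert(dstExpBits == 5);
        assert(dstExpBias == 15);
    }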
+ const srcBits = @sizeOf(src_t) * CHAR_BIT; + const srcExpBits = srcBits - srcSigBits - 1; + const srcInfExp = (1 << srcExpBits) - 1; + const srcExpBias = srcInfExp >> 1; + + const srcMinNormal = 1 << srcSigBits; + const srcSignificandMask = srcMinNormal - 1; + const srcInfinity = srcInfExp << srcSigBits; + const srcSignMask = 1 << (srcSigBits + srcExpBits); + const srcAbsMask = srcSignMask - 1; + const roundMask = (1 << (srcSigBits - dstSigBits)) - 1; + const halfway = 1 << (srcSigBits - dstSigBits - 1); + const srcQNaN = 1 << (srcSigBits - 1); + const srcNaNCode = srcQNaN - 1; + + const dstBits = @sizeOf(dst_t) * CHAR_BIT; + const dstExpBits = dstBits - dstSigBits - 1; + const dstInfExp = (1 << dstExpBits) - 1; + const dstExpBias = dstInfExp >> 1; + + const underflowExponent = srcExpBias + 1 - dstExpBias; + const overflowExponent = srcExpBias + dstInfExp - dstExpBias; + const underflow = underflowExponent << srcSigBits; + const overflow = overflowExponent << srcSigBits; + + const dstQNaN = 1 << (dstSigBits - 1); + const dstNaNCode = dstQNaN - 1; + + // Break a into a sign and representation of the absolute value + const aRep: src_rep_t = @bitCast(src_rep_t, a); + const aAbs: src_rep_t = aRep & srcAbsMask; + const sign: src_rep_t = aRep & srcSignMask; + var absResult: dst_rep_t = undefined; + + if (aAbs -% underflow < aAbs -% overflow) { + // The exponent of a is within the range of normal numbers in the + // destination format. We can convert by simply right-shifting with + // rounding and adjusting the exponent. + absResult = @truncate(dst_rep_t, aAbs >> (srcSigBits - dstSigBits)); + absResult -%= dst_rep_t(srcExpBias - dstExpBias) << dstSigBits; + + const roundBits: src_rep_t = aAbs & roundMask; + if (roundBits > halfway) { + // Round to nearest + absResult += 1; + } else if (roundBits == halfway) { + // Ties to even + absResult += absResult & 1; + } + } else if (aAbs > srcInfinity) { + // a is NaN. + // Conjure the result by beginning with infinity, setting the qNaN + // bit and inserting the (truncated) trailing NaN field. + absResult = @intCast(dst_rep_t, dstInfExp) << dstSigBits; + absResult |= dstQNaN; + absResult |= @intCast(dst_rep_t, ((aAbs & srcNaNCode) >> (srcSigBits - dstSigBits)) & dstNaNCode); + } else if (aAbs >= overflow) { + // a overflows to infinity. + absResult = @intCast(dst_rep_t, dstInfExp) << dstSigBits; + } else { + // a underflows on conversion to the destination type or is an exact + // zero. The result may be a denormal or zero. Extract the exponent + // to get the shift amount for the denormalization. + const aExp: u32 = aAbs >> srcSigBits; + const shift: u32 = srcExpBias - dstExpBias - aExp + 1; + + const significand: src_rep_t = (aRep & srcSignificandMask) | srcMinNormal; + + // Right shift by the denormalization amount with sticky. 
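An aside, not part of the patch: the normal path above and the denormal path below round with the same roundBits/halfway comparison, i.e. round to nearest with ties going to the even significand. Two hypothetical spot checks at the 1.0 boundary, in the style of the vectors in truncXfYf2_test.zig below; these exact bit patterns are worked out from the formats rather than taken from the patch:

    const __truncsfhf2 = @import("truncXfYf2.zig").__truncsfhf2;
    const assert = @import("std").debug.assert;

    test "ties to even around 1.0 (illustrative)" {
        // 1 + 2**-11 (f32 0x3f801000) is exactly halfway between f16 1.0
        // (0x3c00) and 1 + 2**-10 (0x3c01); the tie resolves to the even
        // significand, 0x3c00.
        assert(__truncsfhf2(@bitCast(f32, u32(0x3f801000))) == 0x3c00);
        // 1 + 3*2**-11 (f32 0x3f803000) is halfway between 0x3c01 and
        // 0x3c02; the tie again resolves to even, 0x3c02.
        assert(__truncsfhf2(@bitCast(f32, u32(0x3f803000))) == 0x3c02);
    }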
+ if (shift > srcSigBits) { + absResult = 0; + } else { + const sticky: src_rep_t = significand << @intCast(SrcShift, srcBits - shift); + const denormalizedSignificand: src_rep_t = significand >> @intCast(SrcShift, shift) | sticky; + absResult = @intCast(dst_rep_t, denormalizedSignificand >> (srcSigBits - dstSigBits)); + const roundBits: src_rep_t = denormalizedSignificand & roundMask; + if (roundBits > halfway) { + // Round to nearest + absResult += 1; + } else if (roundBits == halfway) { + // Ties to even + absResult += absResult & 1; + } + } + } + + const result: dst_rep_t align(@alignOf(dst_t)) = absResult | @truncate(dst_rep_t, sign >> @intCast(SrcShift, srcBits - dstBits)); + return @bitCast(dst_t, result); +} + +test "import truncXfYf2" { + _ = @import("truncXfYf2_test.zig"); +} diff --git a/std/special/compiler_rt/truncXfYf2_test.zig b/std/special/compiler_rt/truncXfYf2_test.zig new file mode 100644 index 0000000000..e4dae7b5b0 --- /dev/null +++ b/std/special/compiler_rt/truncXfYf2_test.zig @@ -0,0 +1,64 @@ +const __truncsfhf2 = @import("truncXfYf2.zig").__truncsfhf2; + +fn test__truncsfhf2(a: u32, expected: u16) void { + const actual = __truncsfhf2(@bitCast(f32, a)); + + if (actual == expected) { + return; + } + + @panic("__truncsfhf2 test failure"); +} + +test "truncsfhf2" { + test__truncsfhf2(0x7fc00000, 0x7e00); // qNaN + test__truncsfhf2(0x7fe00000, 0x7f00); // sNaN + + test__truncsfhf2(0, 0); // 0 + test__truncsfhf2(0x80000000, 0x8000); // -0 + + test__truncsfhf2(0x7f800000, 0x7c00); // inf + test__truncsfhf2(0xff800000, 0xfc00); // -inf + + test__truncsfhf2(0x477ff000, 0x7c00); // 65520 -> inf + test__truncsfhf2(0xc77ff000, 0xfc00); // -65520 -> -inf + + test__truncsfhf2(0x71cc3892, 0x7c00); // 0x1.987124876876324p+100 -> inf + test__truncsfhf2(0xf1cc3892, 0xfc00); // -0x1.987124876876324p+100 -> -inf + + test__truncsfhf2(0x38800000, 0x0400); // normal (min), 2**-14 + test__truncsfhf2(0xb8800000, 0x8400); // normal (min), -2**-14 + + test__truncsfhf2(0x477fe000, 0x7bff); // normal (max), 65504 + test__truncsfhf2(0xc77fe000, 0xfbff); // normal (max), -65504 + + test__truncsfhf2(0x477fe100, 0x7bff); // normal, 65505 -> 65504 + test__truncsfhf2(0xc77fe100, 0xfbff); // normal, -65505 -> -65504 + + test__truncsfhf2(0x477fef00, 0x7bff); // normal, 65519 -> 65504 + test__truncsfhf2(0xc77fef00, 0xfbff); // normal, -65519 -> -65504 + + test__truncsfhf2(0x3f802000, 0x3c01); // normal, 1 + 2**-10 + test__truncsfhf2(0xbf802000, 0xbc01); // normal, -1 - 2**-10 + + test__truncsfhf2(0x3eaaa000, 0x3555); // normal, approx. 1/3 + test__truncsfhf2(0xbeaaa000, 0xb555); // normal, approx. 
-1/3 + + test__truncsfhf2(0x40490fdb, 0x4248); // normal, 3.1415926535 + test__truncsfhf2(0xc0490fdb, 0xc248); // normal, -3.1415926535 + + test__truncsfhf2(0x45cc3892, 0x6e62); // normal, 0x1.987124876876324p+12 + + test__truncsfhf2(0x3f800000, 0x3c00); // normal, 1 + test__truncsfhf2(0x38800000, 0x0400); // normal, 0x1.0p-14 + + test__truncsfhf2(0x33800000, 0x0001); // denormal (min), 2**-24 + test__truncsfhf2(0xb3800000, 0x8001); // denormal (min), -2**-24 + + test__truncsfhf2(0x387fc000, 0x03ff); // denormal (max), 2**-14 - 2**-24 + test__truncsfhf2(0xb87fc000, 0x83ff); // denormal (max), -2**-14 + 2**-24 + + test__truncsfhf2(0x35800000, 0x0010); // denormal, 0x1.0p-20 + test__truncsfhf2(0x33280000, 0x0001); // denormal, 0x1.5p-25 -> 0x1.0p-24 + test__truncsfhf2(0x33000000, 0x0000); // 0x1.0p-25 -> zero +} diff --git a/test/cases/cast.zig b/test/cases/cast.zig index 4209d87c1a..5688d90e11 100644 --- a/test/cases/cast.zig +++ b/test/cases/cast.zig @@ -350,13 +350,16 @@ fn testFloatToInts() void { assert(x == 10000); const y = @floatToInt(i32, f32(1e4)); assert(y == 10000); - expectFloatToInt(u8, 255.1, 255); - expectFloatToInt(i8, 127.2, 127); - expectFloatToInt(i8, -128.2, -128); + expectFloatToInt(f16, 255.1, u8, 255); + expectFloatToInt(f16, 127.2, i8, 127); + expectFloatToInt(f16, -128.2, i8, -128); + expectFloatToInt(f32, 255.1, u8, 255); + expectFloatToInt(f32, 127.2, i8, 127); + expectFloatToInt(f32, -128.2, i8, -128); } -fn expectFloatToInt(comptime T: type, f: f32, i: T) void { - assert(@floatToInt(T, f) == i); +fn expectFloatToInt(comptime F: type, f: F, comptime I: type, i: I) void { + assert(@floatToInt(I, f) == i); } test "cast u128 to f128 and back" { @@ -418,6 +421,16 @@ test "@intCast comptime_int" { } test "@floatCast comptime_int and comptime_float" { + { + const result = @floatCast(f16, 1234); + assert(@typeOf(result) == f16); + assert(result == 1234.0); + } + { + const result = @floatCast(f16, 1234.0); + assert(@typeOf(result) == f16); + assert(result == 1234.0); + } { const result = @floatCast(f32, 1234); assert(@typeOf(result) == f32); @@ -431,6 +444,11 @@ test "@floatCast comptime_int and comptime_float" { } test "comptime_int @intToFloat" { + { + const result = @intToFloat(f16, 1234); + assert(@typeOf(result) == f16); + assert(result == 1234.0); + } { const result = @intToFloat(f32, 1234); assert(@typeOf(result) == f32); diff --git a/test/cases/math.zig b/test/cases/math.zig index 08388d3df8..1807e5a1b0 100644 --- a/test/cases/math.zig +++ b/test/cases/math.zig @@ -6,15 +6,20 @@ test "division" { } fn testDivision() void { assert(div(u32, 13, 3) == 4); + assert(div(f16, 1.0, 2.0) == 0.5); assert(div(f32, 1.0, 2.0) == 0.5); assert(divExact(u32, 55, 11) == 5); assert(divExact(i32, -55, 11) == -5); + assert(divExact(f16, 55.0, 11.0) == 5.0); + assert(divExact(f16, -55.0, 11.0) == -5.0); assert(divExact(f32, 55.0, 11.0) == 5.0); assert(divExact(f32, -55.0, 11.0) == -5.0); assert(divFloor(i32, 5, 3) == 1); assert(divFloor(i32, -5, 3) == -2); + assert(divFloor(f16, 5.0, 3.0) == 1.0); + assert(divFloor(f16, -5.0, 3.0) == -2.0); assert(divFloor(f32, 5.0, 3.0) == 1.0); assert(divFloor(f32, -5.0, 3.0) == -2.0); assert(divFloor(i32, -0x80000000, -2) == 0x40000000); @@ -24,6 +29,8 @@ fn testDivision() void { assert(divTrunc(i32, 5, 3) == 1); assert(divTrunc(i32, -5, 3) == -1); + assert(divTrunc(f16, 5.0, 3.0) == 1.0); + assert(divTrunc(f16, -5.0, 3.0) == -1.0); assert(divTrunc(f32, 5.0, 3.0) == 1.0); assert(divTrunc(f32, -5.0, 3.0) == -1.0); @@ -435,10 +442,11 @@ test 
"comptime float rem int" { } test "remainder division" { + comptime remdiv(f16); comptime remdiv(f32); comptime remdiv(f64); comptime remdiv(f128); - remdiv(f32); + remdiv(f16); remdiv(f64); remdiv(f128); } @@ -453,6 +461,8 @@ test "@sqrt" { comptime testSqrt(f64, 12.0); testSqrt(f32, 13.0); comptime testSqrt(f32, 13.0); + testSqrt(f16, 13.0); + comptime testSqrt(f16, 13.0); const x = 14.0; const y = x * x; diff --git a/test/cases/misc.zig b/test/cases/misc.zig index d539f79a57..0f181a7b4e 100644 --- a/test/cases/misc.zig +++ b/test/cases/misc.zig @@ -53,6 +53,7 @@ test "@IntType builtin" { } test "floating point primitive bit counts" { + assert(f16.bit_count == 16); assert(f32.bit_count == 32); assert(f64.bit_count == 64); } -- cgit v1.2.3 From 19961c50e4db10fc4ada428928a7f5d1a2966da6 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 27 Jun 2018 13:15:55 -0400 Subject: fix comptime @tagName crashing sometimes closes #1118 --- src/analyze.cpp | 1 + src/ir.cpp | 3 +++ test/cases/eval.zig | 5 +++++ test/cases/widening.zig | 9 +++++---- 4 files changed, 14 insertions(+), 4 deletions(-) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index 25cc1c79d0..d5e69de1eb 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -3728,6 +3728,7 @@ TypeUnionField *find_union_field_by_tag(TypeTableEntry *type_entry, const BigInt } TypeEnumField *find_enum_field_by_tag(TypeTableEntry *enum_type, const BigInt *tag) { + assert(enum_type->data.enumeration.zero_bits_known); for (uint32_t i = 0; i < enum_type->data.enumeration.src_field_count; i += 1) { TypeEnumField *field = &enum_type->data.enumeration.fields[i]; if (bigint_cmp(&field->value, tag) == CmpEQ) { diff --git a/src/ir.cpp b/src/ir.cpp index 6e424980f8..9ba01d1411 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -16053,6 +16053,9 @@ static TypeTableEntry *ir_analyze_instruction_enum_tag_name(IrAnalyze *ira, IrIn assert(target->value.type->id == TypeTableEntryIdEnum); if (instr_is_comptime(target)) { + type_ensure_zero_bits_known(ira->codegen, target->value.type); + if (type_is_invalid(target->value.type)) + return ira->codegen->builtin_types.entry_invalid; TypeEnumField *field = find_enum_field_by_tag(target->value.type, &target->value.data.x_bigint); ConstExprValue *array_val = create_const_str_lit(ira->codegen, field->name); ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base); diff --git a/test/cases/eval.zig b/test/cases/eval.zig index 756ffe339a..83d2e80176 100644 --- a/test/cases/eval.zig +++ b/test/cases/eval.zig @@ -637,3 +637,8 @@ test "call method with comptime pass-by-non-copying-value self parameter" { var b = s.b(); assert(b == 2); } + +test "@tagName of @typeId" { + const str = @tagName(@typeId(u8)); + assert(std.mem.eql(u8, str, "Int")); +} diff --git a/test/cases/widening.zig b/test/cases/widening.zig index 18c12806d3..cf6ab4ca0f 100644 --- a/test/cases/widening.zig +++ b/test/cases/widening.zig @@ -19,8 +19,9 @@ test "implicit unsigned integer to signed integer" { } test "float widening" { - var a: f32 = 12.34; - var b: f64 = a; - var c: f128 = b; - assert(c == a); + var a: f16 = 12.34; + var b: f32 = a; + var c: f64 = b; + var d: f128 = c; + assert(d == a); } -- cgit v1.2.3 From 2fa588e81d60cfe319446bd0483c6bf296f40c40 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 27 Jun 2018 18:45:21 -0400 Subject: fix coroutine accessing freed memory closes #1164 --- src/analyze.cpp | 2 +- src/ir.cpp | 17 ++++++++++++++--- test/cases/coroutines.zig | 41 
++++++++++++++++++++++++++++++++--------- 3 files changed, 47 insertions(+), 13 deletions(-) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index d5e69de1eb..3c81d9ff9a 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -5583,7 +5583,7 @@ void render_const_val_ptr(CodeGen *g, Buf *buf, ConstExprValue *const_val, TypeT return; } case ConstPtrSpecialHardCodedAddr: - buf_appendf(buf, "(*%s)(%" ZIG_PRI_x64 ")", buf_ptr(&type_entry->data.pointer.child_type->name), + buf_appendf(buf, "(%s)(%" ZIG_PRI_x64 ")", buf_ptr(&type_entry->name), const_val->data.x_ptr.data.hard_coded_addr.addr); return; case ConstPtrSpecialDiscard: diff --git a/src/ir.cpp b/src/ir.cpp index 9ba01d1411..98ed53d839 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -7112,6 +7112,12 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec IrInstruction *dest_err_ret_trace_ptr = ir_build_load_ptr(irb, scope, node, err_ret_trace_ptr_field_ptr); ir_build_merge_err_ret_traces(irb, scope, node, coro_promise_ptr, err_ret_trace_ptr, dest_err_ret_trace_ptr); } + // Before we destroy the coroutine frame, we need to load the target promise into + // a register or local variable which does not get spilled into the frame, + // otherwise llvm tries to access memory inside the destroyed frame. + IrInstruction *unwrapped_await_handle_ptr = ir_build_unwrap_maybe(irb, scope, node, + irb->exec->await_handle_var_ptr, false); + IrInstruction *await_handle_in_block = ir_build_load_ptr(irb, scope, node, unwrapped_await_handle_ptr); ir_build_br(irb, scope, node, check_free_block, const_bool_false); ir_set_cursor_at_end_and_append_block(irb, irb->exec->coro_final_cleanup_block); @@ -7126,6 +7132,14 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec incoming_values[1] = const_bool_true; IrInstruction *resume_awaiter = ir_build_phi(irb, scope, node, 2, incoming_blocks, incoming_values); + IrBasicBlock **merge_incoming_blocks = allocate(2); + IrInstruction **merge_incoming_values = allocate(2); + merge_incoming_blocks[0] = irb->exec->coro_final_cleanup_block; + merge_incoming_values[0] = ir_build_const_undefined(irb, scope, node); + merge_incoming_blocks[1] = irb->exec->coro_normal_final; + merge_incoming_values[1] = await_handle_in_block; + IrInstruction *awaiter_handle = ir_build_phi(irb, scope, node, 2, merge_incoming_blocks, merge_incoming_values); + Buf *free_field_name = buf_create_from_str(ASYNC_FREE_FIELD_NAME); IrInstruction *implicit_allocator_ptr = ir_build_get_implicit_allocator(irb, scope, node, ImplicitAllocatorIdLocalVar); @@ -7152,9 +7166,6 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec ir_build_cond_br(irb, scope, node, resume_awaiter, resume_block, irb->exec->coro_suspend_block, const_bool_false); ir_set_cursor_at_end_and_append_block(irb, resume_block); - IrInstruction *unwrapped_await_handle_ptr = ir_build_unwrap_maybe(irb, scope, node, - irb->exec->await_handle_var_ptr, false); - IrInstruction *awaiter_handle = ir_build_load_ptr(irb, scope, node, unwrapped_await_handle_ptr); ir_build_coro_resume(irb, scope, node, awaiter_handle); ir_build_br(irb, scope, node, irb->exec->coro_suspend_block, const_bool_false); } diff --git a/test/cases/coroutines.zig b/test/cases/coroutines.zig index 4d2aa54a69..b3899b306b 100644 --- a/test/cases/coroutines.zig +++ b/test/cases/coroutines.zig @@ -5,7 +5,10 @@ const assert = std.debug.assert; var x: i32 = 1; test "create a coroutine and cancel it" { - const p = try async 
simpleAsyncFn(); + var da = std.heap.DirectAllocator.init(); + defer da.deinit(); + + const p = try async<&da.allocator> simpleAsyncFn(); comptime assert(@typeOf(p) == promise->void); cancel p; assert(x == 2); @@ -17,8 +20,11 @@ async fn simpleAsyncFn() void { } test "coroutine suspend, resume, cancel" { + var da = std.heap.DirectAllocator.init(); + defer da.deinit(); + seq('a'); - const p = try async testAsyncSeq(); + const p = try async<&da.allocator> testAsyncSeq(); seq('c'); resume p; seq('f'); @@ -43,7 +49,10 @@ fn seq(c: u8) void { } test "coroutine suspend with block" { - const p = try async testSuspendBlock(); + var da = std.heap.DirectAllocator.init(); + defer da.deinit(); + + const p = try async<&da.allocator> testSuspendBlock(); std.debug.assert(!result); resume a_promise; std.debug.assert(result); @@ -64,8 +73,11 @@ var await_a_promise: promise = undefined; var await_final_result: i32 = 0; test "coroutine await" { + var da = std.heap.DirectAllocator.init(); + defer da.deinit(); + await_seq('a'); - const p = async await_amain() catch unreachable; + const p = async<&da.allocator> await_amain() catch unreachable; await_seq('f'); resume await_a_promise; await_seq('i'); @@ -100,8 +112,11 @@ fn await_seq(c: u8) void { var early_final_result: i32 = 0; test "coroutine await early return" { + var da = std.heap.DirectAllocator.init(); + defer da.deinit(); + early_seq('a'); - const p = async early_amain() catch unreachable; + const p = async<&da.allocator> early_amain() catch unreachable; early_seq('f'); assert(early_final_result == 1234); assert(std.mem.eql(u8, early_points, "abcdef")); @@ -146,7 +161,9 @@ test "async function with dot syntax" { suspend; } }; - const p = try async S.foo(); + var da = std.heap.DirectAllocator.init(); + defer da.deinit(); + const p = try async<&da.allocator> S.foo(); cancel p; assert(S.y == 2); } @@ -157,7 +174,9 @@ test "async fn pointer in a struct field" { bar: async<*std.mem.Allocator> fn (*i32) void, }; var foo = Foo{ .bar = simpleAsyncFn2 }; - const p = (async foo.bar(&data)) catch unreachable; + var da = std.heap.DirectAllocator.init(); + defer da.deinit(); + const p = (async<&da.allocator> foo.bar(&data)) catch unreachable; assert(data == 2); cancel p; assert(data == 4); @@ -169,7 +188,9 @@ async<*std.mem.Allocator> fn simpleAsyncFn2(y: *i32) void { } test "async fn with inferred error set" { - const p = (async failing()) catch unreachable; + var da = std.heap.DirectAllocator.init(); + defer da.deinit(); + const p = (async<&da.allocator> failing()) catch unreachable; resume p; cancel p; } @@ -181,7 +202,9 @@ async fn failing() !void { test "error return trace across suspend points - early return" { const p = nonFailing(); resume p; - const p2 = try async printTrace(p); + var da = std.heap.DirectAllocator.init(); + defer da.deinit(); + const p2 = try async<&da.allocator> printTrace(p); cancel p2; } -- cgit v1.2.3 From 3ec38b249446d1a51391e263fbb8303af52e6751 Mon Sep 17 00:00:00 2001 From: Jimmi HC Date: Thu, 28 Jun 2018 10:34:37 +0200 Subject: Implement const_values_equal for array type * This allows arrays to be passed by value at comptime --- src/analyze.cpp | 15 +++++++++++++-- test/cases/array.zig | 8 ++++++++ 2 files changed, 21 insertions(+), 2 deletions(-) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index 5160a19e81..e9b74a9c26 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -5458,8 +5458,19 @@ bool const_values_equal(ConstExprValue *a, ConstExprValue *b) { case TypeTableEntryIdPointer: case 
TypeTableEntryIdFn: return const_values_equal_ptr(a, b); - case TypeTableEntryIdArray: - zig_panic("TODO"); + case TypeTableEntryIdArray: { + assert(a->type->data.array.len == b->type->data.array.len); + size_t len = a->type->data.array.len; + ConstExprValue *a_elems = a->data.x_array.s_none.elements; + ConstExprValue *b_elems = b->data.x_array.s_none.elements; + + for (size_t i = 0; i < len; ++i) { + if (!const_values_equal(&a_elems[i], &b_elems[i])) + return false; + } + + return true; + } case TypeTableEntryIdStruct: for (size_t i = 0; i < a->type->data.structure.src_field_count; i += 1) { ConstExprValue *field_a = &a->data.x_struct.fields[i]; diff --git a/test/cases/array.zig b/test/cases/array.zig index b481261b4f..b72491bcc0 100644 --- a/test/cases/array.zig +++ b/test/cases/array.zig @@ -152,3 +152,11 @@ fn testImplicitCastSingleItemPtr() void { slice[0] += 1; assert(byte == 101); } + +fn testArrayByValAtComptime(b: [2]u8) u8 { return b[0]; } + +test "comptime evalutating function that takes array by value" { + const arr = []u8{0,1}; + _ = comptime testArrayByValAtComptime(arr); + _ = comptime testArrayByValAtComptime(arr); +} -- cgit v1.2.3 From b1128b18d5395d85f1c483d8b35e33c57be80722 Mon Sep 17 00:00:00 2001 From: Jimmi HC Date: Fri, 29 Jun 2018 08:41:16 +0200 Subject: Assert that array is not ConstArraySpecialUndef in const_values_equal --- src/analyze.cpp | 3 +++ 1 file changed, 3 insertions(+) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index e9b74a9c26..b3a302a1d4 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -5460,6 +5460,9 @@ bool const_values_equal(ConstExprValue *a, ConstExprValue *b) { return const_values_equal_ptr(a, b); case TypeTableEntryIdArray: { assert(a->type->data.array.len == b->type->data.array.len); + assert(a->data.x_array.special != ConstArraySpecialUndef); + assert(b->data.x_array.special != ConstArraySpecialUndef); + size_t len = a->type->data.array.len; ConstExprValue *a_elems = a->data.x_array.s_none.elements; ConstExprValue *b_elems = b->data.x_array.s_none.elements; -- cgit v1.2.3 From 4c3f27ce1ea17b5236a022971ebace73a02b7c2b Mon Sep 17 00:00:00 2001 From: Jimmi HC Date: Fri, 29 Jun 2018 10:21:43 +0200 Subject: ir_resolve_const now checks recursivly for undef values --- src/analyze.cpp | 135 ++++++++++++++++++++++++++++++++++++++++++++++++ src/analyze.hpp | 1 + src/ir.cpp | 11 +++- test/compile_errors.zig | 15 ++++++ 4 files changed, 160 insertions(+), 2 deletions(-) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index b3a302a1d4..068ea48c0a 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -5288,6 +5288,141 @@ ConstExprValue *create_const_arg_tuple(CodeGen *g, size_t arg_index_start, size_ return const_val; } +bool contains_comptime_undefined_value(ConstExprValue *value) { + assert(value->special != ConstValSpecialRuntime); + if (value->special == ConstValSpecialUndef) + return true; + + switch (value->type->id) { + case TypeTableEntryIdInvalid: + zig_unreachable(); + + case TypeTableEntryIdPointer: { + ConstPtrValue *ptr = &value->data.x_ptr; + if (ptr->mut == ConstPtrMutRuntimeVar) + return false; + + switch (ptr->special) { + case ConstPtrSpecialInvalid: + zig_unreachable(); + case ConstPtrSpecialRef: + return contains_comptime_undefined_value(ptr->data.ref.pointee); + case ConstPtrSpecialBaseArray: { + size_t index = ptr->data.base_array.elem_index; + ConstExprValue *arr = ptr->data.base_array.array_val; + if (arr->special == ConstValSpecialUndef) + return true; + if 
(arr->data.x_array.special == ConstArraySpecialUndef) + return true; + + return contains_comptime_undefined_value(&arr->data.x_array.s_none.elements[index]); + } + case ConstPtrSpecialBaseStruct: { + size_t index = ptr->data.base_struct.field_index; + ConstExprValue *str = ptr->data.base_struct.struct_val; + if (str->special == ConstValSpecialUndef) + return true; + + return contains_comptime_undefined_value(&str->data.x_struct.fields[index]); + } + case ConstPtrSpecialFunction: // TODO: Can a fn ptr have an undefined value? + case ConstPtrSpecialDiscard: + case ConstPtrSpecialHardCodedAddr: + return false; + } + } + case TypeTableEntryIdArray: { + ConstArrayValue *arr = &value->data.x_array; + if (arr->special == ConstArraySpecialUndef) + return true; + + for (size_t i = 0; i < value->type->data.array.len; ++i) { + if (contains_comptime_undefined_value(&arr->s_none.elements[i])) + return true; + } + return false; + } + case TypeTableEntryIdStruct: { + ConstStructValue *str = &value->data.x_struct; + if (value->type->data.structure.is_slice) { + ConstExprValue *len = &str->fields[slice_len_index]; + ConstExprValue *ptr = &str->fields[slice_ptr_index]; + if (len->special == ConstValSpecialUndef) + return true; + if (ptr->special == ConstValSpecialUndef) + return true; + + switch (ptr->data.x_ptr.special) { + case ConstPtrSpecialRef: + return contains_comptime_undefined_value(ptr->data.x_ptr.data.ref.pointee); + case ConstPtrSpecialBaseArray: { + size_t offset = ptr->data.x_ptr.data.base_array.elem_index; + ConstExprValue *arr = ptr->data.x_ptr.data.base_array.array_val; + if (arr->special == ConstValSpecialUndef) + return true; + if (arr->data.x_array.special == ConstArraySpecialUndef) + return true; + + uint64_t slice_len = bigint_as_unsigned(&len->data.x_bigint); + for (size_t i = 0; i < slice_len; ++i) { + if (contains_comptime_undefined_value(&arr->data.x_array.s_none.elements[i + offset])) + return true; + } + + return false; + } + case ConstPtrSpecialBaseStruct: + case ConstPtrSpecialInvalid: + case ConstPtrSpecialFunction: + case ConstPtrSpecialDiscard: + case ConstPtrSpecialHardCodedAddr: + zig_unreachable(); + } + } + + for (size_t i = 0; i < value->type->data.structure.src_field_count; ++i) { + if (contains_comptime_undefined_value(&str->fields[i])) + return true; + } + return false; + } + case TypeTableEntryIdOptional: + if (value->data.x_optional == nullptr) + return false; + + return contains_comptime_undefined_value(value->data.x_optional); + case TypeTableEntryIdErrorUnion: + // TODO: Can error union error be undefined? 
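An aside, not part of the patch: because the walk above recurses into array elements and struct fields, an aggregate that is only partially undefined is also rejected when it must be resolved as a comptime constant; the new compile-error test added below exercises the array case. A hypothetical sibling case for the struct-field loop, assuming the same rule applies to struct fields (note that the last two commits in this section revert the check again):

    const S = struct {
        a: u8,
        b: u8,
    };

    fn first(comptime s: S) u8 {
        return s.a;
    }

    test "partially undefined struct at comptime (illustrative)" {
        const s = S{
            .a = 1,
            .b = undefined,
        };
        _ = first(s); // expected: error: use of undefined value
    }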
+ if (value->data.x_err_union.err != nullptr) + return false; + + return contains_comptime_undefined_value(value->data.x_err_union.payload); + case TypeTableEntryIdUnion: + return contains_comptime_undefined_value(value->data.x_union.payload); + + case TypeTableEntryIdArgTuple: + case TypeTableEntryIdVoid: + case TypeTableEntryIdBool: + case TypeTableEntryIdUnreachable: + case TypeTableEntryIdInt: + case TypeTableEntryIdFloat: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: + case TypeTableEntryIdErrorSet: + case TypeTableEntryIdEnum: + case TypeTableEntryIdFn: + case TypeTableEntryIdNamespace: + case TypeTableEntryIdBlock: + case TypeTableEntryIdBoundFn: + case TypeTableEntryIdMetaType: + case TypeTableEntryIdOpaque: + case TypeTableEntryIdPromise: + return false; + } + zig_unreachable(); +} void init_const_undefined(CodeGen *g, ConstExprValue *const_val) { TypeTableEntry *wanted_type = const_val->type; diff --git a/src/analyze.hpp b/src/analyze.hpp index 88e06b2390..100f85d4d9 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -93,6 +93,7 @@ void ensure_complete_type(CodeGen *g, TypeTableEntry *type_entry); void type_ensure_zero_bits_known(CodeGen *g, TypeTableEntry *type_entry); void complete_enum(CodeGen *g, TypeTableEntry *enum_type); bool ir_get_var_is_comptime(VariableTableEntry *var); +bool contains_comptime_undefined_value(ConstExprValue *value); bool const_values_equal(ConstExprValue *a, ConstExprValue *b); void eval_min_max_value(CodeGen *g, TypeTableEntry *type_entry, ConstExprValue *const_val, bool is_max); void eval_min_max_value_int(CodeGen *g, TypeTableEntry *int_type, BigInt *bigint, bool is_max); diff --git a/src/ir.cpp b/src/ir.cpp index c6078e755d..2cce4a5044 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -9148,8 +9148,15 @@ enum UndefAllowed { static ConstExprValue *ir_resolve_const(IrAnalyze *ira, IrInstruction *value, UndefAllowed undef_allowed) { switch (value->value.special) { - case ConstValSpecialStatic: - return &value->value; + case ConstValSpecialStatic: { + ConstExprValue *res = &value->value; + if (undef_allowed == UndefBad && contains_comptime_undefined_value(res)) { + ir_add_error(ira, value, buf_sprintf("use of undefined value")); + return nullptr; + } + + return res; + } case ConstValSpecialRuntime: ir_add_error(ira, value, buf_sprintf("unable to evaluate constant expression")); return nullptr; diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 2247f0af96..8749f5b560 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -4124,4 +4124,19 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { , ".tmp_source.zig:3:36: error: @ArgType could not resolve the type of arg 0 because 'fn(var)var' is generic", ); + + cases.add( + "Trying to pass undefined array to function taking comptime array by value", + \\fn a(comptime b: [2]u8) u8 { return b[0]; } + \\ + \\test "" { + \\ const arr: [2]u8 = undefined; + \\ _ = a(arr); + \\} + , + ".tmp_source.zig:5:11: error: use of undefined value", + ); + + + } -- cgit v1.2.3 From 58b1692182dc2f8da5b535f59e9a89cfab10a7b6 Mon Sep 17 00:00:00 2001 From: Jimmi HC Date: Fri, 29 Jun 2018 11:34:38 +0200 Subject: contains_comptime_undefined_value should not follow pointers --- src/analyze.cpp | 72 +-------------------------------------------------------- 1 file changed, 1 insertion(+), 71 deletions(-) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index 068ea48c0a..4c200888d8 
100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -5296,41 +5296,6 @@ bool contains_comptime_undefined_value(ConstExprValue *value) { switch (value->type->id) { case TypeTableEntryIdInvalid: zig_unreachable(); - - case TypeTableEntryIdPointer: { - ConstPtrValue *ptr = &value->data.x_ptr; - if (ptr->mut == ConstPtrMutRuntimeVar) - return false; - - switch (ptr->special) { - case ConstPtrSpecialInvalid: - zig_unreachable(); - case ConstPtrSpecialRef: - return contains_comptime_undefined_value(ptr->data.ref.pointee); - case ConstPtrSpecialBaseArray: { - size_t index = ptr->data.base_array.elem_index; - ConstExprValue *arr = ptr->data.base_array.array_val; - if (arr->special == ConstValSpecialUndef) - return true; - if (arr->data.x_array.special == ConstArraySpecialUndef) - return true; - - return contains_comptime_undefined_value(&arr->data.x_array.s_none.elements[index]); - } - case ConstPtrSpecialBaseStruct: { - size_t index = ptr->data.base_struct.field_index; - ConstExprValue *str = ptr->data.base_struct.struct_val; - if (str->special == ConstValSpecialUndef) - return true; - - return contains_comptime_undefined_value(&str->data.x_struct.fields[index]); - } - case ConstPtrSpecialFunction: // TODO: Can a fn ptr have an undefined value? - case ConstPtrSpecialDiscard: - case ConstPtrSpecialHardCodedAddr: - return false; - } - } case TypeTableEntryIdArray: { ConstArrayValue *arr = &value->data.x_array; if (arr->special == ConstArraySpecialUndef) @@ -5344,42 +5309,6 @@ bool contains_comptime_undefined_value(ConstExprValue *value) { } case TypeTableEntryIdStruct: { ConstStructValue *str = &value->data.x_struct; - if (value->type->data.structure.is_slice) { - ConstExprValue *len = &str->fields[slice_len_index]; - ConstExprValue *ptr = &str->fields[slice_ptr_index]; - if (len->special == ConstValSpecialUndef) - return true; - if (ptr->special == ConstValSpecialUndef) - return true; - - switch (ptr->data.x_ptr.special) { - case ConstPtrSpecialRef: - return contains_comptime_undefined_value(ptr->data.x_ptr.data.ref.pointee); - case ConstPtrSpecialBaseArray: { - size_t offset = ptr->data.x_ptr.data.base_array.elem_index; - ConstExprValue *arr = ptr->data.x_ptr.data.base_array.array_val; - if (arr->special == ConstValSpecialUndef) - return true; - if (arr->data.x_array.special == ConstArraySpecialUndef) - return true; - - uint64_t slice_len = bigint_as_unsigned(&len->data.x_bigint); - for (size_t i = 0; i < slice_len; ++i) { - if (contains_comptime_undefined_value(&arr->data.x_array.s_none.elements[i + offset])) - return true; - } - - return false; - } - case ConstPtrSpecialBaseStruct: - case ConstPtrSpecialInvalid: - case ConstPtrSpecialFunction: - case ConstPtrSpecialDiscard: - case ConstPtrSpecialHardCodedAddr: - zig_unreachable(); - } - } - for (size_t i = 0; i < value->type->data.structure.src_field_count; ++i) { if (contains_comptime_undefined_value(&str->fields[i])) return true; @@ -5400,6 +5329,7 @@ bool contains_comptime_undefined_value(ConstExprValue *value) { case TypeTableEntryIdUnion: return contains_comptime_undefined_value(value->data.x_union.payload); + case TypeTableEntryIdPointer: case TypeTableEntryIdArgTuple: case TypeTableEntryIdVoid: case TypeTableEntryIdBool: -- cgit v1.2.3 From 616fe798c801baa5fa7238f5fc576a5090938999 Mon Sep 17 00:00:00 2001 From: Jimmi Holst Christensen Date: Sat, 30 Jun 2018 17:35:05 +0200 Subject: Revert "contains_comptime_undefined_value should not follow pointers" This reverts commit 58b1692182dc2f8da5b535f59e9a89cfab10a7b6. 
--- src/analyze.cpp | 72 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 71 insertions(+), 1 deletion(-) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index 4c200888d8..068ea48c0a 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -5296,6 +5296,41 @@ bool contains_comptime_undefined_value(ConstExprValue *value) { switch (value->type->id) { case TypeTableEntryIdInvalid: zig_unreachable(); + + case TypeTableEntryIdPointer: { + ConstPtrValue *ptr = &value->data.x_ptr; + if (ptr->mut == ConstPtrMutRuntimeVar) + return false; + + switch (ptr->special) { + case ConstPtrSpecialInvalid: + zig_unreachable(); + case ConstPtrSpecialRef: + return contains_comptime_undefined_value(ptr->data.ref.pointee); + case ConstPtrSpecialBaseArray: { + size_t index = ptr->data.base_array.elem_index; + ConstExprValue *arr = ptr->data.base_array.array_val; + if (arr->special == ConstValSpecialUndef) + return true; + if (arr->data.x_array.special == ConstArraySpecialUndef) + return true; + + return contains_comptime_undefined_value(&arr->data.x_array.s_none.elements[index]); + } + case ConstPtrSpecialBaseStruct: { + size_t index = ptr->data.base_struct.field_index; + ConstExprValue *str = ptr->data.base_struct.struct_val; + if (str->special == ConstValSpecialUndef) + return true; + + return contains_comptime_undefined_value(&str->data.x_struct.fields[index]); + } + case ConstPtrSpecialFunction: // TODO: Can a fn ptr have an undefined value? + case ConstPtrSpecialDiscard: + case ConstPtrSpecialHardCodedAddr: + return false; + } + } case TypeTableEntryIdArray: { ConstArrayValue *arr = &value->data.x_array; if (arr->special == ConstArraySpecialUndef) @@ -5309,6 +5344,42 @@ bool contains_comptime_undefined_value(ConstExprValue *value) { } case TypeTableEntryIdStruct: { ConstStructValue *str = &value->data.x_struct; + if (value->type->data.structure.is_slice) { + ConstExprValue *len = &str->fields[slice_len_index]; + ConstExprValue *ptr = &str->fields[slice_ptr_index]; + if (len->special == ConstValSpecialUndef) + return true; + if (ptr->special == ConstValSpecialUndef) + return true; + + switch (ptr->data.x_ptr.special) { + case ConstPtrSpecialRef: + return contains_comptime_undefined_value(ptr->data.x_ptr.data.ref.pointee); + case ConstPtrSpecialBaseArray: { + size_t offset = ptr->data.x_ptr.data.base_array.elem_index; + ConstExprValue *arr = ptr->data.x_ptr.data.base_array.array_val; + if (arr->special == ConstValSpecialUndef) + return true; + if (arr->data.x_array.special == ConstArraySpecialUndef) + return true; + + uint64_t slice_len = bigint_as_unsigned(&len->data.x_bigint); + for (size_t i = 0; i < slice_len; ++i) { + if (contains_comptime_undefined_value(&arr->data.x_array.s_none.elements[i + offset])) + return true; + } + + return false; + } + case ConstPtrSpecialBaseStruct: + case ConstPtrSpecialInvalid: + case ConstPtrSpecialFunction: + case ConstPtrSpecialDiscard: + case ConstPtrSpecialHardCodedAddr: + zig_unreachable(); + } + } + for (size_t i = 0; i < value->type->data.structure.src_field_count; ++i) { if (contains_comptime_undefined_value(&str->fields[i])) return true; @@ -5329,7 +5400,6 @@ bool contains_comptime_undefined_value(ConstExprValue *value) { case TypeTableEntryIdUnion: return contains_comptime_undefined_value(value->data.x_union.payload); - case TypeTableEntryIdPointer: case TypeTableEntryIdArgTuple: case TypeTableEntryIdVoid: case TypeTableEntryIdBool: -- cgit v1.2.3 From 01bd5c46e177ae59f72197063c374e845eea3ff3 Mon Sep 17 00:00:00 
2001 From: Jimmi Holst Christensen Date: Sat, 30 Jun 2018 17:35:06 +0200 Subject: Revert "ir_resolve_const now checks recursivly for undef values" This reverts commit 4c3f27ce1ea17b5236a022971ebace73a02b7c2b. --- src/analyze.cpp | 135 ------------------------------------------------ src/analyze.hpp | 1 - src/ir.cpp | 11 +--- test/compile_errors.zig | 15 ------ 4 files changed, 2 insertions(+), 160 deletions(-) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index 068ea48c0a..b3a302a1d4 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -5288,141 +5288,6 @@ ConstExprValue *create_const_arg_tuple(CodeGen *g, size_t arg_index_start, size_ return const_val; } -bool contains_comptime_undefined_value(ConstExprValue *value) { - assert(value->special != ConstValSpecialRuntime); - if (value->special == ConstValSpecialUndef) - return true; - - switch (value->type->id) { - case TypeTableEntryIdInvalid: - zig_unreachable(); - - case TypeTableEntryIdPointer: { - ConstPtrValue *ptr = &value->data.x_ptr; - if (ptr->mut == ConstPtrMutRuntimeVar) - return false; - - switch (ptr->special) { - case ConstPtrSpecialInvalid: - zig_unreachable(); - case ConstPtrSpecialRef: - return contains_comptime_undefined_value(ptr->data.ref.pointee); - case ConstPtrSpecialBaseArray: { - size_t index = ptr->data.base_array.elem_index; - ConstExprValue *arr = ptr->data.base_array.array_val; - if (arr->special == ConstValSpecialUndef) - return true; - if (arr->data.x_array.special == ConstArraySpecialUndef) - return true; - - return contains_comptime_undefined_value(&arr->data.x_array.s_none.elements[index]); - } - case ConstPtrSpecialBaseStruct: { - size_t index = ptr->data.base_struct.field_index; - ConstExprValue *str = ptr->data.base_struct.struct_val; - if (str->special == ConstValSpecialUndef) - return true; - - return contains_comptime_undefined_value(&str->data.x_struct.fields[index]); - } - case ConstPtrSpecialFunction: // TODO: Can a fn ptr have an undefined value? 
- case ConstPtrSpecialDiscard: - case ConstPtrSpecialHardCodedAddr: - return false; - } - } - case TypeTableEntryIdArray: { - ConstArrayValue *arr = &value->data.x_array; - if (arr->special == ConstArraySpecialUndef) - return true; - - for (size_t i = 0; i < value->type->data.array.len; ++i) { - if (contains_comptime_undefined_value(&arr->s_none.elements[i])) - return true; - } - return false; - } - case TypeTableEntryIdStruct: { - ConstStructValue *str = &value->data.x_struct; - if (value->type->data.structure.is_slice) { - ConstExprValue *len = &str->fields[slice_len_index]; - ConstExprValue *ptr = &str->fields[slice_ptr_index]; - if (len->special == ConstValSpecialUndef) - return true; - if (ptr->special == ConstValSpecialUndef) - return true; - - switch (ptr->data.x_ptr.special) { - case ConstPtrSpecialRef: - return contains_comptime_undefined_value(ptr->data.x_ptr.data.ref.pointee); - case ConstPtrSpecialBaseArray: { - size_t offset = ptr->data.x_ptr.data.base_array.elem_index; - ConstExprValue *arr = ptr->data.x_ptr.data.base_array.array_val; - if (arr->special == ConstValSpecialUndef) - return true; - if (arr->data.x_array.special == ConstArraySpecialUndef) - return true; - - uint64_t slice_len = bigint_as_unsigned(&len->data.x_bigint); - for (size_t i = 0; i < slice_len; ++i) { - if (contains_comptime_undefined_value(&arr->data.x_array.s_none.elements[i + offset])) - return true; - } - - return false; - } - case ConstPtrSpecialBaseStruct: - case ConstPtrSpecialInvalid: - case ConstPtrSpecialFunction: - case ConstPtrSpecialDiscard: - case ConstPtrSpecialHardCodedAddr: - zig_unreachable(); - } - } - - for (size_t i = 0; i < value->type->data.structure.src_field_count; ++i) { - if (contains_comptime_undefined_value(&str->fields[i])) - return true; - } - return false; - } - case TypeTableEntryIdOptional: - if (value->data.x_optional == nullptr) - return false; - - return contains_comptime_undefined_value(value->data.x_optional); - case TypeTableEntryIdErrorUnion: - // TODO: Can error union error be undefined? 
- if (value->data.x_err_union.err != nullptr) - return false; - - return contains_comptime_undefined_value(value->data.x_err_union.payload); - case TypeTableEntryIdUnion: - return contains_comptime_undefined_value(value->data.x_union.payload); - - case TypeTableEntryIdArgTuple: - case TypeTableEntryIdVoid: - case TypeTableEntryIdBool: - case TypeTableEntryIdUnreachable: - case TypeTableEntryIdInt: - case TypeTableEntryIdFloat: - case TypeTableEntryIdComptimeFloat: - case TypeTableEntryIdComptimeInt: - case TypeTableEntryIdUndefined: - case TypeTableEntryIdNull: - case TypeTableEntryIdErrorSet: - case TypeTableEntryIdEnum: - case TypeTableEntryIdFn: - case TypeTableEntryIdNamespace: - case TypeTableEntryIdBlock: - case TypeTableEntryIdBoundFn: - case TypeTableEntryIdMetaType: - case TypeTableEntryIdOpaque: - case TypeTableEntryIdPromise: - return false; - } - zig_unreachable(); -} void init_const_undefined(CodeGen *g, ConstExprValue *const_val) { TypeTableEntry *wanted_type = const_val->type; diff --git a/src/analyze.hpp b/src/analyze.hpp index 100f85d4d9..88e06b2390 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -93,7 +93,6 @@ void ensure_complete_type(CodeGen *g, TypeTableEntry *type_entry); void type_ensure_zero_bits_known(CodeGen *g, TypeTableEntry *type_entry); void complete_enum(CodeGen *g, TypeTableEntry *enum_type); bool ir_get_var_is_comptime(VariableTableEntry *var); -bool contains_comptime_undefined_value(ConstExprValue *value); bool const_values_equal(ConstExprValue *a, ConstExprValue *b); void eval_min_max_value(CodeGen *g, TypeTableEntry *type_entry, ConstExprValue *const_val, bool is_max); void eval_min_max_value_int(CodeGen *g, TypeTableEntry *int_type, BigInt *bigint, bool is_max); diff --git a/src/ir.cpp b/src/ir.cpp index 2cce4a5044..c6078e755d 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -9148,15 +9148,8 @@ enum UndefAllowed { static ConstExprValue *ir_resolve_const(IrAnalyze *ira, IrInstruction *value, UndefAllowed undef_allowed) { switch (value->value.special) { - case ConstValSpecialStatic: { - ConstExprValue *res = &value->value; - if (undef_allowed == UndefBad && contains_comptime_undefined_value(res)) { - ir_add_error(ira, value, buf_sprintf("use of undefined value")); - return nullptr; - } - - return res; - } + case ConstValSpecialStatic: + return &value->value; case ConstValSpecialRuntime: ir_add_error(ira, value, buf_sprintf("unable to evaluate constant expression")); return nullptr; diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 8749f5b560..2247f0af96 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -4124,19 +4124,4 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { , ".tmp_source.zig:3:36: error: @ArgType could not resolve the type of arg 0 because 'fn(var)var' is generic", ); - - cases.add( - "Trying to pass undefined array to function taking comptime array by value", - \\fn a(comptime b: [2]u8) u8 { return b[0]; } - \\ - \\test "" { - \\ const arr: [2]u8 = undefined; - \\ _ = a(arr); - \\} - , - ".tmp_source.zig:5:11: error: use of undefined value", - ); - - - } -- cgit v1.2.3 From 291afcf75ab458e54a8ccd78dfd1531debfd2e40 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 3 Jul 2018 14:20:26 -0400 Subject: fix runtime libc detection depending on locale closes #1165 --- src/analyze.cpp | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index 6f94deb9fd..ca582dfc4c 100644 --- a/src/analyze.cpp +++ 
b/src/analyze.cpp @@ -4417,22 +4417,14 @@ Buf *get_linux_libc_include_path(void) { } char *prev_newline = buf_ptr(out_stderr); ZigList search_paths = {}; - bool found_search_paths = false; for (;;) { char *newline = strchr(prev_newline, '\n'); if (newline == nullptr) { - zig_panic("unable to determine libc include path: bad output from C compiler command"); + break; } *newline = 0; - if (found_search_paths) { - if (strcmp(prev_newline, "End of search list.") == 0) { - break; - } + if (prev_newline[0] == ' ') { search_paths.append(prev_newline); - } else { - if (strcmp(prev_newline, "#include <...> search starts here:") == 0) { - found_search_paths = true; - } } prev_newline = newline + 1; } -- cgit v1.2.3 From 1cf7511dc9d449473748675a5e734e81ea7c85c2 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 6 Jul 2018 16:20:46 -0400 Subject: add compile error notes for where struct definitions are closes #1202 --- src/analyze.cpp | 37 +++++++++++++++++++++++++++++++++++++ src/analyze.hpp | 1 + src/ir.cpp | 47 ++++++++++++++++++++++++++++++++++++----------- test/compile_errors.zig | 42 ++++++++++++++++++++++++++++++++++++------ 4 files changed, 110 insertions(+), 17 deletions(-) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index ca582dfc4c..643a85634e 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -212,6 +212,43 @@ static uint8_t bits_needed_for_unsigned(uint64_t x) { return (upper >= x) ? base : (base + 1); } +AstNode *type_decl_node(TypeTableEntry *type_entry) { + switch (type_entry->id) { + case TypeTableEntryIdInvalid: + zig_unreachable(); + case TypeTableEntryIdStruct: + return type_entry->data.structure.decl_node; + case TypeTableEntryIdEnum: + return type_entry->data.enumeration.decl_node; + case TypeTableEntryIdUnion: + return type_entry->data.unionation.decl_node; + case TypeTableEntryIdOpaque: + case TypeTableEntryIdMetaType: + case TypeTableEntryIdVoid: + case TypeTableEntryIdBool: + case TypeTableEntryIdUnreachable: + case TypeTableEntryIdInt: + case TypeTableEntryIdFloat: + case TypeTableEntryIdPointer: + case TypeTableEntryIdArray: + case TypeTableEntryIdComptimeFloat: + case TypeTableEntryIdComptimeInt: + case TypeTableEntryIdUndefined: + case TypeTableEntryIdNull: + case TypeTableEntryIdOptional: + case TypeTableEntryIdErrorUnion: + case TypeTableEntryIdErrorSet: + case TypeTableEntryIdFn: + case TypeTableEntryIdNamespace: + case TypeTableEntryIdBlock: + case TypeTableEntryIdBoundFn: + case TypeTableEntryIdArgTuple: + case TypeTableEntryIdPromise: + return nullptr; + } + zig_unreachable(); +} + bool type_is_complete(TypeTableEntry *type_entry) { switch (type_entry->id) { case TypeTableEntryIdInvalid: diff --git a/src/analyze.hpp b/src/analyze.hpp index c2730197e2..5168509fe0 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -202,5 +202,6 @@ uint32_t get_coro_frame_align_bytes(CodeGen *g); bool fn_type_can_fail(FnTypeId *fn_type_id); bool type_can_fail(TypeTableEntry *type_entry); bool fn_eval_cacheable(Scope *scope, TypeTableEntry *return_type); +AstNode *type_decl_node(TypeTableEntry *type_entry); #endif diff --git a/src/ir.cpp b/src/ir.cpp index 204ebb332a..3ad7c77645 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -82,6 +82,7 @@ struct ConstCastSliceMismatch; struct ConstCastErrUnionErrSetMismatch; struct ConstCastErrUnionPayloadMismatch; struct ConstCastErrSetMismatch; +struct ConstCastTypeMismatch; struct ConstCastOnly { ConstCastResultId id; @@ -92,6 +93,7 @@ struct ConstCastOnly { ConstCastOptionalMismatch *optional; 
ConstCastErrUnionPayloadMismatch *error_union_payload; ConstCastErrUnionErrSetMismatch *error_union_error_set; + ConstCastTypeMismatch *type_mismatch; ConstCastOnly *return_type; ConstCastOnly *async_allocator_type; ConstCastOnly *null_wrap_ptr_child; @@ -100,6 +102,11 @@ struct ConstCastOnly { } data; }; +struct ConstCastTypeMismatch { + TypeTableEntry *wanted_type; + TypeTableEntry *actual_type; +}; + struct ConstCastOptionalMismatch { ConstCastOnly child; TypeTableEntry *wanted_child; @@ -8128,15 +8135,7 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry } // pointer const - if (wanted_type->id == TypeTableEntryIdPointer && - actual_type->id == TypeTableEntryIdPointer && - (actual_type->data.pointer.ptr_len == wanted_type->data.pointer.ptr_len) && - (!actual_type->data.pointer.is_const || wanted_type->data.pointer.is_const) && - (!actual_type->data.pointer.is_volatile || wanted_type->data.pointer.is_volatile) && - actual_type->data.pointer.bit_offset == wanted_type->data.pointer.bit_offset && - actual_type->data.pointer.unaligned_bit_count == wanted_type->data.pointer.unaligned_bit_count && - actual_type->data.pointer.alignment >= wanted_type->data.pointer.alignment) - { + if (wanted_type->id == TypeTableEntryIdPointer && actual_type->id == TypeTableEntryIdPointer) { ConstCastOnly child = types_match_const_cast_only(ira, wanted_type->data.pointer.child_type, actual_type->data.pointer.child_type, source_node, !wanted_type->data.pointer.is_const); if (child.id != ConstCastResultIdOk) { @@ -8145,8 +8144,17 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry result.data.pointer_mismatch->child = child; result.data.pointer_mismatch->wanted_child = wanted_type->data.pointer.child_type; result.data.pointer_mismatch->actual_child = actual_type->data.pointer.child_type; + return result; + } + if ((actual_type->data.pointer.ptr_len == wanted_type->data.pointer.ptr_len) && + (!actual_type->data.pointer.is_const || wanted_type->data.pointer.is_const) && + (!actual_type->data.pointer.is_volatile || wanted_type->data.pointer.is_volatile) && + actual_type->data.pointer.bit_offset == wanted_type->data.pointer.bit_offset && + actual_type->data.pointer.unaligned_bit_count == wanted_type->data.pointer.unaligned_bit_count && + actual_type->data.pointer.alignment >= wanted_type->data.pointer.alignment) + { + return result; } - return result; } // slice const @@ -8341,6 +8349,9 @@ static ConstCastOnly types_match_const_cast_only(IrAnalyze *ira, TypeTableEntry } result.id = ConstCastResultIdType; + result.data.type_mismatch = allocate_nonzero(1); + result.data.type_mismatch->wanted_type = wanted_type; + result.data.type_mismatch->actual_type = actual_type; return result; } @@ -10154,6 +10165,21 @@ static void report_recursive_error(IrAnalyze *ira, AstNode *source_node, ConstCa report_recursive_error(ira, source_node, &cast_result->data.error_union_payload->child, msg); break; } + case ConstCastResultIdType: { + AstNode *wanted_decl_node = type_decl_node(cast_result->data.type_mismatch->wanted_type); + AstNode *actual_decl_node = type_decl_node(cast_result->data.type_mismatch->actual_type); + if (wanted_decl_node != nullptr) { + add_error_note(ira->codegen, parent_msg, wanted_decl_node, + buf_sprintf("%s declared here", + buf_ptr(&cast_result->data.type_mismatch->wanted_type->name))); + } + if (actual_decl_node != nullptr) { + add_error_note(ira->codegen, parent_msg, actual_decl_node, + buf_sprintf("%s declared here", + 
buf_ptr(&cast_result->data.type_mismatch->actual_type->name))); + } + break; + } case ConstCastResultIdFnAlign: // TODO case ConstCastResultIdFnCC: // TODO case ConstCastResultIdFnVarArgs: // TODO @@ -10163,7 +10189,6 @@ static void report_recursive_error(IrAnalyze *ira, AstNode *source_node, ConstCa case ConstCastResultIdFnGenericArgCount: // TODO case ConstCastResultIdFnArg: // TODO case ConstCastResultIdFnArgNoAlias: // TODO - case ConstCastResultIdType: // TODO case ConstCastResultIdUnresolvedInferredErrSet: // TODO case ConstCastResultIdAsyncAllocatorType: // TODO case ConstCastResultIdNullWrapPtr: // TODO diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 8bd5480395..d508c7c36c 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -1,6 +1,40 @@ const tests = @import("tests.zig"); pub fn addCases(cases: *tests.CompileErrorContext) void { + cases.addCase(x: { + const tc = cases.create( + "wrong same named struct", + \\const a = @import("a.zig"); + \\const b = @import("b.zig"); + \\ + \\export fn entry() void { + \\ var a1: a.Foo = undefined; + \\ bar(&a1); + \\} + \\ + \\fn bar(x: *b.Foo) void {} + , + ".tmp_source.zig:6:10: error: expected type '*Foo', found '*Foo'", + ".tmp_source.zig:6:10: note: pointer type child 'Foo' cannot cast into pointer type child 'Foo'", + "a.zig:1:17: note: Foo declared here", + "b.zig:1:17: note: Foo declared here", + ); + + tc.addSourceFile("a.zig", + \\pub const Foo = struct { + \\ x: i32, + \\}; + ); + + tc.addSourceFile("b.zig", + \\pub const Foo = struct { + \\ z: f64, + \\}; + ); + + break :x tc; + }); + cases.add( "enum field value references enum", \\pub const Foo = extern enum { @@ -358,9 +392,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { ".tmp_source.zig:3:14: note: other value is here", ); - - cases.add( - "invalid cast from integral type to enum", + cases.add("invalid cast from integral type to enum", \\const E = enum(usize) { One, Two }; \\ \\export fn entry() void { @@ -372,9 +404,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ E.One => {}, \\ } \\} - , - ".tmp_source.zig:9:10: error: expected type 'usize', found 'E'" - ); + , ".tmp_source.zig:9:10: error: expected type 'usize', found 'E'"); cases.add( "range operator in switch used on error set", -- cgit v1.2.3 From d8295c188946b0f07d62420c2f08c940f70b03ac Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 7 Jul 2018 00:25:32 -0400 Subject: add @popCount intrinsic --- doc/langref.html.in | 15 +++++++++-- src/all_types.hpp | 12 +++++++++ src/analyze.cpp | 4 +++ src/bigint.cpp | 31 ++++++++++++++++++++++ src/bigint.hpp | 2 ++ src/codegen.cpp | 21 ++++++++++++++- src/ir.cpp | 68 +++++++++++++++++++++++++++++++++++++++++++++++++ src/ir_print.cpp | 9 +++++++ test/behavior.zig | 7 ++--- test/cases/popcount.zig | 24 +++++++++++++++++ test/compile_errors.zig | 18 +++++++++++++ 11 files changed, 205 insertions(+), 6 deletions(-) create mode 100644 test/cases/popcount.zig (limited to 'src/analyze.cpp') diff --git a/doc/langref.html.in b/doc/langref.html.in index 5c1cc130ac..8eaffb64ad 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -5013,7 +5013,7 @@ comptime {

      If x is zero, @clz returns T.bit_count.

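A short usage sketch of the zero case described above, written in the test style used elsewhere in this repository (the test name and input value are chosen arbitrarily for illustration):

    const assert = @import("std").debug.assert;

    test "clz" {
        var x: u8 = 0b00010000;
        assert(@clz(x) == 3); // three leading zero bits in a u8
        x = 0;
        assert(@clz(x) == 8); // T.bit_count when x is zero
    }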
      - + {#see_also|@ctz|@popCount#} {#header_close#} {#header_open|@cmpxchgStrong#}
      @cmpxchgStrong(comptime T: type, ptr: *T, expected_value: T, new_value: T, success_order: AtomicOrder, fail_order: AtomicOrder) ?T
      @@ -5149,6 +5149,7 @@ test "main" {

      If x is zero, @ctz returns T.bit_count.

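The corresponding zero case for @ctz, again as a test-style sketch with an arbitrarily chosen input:

    const assert = @import("std").debug.assert;

    test "ctz" {
        var x: u16 = 0b0100;
        assert(@ctz(x) == 2);  // two trailing zero bits
        x = 0;
        assert(@ctz(x) == 16); // T.bit_count when x is zero
    }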
      + {#see_also|@clz|@popCount#} {#header_close#} {#header_open|@divExact#}
      @divExact(numerator: T, denominator: T) T
      @@ -5631,6 +5632,16 @@ test "call foo" {
    {#see_also|Root Source File#} {#header_close#} + {#header_open|@popCount#} +
    @popCount(integer: var) var
    +

    Counts the number of bits set in an integer.

    +

    + If integer is known at {#link|comptime#}, the return type is comptime_int. + Otherwise, the return type is an unsigned integer with the minimum number + of bits that can represent the bit count of the integer type. +

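The behavior test added by this commit (test/cases/popcount.zig, shown further down in the patch) exercises exactly these rules; a condensed version:

    const assert = @import("std").debug.assert;

    test "@popCount" {
        var x: u32 = 0xaa;
        assert(@popCount(x) == 4);   // runtime operand: result is a small unsigned int
        var y: i16 = -1;
        assert(@popCount(y) == 16);  // signed operands count bits of the two's complement value
        comptime {
            assert(@popCount(0xaa) == 4); // comptime_int operand: result is comptime_int
        }
    }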
    + {#see_also|@ctz|@clz#} + {#header_close#} {#header_open|@ptrCast#}
    @ptrCast(comptime DestType: type, value: var) DestType

    @@ -7337,7 +7348,7 @@ hljs.registerLanguage("zig", function(t) { a = t.IR + "\\s*\\(", c = { keyword: "const align var extern stdcallcc nakedcc volatile export pub noalias inline struct packed enum union break return try catch test continue unreachable comptime and or asm defer errdefer if else switch while for fn use bool f32 f64 void type noreturn error i8 u8 i16 u16 i32 u32 i64 u64 isize usize i8w u8w i16w i32w u32w i64w u64w isizew usizew c_short c_ushort c_int c_uint c_long c_ulong c_longlong c_ulonglong resume cancel await async orelse", - built_in: "atomicLoad breakpoint returnAddress frameAddress fieldParentPtr setFloatMode IntType OpaqueType compileError compileLog setCold setRuntimeSafety setEvalBranchQuota offsetOf memcpy inlineCall setGlobalLinkage divTrunc divFloor enumTagName intToPtr ptrToInt panic ptrCast intCast floatCast intToFloat floatToInt boolToInt bytesToSlice sliceToBytes errSetCast bitCast rem mod memset sizeOf alignOf alignCast maxValue minValue memberCount memberName memberType typeOf addWithOverflow subWithOverflow mulWithOverflow shlWithOverflow shlExact shrExact cInclude cDefine cUndef ctz clz import cImport errorName embedFile cmpxchgStrong cmpxchgWeak fence divExact truncate atomicRmw sqrt field typeInfo typeName newStackCall errorToInt intToError enumToInt intToEnum", + built_in: "atomicLoad breakpoint returnAddress frameAddress fieldParentPtr setFloatMode IntType OpaqueType compileError compileLog setCold setRuntimeSafety setEvalBranchQuota offsetOf memcpy inlineCall setGlobalLinkage divTrunc divFloor enumTagName intToPtr ptrToInt panic ptrCast intCast floatCast intToFloat floatToInt boolToInt bytesToSlice sliceToBytes errSetCast bitCast rem mod memset sizeOf alignOf alignCast maxValue minValue memberCount memberName memberType typeOf addWithOverflow subWithOverflow mulWithOverflow shlWithOverflow shlExact shrExact cInclude cDefine cUndef ctz clz popCount import cImport errorName embedFile cmpxchgStrong cmpxchgWeak fence divExact truncate atomicRmw sqrt field typeInfo typeName newStackCall errorToInt intToError enumToInt intToEnum", literal: "true false null undefined" }, n = [e, t.CLCM, t.CBCM, s, r]; diff --git a/src/all_types.hpp b/src/all_types.hpp index 4d97be468c..6dcf1894d8 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1352,6 +1352,7 @@ enum BuiltinFnId { BuiltinFnIdCompileLog, BuiltinFnIdCtz, BuiltinFnIdClz, + BuiltinFnIdPopCount, BuiltinFnIdImport, BuiltinFnIdCImport, BuiltinFnIdErrName, @@ -1477,6 +1478,7 @@ bool type_id_eql(TypeId a, TypeId b); enum ZigLLVMFnId { ZigLLVMFnIdCtz, ZigLLVMFnIdClz, + ZigLLVMFnIdPopCount, ZigLLVMFnIdOverflowArithmetic, ZigLLVMFnIdFloor, ZigLLVMFnIdCeil, @@ -1499,6 +1501,9 @@ struct ZigLLVMFnKey { struct { uint32_t bit_count; } clz; + struct { + uint32_t bit_count; + } pop_count; struct { uint32_t bit_count; } floating; @@ -2050,6 +2055,7 @@ enum IrInstructionId { IrInstructionIdUnionTag, IrInstructionIdClz, IrInstructionIdCtz, + IrInstructionIdPopCount, IrInstructionIdImport, IrInstructionIdCImport, IrInstructionIdCInclude, @@ -2545,6 +2551,12 @@ struct IrInstructionClz { IrInstruction *value; }; +struct IrInstructionPopCount { + IrInstruction base; + + IrInstruction *value; +}; + struct IrInstructionUnionTag { IrInstruction base; diff --git a/src/analyze.cpp b/src/analyze.cpp index 643a85634e..9b60f7374a 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -5976,6 +5976,8 @@ uint32_t zig_llvm_fn_key_hash(ZigLLVMFnKey x) { return (uint32_t)(x.data.ctz.bit_count) * (uint32_t)810453934; case 
ZigLLVMFnIdClz: return (uint32_t)(x.data.clz.bit_count) * (uint32_t)2428952817; + case ZigLLVMFnIdPopCount: + return (uint32_t)(x.data.clz.bit_count) * (uint32_t)101195049; case ZigLLVMFnIdFloor: return (uint32_t)(x.data.floating.bit_count) * (uint32_t)1899859168; case ZigLLVMFnIdCeil: @@ -5998,6 +6000,8 @@ bool zig_llvm_fn_key_eql(ZigLLVMFnKey a, ZigLLVMFnKey b) { return a.data.ctz.bit_count == b.data.ctz.bit_count; case ZigLLVMFnIdClz: return a.data.clz.bit_count == b.data.clz.bit_count; + case ZigLLVMFnIdPopCount: + return a.data.pop_count.bit_count == b.data.pop_count.bit_count; case ZigLLVMFnIdFloor: case ZigLLVMFnIdCeil: case ZigLLVMFnIdSqrt: diff --git a/src/bigint.cpp b/src/bigint.cpp index bb227a7c3d..bf18b9a1bf 100644 --- a/src/bigint.cpp +++ b/src/bigint.cpp @@ -1593,6 +1593,37 @@ void bigint_append_buf(Buf *buf, const BigInt *op, uint64_t base) { } } +size_t bigint_popcount_unsigned(const BigInt *bi) { + assert(!bi->is_negative); + if (bi->digit_count == 0) + return 0; + + size_t count = 0; + size_t bit_count = bi->digit_count * 64; + for (size_t i = 0; i < bit_count; i += 1) { + if (bit_at_index(bi, i)) + count += 1; + } + return count; +} + +size_t bigint_popcount_signed(const BigInt *bi, size_t bit_count) { + if (bit_count == 0) + return 0; + if (bi->digit_count == 0) + return 0; + + BigInt twos_comp = {0}; + to_twos_complement(&twos_comp, bi, bit_count); + + size_t count = 0; + for (size_t i = 0; i < bit_count; i += 1) { + if (bit_at_index(&twos_comp, i)) + count += 1; + } + return count; +} + size_t bigint_ctz(const BigInt *bi, size_t bit_count) { if (bit_count == 0) return 0; diff --git a/src/bigint.hpp b/src/bigint.hpp index 9f044c8722..48b222a227 100644 --- a/src/bigint.hpp +++ b/src/bigint.hpp @@ -81,6 +81,8 @@ void bigint_append_buf(Buf *buf, const BigInt *op, uint64_t base); size_t bigint_ctz(const BigInt *bi, size_t bit_count); size_t bigint_clz(const BigInt *bi, size_t bit_count); +size_t bigint_popcount_signed(const BigInt *bi, size_t bit_count); +size_t bigint_popcount_unsigned(const BigInt *bi); size_t bigint_bits_needed(const BigInt *op); diff --git a/src/codegen.cpp b/src/codegen.cpp index 26ee106959..54e2da7d61 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -3426,14 +3426,22 @@ static LLVMValueRef ir_render_unwrap_maybe(CodeGen *g, IrExecutable *executable, static LLVMValueRef get_int_builtin_fn(CodeGen *g, TypeTableEntry *int_type, BuiltinFnId fn_id) { ZigLLVMFnKey key = {}; const char *fn_name; + uint32_t n_args; if (fn_id == BuiltinFnIdCtz) { fn_name = "cttz"; + n_args = 2; key.id = ZigLLVMFnIdCtz; key.data.ctz.bit_count = (uint32_t)int_type->data.integral.bit_count; } else if (fn_id == BuiltinFnIdClz) { fn_name = "ctlz"; + n_args = 2; key.id = ZigLLVMFnIdClz; key.data.clz.bit_count = (uint32_t)int_type->data.integral.bit_count; + } else if (fn_id == BuiltinFnIdPopCount) { + fn_name = "ctpop"; + n_args = 1; + key.id = ZigLLVMFnIdPopCount; + key.data.pop_count.bit_count = (uint32_t)int_type->data.integral.bit_count; } else { zig_unreachable(); } @@ -3448,7 +3456,7 @@ static LLVMValueRef get_int_builtin_fn(CodeGen *g, TypeTableEntry *int_type, Bui int_type->type_ref, LLVMInt1Type(), }; - LLVMTypeRef fn_type = LLVMFunctionType(int_type->type_ref, param_types, 2, false); + LLVMTypeRef fn_type = LLVMFunctionType(int_type->type_ref, param_types, n_args, false); LLVMValueRef fn_val = LLVMAddFunction(g->module, llvm_name, fn_type); assert(LLVMGetIntrinsicID(fn_val)); @@ -3481,6 +3489,14 @@ static LLVMValueRef ir_render_ctz(CodeGen *g, IrExecutable 
*executable, IrInstru return gen_widen_or_shorten(g, false, int_type, instruction->base.value.type, wrong_size_int); } +static LLVMValueRef ir_render_pop_count(CodeGen *g, IrExecutable *executable, IrInstructionPopCount *instruction) { + TypeTableEntry *int_type = instruction->value->value.type; + LLVMValueRef fn_val = get_int_builtin_fn(g, int_type, BuiltinFnIdPopCount); + LLVMValueRef operand = ir_llvm_value(g, instruction->value); + LLVMValueRef wrong_size_int = LLVMBuildCall(g->builder, fn_val, &operand, 1, ""); + return gen_widen_or_shorten(g, false, int_type, instruction->base.value.type, wrong_size_int); +} + static LLVMValueRef ir_render_switch_br(CodeGen *g, IrExecutable *executable, IrInstructionSwitchBr *instruction) { LLVMValueRef target_value = ir_llvm_value(g, instruction->target_value); LLVMBasicBlockRef else_block = instruction->else_block->llvm_block; @@ -4831,6 +4847,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_clz(g, executable, (IrInstructionClz *)instruction); case IrInstructionIdCtz: return ir_render_ctz(g, executable, (IrInstructionCtz *)instruction); + case IrInstructionIdPopCount: + return ir_render_pop_count(g, executable, (IrInstructionPopCount *)instruction); case IrInstructionIdSwitchBr: return ir_render_switch_br(g, executable, (IrInstructionSwitchBr *)instruction); case IrInstructionIdPhi: @@ -6342,6 +6360,7 @@ static void define_builtin_fns(CodeGen *g) { create_builtin_fn(g, BuiltinFnIdCUndef, "cUndef", 1); create_builtin_fn(g, BuiltinFnIdCtz, "ctz", 1); create_builtin_fn(g, BuiltinFnIdClz, "clz", 1); + create_builtin_fn(g, BuiltinFnIdPopCount, "popCount", 1); create_builtin_fn(g, BuiltinFnIdImport, "import", 1); create_builtin_fn(g, BuiltinFnIdCImport, "cImport", 1); create_builtin_fn(g, BuiltinFnIdErrName, "errorName", 1); diff --git a/src/ir.cpp b/src/ir.cpp index 3ad7c77645..98b1bd85ad 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -427,6 +427,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionCtz *) { return IrInstructionIdCtz; } +static constexpr IrInstructionId ir_instruction_id(IrInstructionPopCount *) { + return IrInstructionIdPopCount; +} + static constexpr IrInstructionId ir_instruction_id(IrInstructionUnionTag *) { return IrInstructionIdUnionTag; } @@ -1725,6 +1729,15 @@ static IrInstruction *ir_build_ctz_from(IrBuilder *irb, IrInstruction *old_instr return new_instruction; } +static IrInstruction *ir_build_pop_count(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *value) { + IrInstructionPopCount *instruction = ir_build_instruction(irb, scope, source_node); + instruction->value = value; + + ir_ref_instruction(value, irb->current_basic_block); + + return &instruction->base; +} + static IrInstruction *ir_build_switch_br(IrBuilder *irb, Scope *scope, AstNode *source_node, IrInstruction *target_value, IrBasicBlock *else_block, size_t case_count, IrInstructionSwitchBrCase *cases, IrInstruction *is_comptime, IrInstruction *switch_prongs_void) @@ -3841,6 +3854,16 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo IrInstruction *ctz = ir_build_ctz(irb, scope, node, arg0_value); return ir_lval_wrap(irb, scope, ctz, lval); } + case BuiltinFnIdPopCount: + { + AstNode *arg0_node = node->data.fn_call_expr.params.at(0); + IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope); + if (arg0_value == irb->codegen->invalid_instruction) + return arg0_value; + + IrInstruction *instr = ir_build_pop_count(irb, scope, node, arg0_value); + 
return ir_lval_wrap(irb, scope, instr, lval); + } case BuiltinFnIdClz: { AstNode *arg0_node = node->data.fn_call_expr.params.at(0); @@ -15275,6 +15298,48 @@ static TypeTableEntry *ir_analyze_instruction_clz(IrAnalyze *ira, IrInstructionC } } +static TypeTableEntry *ir_analyze_instruction_pop_count(IrAnalyze *ira, IrInstructionPopCount *instruction) { + IrInstruction *value = instruction->value->other; + if (type_is_invalid(value->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + if (value->value.type->id != TypeTableEntryIdInt && value->value.type->id != TypeTableEntryIdComptimeInt) { + ir_add_error(ira, value, + buf_sprintf("expected integer type, found '%s'", buf_ptr(&value->value.type->name))); + return ira->codegen->builtin_types.entry_invalid; + } + + if (instr_is_comptime(value)) { + ConstExprValue *val = ir_resolve_const(ira, value, UndefBad); + if (!val) + return ira->codegen->builtin_types.entry_invalid; + if (bigint_cmp_zero(&val->data.x_bigint) != CmpLT) { + size_t result = bigint_popcount_unsigned(&val->data.x_bigint); + ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base); + bigint_init_unsigned(&out_val->data.x_bigint, result); + return ira->codegen->builtin_types.entry_num_lit_int; + } + if (value->value.type->id == TypeTableEntryIdComptimeInt) { + Buf *val_buf = buf_alloc(); + bigint_append_buf(val_buf, &val->data.x_bigint, 10); + ir_add_error(ira, &instruction->base, + buf_sprintf("@popCount on negative %s value %s", + buf_ptr(&value->value.type->name), buf_ptr(val_buf))); + return ira->codegen->builtin_types.entry_invalid; + } + size_t result = bigint_popcount_signed(&val->data.x_bigint, value->value.type->data.integral.bit_count); + ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base); + bigint_init_unsigned(&out_val->data.x_bigint, result); + return ira->codegen->builtin_types.entry_num_lit_int; + } + + IrInstruction *result = ir_build_pop_count(&ira->new_irb, instruction->base.scope, + instruction->base.source_node, value); + result->value.type = get_smallest_unsigned_int_type(ira->codegen, value->value.type->data.integral.bit_count); + ir_link_new_instruction(result, &instruction->base); + return result->value.type; +} + static IrInstruction *ir_analyze_union_tag(IrAnalyze *ira, IrInstruction *source_instr, IrInstruction *value) { if (type_is_invalid(value->value.type)) return ira->codegen->invalid_instruction; @@ -20534,6 +20599,8 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi return ir_analyze_instruction_clz(ira, (IrInstructionClz *)instruction); case IrInstructionIdCtz: return ir_analyze_instruction_ctz(ira, (IrInstructionCtz *)instruction); + case IrInstructionIdPopCount: + return ir_analyze_instruction_pop_count(ira, (IrInstructionPopCount *)instruction); case IrInstructionIdSwitchBr: return ir_analyze_instruction_switch_br(ira, (IrInstructionSwitchBr *)instruction); case IrInstructionIdSwitchTarget: @@ -20892,6 +20959,7 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdUnwrapOptional: case IrInstructionIdClz: case IrInstructionIdCtz: + case IrInstructionIdPopCount: case IrInstructionIdSwitchVar: case IrInstructionIdSwitchTarget: case IrInstructionIdUnionTag: diff --git a/src/ir_print.cpp b/src/ir_print.cpp index 5e5a71382c..780cf9e756 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -501,6 +501,12 @@ static void ir_print_ctz(IrPrint *irp, IrInstructionCtz *instruction) { fprintf(irp->f, ")"); } +static void ir_print_pop_count(IrPrint 
*irp, IrInstructionPopCount *instruction) { + fprintf(irp->f, "@popCount("); + ir_print_other_instruction(irp, instruction->value); + fprintf(irp->f, ")"); +} + static void ir_print_switch_br(IrPrint *irp, IrInstructionSwitchBr *instruction) { fprintf(irp->f, "switch ("); ir_print_other_instruction(irp, instruction->target_value); @@ -1425,6 +1431,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdCtz: ir_print_ctz(irp, (IrInstructionCtz *)instruction); break; + case IrInstructionIdPopCount: + ir_print_pop_count(irp, (IrInstructionPopCount *)instruction); + break; case IrInstructionIdClz: ir_print_clz(irp, (IrInstructionClz *)instruction); break; diff --git a/test/behavior.zig b/test/behavior.zig index d47eb8fd6c..450dded56c 100644 --- a/test/behavior.zig +++ b/test/behavior.zig @@ -8,17 +8,17 @@ comptime { _ = @import("cases/atomics.zig"); _ = @import("cases/bitcast.zig"); _ = @import("cases/bool.zig"); + _ = @import("cases/bugs/1111.zig"); _ = @import("cases/bugs/394.zig"); _ = @import("cases/bugs/655.zig"); _ = @import("cases/bugs/656.zig"); _ = @import("cases/bugs/828.zig"); _ = @import("cases/bugs/920.zig"); - _ = @import("cases/bugs/1111.zig"); _ = @import("cases/byval_arg_var.zig"); _ = @import("cases/cast.zig"); _ = @import("cases/const_slice_child.zig"); - _ = @import("cases/coroutines.zig"); _ = @import("cases/coroutine_await_struct.zig"); + _ = @import("cases/coroutines.zig"); _ = @import("cases/defer.zig"); _ = @import("cases/enum.zig"); _ = @import("cases/enum_with_members.zig"); @@ -36,11 +36,12 @@ comptime { _ = @import("cases/math.zig"); _ = @import("cases/merge_error_sets.zig"); _ = @import("cases/misc.zig"); - _ = @import("cases/optional.zig"); _ = @import("cases/namespace_depends_on_compile_var/index.zig"); _ = @import("cases/new_stack_call.zig"); _ = @import("cases/null.zig"); + _ = @import("cases/optional.zig"); _ = @import("cases/pointers.zig"); + _ = @import("cases/popcount.zig"); _ = @import("cases/pub_enum/index.zig"); _ = @import("cases/ref_var_in_if_after_if_2nd_switch_prong.zig"); _ = @import("cases/reflection.zig"); diff --git a/test/cases/popcount.zig b/test/cases/popcount.zig new file mode 100644 index 0000000000..7dc7f28c0e --- /dev/null +++ b/test/cases/popcount.zig @@ -0,0 +1,24 @@ +const assert = @import("std").debug.assert; + +test "@popCount" { + comptime testPopCount(); + testPopCount(); +} + +fn testPopCount() void { + { + var x: u32 = 0xaa; + assert(@popCount(x) == 4); + } + { + var x: u32 = 0xaaaaaaaa; + assert(@popCount(x) == 16); + } + { + var x: i16 = -1; + assert(@popCount(x) == 16); + } + comptime { + assert(@popCount(0b11111111000110001100010000100001000011000011100101010001) == 24); + } +} diff --git a/test/compile_errors.zig b/test/compile_errors.zig index d508c7c36c..9071f0ad7e 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -1,6 +1,24 @@ const tests = @import("tests.zig"); pub fn addCases(cases: *tests.CompileErrorContext) void { + cases.add( + "@popCount - non-integer", + \\export fn entry(x: f32) u32 { + \\ return @popCount(x); + \\} + , + ".tmp_source.zig:2:22: error: expected integer type, found 'f32'", + ); + + cases.add( + "@popCount - negative comptime_int", + \\comptime { + \\ _ = @popCount(-1); + \\} + , + ".tmp_source.zig:2:9: error: @popCount on negative comptime_int value -1", + ); + cases.addCase(x: { const tc = cases.create( "wrong same named struct", -- cgit v1.2.3 From 3f30897fdcdb6c5579bc5609dda9746f67551870 Mon Sep 17 00:00:00 2001 From: Andrew 
Kelley Date: Wed, 11 Jul 2018 13:23:37 -0400 Subject: add compile error for disallowed types in extern structs closes #1218 --- src/analyze.cpp | 23 ++++++++++++++++++++--- std/c/darwin.zig | 2 +- test/compile_errors.zig | 27 +++++++++++++++++++++++++++ 3 files changed, 48 insertions(+), 4 deletions(-) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index 9b60f7374a..5635cce411 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -1430,10 +1430,10 @@ static bool type_allowed_in_extern(CodeGen *g, TypeTableEntry *type_entry) { case TypeTableEntryIdBoundFn: case TypeTableEntryIdArgTuple: case TypeTableEntryIdPromise: + case TypeTableEntryIdVoid: return false; case TypeTableEntryIdOpaque: case TypeTableEntryIdUnreachable: - case TypeTableEntryIdVoid: case TypeTableEntryIdBool: return true; case TypeTableEntryIdInt: @@ -1460,7 +1460,10 @@ static bool type_allowed_in_extern(CodeGen *g, TypeTableEntry *type_entry) { case TypeTableEntryIdOptional: { TypeTableEntry *child_type = type_entry->data.maybe.child_type; - return child_type->id == TypeTableEntryIdPointer || child_type->id == TypeTableEntryIdFn; + if (child_type->id != TypeTableEntryIdPointer && child_type->id != TypeTableEntryIdFn) { + return false; + } + return type_allowed_in_extern(g, child_type); } case TypeTableEntryIdEnum: return type_entry->data.enumeration.layout == ContainerLayoutExtern || type_entry->data.enumeration.layout == ContainerLayoutPacked; @@ -1637,7 +1640,10 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c fn_type_id.return_type = specified_return_type; } - if (!calling_convention_allows_zig_types(fn_type_id.cc) && !type_allowed_in_extern(g, fn_type_id.return_type)) { + if (!calling_convention_allows_zig_types(fn_type_id.cc) && + fn_type_id.return_type->id != TypeTableEntryIdVoid && + !type_allowed_in_extern(g, fn_type_id.return_type)) + { add_node_error(g, fn_proto->return_type, buf_sprintf("return type '%s' not allowed in function with calling convention '%s'", buf_ptr(&fn_type_id.return_type->name), @@ -1939,6 +1945,17 @@ static void resolve_struct_type(CodeGen *g, TypeTableEntry *struct_type) { break; } + if (struct_type->data.structure.layout == ContainerLayoutExtern) { + if (!type_allowed_in_extern(g, field_type)) { + AstNode *field_source_node = decl_node->data.container_decl.fields.at(i); + add_node_error(g, field_source_node, + buf_sprintf("extern structs cannot contain fields of type '%s'", + buf_ptr(&field_type->name))); + struct_type->data.structure.is_invalid = true; + break; + } + } + if (!type_has_bits(field_type)) continue; diff --git a/std/c/darwin.zig b/std/c/darwin.zig index 133ef62f05..4189dfeadc 100644 --- a/std/c/darwin.zig +++ b/std/c/darwin.zig @@ -44,7 +44,7 @@ pub const timezone = extern struct { tz_dsttime: i32, }; -pub const mach_timebase_info_data = struct { +pub const mach_timebase_info_data = extern struct { numer: u32, denom: u32, }; diff --git a/test/compile_errors.zig b/test/compile_errors.zig index a6db8d50b4..58c73b8ae4 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -1,6 +1,33 @@ const tests = @import("tests.zig"); pub fn addCases(cases: *tests.CompileErrorContext) void { + cases.add( + "optional pointer to void in extern struct", + \\const Foo = extern struct { + \\ x: ?*const void, + \\}; + \\const Bar = extern struct { + \\ foo: Foo, + \\ y: i32, + \\}; + \\export fn entry(bar: *Bar) void {} + , + ".tmp_source.zig:2:5: error: extern structs cannot contain fields of type '?*const void'", + ); 
+ + cases.add( + "use of comptime-known undefined function value", + \\const Cmd = struct { + \\ exec: fn () void, + \\}; + \\export fn entry() void { + \\ const command = Cmd{ .exec = undefined }; + \\ command.exec(); + \\} + , + ".tmp_source.zig:6:12: error: use of undefined value", + ); + cases.add( "use of comptime-known undefined function value", \\const Cmd = struct { -- cgit v1.2.3 From e9a03cccf375f11aa4e0a8a3515e499c88d05cde Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 16 Jul 2018 10:53:15 -0400 Subject: all integer sizes are available as primitives * fix wrong implicit cast for `@IntType` bit_count parameter. * fix incorrect docs for `@IntType` bit_count parameter. closes #1242 closes #745 closes #1240 --- doc/langref.html.in | 12 ++++---- src/all_types.hpp | 4 --- src/analyze.cpp | 73 +++++++++++++++++++++---------------------------- src/analyze.hpp | 3 +- src/codegen.cpp | 13 --------- src/ir.cpp | 21 ++++++++------ src/translate_c.cpp | 2 +- std/buffer.zig | 2 -- std/crypto/sha1.zig | 2 -- std/json.zig | 3 -- std/math/big/int.zig | 1 - std/math/exp2.zig | 24 ++++++++-------- std/math/index.zig | 2 +- std/os/time.zig | 1 - test/cases/misc.zig | 5 ---- test/cases/struct.zig | 1 - test/compile_errors.zig | 9 ++++++ 17 files changed, 74 insertions(+), 104 deletions(-) (limited to 'src/analyze.cpp') diff --git a/doc/langref.html.in b/doc/langref.html.in index ea672ccb17..46b325832b 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -2310,11 +2310,11 @@ test "while loop continue expression" { } test "while loop continue expression, more complicated" { - var i1: usize = 1; - var j1: usize = 1; - while (i1 * j1 < 2000) : ({ i1 *= 2; j1 *= 3; }) { - const my_ij1 = i1 * j1; - assert(my_ij1 < 2000); + var i: usize = 1; + var j: usize = 1; + while (i * j < 2000) : ({ i *= 2; j *= 3; }) { + const my_ij = i * j; + assert(my_ij < 2000); } } {#code_end#} @@ -5424,7 +5424,7 @@ fn add(a: i32, b: i32) i32 { return a + b; } {#header_close#} {#header_open|@IntType#} -

    @IntType(comptime is_signed: bool, comptime bit_count: u8) type
    +
    @IntType(comptime is_signed: bool, comptime bit_count: u32) type

      This function returns an integer type with the given signedness and bit count.

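Because this commit also makes every integer width available as a primitive type name, @IntType and the corresponding iN/uN primitives are interchangeable. A small comptime sketch (u21 and i21 picked arbitrarily for illustration):

    const assert = @import("std").debug.assert;

    test "@IntType matches arbitrary-width primitives" {
        comptime {
            assert(@IntType(false, 21) == u21);
            assert(@IntType(true, 21) == i21);
            assert(u21.bit_count == 21);
        }
    }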
    diff --git a/src/all_types.hpp b/src/all_types.hpp index 2da0677e1b..bcd6a04cc3 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1587,7 +1587,6 @@ struct CodeGen { struct { TypeTableEntry *entry_bool; - TypeTableEntry *entry_int[2][12]; // [signed,unsigned][2,3,4,5,6,7,8,16,29,32,64,128] TypeTableEntry *entry_c_int[CIntTypeCount]; TypeTableEntry *entry_c_longdouble; TypeTableEntry *entry_c_void; @@ -1596,12 +1595,9 @@ struct CodeGen { TypeTableEntry *entry_u32; TypeTableEntry *entry_u29; TypeTableEntry *entry_u64; - TypeTableEntry *entry_u128; TypeTableEntry *entry_i8; - TypeTableEntry *entry_i16; TypeTableEntry *entry_i32; TypeTableEntry *entry_i64; - TypeTableEntry *entry_i128; TypeTableEntry *entry_isize; TypeTableEntry *entry_usize; TypeTableEntry *entry_f16; diff --git a/src/analyze.cpp b/src/analyze.cpp index 5635cce411..2ace893508 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -3227,9 +3227,8 @@ static void add_top_level_decl(CodeGen *g, ScopeDecls *decls_scope, Tld *tld) { } { - auto entry = g->primitive_type_table.maybe_get(tld->name); - if (entry) { - TypeTableEntry *type = entry->value; + TypeTableEntry *type = get_primitive_type(g, tld->name); + if (type != nullptr) { add_node_error(g, tld->source_node, buf_sprintf("declaration shadows type '%s'", buf_ptr(&type->name))); } @@ -3474,9 +3473,8 @@ VariableTableEntry *add_variable(CodeGen *g, AstNode *source_node, Scope *parent add_error_note(g, msg, existing_var->decl_node, buf_sprintf("previous declaration is here")); variable_entry->value->type = g->builtin_types.entry_invalid; } else { - auto primitive_table_entry = g->primitive_type_table.maybe_get(name); - if (primitive_table_entry) { - TypeTableEntry *type = primitive_table_entry->value; + TypeTableEntry *type = get_primitive_type(g, name); + if (type != nullptr) { add_node_error(g, source_node, buf_sprintf("variable shadows type '%s'", buf_ptr(&type->name))); variable_entry->value->type = g->builtin_types.entry_invalid; @@ -4307,43 +4305,7 @@ void semantic_analyze(CodeGen *g) { } } -TypeTableEntry **get_int_type_ptr(CodeGen *g, bool is_signed, uint32_t size_in_bits) { - size_t index; - if (size_in_bits == 2) { - index = 0; - } else if (size_in_bits == 3) { - index = 1; - } else if (size_in_bits == 4) { - index = 2; - } else if (size_in_bits == 5) { - index = 3; - } else if (size_in_bits == 6) { - index = 4; - } else if (size_in_bits == 7) { - index = 5; - } else if (size_in_bits == 8) { - index = 6; - } else if (size_in_bits == 16) { - index = 7; - } else if (size_in_bits == 29) { - index = 8; - } else if (size_in_bits == 32) { - index = 9; - } else if (size_in_bits == 64) { - index = 10; - } else if (size_in_bits == 128) { - index = 11; - } else { - return nullptr; - } - return &g->builtin_types.entry_int[is_signed ? 
0 : 1][index]; -} - TypeTableEntry *get_int_type(CodeGen *g, bool is_signed, uint32_t size_in_bits) { - TypeTableEntry **common_entry = get_int_type_ptr(g, is_signed, size_in_bits); - if (common_entry) - return *common_entry; - TypeId type_id = {}; type_id.id = TypeTableEntryIdInt; type_id.data.integer.is_signed = is_signed; @@ -4953,6 +4915,8 @@ bool fn_eval_cacheable(Scope *scope, TypeTableEntry *return_type) { while (scope) { if (scope->id == ScopeIdVarDecl) { ScopeVarDecl *var_scope = (ScopeVarDecl *)scope; + if (type_is_invalid(var_scope->var->value->type)) + return false; if (can_mutate_comptime_var_state(var_scope->var->value)) return false; } else if (scope->id == ScopeIdFnDef) { @@ -6310,3 +6274,28 @@ bool type_can_fail(TypeTableEntry *type_entry) { bool fn_type_can_fail(FnTypeId *fn_type_id) { return type_can_fail(fn_type_id->return_type) || fn_type_id->cc == CallingConventionAsync; } + +TypeTableEntry *get_primitive_type(CodeGen *g, Buf *name) { + if (buf_len(name) >= 2) { + uint8_t first_c = buf_ptr(name)[0]; + if (first_c == 'i' || first_c == 'u') { + for (size_t i = 1; i < buf_len(name); i += 1) { + uint8_t c = buf_ptr(name)[i]; + if (c < '0' || c > '9') { + goto not_integer; + } + } + bool is_signed = (first_c == 'i'); + uint32_t bit_count = atoi(buf_ptr(name) + 1); + return get_int_type(g, is_signed, bit_count); + } + } + +not_integer: + + auto primitive_table_entry = g->primitive_type_table.maybe_get(name); + if (primitive_table_entry != nullptr) { + return primitive_table_entry->value; + } + return nullptr; +} diff --git a/src/analyze.hpp b/src/analyze.hpp index 5168509fe0..e4dfae4ecb 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -19,7 +19,6 @@ TypeTableEntry *get_pointer_to_type_extra(CodeGen *g, TypeTableEntry *child_type bool is_volatile, PtrLen ptr_len, uint32_t byte_alignment, uint32_t bit_offset, uint32_t unaligned_bit_count); uint64_t type_size(CodeGen *g, TypeTableEntry *type_entry); uint64_t type_size_bits(CodeGen *g, TypeTableEntry *type_entry); -TypeTableEntry **get_int_type_ptr(CodeGen *g, bool is_signed, uint32_t size_in_bits); TypeTableEntry *get_int_type(CodeGen *g, bool is_signed, uint32_t size_in_bits); TypeTableEntry **get_c_int_type_ptr(CodeGen *g, CIntType c_int_type); TypeTableEntry *get_c_int_type(CodeGen *g, CIntType c_int_type); @@ -204,4 +203,6 @@ bool type_can_fail(TypeTableEntry *type_entry); bool fn_eval_cacheable(Scope *scope, TypeTableEntry *return_type); AstNode *type_decl_node(TypeTableEntry *type_entry); +TypeTableEntry *get_primitive_type(CodeGen *g, Buf *name); + #endif diff --git a/src/codegen.cpp b/src/codegen.cpp index 0bcc211164..c38ae1036a 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -6161,16 +6161,6 @@ static void define_builtin_types(CodeGen *g) { g->builtin_types.entry_arg_tuple = entry; } - for (size_t int_size_i = 0; int_size_i < array_length(int_sizes_in_bits); int_size_i += 1) { - uint8_t size_in_bits = int_sizes_in_bits[int_size_i]; - for (size_t is_sign_i = 0; is_sign_i < array_length(is_signed_list); is_sign_i += 1) { - bool is_signed = is_signed_list[is_sign_i]; - TypeTableEntry *entry = make_int_type(g, is_signed, size_in_bits); - g->primitive_type_table.put(&entry->name, entry); - get_int_type_ptr(g, is_signed, size_in_bits)[0] = entry; - } - } - for (size_t i = 0; i < array_length(c_int_type_infos); i += 1) { const CIntTypeInfo *info = &c_int_type_infos[i]; uint32_t size_in_bits = target_c_type_size_in_bits(&g->zig_target, info->id); @@ -6286,12 +6276,9 @@ static void define_builtin_types(CodeGen *g) { 
g->builtin_types.entry_u29 = get_int_type(g, false, 29); g->builtin_types.entry_u32 = get_int_type(g, false, 32); g->builtin_types.entry_u64 = get_int_type(g, false, 64); - g->builtin_types.entry_u128 = get_int_type(g, false, 128); g->builtin_types.entry_i8 = get_int_type(g, true, 8); - g->builtin_types.entry_i16 = get_int_type(g, true, 16); g->builtin_types.entry_i32 = get_int_type(g, true, 32); g->builtin_types.entry_i64 = get_int_type(g, true, 64); - g->builtin_types.entry_i128 = get_int_type(g, true, 128); { g->builtin_types.entry_c_void = get_opaque_type(g, nullptr, nullptr, "c_void"); diff --git a/src/ir.cpp b/src/ir.cpp index 3007bbcf64..0804134d2a 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -3217,9 +3217,8 @@ static VariableTableEntry *create_local_var(CodeGen *codegen, AstNode *node, Sco add_error_note(codegen, msg, existing_var->decl_node, buf_sprintf("previous declaration is here")); variable_entry->value->type = codegen->builtin_types.entry_invalid; } else { - auto primitive_table_entry = codegen->primitive_type_table.maybe_get(name); - if (primitive_table_entry) { - TypeTableEntry *type = primitive_table_entry->value; + TypeTableEntry *type = get_primitive_type(codegen, name); + if (type != nullptr) { add_node_error(codegen, node, buf_sprintf("variable shadows type '%s'", buf_ptr(&type->name))); variable_entry->value->type = codegen->builtin_types.entry_invalid; @@ -3661,9 +3660,9 @@ static IrInstruction *ir_gen_symbol(IrBuilder *irb, Scope *scope, AstNode *node, return &const_instruction->base; } - auto primitive_table_entry = irb->codegen->primitive_type_table.maybe_get(variable_name); - if (primitive_table_entry) { - IrInstruction *value = ir_build_const_type(irb, scope, node, primitive_table_entry->value); + TypeTableEntry *primitive_type = get_primitive_type(irb->codegen, variable_name); + if (primitive_type != nullptr) { + IrInstruction *value = ir_build_const_type(irb, scope, node, primitive_type); if (lval == LValPtr) { return ir_build_ref(irb, scope, node, value, false, false); } else { @@ -10691,11 +10690,11 @@ static bool ir_resolve_align(IrAnalyze *ira, IrInstruction *value, uint32_t *out return true; } -static bool ir_resolve_usize(IrAnalyze *ira, IrInstruction *value, uint64_t *out) { +static bool ir_resolve_unsigned(IrAnalyze *ira, IrInstruction *value, TypeTableEntry *int_type, uint64_t *out) { if (type_is_invalid(value->value.type)) return false; - IrInstruction *casted_value = ir_implicit_cast(ira, value, ira->codegen->builtin_types.entry_usize); + IrInstruction *casted_value = ir_implicit_cast(ira, value, int_type); if (type_is_invalid(casted_value->value.type)) return false; @@ -10707,6 +10706,10 @@ static bool ir_resolve_usize(IrAnalyze *ira, IrInstruction *value, uint64_t *out return true; } +static bool ir_resolve_usize(IrAnalyze *ira, IrInstruction *value, uint64_t *out) { + return ir_resolve_unsigned(ira, value, ira->codegen->builtin_types.entry_usize, out); +} + static bool ir_resolve_bool(IrAnalyze *ira, IrInstruction *value, bool *out) { if (type_is_invalid(value->value.type)) return false; @@ -18025,7 +18028,7 @@ static TypeTableEntry *ir_analyze_instruction_int_type(IrAnalyze *ira, IrInstruc IrInstruction *bit_count_value = instruction->bit_count->other; uint64_t bit_count; - if (!ir_resolve_usize(ira, bit_count_value, &bit_count)) + if (!ir_resolve_unsigned(ira, bit_count_value, ira->codegen->builtin_types.entry_u32, &bit_count)) return ira->codegen->builtin_types.entry_invalid; ConstExprValue *out_val = ir_build_const_from(ira, 
&instruction->base); diff --git a/src/translate_c.cpp b/src/translate_c.cpp index db46d31c5b..267a716a9d 100644 --- a/src/translate_c.cpp +++ b/src/translate_c.cpp @@ -427,7 +427,7 @@ static AstNode *get_global(Context *c, Buf *name) { if (entry) return entry->value; } - if (c->codegen->primitive_type_table.maybe_get(name) != nullptr) { + if (get_primitive_type(c->codegen, name) != nullptr) { return trans_create_node_symbol(c, name); } return nullptr; diff --git a/std/buffer.zig b/std/buffer.zig index 0d82918580..aff7fa86ef 100644 --- a/std/buffer.zig +++ b/std/buffer.zig @@ -5,8 +5,6 @@ const Allocator = mem.Allocator; const assert = debug.assert; const ArrayList = std.ArrayList; -const fmt = std.fmt; - /// A buffer that allocates memory and maintains a null byte at the end. pub const Buffer = struct { list: ArrayList(u8), diff --git a/std/crypto/sha1.zig b/std/crypto/sha1.zig index 5c91590c88..451cfb3122 100644 --- a/std/crypto/sha1.zig +++ b/std/crypto/sha1.zig @@ -4,8 +4,6 @@ const endian = @import("../endian.zig"); const debug = @import("../debug/index.zig"); const builtin = @import("builtin"); -pub const u160 = @IntType(false, 160); - const RoundParam = struct { a: usize, b: usize, diff --git a/std/json.zig b/std/json.zig index 8986034fb4..e62d5a3466 100644 --- a/std/json.zig +++ b/std/json.zig @@ -6,9 +6,6 @@ const std = @import("index.zig"); const debug = std.debug; const mem = std.mem; -const u1 = @IntType(false, 1); -const u256 = @IntType(false, 256); - // A single token slice into the parent string. // // Use `token.slice()` on the input at the current position to get the current slice. diff --git a/std/math/big/int.zig b/std/math/big/int.zig index 29673538eb..caa9d0a7ed 100644 --- a/std/math/big/int.zig +++ b/std/math/big/int.zig @@ -996,7 +996,6 @@ pub const Int = struct { // They will still run on larger than this and should pass, but the multi-limb code-paths // may be untested in some cases. -const u256 = @IntType(false, 256); const al = debug.global_allocator; test "big.int comptime_int set" { diff --git a/std/math/exp2.zig b/std/math/exp2.zig index 90ea088181..d590b0b60b 100644 --- a/std/math/exp2.zig +++ b/std/math/exp2.zig @@ -75,18 +75,18 @@ fn exp2_32(x: f32) f32 { } var uf = x + redux; - var i0 = @bitCast(u32, uf); - i0 += tblsiz / 2; + var i_0 = @bitCast(u32, uf); + i_0 += tblsiz / 2; - const k = i0 / tblsiz; + const k = i_0 / tblsiz; // NOTE: musl relies on undefined overflow shift behaviour. Appears that this produces the // intended result but should confirm how GCC/Clang handle this to ensure. 
const uk = @bitCast(f64, u64(0x3FF + k) << 52); - i0 &= tblsiz - 1; + i_0 &= tblsiz - 1; uf -= redux; const z: f64 = x - uf; - var r: f64 = exp2ft[i0]; + var r: f64 = exp2ft[i_0]; const t: f64 = r * z; r = r + t * (P1 + z * P2) + t * (z * z) * (P3 + z * P4); return @floatCast(f32, r * uk); @@ -401,18 +401,18 @@ fn exp2_64(x: f64) f64 { // reduce x var uf = x + redux; // NOTE: musl performs an implicit 64-bit to 32-bit u32 truncation here - var i0 = @truncate(u32, @bitCast(u64, uf)); - i0 += tblsiz / 2; + var i_0 = @truncate(u32, @bitCast(u64, uf)); + i_0 += tblsiz / 2; - const k: u32 = i0 / tblsiz * tblsiz; + const k: u32 = i_0 / tblsiz * tblsiz; const ik = @bitCast(i32, k / tblsiz); - i0 %= tblsiz; + i_0 %= tblsiz; uf -= redux; - // r = exp2(y) = exp2t[i0] * p(z - eps[i]) + // r = exp2(y) = exp2t[i_0] * p(z - eps[i]) var z = x - uf; - const t = exp2dt[2 * i0]; - z -= exp2dt[2 * i0 + 1]; + const t = exp2dt[2 * i_0]; + z -= exp2dt[2 * i_0 + 1]; const r = t + t * z * (P1 + z * (P2 + z * (P3 + z * (P4 + z * P5)))); return math.scalbn(r, ik); diff --git a/std/math/index.zig b/std/math/index.zig index 17b66f5568..e5fd0f3685 100644 --- a/std/math/index.zig +++ b/std/math/index.zig @@ -354,7 +354,7 @@ test "math.rotl" { pub fn Log2Int(comptime T: type) type { // comptime ceil log2 - comptime var count: usize = 0; + comptime var count = 0; comptime var s = T.bit_count - 1; inline while (s != 0) : (s >>= 1) { count += 1; diff --git a/std/os/time.zig b/std/os/time.zig index 73ba5bba82..795605d7a9 100644 --- a/std/os/time.zig +++ b/std/os/time.zig @@ -25,7 +25,6 @@ pub fn sleep(seconds: usize, nanoseconds: usize) void { } } -const u63 = @IntType(false, 63); pub fn posixSleep(seconds: u63, nanoseconds: u63) void { var req = posix.timespec{ .tv_sec = seconds, diff --git a/test/cases/misc.zig b/test/cases/misc.zig index 0f181a7b4e..1c0189571b 100644 --- a/test/cases/misc.zig +++ b/test/cases/misc.zig @@ -58,11 +58,6 @@ test "floating point primitive bit counts" { assert(f64.bit_count == 64); } -const u1 = @IntType(false, 1); -const u63 = @IntType(false, 63); -const i1 = @IntType(true, 1); -const i63 = @IntType(true, 63); - test "@minValue and @maxValue" { assert(@maxValue(u1) == 1); assert(@maxValue(u8) == 255); diff --git a/test/cases/struct.zig b/test/cases/struct.zig index 2941ecb56a..20d46999d5 100644 --- a/test/cases/struct.zig +++ b/test/cases/struct.zig @@ -240,7 +240,6 @@ fn getC(data: *const BitField1) u2 { return data.c; } -const u24 = @IntType(false, 24); const Foo24Bits = packed struct { field: u24, }; diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 58c73b8ae4..d5582b1584 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -1,6 +1,15 @@ const tests = @import("tests.zig"); pub fn addCases(cases: *tests.CompileErrorContext) void { + cases.add( + "optional pointer to void in extern struct", + \\comptime { + \\ _ = @IntType(false, @maxValue(u32) + 1); + \\} + , + ".tmp_source.zig:2:40: error: integer value 4294967296 cannot be implicitly casted to type 'u32'", + ); + cases.add( "optional pointer to void in extern struct", \\const Foo = extern struct { -- cgit v1.2.3 From fd3a41dadc92e7b69b409af5f747004996465032 Mon Sep 17 00:00:00 2001 From: Jimmi Holst Christensen Date: Wed, 18 Jul 2018 17:00:42 +0200 Subject: Allow pointers to anything in extern/exported declarations (#1258) * type_allowed_in_extern accepts all ptr not size 0 * Generate correct headers for none extern structs/unions/enums --- src/analyze.cpp | 4 ++- src/codegen.cpp | 81 
++++++++++++++++++++++++++++++++------------------------- test/gen_h.zig | 47 +++++++++++++++++++++++++++++++++ 3 files changed, 95 insertions(+), 37 deletions(-) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index 2ace893508..06d611f80d 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -1454,7 +1454,9 @@ static bool type_allowed_in_extern(CodeGen *g, TypeTableEntry *type_entry) { case TypeTableEntryIdFn: return type_entry->data.fn.fn_type_id.cc == CallingConventionC; case TypeTableEntryIdPointer: - return type_allowed_in_extern(g, type_entry->data.pointer.child_type); + if (type_size(g, type_entry) == 0) + return false; + return true; case TypeTableEntryIdStruct: return type_entry->data.structure.layout == ContainerLayoutExtern || type_entry->data.structure.layout == ContainerLayoutPacked; case TypeTableEntryIdOptional: diff --git a/src/codegen.cpp b/src/codegen.cpp index f8801ea132..6e121be270 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -7508,51 +7508,60 @@ static void gen_h_file(CodeGen *g) { case TypeTableEntryIdPromise: zig_unreachable(); case TypeTableEntryIdEnum: - assert(type_entry->data.enumeration.layout == ContainerLayoutExtern); - fprintf(out_h, "enum %s {\n", buf_ptr(&type_entry->name)); - for (uint32_t field_i = 0; field_i < type_entry->data.enumeration.src_field_count; field_i += 1) { - TypeEnumField *enum_field = &type_entry->data.enumeration.fields[field_i]; - Buf *value_buf = buf_alloc(); - bigint_append_buf(value_buf, &enum_field->value, 10); - fprintf(out_h, " %s = %s", buf_ptr(enum_field->name), buf_ptr(value_buf)); - if (field_i != type_entry->data.enumeration.src_field_count - 1) { - fprintf(out_h, ","); + if (type_entry->data.enumeration.layout == ContainerLayoutExtern) { + fprintf(out_h, "enum %s {\n", buf_ptr(&type_entry->name)); + for (uint32_t field_i = 0; field_i < type_entry->data.enumeration.src_field_count; field_i += 1) { + TypeEnumField *enum_field = &type_entry->data.enumeration.fields[field_i]; + Buf *value_buf = buf_alloc(); + bigint_append_buf(value_buf, &enum_field->value, 10); + fprintf(out_h, " %s = %s", buf_ptr(enum_field->name), buf_ptr(value_buf)); + if (field_i != type_entry->data.enumeration.src_field_count - 1) { + fprintf(out_h, ","); + } + fprintf(out_h, "\n"); } - fprintf(out_h, "\n"); + fprintf(out_h, "};\n\n"); + } else { + fprintf(out_h, "enum %s;\n", buf_ptr(&type_entry->name)); } - fprintf(out_h, "};\n\n"); break; case TypeTableEntryIdStruct: - assert(type_entry->data.structure.layout == ContainerLayoutExtern); - fprintf(out_h, "struct %s {\n", buf_ptr(&type_entry->name)); - for (uint32_t field_i = 0; field_i < type_entry->data.structure.src_field_count; field_i += 1) { - TypeStructField *struct_field = &type_entry->data.structure.fields[field_i]; - - Buf *type_name_buf = buf_alloc(); - get_c_type(g, gen_h, struct_field->type_entry, type_name_buf); - - if (struct_field->type_entry->id == TypeTableEntryIdArray) { - fprintf(out_h, " %s %s[%" ZIG_PRI_u64 "];\n", buf_ptr(type_name_buf), - buf_ptr(struct_field->name), - struct_field->type_entry->data.array.len); - } else { - fprintf(out_h, " %s %s;\n", buf_ptr(type_name_buf), buf_ptr(struct_field->name)); - } + if (type_entry->data.structure.layout == ContainerLayoutExtern) { + fprintf(out_h, "struct %s {\n", buf_ptr(&type_entry->name)); + for (uint32_t field_i = 0; field_i < type_entry->data.structure.src_field_count; field_i += 1) { + TypeStructField *struct_field = &type_entry->data.structure.fields[field_i]; + + Buf *type_name_buf = 
buf_alloc(); + get_c_type(g, gen_h, struct_field->type_entry, type_name_buf); + + if (struct_field->type_entry->id == TypeTableEntryIdArray) { + fprintf(out_h, " %s %s[%" ZIG_PRI_u64 "];\n", buf_ptr(type_name_buf), + buf_ptr(struct_field->name), + struct_field->type_entry->data.array.len); + } else { + fprintf(out_h, " %s %s;\n", buf_ptr(type_name_buf), buf_ptr(struct_field->name)); + } + } + fprintf(out_h, "};\n\n"); + } else { + fprintf(out_h, "struct %s;\n", buf_ptr(&type_entry->name)); } - fprintf(out_h, "};\n\n"); break; case TypeTableEntryIdUnion: - assert(type_entry->data.unionation.layout == ContainerLayoutExtern); - fprintf(out_h, "union %s {\n", buf_ptr(&type_entry->name)); - for (uint32_t field_i = 0; field_i < type_entry->data.unionation.src_field_count; field_i += 1) { - TypeUnionField *union_field = &type_entry->data.unionation.fields[field_i]; - - Buf *type_name_buf = buf_alloc(); - get_c_type(g, gen_h, union_field->type_entry, type_name_buf); - fprintf(out_h, " %s %s;\n", buf_ptr(type_name_buf), buf_ptr(union_field->name)); + if (type_entry->data.unionation.layout == ContainerLayoutExtern) { + fprintf(out_h, "union %s {\n", buf_ptr(&type_entry->name)); + for (uint32_t field_i = 0; field_i < type_entry->data.unionation.src_field_count; field_i += 1) { + TypeUnionField *union_field = &type_entry->data.unionation.fields[field_i]; + + Buf *type_name_buf = buf_alloc(); + get_c_type(g, gen_h, union_field->type_entry, type_name_buf); + fprintf(out_h, " %s %s;\n", buf_ptr(type_name_buf), buf_ptr(union_field->name)); + } + fprintf(out_h, "};\n\n"); + } else { + fprintf(out_h, "union %s;\n", buf_ptr(&type_entry->name)); } - fprintf(out_h, "};\n\n"); break; case TypeTableEntryIdOpaque: fprintf(out_h, "struct %s;\n\n", buf_ptr(&type_entry->name)); diff --git a/test/gen_h.zig b/test/gen_h.zig index e6a757ea6d..b3aaa263d6 100644 --- a/test/gen_h.zig +++ b/test/gen_h.zig @@ -76,4 +76,51 @@ pub fn addCases(cases: *tests.GenHContext) void { \\TEST_EXPORT void entry(struct Foo foo, uint8_t bar[]); \\ ); + + cases.add("ptr to zig struct", + \\const S = struct { + \\ a: u8, + \\}; + \\ + \\export fn a(s: *S) u8 { + \\ return s.a; + \\} + + , + \\struct S; + \\TEST_EXPORT uint8_t a(struct S * s); + \\ + ); + + cases.add("ptr to zig union", + \\const U = union(enum) { + \\ A: u8, + \\ B: u16, + \\}; + \\ + \\export fn a(s: *U) u8 { + \\ return s.A; + \\} + + , + \\union U; + \\TEST_EXPORT uint8_t a(union U * s); + \\ + ); + + cases.add("ptr to zig enum", + \\const E = enum(u8) { + \\ A, + \\ B, + \\}; + \\ + \\export fn a(s: *E) u8 { + \\ return @enumToInt(s.*); + \\} + + , + \\enum E; + \\TEST_EXPORT uint8_t a(enum E * s); + \\ + ); } -- cgit v1.2.3 From 58c5f94a99a78346286065bbf390e4c30be1b707 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 20 Jul 2018 23:37:37 -0400 Subject: self-hosted: share C++ code for finding libc on windows --- CMakeLists.txt | 7 + src-self-hosted/c.zig | 1 + src-self-hosted/libc_installation.zig | 25 ++- src/analyze.cpp | 7 +- src/os.cpp | 248 +----------------------- src/os.hpp | 10 +- src/windows_sdk.cpp | 346 ++++++++++++++++++++++++++++++++++ src/windows_sdk.h | 45 +++++ std/os/windows/advapi32.zig | 30 +++ std/os/windows/index.zig | 192 +------------------ std/os/windows/kernel32.zig | 162 ++++++++++++++++ std/os/windows/ole32.zig | 18 ++ std/os/windows/shell32.zig | 4 + std/os/windows/shlwapi.zig | 4 + std/os/windows/user32.zig | 4 + 15 files changed, 658 insertions(+), 445 deletions(-) create mode 100644 src/windows_sdk.cpp create mode 100644 
src/windows_sdk.h create mode 100644 std/os/windows/advapi32.zig create mode 100644 std/os/windows/kernel32.zig create mode 100644 std/os/windows/ole32.zig create mode 100644 std/os/windows/shell32.zig create mode 100644 std/os/windows/shlwapi.zig create mode 100644 std/os/windows/user32.zig (limited to 'src/analyze.cpp') diff --git a/CMakeLists.txt b/CMakeLists.txt index 30d7bb4856..20755cfc1b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -426,6 +426,7 @@ set(ZIG_SOURCES ) set(ZIG_CPP_SOURCES "${CMAKE_SOURCE_DIR}/src/zig_llvm.cpp" + "${CMAKE_SOURCE_DIR}/src/windows_sdk.cpp" ) set(ZIG_STD_FILES @@ -567,8 +568,14 @@ set(ZIG_STD_FILES "os/linux/x86_64.zig" "os/path.zig" "os/time.zig" + "os/windows/advapi32.zig" "os/windows/error.zig" "os/windows/index.zig" + "os/windows/kernel32.zig" + "os/windows/ole32.zig" + "os/windows/shell32.zig" + "os/windows/shlwapi.zig" + "os/windows/user32.zig" "os/windows/util.zig" "os/zen.zig" "rand/index.zig" diff --git a/src-self-hosted/c.zig b/src-self-hosted/c.zig index 3912462985..778d851240 100644 --- a/src-self-hosted/c.zig +++ b/src-self-hosted/c.zig @@ -4,4 +4,5 @@ pub use @cImport({ @cInclude("inttypes.h"); @cInclude("config.h"); @cInclude("zig_llvm.h"); + @cInclude("windows_sdk.h"); }); diff --git a/src-self-hosted/libc_installation.zig b/src-self-hosted/libc_installation.zig index 8444c47310..5a9b7561fa 100644 --- a/src-self-hosted/libc_installation.zig +++ b/src-self-hosted/libc_installation.zig @@ -2,6 +2,7 @@ const std = @import("std"); const builtin = @import("builtin"); const event = std.event; const Target = @import("target.zig").Target; +const c = @import("c.zig"); /// See the render function implementation for documentation of the fields. pub const LibCInstallation = struct { @@ -122,7 +123,7 @@ pub const LibCInstallation = struct { \\# Only needed when targeting Windows. \\kernel32_lib_dir={} \\ - \\# The full path to the dynamic linker. + \\# The full path to the dynamic linker, on the target system. \\# Only needed when targeting Linux. 
\\dynamic_linker_path={} \\ @@ -143,10 +144,24 @@ pub const LibCInstallation = struct { errdefer group.cancelAll(); switch (builtin.os) { builtin.Os.windows => { - try group.call(findNativeIncludeDirWindows, self, loop); - try group.call(findNativeLibDirWindows, self, loop); - try group.call(findNativeMsvcLibDir, self, loop); - try group.call(findNativeKernel32LibDir, self, loop); + var sdk: *c.ZigWindowsSDK = undefined; + switch (c.zig_find_windows_sdk(@ptrCast(?[*]?[*]c.ZigWindowsSDK, &sdk))) { + c.ZigFindWindowsSdkError.None => { + defer c.zig_free_windows_sdk(@ptrCast(?[*]c.ZigWindowsSDK, sdk)); + + errdefer if (self.msvc_lib_dir) |s| loop.allocator.free(s); + if (sdk.msvc_lib_dir_ptr) |ptr| { + self.msvc_lib_dir = try std.mem.dupe(loop.allocator, u8, ptr[0..sdk.msvc_lib_dir_len]); + } + //try group.call(findNativeIncludeDirWindows, self, loop); + //try group.call(findNativeLibDirWindows, self, loop); + //try group.call(findNativeMsvcLibDir, self, loop); + //try group.call(findNativeKernel32LibDir, self, loop); + }, + c.ZigFindWindowsSdkError.OutOfMemory => return error.OutOfMemory, + c.ZigFindWindowsSdkError.NotFound => return error.NotFound, + c.ZigFindWindowsSdkError.PathTooLong => return error.NotFound, + } }, builtin.Os.linux => { try group.call(findNativeIncludeDirLinux, self, loop); diff --git a/src/analyze.cpp b/src/analyze.cpp index 06d611f80d..6bbe5f6037 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -4379,7 +4379,7 @@ bool handle_is_ptr(TypeTableEntry *type_entry) { static ZigWindowsSDK *get_windows_sdk(CodeGen *g) { if (g->win_sdk == nullptr) { - if (os_find_windows_sdk(&g->win_sdk)) { + if (zig_find_windows_sdk(&g->win_sdk)) { fprintf(stderr, "unable to determine windows sdk path\n"); exit(1); } @@ -4499,12 +4499,11 @@ void find_libc_lib_path(CodeGen *g) { ZigWindowsSDK *sdk = get_windows_sdk(g); if (g->msvc_lib_dir == nullptr) { - Buf* vc_lib_dir = buf_alloc(); - if (os_get_win32_vcruntime_path(vc_lib_dir, g->zig_target.arch.arch)) { + if (sdk->msvc_lib_dir_ptr == nullptr) { fprintf(stderr, "Unable to determine vcruntime path. 
--msvc-lib-dir"); exit(1); } - g->msvc_lib_dir = vc_lib_dir; + g->msvc_lib_dir = buf_create_from_mem(sdk->msvc_lib_dir_ptr, sdk->msvc_lib_dir_len); } if (g->libc_lib_dir == nullptr) { diff --git a/src/os.cpp b/src/os.cpp index d52295950d..91a591a7b6 100644 --- a/src/os.cpp +++ b/src/os.cpp @@ -26,7 +26,6 @@ #include #include #include -#include "windows_com.hpp" typedef SSIZE_T ssize_t; #else @@ -1115,249 +1114,10 @@ void os_stderr_set_color(TermColor color) { #endif } -int os_find_windows_sdk(ZigWindowsSDK **out_sdk) { -#if defined(ZIG_OS_WINDOWS) - ZigWindowsSDK *result_sdk = allocate(1); - buf_resize(&result_sdk->path10, 0); - buf_resize(&result_sdk->path81, 0); - - HKEY key; - HRESULT rc; - rc = RegOpenKeyEx(HKEY_LOCAL_MACHINE, "SOFTWARE\\Microsoft\\Windows Kits\\Installed Roots", 0, KEY_QUERY_VALUE | KEY_WOW64_32KEY | KEY_ENUMERATE_SUB_KEYS, &key); - if (rc != ERROR_SUCCESS) { - return ErrorFileNotFound; - } - - { - DWORD tmp_buf_len = MAX_PATH; - buf_resize(&result_sdk->path10, tmp_buf_len); - rc = RegQueryValueEx(key, "KitsRoot10", NULL, NULL, (LPBYTE)buf_ptr(&result_sdk->path10), &tmp_buf_len); - if (rc == ERROR_FILE_NOT_FOUND) { - buf_resize(&result_sdk->path10, 0); - } else { - buf_resize(&result_sdk->path10, tmp_buf_len); - } - } - { - DWORD tmp_buf_len = MAX_PATH; - buf_resize(&result_sdk->path81, tmp_buf_len); - rc = RegQueryValueEx(key, "KitsRoot81", NULL, NULL, (LPBYTE)buf_ptr(&result_sdk->path81), &tmp_buf_len); - if (rc == ERROR_FILE_NOT_FOUND) { - buf_resize(&result_sdk->path81, 0); - } else { - buf_resize(&result_sdk->path81, tmp_buf_len); - } - } - - if (buf_len(&result_sdk->path10) != 0) { - Buf *sdk_lib_dir = buf_sprintf("%s\\Lib\\*", buf_ptr(&result_sdk->path10)); - - // enumerate files in sdk path looking for latest version - WIN32_FIND_DATA ffd; - HANDLE hFind = FindFirstFileA(buf_ptr(sdk_lib_dir), &ffd); - if (hFind == INVALID_HANDLE_VALUE) { - return ErrorFileNotFound; - } - int v0 = 0, v1 = 0, v2 = 0, v3 = 0; - bool found_version_dir = false; - for (;;) { - if (ffd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) { - int c0 = 0, c1 = 0, c2 = 0, c3 = 0; - sscanf(ffd.cFileName, "%d.%d.%d.%d", &c0, &c1, &c2, &c3); - if (c0 == 10 && c1 == 0 && c2 == 10240 && c3 == 0) { - // Microsoft released 26624 as 10240 accidentally. 
- // https://developer.microsoft.com/en-us/windows/downloads/sdk-archive - c2 = 26624; - } - if ((c0 > v0) || (c1 > v1) || (c2 > v2) || (c3 > v3)) { - v0 = c0, v1 = c1, v2 = c2, v3 = c3; - buf_init_from_str(&result_sdk->version10, ffd.cFileName); - found_version_dir = true; - } - } - if (FindNextFile(hFind, &ffd) == 0) { - FindClose(hFind); - break; - } - } - if (!found_version_dir) { - buf_resize(&result_sdk->path10, 0); - } - } - - if (buf_len(&result_sdk->path81) != 0) { - Buf *sdk_lib_dir = buf_sprintf("%s\\Lib\\winv*", buf_ptr(&result_sdk->path81)); - - // enumerate files in sdk path looking for latest version - WIN32_FIND_DATA ffd; - HANDLE hFind = FindFirstFileA(buf_ptr(sdk_lib_dir), &ffd); - if (hFind == INVALID_HANDLE_VALUE) { - return ErrorFileNotFound; - } - int v0 = 0, v1 = 0; - bool found_version_dir = false; - for (;;) { - if (ffd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) { - int c0 = 0, c1 = 0; - sscanf(ffd.cFileName, "winv%d.%d", &c0, &c1); - if ((c0 > v0) || (c1 > v1)) { - v0 = c0, v1 = c1; - buf_init_from_str(&result_sdk->version81, ffd.cFileName); - found_version_dir = true; - } - } - if (FindNextFile(hFind, &ffd) == 0) { - FindClose(hFind); - break; - } - } - if (!found_version_dir) { - buf_resize(&result_sdk->path81, 0); - } - } - - *out_sdk = result_sdk; - return 0; -#else - return ErrorFileNotFound; -#endif -} - -int os_get_win32_vcruntime_path(Buf* output_buf, ZigLLVM_ArchType platform_type) { -#if defined(ZIG_OS_WINDOWS) - buf_resize(output_buf, 0); - //COM Smart Pointerse requires explicit scope - { - HRESULT rc; - rc = CoInitializeEx(NULL, COINIT_MULTITHREADED); - if (rc != S_OK) { - goto com_done; - } - - //This COM class is installed when a VS2017 - ISetupConfigurationPtr setup_config; - rc = setup_config.CreateInstance(__uuidof(SetupConfiguration)); - if (rc != S_OK) { - goto com_done; - } - - IEnumSetupInstancesPtr all_instances; - rc = setup_config->EnumInstances(&all_instances); - if (rc != S_OK) { - goto com_done; - } - - ISetupInstance* curr_instance; - ULONG found_inst; - while ((rc = all_instances->Next(1, &curr_instance, &found_inst) == S_OK)) { - BSTR bstr_inst_path; - rc = curr_instance->GetInstallationPath(&bstr_inst_path); - if (rc != S_OK) { - goto com_done; - } - //BSTRs are UTF-16 encoded, so we need to convert the string & adjust the length - UINT bstr_path_len = *((UINT*)bstr_inst_path - 1); - ULONG tmp_path_len = bstr_path_len / 2 + 1; - char* conv_path = (char*)bstr_inst_path; - char *tmp_path = (char*)alloca(tmp_path_len); - memset(tmp_path, 0, tmp_path_len); - uint32_t c = 0; - for (uint32_t i = 0; i < bstr_path_len; i += 2) { - tmp_path[c] = conv_path[i]; - ++c; - assert(c != tmp_path_len); - } - - buf_append_str(output_buf, tmp_path); - buf_append_char(output_buf, '\\'); - - Buf* tmp_buf = buf_alloc(); - buf_append_buf(tmp_buf, output_buf); - buf_append_str(tmp_buf, "VC\\Auxiliary\\Build\\Microsoft.VCToolsVersion.default.txt"); - FILE* tools_file = fopen(buf_ptr(tmp_buf), "r"); - if (!tools_file) { - goto com_done; - } - memset(tmp_path, 0, tmp_path_len); - fgets(tmp_path, tmp_path_len, tools_file); - strtok(tmp_path, " \r\n"); - fclose(tools_file); - buf_appendf(output_buf, "VC\\Tools\\MSVC\\%s\\lib\\", tmp_path); - switch (platform_type) { - case ZigLLVM_x86: - buf_append_str(output_buf, "x86\\"); - break; - case ZigLLVM_x86_64: - buf_append_str(output_buf, "x64\\"); - break; - case ZigLLVM_arm: - buf_append_str(output_buf, "arm\\"); - break; - default: - zig_panic("Attemped to use vcruntime for non-supported platform."); - } - 
buf_resize(tmp_buf, 0); - buf_append_buf(tmp_buf, output_buf); - buf_append_str(tmp_buf, "vcruntime.lib"); - - if (GetFileAttributesA(buf_ptr(tmp_buf)) != INVALID_FILE_ATTRIBUTES) { - return 0; - } - } - } - -com_done:; - HKEY key; - HRESULT rc; - rc = RegOpenKeyEx(HKEY_LOCAL_MACHINE, "SOFTWARE\\Microsoft\\VisualStudio\\SxS\\VS7", 0, KEY_QUERY_VALUE | KEY_WOW64_32KEY, &key); - if (rc != ERROR_SUCCESS) { - return ErrorFileNotFound; - } - - DWORD dw_type = 0; - DWORD cb_data = 0; - rc = RegQueryValueEx(key, "14.0", NULL, &dw_type, NULL, &cb_data); - if ((rc == ERROR_FILE_NOT_FOUND) || (REG_SZ != dw_type)) { - return ErrorFileNotFound; - } - - Buf* tmp_buf = buf_alloc_fixed(cb_data); - RegQueryValueExA(key, "14.0", NULL, NULL, (LPBYTE)buf_ptr(tmp_buf), &cb_data); - //RegQueryValueExA returns the length of the string INCLUDING the null terminator - buf_resize(tmp_buf, cb_data-1); - buf_append_str(tmp_buf, "VC\\Lib\\"); - switch (platform_type) { - case ZigLLVM_x86: - //x86 is in the root of the Lib folder - break; - case ZigLLVM_x86_64: - buf_append_str(tmp_buf, "amd64\\"); - break; - case ZigLLVM_arm: - buf_append_str(tmp_buf, "arm\\"); - break; - default: - zig_panic("Attemped to use vcruntime for non-supported platform."); - } - - buf_append_buf(output_buf, tmp_buf); - buf_append_str(tmp_buf, "vcruntime.lib"); - - if (GetFileAttributesA(buf_ptr(tmp_buf)) != INVALID_FILE_ATTRIBUTES) { - return 0; - } else { - buf_resize(output_buf, 0); - return ErrorFileNotFound; - } -#else - return ErrorFileNotFound; -#endif -} - int os_get_win32_ucrt_lib_path(ZigWindowsSDK *sdk, Buf* output_buf, ZigLLVM_ArchType platform_type) { #if defined(ZIG_OS_WINDOWS) buf_resize(output_buf, 0); - buf_appendf(output_buf, "%s\\Lib\\%s\\ucrt\\", buf_ptr(&sdk->path10), buf_ptr(&sdk->version10)); + buf_appendf(output_buf, "%s\\Lib\\%s\\ucrt\\", sdk->path10_ptr, sdk->version10_ptr); switch (platform_type) { case ZigLLVM_x86: buf_append_str(output_buf, "x86\\"); @@ -1389,7 +1149,7 @@ int os_get_win32_ucrt_lib_path(ZigWindowsSDK *sdk, Buf* output_buf, ZigLLVM_Arch int os_get_win32_ucrt_include_path(ZigWindowsSDK *sdk, Buf* output_buf) { #if defined(ZIG_OS_WINDOWS) buf_resize(output_buf, 0); - buf_appendf(output_buf, "%s\\Include\\%s\\ucrt", buf_ptr(&sdk->path10), buf_ptr(&sdk->version10)); + buf_appendf(output_buf, "%s\\Include\\%s\\ucrt", sdk->path10_ptr, sdk->version10_ptr); if (GetFileAttributesA(buf_ptr(output_buf)) != INVALID_FILE_ATTRIBUTES) { return 0; } @@ -1406,7 +1166,7 @@ int os_get_win32_kern32_path(ZigWindowsSDK *sdk, Buf* output_buf, ZigLLVM_ArchTy #if defined(ZIG_OS_WINDOWS) { buf_resize(output_buf, 0); - buf_appendf(output_buf, "%s\\Lib\\%s\\um\\", buf_ptr(&sdk->path10), buf_ptr(&sdk->version10)); + buf_appendf(output_buf, "%s\\Lib\\%s\\um\\", sdk->path10_ptr, sdk->version10_ptr); switch (platform_type) { case ZigLLVM_x86: buf_append_str(output_buf, "x86\\"); @@ -1429,7 +1189,7 @@ int os_get_win32_kern32_path(ZigWindowsSDK *sdk, Buf* output_buf, ZigLLVM_ArchTy } { buf_resize(output_buf, 0); - buf_appendf(output_buf, "%s\\Lib\\%s\\um\\", buf_ptr(&sdk->path81), buf_ptr(&sdk->version81)); + buf_appendf(output_buf, "%s\\Lib\\%s\\um\\", sdk->path81_ptr, sdk->version81_ptr); switch (platform_type) { case ZigLLVM_x86: buf_append_str(output_buf, "x86\\"); diff --git a/src/os.hpp b/src/os.hpp index b94e98ec3d..cfe4e8f3a2 100644 --- a/src/os.hpp +++ b/src/os.hpp @@ -12,6 +12,7 @@ #include "buffer.hpp" #include "error.hpp" #include "zig_llvm.h" +#include "windows_sdk.h" #include #include @@ -79,15 +80,6 @@ bool 
os_is_sep(uint8_t c); int os_self_exe_path(Buf *out_path); -struct ZigWindowsSDK { - Buf path10; - Buf version10; - Buf path81; - Buf version81; -}; - -int os_find_windows_sdk(ZigWindowsSDK **out_sdk); -int os_get_win32_vcruntime_path(Buf *output_buf, ZigLLVM_ArchType platform_type); int os_get_win32_ucrt_include_path(ZigWindowsSDK *sdk, Buf *output_buf); int os_get_win32_ucrt_lib_path(ZigWindowsSDK *sdk, Buf *output_buf, ZigLLVM_ArchType platform_type); int os_get_win32_kern32_path(ZigWindowsSDK *sdk, Buf *output_buf, ZigLLVM_ArchType platform_type); diff --git a/src/windows_sdk.cpp b/src/windows_sdk.cpp new file mode 100644 index 0000000000..059bdee4e9 --- /dev/null +++ b/src/windows_sdk.cpp @@ -0,0 +1,346 @@ +/* + * Copyright (c) 2018 Andrew Kelley + * + * This file is part of zig, which is MIT licensed. + * See http://opensource.org/licenses/MIT + */ + +#include "windows_sdk.h" + +#if defined(_WIN32) + +#include "windows_com.hpp" +#include +#include + +struct ZigWindowsSDKPrivate { + ZigWindowsSDK base; +}; + +enum NativeArch { + NativeArchArm, + NativeArchi386, + NativeArchx86_64, +}; + +#if defined(_M_ARM) || defined(__arm_) +static const NativeArch native_arch = NativeArchArm; +#endif +#if defined(_M_IX86) || defined(__i386__) +static const NativeArch native_arch = NativeArchi386; +#endif +#if defined(_M_X64) || defined(__x86_64__) +static const NativeArch native_arch = NativeArchx86_64; +#endif + +void zig_free_windows_sdk(struct ZigWindowsSDK *sdk) { + if (sdk == nullptr) { + return; + } + free((void*)sdk->path10_ptr); + free((void*)sdk->version10_ptr); + free((void*)sdk->path81_ptr); + free((void*)sdk->version81_ptr); + free((void*)sdk->msvc_lib_dir_ptr); +} + +static ZigFindWindowsSdkError find_msvc_lib_dir(ZigWindowsSDKPrivate *priv) { + //COM Smart Pointers requires explicit scope + { + HRESULT rc = CoInitializeEx(NULL, COINIT_MULTITHREADED); + if (rc != S_OK && rc != S_FALSE) { + goto com_done; + } + + //This COM class is installed when a VS2017 + ISetupConfigurationPtr setup_config; + rc = setup_config.CreateInstance(__uuidof(SetupConfiguration)); + if (rc != S_OK) { + goto com_done; + } + + IEnumSetupInstancesPtr all_instances; + rc = setup_config->EnumInstances(&all_instances); + if (rc != S_OK) { + goto com_done; + } + + ISetupInstance* curr_instance; + ULONG found_inst; + while ((rc = all_instances->Next(1, &curr_instance, &found_inst) == S_OK)) { + BSTR bstr_inst_path; + rc = curr_instance->GetInstallationPath(&bstr_inst_path); + if (rc != S_OK) { + goto com_done; + } + //BSTRs are UTF-16 encoded, so we need to convert the string & adjust the length + //TODO call an actual function to do this + UINT bstr_path_len = *((UINT*)bstr_inst_path - 1); + ULONG tmp_path_len = bstr_path_len / 2 + 1; + char* conv_path = (char*)bstr_inst_path; + // TODO don't use alloca + char *tmp_path = (char*)alloca(tmp_path_len); + memset(tmp_path, 0, tmp_path_len); + uint32_t c = 0; + for (uint32_t i = 0; i < bstr_path_len; i += 2) { + tmp_path[c] = conv_path[i]; + ++c; + assert(c != tmp_path_len); + } + char output_path[4096]; + output_path[0] = 0; + char *out_append_ptr = output_path; + + out_append_ptr += sprintf(out_append_ptr, "%s\\", tmp_path); + + char tmp_buf[4096]; + sprintf(tmp_buf, "%s%s", output_path, "VC\\Auxiliary\\Build\\Microsoft.VCToolsVersion.default.txt"); + FILE* tools_file = fopen(tmp_buf, "rb"); + if (!tools_file) { + goto com_done; + } + memset(tmp_path, 0, tmp_path_len); + fgets(tmp_path, tmp_path_len, tools_file); + strtok(tmp_path, " \r\n"); + fclose(tools_file); + 
out_append_ptr += sprintf(out_append_ptr, "VC\\Tools\\MSVC\\%s\\lib\\", tmp_path); + switch (native_arch) { + case NativeArchi386: + out_append_ptr += sprintf(out_append_ptr, "x86\\"); + break; + case NativeArchx86_64: + out_append_ptr += sprintf(out_append_ptr, "x64\\"); + break; + case NativeArchArm: + out_append_ptr += sprintf(out_append_ptr, "arm\\"); + break; + } + sprintf(tmp_buf, "%s%s", output_path, "vcruntime.lib"); + + if (GetFileAttributesA(tmp_buf) != INVALID_FILE_ATTRIBUTES) { + priv->base.msvc_lib_dir_ptr = strdup(output_path); + if (priv->base.msvc_lib_dir_ptr == nullptr) { + return ZigFindWindowsSdkErrorOutOfMemory; + } + priv->base.msvc_lib_dir_len = strlen(priv->base.msvc_lib_dir_ptr); + return ZigFindWindowsSdkErrorNone; + } + } + } + +com_done:; + HKEY key; + HRESULT rc = RegOpenKeyEx(HKEY_LOCAL_MACHINE, "SOFTWARE\\Microsoft\\VisualStudio\\SxS\\VS7", 0, + KEY_QUERY_VALUE | KEY_WOW64_32KEY, &key); + if (rc != ERROR_SUCCESS) { + return ZigFindWindowsSdkErrorNotFound; + } + + DWORD dw_type = 0; + DWORD cb_data = 0; + rc = RegQueryValueEx(key, "14.0", NULL, &dw_type, NULL, &cb_data); + if ((rc == ERROR_FILE_NOT_FOUND) || (REG_SZ != dw_type)) { + return ZigFindWindowsSdkErrorNotFound; + } + + char tmp_buf[4096]; + + RegQueryValueExA(key, "14.0", NULL, NULL, (LPBYTE)tmp_buf, &cb_data); + // RegQueryValueExA returns the length of the string INCLUDING the null terminator + char *tmp_buf_append_ptr = tmp_buf + (cb_data - 1); + tmp_buf_append_ptr += sprintf(tmp_buf_append_ptr, "VC\\Lib\\"); + switch (native_arch) { + case NativeArchi386: + //x86 is in the root of the Lib folder + break; + case NativeArchx86_64: + tmp_buf_append_ptr += sprintf(tmp_buf_append_ptr, "amd64\\"); + break; + case NativeArchArm: + tmp_buf_append_ptr += sprintf(tmp_buf_append_ptr, "arm\\"); + break; + } + + char *output_path = strdup(tmp_buf); + if (output_path == nullptr) { + return ZigFindWindowsSdkErrorOutOfMemory; + } + + tmp_buf_append_ptr += sprintf(tmp_buf_append_ptr, "vcruntime.lib"); + + if (GetFileAttributesA(tmp_buf) != INVALID_FILE_ATTRIBUTES) { + priv->base.msvc_lib_dir_ptr = output_path; + priv->base.msvc_lib_dir_len = strlen(output_path); + return ZigFindWindowsSdkErrorNone; + } else { + free(output_path); + return ZigFindWindowsSdkErrorNotFound; + } +} + +static ZigFindWindowsSdkError find_10_version(ZigWindowsSDKPrivate *priv) { + if (priv->base.path10_ptr == nullptr) + return ZigFindWindowsSdkErrorNone; + + char sdk_lib_dir[4096]; + int n = snprintf(sdk_lib_dir, 4096, "%s\\Lib\\*", priv->base.path10_ptr); + if (n < 0 || n >= 4096) { + return ZigFindWindowsSdkErrorPathTooLong; + } + + // enumerate files in sdk path looking for latest version + WIN32_FIND_DATA ffd; + HANDLE hFind = FindFirstFileA(sdk_lib_dir, &ffd); + if (hFind == INVALID_HANDLE_VALUE) { + return ZigFindWindowsSdkErrorNotFound; + } + int v0 = 0, v1 = 0, v2 = 0, v3 = 0; + for (;;) { + if (ffd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) { + int c0 = 0, c1 = 0, c2 = 0, c3 = 0; + sscanf(ffd.cFileName, "%d.%d.%d.%d", &c0, &c1, &c2, &c3); + if (c0 == 10 && c1 == 0 && c2 == 10240 && c3 == 0) { + // Microsoft released 26624 as 10240 accidentally. 
+ // https://developer.microsoft.com/en-us/windows/downloads/sdk-archive + c2 = 26624; + } + if ((c0 > v0) || (c1 > v1) || (c2 > v2) || (c3 > v3)) { + v0 = c0, v1 = c1, v2 = c2, v3 = c3; + free((void*)priv->base.version10_ptr); + priv->base.version10_ptr = strdup(ffd.cFileName); + if (priv->base.version10_ptr == nullptr) { + FindClose(hFind); + return ZigFindWindowsSdkErrorOutOfMemory; + } + } + } + if (FindNextFile(hFind, &ffd) == 0) { + FindClose(hFind); + break; + } + } + priv->base.version10_len = strlen(priv->base.version10_ptr); + return ZigFindWindowsSdkErrorNone; +} + +static ZigFindWindowsSdkError find_81_version(ZigWindowsSDKPrivate *priv) { + if (priv->base.path81_ptr == nullptr) + return ZigFindWindowsSdkErrorNone; + + char sdk_lib_dir[4096]; + int n = snprintf(sdk_lib_dir, 4096, "%s\\Lib\\winv*", priv->base.path81_ptr); + if (n < 0 || n >= 4096) { + return ZigFindWindowsSdkErrorPathTooLong; + } + + // enumerate files in sdk path looking for latest version + WIN32_FIND_DATA ffd; + HANDLE hFind = FindFirstFileA(sdk_lib_dir, &ffd); + if (hFind == INVALID_HANDLE_VALUE) { + return ZigFindWindowsSdkErrorNotFound; + } + int v0 = 0, v1 = 0; + for (;;) { + if (ffd.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) { + int c0 = 0, c1 = 0; + sscanf(ffd.cFileName, "winv%d.%d", &c0, &c1); + if ((c0 > v0) || (c1 > v1)) { + v0 = c0, v1 = c1; + free((void*)priv->base.version81_ptr); + priv->base.version81_ptr = strdup(ffd.cFileName); + if (priv->base.version81_ptr == nullptr) { + FindClose(hFind); + return ZigFindWindowsSdkErrorOutOfMemory; + } + } + } + if (FindNextFile(hFind, &ffd) == 0) { + FindClose(hFind); + break; + } + } + priv->base.version81_len = strlen(priv->base.version81_ptr); + return ZigFindWindowsSdkErrorNone; +} + +ZigFindWindowsSdkError zig_find_windows_sdk(struct ZigWindowsSDK **out_sdk) { + ZigWindowsSDKPrivate *priv = (ZigWindowsSDKPrivate*)calloc(1, sizeof(ZigWindowsSDKPrivate)); + if (priv == nullptr) { + return ZigFindWindowsSdkErrorOutOfMemory; + } + + HKEY key; + HRESULT rc; + rc = RegOpenKeyEx(HKEY_LOCAL_MACHINE, "SOFTWARE\\Microsoft\\Windows Kits\\Installed Roots", 0, + KEY_QUERY_VALUE | KEY_WOW64_32KEY | KEY_ENUMERATE_SUB_KEYS, &key); + if (rc != ERROR_SUCCESS) { + zig_free_windows_sdk(&priv->base); + return ZigFindWindowsSdkErrorNotFound; + } + + { + DWORD tmp_buf_len = MAX_PATH; + priv->base.path10_ptr = (const char *)calloc(tmp_buf_len, 1); + if (priv->base.path10_ptr == nullptr) { + zig_free_windows_sdk(&priv->base); + return ZigFindWindowsSdkErrorOutOfMemory; + } + rc = RegQueryValueEx(key, "KitsRoot10", NULL, NULL, (LPBYTE)priv->base.path10_ptr, &tmp_buf_len); + if (rc == ERROR_SUCCESS) { + priv->base.path10_len = tmp_buf_len; + } else { + free((void*)priv->base.path10_ptr); + priv->base.path10_ptr = nullptr; + } + } + { + DWORD tmp_buf_len = MAX_PATH; + priv->base.path81_ptr = (const char *)calloc(tmp_buf_len, 1); + if (priv->base.path81_ptr == nullptr) { + zig_free_windows_sdk(&priv->base); + return ZigFindWindowsSdkErrorOutOfMemory; + } + rc = RegQueryValueEx(key, "KitsRoot81", NULL, NULL, (LPBYTE)priv->base.path81_ptr, &tmp_buf_len); + if (rc == ERROR_SUCCESS) { + priv->base.path81_len = tmp_buf_len; + } else { + free((void*)priv->base.path81_ptr); + priv->base.path81_ptr = nullptr; + } + } + + { + ZigFindWindowsSdkError err = find_10_version(priv); + if (err == ZigFindWindowsSdkErrorOutOfMemory) { + zig_free_windows_sdk(&priv->base); + return err; + } + } + { + ZigFindWindowsSdkError err = find_81_version(priv); + if (err == ZigFindWindowsSdkErrorOutOfMemory) 
{ + zig_free_windows_sdk(&priv->base); + return err; + } + } + + { + ZigFindWindowsSdkError err = find_msvc_lib_dir(priv); + if (err == ZigFindWindowsSdkErrorOutOfMemory) { + zig_free_windows_sdk(&priv->base); + return err; + } + } + + *out_sdk = &priv->base; + return ZigFindWindowsSdkErrorNone; +} + +#else + +void zig_free_windows_sdk(struct ZigWindowsSDK *sdk) {} +ZigFindWindowsSdkError zig_find_windows_sdk(struct ZigWindowsSDK **out_sdk) { + return ZigFindWindowsSdkErrorNotFound; +} + +#endif diff --git a/src/windows_sdk.h b/src/windows_sdk.h new file mode 100644 index 0000000000..080ed55bed --- /dev/null +++ b/src/windows_sdk.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2018 Andrew Kelley + * + * This file is part of zig, which is MIT licensed. + * See http://opensource.org/licenses/MIT + */ + +#ifndef ZIG_WINDOWS_SDK_H +#define ZIG_WINDOWS_SDK_H + +#ifdef __cplusplus +#define ZIG_EXTERN_C extern "C" +#else +#define ZIG_EXTERN_C +#endif + +struct ZigWindowsSDK { + const char *path10_ptr; + size_t path10_len; + + const char *version10_ptr; + size_t version10_len; + + const char *path81_ptr; + size_t path81_len; + + const char *version81_ptr; + size_t version81_len; + + const char *msvc_lib_dir_ptr; + size_t msvc_lib_dir_len; +}; + +enum ZigFindWindowsSdkError { + ZigFindWindowsSdkErrorNone, + ZigFindWindowsSdkErrorOutOfMemory, + ZigFindWindowsSdkErrorNotFound, + ZigFindWindowsSdkErrorPathTooLong, +}; + +ZIG_EXTERN_C enum ZigFindWindowsSdkError zig_find_windows_sdk(struct ZigWindowsSDK **out_sdk); + +ZIG_EXTERN_C void zig_free_windows_sdk(struct ZigWindowsSDK *sdk); + +#endif diff --git a/std/os/windows/advapi32.zig b/std/os/windows/advapi32.zig new file mode 100644 index 0000000000..dcb5a636ea --- /dev/null +++ b/std/os/windows/advapi32.zig @@ -0,0 +1,30 @@ +use @import("index.zig"); + +pub const PROV_RSA_FULL = 1; + +pub const REGSAM = ACCESS_MASK; +pub const ACCESS_MASK = DWORD; +pub const PHKEY = &HKEY; +pub const HKEY = &HKEY__; +pub const HKEY__ = extern struct { + unused: c_int, +}; +pub const LSTATUS = LONG; + +pub extern "advapi32" stdcallcc fn CryptAcquireContextA( + phProv: *HCRYPTPROV, + pszContainer: ?LPCSTR, + pszProvider: ?LPCSTR, + dwProvType: DWORD, + dwFlags: DWORD, +) BOOL; + +pub extern "advapi32" stdcallcc fn CryptReleaseContext(hProv: HCRYPTPROV, dwFlags: DWORD) BOOL; + +pub extern "advapi32" stdcallcc fn CryptGenRandom(hProv: HCRYPTPROV, dwLen: DWORD, pbBuffer: [*]BYTE) BOOL; + +pub extern "advapi32" stdcallcc fn RegOpenKeyExW(hKey: HKEY, lpSubKey: LPCWSTR, ulOptions: DWORD, samDesired: REGSAM, + phkResult: &HKEY,) LSTATUS; + +pub extern "advapi32" stdcallcc fn RegQueryValueExW(hKey: HKEY, lpValueName: LPCWSTR, lpReserved: LPDWORD, + lpType: LPDWORD, lpData: LPBYTE, lpcbData: LPDWORD,) LSTATUS; diff --git a/std/os/windows/index.zig b/std/os/windows/index.zig index 96c4d3861c..90ccfaf6c5 100644 --- a/std/os/windows/index.zig +++ b/std/os/windows/index.zig @@ -1,190 +1,19 @@ const std = @import("../../index.zig"); const assert = std.debug.assert; + +pub use @import("advapi32.zig"); +pub use @import("kernel32.zig"); +pub use @import("ole32.zig"); +pub use @import("shell32.zig"); +pub use @import("shlwapi.zig"); +pub use @import("user32.zig"); + test "import" { _ = @import("util.zig"); } pub const ERROR = @import("error.zig"); -pub extern "advapi32" stdcallcc fn CryptAcquireContextA( - phProv: *HCRYPTPROV, - pszContainer: ?LPCSTR, - pszProvider: ?LPCSTR, - dwProvType: DWORD, - dwFlags: DWORD, -) BOOL; - -pub extern "advapi32" stdcallcc fn CryptReleaseContext(hProv: 
HCRYPTPROV, dwFlags: DWORD) BOOL; - -pub extern "advapi32" stdcallcc fn CryptGenRandom(hProv: HCRYPTPROV, dwLen: DWORD, pbBuffer: [*]BYTE) BOOL; - -pub extern "kernel32" stdcallcc fn CloseHandle(hObject: HANDLE) BOOL; - -pub extern "kernel32" stdcallcc fn CreateDirectoryA( - lpPathName: LPCSTR, - lpSecurityAttributes: ?*SECURITY_ATTRIBUTES, -) BOOL; - -pub extern "kernel32" stdcallcc fn CreateFileA( - lpFileName: LPCSTR, - dwDesiredAccess: DWORD, - dwShareMode: DWORD, - lpSecurityAttributes: ?LPSECURITY_ATTRIBUTES, - dwCreationDisposition: DWORD, - dwFlagsAndAttributes: DWORD, - hTemplateFile: ?HANDLE, -) HANDLE; - -pub extern "kernel32" stdcallcc fn CreatePipe( - hReadPipe: *HANDLE, - hWritePipe: *HANDLE, - lpPipeAttributes: *const SECURITY_ATTRIBUTES, - nSize: DWORD, -) BOOL; - -pub extern "kernel32" stdcallcc fn CreateProcessA( - lpApplicationName: ?LPCSTR, - lpCommandLine: LPSTR, - lpProcessAttributes: ?*SECURITY_ATTRIBUTES, - lpThreadAttributes: ?*SECURITY_ATTRIBUTES, - bInheritHandles: BOOL, - dwCreationFlags: DWORD, - lpEnvironment: ?*c_void, - lpCurrentDirectory: ?LPCSTR, - lpStartupInfo: *STARTUPINFOA, - lpProcessInformation: *PROCESS_INFORMATION, -) BOOL; - -pub extern "kernel32" stdcallcc fn CreateSymbolicLinkA( - lpSymlinkFileName: LPCSTR, - lpTargetFileName: LPCSTR, - dwFlags: DWORD, -) BOOLEAN; - -pub extern "kernel32" stdcallcc fn CreateIoCompletionPort(FileHandle: HANDLE, ExistingCompletionPort: ?HANDLE, CompletionKey: ULONG_PTR, NumberOfConcurrentThreads: DWORD) ?HANDLE; - -pub extern "kernel32" stdcallcc fn CreateThread(lpThreadAttributes: ?LPSECURITY_ATTRIBUTES, dwStackSize: SIZE_T, lpStartAddress: LPTHREAD_START_ROUTINE, lpParameter: ?LPVOID, dwCreationFlags: DWORD, lpThreadId: ?LPDWORD) ?HANDLE; - -pub extern "kernel32" stdcallcc fn DeleteFileA(lpFileName: LPCSTR) BOOL; - -pub extern "kernel32" stdcallcc fn ExitProcess(exit_code: UINT) noreturn; - -pub extern "kernel32" stdcallcc fn FindFirstFileA(lpFileName: LPCSTR, lpFindFileData: *WIN32_FIND_DATAA) HANDLE; -pub extern "kernel32" stdcallcc fn FindClose(hFindFile: HANDLE) BOOL; -pub extern "kernel32" stdcallcc fn FindNextFileA(hFindFile: HANDLE, lpFindFileData: *WIN32_FIND_DATAA) BOOL; - -pub extern "kernel32" stdcallcc fn FreeEnvironmentStringsA(penv: [*]u8) BOOL; - -pub extern "kernel32" stdcallcc fn GetCommandLineA() LPSTR; - -pub extern "kernel32" stdcallcc fn GetConsoleMode(in_hConsoleHandle: HANDLE, out_lpMode: *DWORD) BOOL; - -pub extern "kernel32" stdcallcc fn GetCurrentDirectoryA(nBufferLength: WORD, lpBuffer: ?LPSTR) DWORD; - -pub extern "kernel32" stdcallcc fn GetEnvironmentStringsA() ?[*]u8; - -pub extern "kernel32" stdcallcc fn GetEnvironmentVariableA(lpName: LPCSTR, lpBuffer: LPSTR, nSize: DWORD) DWORD; - -pub extern "kernel32" stdcallcc fn GetExitCodeProcess(hProcess: HANDLE, lpExitCode: *DWORD) BOOL; - -pub extern "kernel32" stdcallcc fn GetFileSizeEx(hFile: HANDLE, lpFileSize: *LARGE_INTEGER) BOOL; - -pub extern "kernel32" stdcallcc fn GetFileAttributesA(lpFileName: LPCSTR) DWORD; - -pub extern "kernel32" stdcallcc fn GetModuleFileNameA(hModule: ?HMODULE, lpFilename: LPSTR, nSize: DWORD) DWORD; - -pub extern "kernel32" stdcallcc fn GetLastError() DWORD; - -pub extern "kernel32" stdcallcc fn GetFileInformationByHandleEx( - in_hFile: HANDLE, - in_FileInformationClass: FILE_INFO_BY_HANDLE_CLASS, - out_lpFileInformation: *c_void, - in_dwBufferSize: DWORD, -) BOOL; - -pub extern "kernel32" stdcallcc fn GetFinalPathNameByHandleA( - hFile: HANDLE, - lpszFilePath: LPSTR, - cchFilePath: DWORD, - dwFlags: 
DWORD, -) DWORD; - -pub extern "kernel32" stdcallcc fn GetProcessHeap() ?HANDLE; -pub extern "kernel32" stdcallcc fn GetQueuedCompletionStatus(CompletionPort: HANDLE, lpNumberOfBytesTransferred: LPDWORD, lpCompletionKey: *ULONG_PTR, lpOverlapped: *?*OVERLAPPED, dwMilliseconds: DWORD) BOOL; - -pub extern "kernel32" stdcallcc fn GetSystemInfo(lpSystemInfo: *SYSTEM_INFO) void; -pub extern "kernel32" stdcallcc fn GetSystemTimeAsFileTime(*FILETIME) void; - -pub extern "kernel32" stdcallcc fn HeapCreate(flOptions: DWORD, dwInitialSize: SIZE_T, dwMaximumSize: SIZE_T) ?HANDLE; -pub extern "kernel32" stdcallcc fn HeapDestroy(hHeap: HANDLE) BOOL; -pub extern "kernel32" stdcallcc fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: *c_void, dwBytes: SIZE_T) ?*c_void; -pub extern "kernel32" stdcallcc fn HeapSize(hHeap: HANDLE, dwFlags: DWORD, lpMem: *const c_void) SIZE_T; -pub extern "kernel32" stdcallcc fn HeapValidate(hHeap: HANDLE, dwFlags: DWORD, lpMem: *const c_void) BOOL; -pub extern "kernel32" stdcallcc fn HeapCompact(hHeap: HANDLE, dwFlags: DWORD) SIZE_T; -pub extern "kernel32" stdcallcc fn HeapSummary(hHeap: HANDLE, dwFlags: DWORD, lpSummary: LPHEAP_SUMMARY) BOOL; - -pub extern "kernel32" stdcallcc fn GetStdHandle(in_nStdHandle: DWORD) ?HANDLE; - -pub extern "kernel32" stdcallcc fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) ?*c_void; - -pub extern "kernel32" stdcallcc fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: *c_void) BOOL; - -pub extern "kernel32" stdcallcc fn MoveFileExA( - lpExistingFileName: LPCSTR, - lpNewFileName: LPCSTR, - dwFlags: DWORD, -) BOOL; - -pub extern "kernel32" stdcallcc fn PostQueuedCompletionStatus(CompletionPort: HANDLE, dwNumberOfBytesTransferred: DWORD, dwCompletionKey: ULONG_PTR, lpOverlapped: ?*OVERLAPPED) BOOL; - -pub extern "kernel32" stdcallcc fn QueryPerformanceCounter(lpPerformanceCount: *LARGE_INTEGER) BOOL; - -pub extern "kernel32" stdcallcc fn QueryPerformanceFrequency(lpFrequency: *LARGE_INTEGER) BOOL; - -pub extern "kernel32" stdcallcc fn ReadFile( - in_hFile: HANDLE, - out_lpBuffer: *c_void, - in_nNumberOfBytesToRead: DWORD, - out_lpNumberOfBytesRead: *DWORD, - in_out_lpOverlapped: ?*OVERLAPPED, -) BOOL; - -pub extern "kernel32" stdcallcc fn RemoveDirectoryA(lpPathName: LPCSTR) BOOL; - -pub extern "kernel32" stdcallcc fn SetFilePointerEx( - in_fFile: HANDLE, - in_liDistanceToMove: LARGE_INTEGER, - out_opt_ldNewFilePointer: ?*LARGE_INTEGER, - in_dwMoveMethod: DWORD, -) BOOL; - -pub extern "kernel32" stdcallcc fn SetHandleInformation(hObject: HANDLE, dwMask: DWORD, dwFlags: DWORD) BOOL; - -pub extern "kernel32" stdcallcc fn Sleep(dwMilliseconds: DWORD) void; - -pub extern "kernel32" stdcallcc fn TerminateProcess(hProcess: HANDLE, uExitCode: UINT) BOOL; - -pub extern "kernel32" stdcallcc fn WaitForSingleObject(hHandle: HANDLE, dwMilliseconds: DWORD) DWORD; - -pub extern "kernel32" stdcallcc fn WriteFile( - in_hFile: HANDLE, - in_lpBuffer: *const c_void, - in_nNumberOfBytesToWrite: DWORD, - out_lpNumberOfBytesWritten: ?*DWORD, - in_out_lpOverlapped: ?*OVERLAPPED, -) BOOL; - -//TODO: call unicode versions instead of relying on ANSI code page -pub extern "kernel32" stdcallcc fn LoadLibraryA(lpLibFileName: LPCSTR) ?HMODULE; - -pub extern "kernel32" stdcallcc fn FreeLibrary(hModule: HMODULE) BOOL; - -pub extern "user32" stdcallcc fn MessageBoxA(hWnd: ?HANDLE, lpText: ?LPCTSTR, lpCaption: ?LPCTSTR, uType: UINT) c_int; - -pub extern "shlwapi" stdcallcc fn PathFileExistsA(pszPath: ?LPCTSTR) BOOL; - -pub const PROV_RSA_FULL = 1; - pub const BOOL 
= c_int; pub const BOOLEAN = BYTE; pub const BYTE = u8; @@ -206,6 +35,7 @@ pub const LPSTR = [*]CHAR; pub const LPTSTR = if (UNICODE) LPWSTR else LPSTR; pub const LPVOID = *c_void; pub const LPWSTR = [*]WCHAR; +pub const LPCWSTR = [*]const WCHAR; pub const PVOID = *c_void; pub const PWSTR = [*]WCHAR; pub const SIZE_T = usize; @@ -442,10 +272,6 @@ pub const SYSTEM_INFO = extern struct { wProcessorRevision: WORD, }; -pub extern "ole32.dll" stdcallcc fn CoTaskMemFree(pv: LPVOID) void; - -pub extern "shell32.dll" stdcallcc fn SHGetKnownFolderPath(rfid: *const KNOWNFOLDERID, dwFlags: DWORD, hToken: ?HANDLE, ppszPath: *[*]WCHAR) HRESULT; - pub const HRESULT = c_long; pub const KNOWNFOLDERID = GUID; diff --git a/std/os/windows/kernel32.zig b/std/os/windows/kernel32.zig new file mode 100644 index 0000000000..fa3473ad05 --- /dev/null +++ b/std/os/windows/kernel32.zig @@ -0,0 +1,162 @@ +use @import("index.zig"); + +pub extern "kernel32" stdcallcc fn CloseHandle(hObject: HANDLE) BOOL; + +pub extern "kernel32" stdcallcc fn CreateDirectoryA( + lpPathName: LPCSTR, + lpSecurityAttributes: ?*SECURITY_ATTRIBUTES, +) BOOL; + +pub extern "kernel32" stdcallcc fn CreateFileA( + lpFileName: LPCSTR, + dwDesiredAccess: DWORD, + dwShareMode: DWORD, + lpSecurityAttributes: ?LPSECURITY_ATTRIBUTES, + dwCreationDisposition: DWORD, + dwFlagsAndAttributes: DWORD, + hTemplateFile: ?HANDLE, +) HANDLE; + +pub extern "kernel32" stdcallcc fn CreatePipe( + hReadPipe: *HANDLE, + hWritePipe: *HANDLE, + lpPipeAttributes: *const SECURITY_ATTRIBUTES, + nSize: DWORD, +) BOOL; + +pub extern "kernel32" stdcallcc fn CreateProcessA( + lpApplicationName: ?LPCSTR, + lpCommandLine: LPSTR, + lpProcessAttributes: ?*SECURITY_ATTRIBUTES, + lpThreadAttributes: ?*SECURITY_ATTRIBUTES, + bInheritHandles: BOOL, + dwCreationFlags: DWORD, + lpEnvironment: ?*c_void, + lpCurrentDirectory: ?LPCSTR, + lpStartupInfo: *STARTUPINFOA, + lpProcessInformation: *PROCESS_INFORMATION, +) BOOL; + +pub extern "kernel32" stdcallcc fn CreateSymbolicLinkA( + lpSymlinkFileName: LPCSTR, + lpTargetFileName: LPCSTR, + dwFlags: DWORD, +) BOOLEAN; + +pub extern "kernel32" stdcallcc fn CreateIoCompletionPort(FileHandle: HANDLE, ExistingCompletionPort: ?HANDLE, CompletionKey: ULONG_PTR, NumberOfConcurrentThreads: DWORD) ?HANDLE; + +pub extern "kernel32" stdcallcc fn CreateThread(lpThreadAttributes: ?LPSECURITY_ATTRIBUTES, dwStackSize: SIZE_T, lpStartAddress: LPTHREAD_START_ROUTINE, lpParameter: ?LPVOID, dwCreationFlags: DWORD, lpThreadId: ?LPDWORD) ?HANDLE; + +pub extern "kernel32" stdcallcc fn DeleteFileA(lpFileName: LPCSTR) BOOL; + +pub extern "kernel32" stdcallcc fn ExitProcess(exit_code: UINT) noreturn; + +pub extern "kernel32" stdcallcc fn FindFirstFileA(lpFileName: LPCSTR, lpFindFileData: *WIN32_FIND_DATAA) HANDLE; +pub extern "kernel32" stdcallcc fn FindClose(hFindFile: HANDLE) BOOL; +pub extern "kernel32" stdcallcc fn FindNextFileA(hFindFile: HANDLE, lpFindFileData: *WIN32_FIND_DATAA) BOOL; + +pub extern "kernel32" stdcallcc fn FreeEnvironmentStringsA(penv: [*]u8) BOOL; + +pub extern "kernel32" stdcallcc fn GetCommandLineA() LPSTR; + +pub extern "kernel32" stdcallcc fn GetConsoleMode(in_hConsoleHandle: HANDLE, out_lpMode: *DWORD) BOOL; + +pub extern "kernel32" stdcallcc fn GetCurrentDirectoryA(nBufferLength: WORD, lpBuffer: ?LPSTR) DWORD; + +pub extern "kernel32" stdcallcc fn GetEnvironmentStringsA() ?[*]u8; + +pub extern "kernel32" stdcallcc fn GetEnvironmentVariableA(lpName: LPCSTR, lpBuffer: LPSTR, nSize: DWORD) DWORD; + +pub extern "kernel32" stdcallcc fn 
GetExitCodeProcess(hProcess: HANDLE, lpExitCode: *DWORD) BOOL; + +pub extern "kernel32" stdcallcc fn GetFileSizeEx(hFile: HANDLE, lpFileSize: *LARGE_INTEGER) BOOL; + +pub extern "kernel32" stdcallcc fn GetFileAttributesA(lpFileName: LPCSTR) DWORD; + +pub extern "kernel32" stdcallcc fn GetModuleFileNameA(hModule: ?HMODULE, lpFilename: LPSTR, nSize: DWORD) DWORD; + +pub extern "kernel32" stdcallcc fn GetLastError() DWORD; + +pub extern "kernel32" stdcallcc fn GetFileInformationByHandleEx( + in_hFile: HANDLE, + in_FileInformationClass: FILE_INFO_BY_HANDLE_CLASS, + out_lpFileInformation: *c_void, + in_dwBufferSize: DWORD, +) BOOL; + +pub extern "kernel32" stdcallcc fn GetFinalPathNameByHandleA( + hFile: HANDLE, + lpszFilePath: LPSTR, + cchFilePath: DWORD, + dwFlags: DWORD, +) DWORD; + +pub extern "kernel32" stdcallcc fn GetProcessHeap() ?HANDLE; +pub extern "kernel32" stdcallcc fn GetQueuedCompletionStatus(CompletionPort: HANDLE, lpNumberOfBytesTransferred: LPDWORD, lpCompletionKey: *ULONG_PTR, lpOverlapped: *?*OVERLAPPED, dwMilliseconds: DWORD) BOOL; + +pub extern "kernel32" stdcallcc fn GetSystemInfo(lpSystemInfo: *SYSTEM_INFO) void; +pub extern "kernel32" stdcallcc fn GetSystemTimeAsFileTime(*FILETIME) void; + +pub extern "kernel32" stdcallcc fn HeapCreate(flOptions: DWORD, dwInitialSize: SIZE_T, dwMaximumSize: SIZE_T) ?HANDLE; +pub extern "kernel32" stdcallcc fn HeapDestroy(hHeap: HANDLE) BOOL; +pub extern "kernel32" stdcallcc fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: *c_void, dwBytes: SIZE_T) ?*c_void; +pub extern "kernel32" stdcallcc fn HeapSize(hHeap: HANDLE, dwFlags: DWORD, lpMem: *const c_void) SIZE_T; +pub extern "kernel32" stdcallcc fn HeapValidate(hHeap: HANDLE, dwFlags: DWORD, lpMem: *const c_void) BOOL; +pub extern "kernel32" stdcallcc fn HeapCompact(hHeap: HANDLE, dwFlags: DWORD) SIZE_T; +pub extern "kernel32" stdcallcc fn HeapSummary(hHeap: HANDLE, dwFlags: DWORD, lpSummary: LPHEAP_SUMMARY) BOOL; + +pub extern "kernel32" stdcallcc fn GetStdHandle(in_nStdHandle: DWORD) ?HANDLE; + +pub extern "kernel32" stdcallcc fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) ?*c_void; + +pub extern "kernel32" stdcallcc fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: *c_void) BOOL; + +pub extern "kernel32" stdcallcc fn MoveFileExA( + lpExistingFileName: LPCSTR, + lpNewFileName: LPCSTR, + dwFlags: DWORD, +) BOOL; + +pub extern "kernel32" stdcallcc fn PostQueuedCompletionStatus(CompletionPort: HANDLE, dwNumberOfBytesTransferred: DWORD, dwCompletionKey: ULONG_PTR, lpOverlapped: ?*OVERLAPPED) BOOL; + +pub extern "kernel32" stdcallcc fn QueryPerformanceCounter(lpPerformanceCount: *LARGE_INTEGER) BOOL; + +pub extern "kernel32" stdcallcc fn QueryPerformanceFrequency(lpFrequency: *LARGE_INTEGER) BOOL; + +pub extern "kernel32" stdcallcc fn ReadFile( + in_hFile: HANDLE, + out_lpBuffer: *c_void, + in_nNumberOfBytesToRead: DWORD, + out_lpNumberOfBytesRead: *DWORD, + in_out_lpOverlapped: ?*OVERLAPPED, +) BOOL; + +pub extern "kernel32" stdcallcc fn RemoveDirectoryA(lpPathName: LPCSTR) BOOL; + +pub extern "kernel32" stdcallcc fn SetFilePointerEx( + in_fFile: HANDLE, + in_liDistanceToMove: LARGE_INTEGER, + out_opt_ldNewFilePointer: ?*LARGE_INTEGER, + in_dwMoveMethod: DWORD, +) BOOL; + +pub extern "kernel32" stdcallcc fn SetHandleInformation(hObject: HANDLE, dwMask: DWORD, dwFlags: DWORD) BOOL; + +pub extern "kernel32" stdcallcc fn Sleep(dwMilliseconds: DWORD) void; + +pub extern "kernel32" stdcallcc fn TerminateProcess(hProcess: HANDLE, uExitCode: UINT) BOOL; + +pub extern "kernel32" 
stdcallcc fn WaitForSingleObject(hHandle: HANDLE, dwMilliseconds: DWORD) DWORD; + +pub extern "kernel32" stdcallcc fn WriteFile( + in_hFile: HANDLE, + in_lpBuffer: *const c_void, + in_nNumberOfBytesToWrite: DWORD, + out_lpNumberOfBytesWritten: ?*DWORD, + in_out_lpOverlapped: ?*OVERLAPPED, +) BOOL; + +//TODO: call unicode versions instead of relying on ANSI code page +pub extern "kernel32" stdcallcc fn LoadLibraryA(lpLibFileName: LPCSTR) ?HMODULE; + +pub extern "kernel32" stdcallcc fn FreeLibrary(hModule: HMODULE) BOOL; diff --git a/std/os/windows/ole32.zig b/std/os/windows/ole32.zig new file mode 100644 index 0000000000..84d8089d07 --- /dev/null +++ b/std/os/windows/ole32.zig @@ -0,0 +1,18 @@ +use @import("index.zig"); + +pub extern "ole32.dll" stdcallcc fn CoTaskMemFree(pv: LPVOID) void; +pub extern "ole32.dll" stdcallcc fn CoUninitialize() void; +pub extern "ole32.dll" stdcallcc fn CoGetCurrentProcess() DWORD; +pub extern "ole32.dll" stdcallcc fn CoInitializeEx(pvReserved: LPVOID, dwCoInit: DWORD) HRESULT; + + +pub const COINIT_APARTMENTTHREADED = COINIT.COINIT_APARTMENTTHREADED; +pub const COINIT_MULTITHREADED = COINIT.COINIT_MULTITHREADED; +pub const COINIT_DISABLE_OLE1DDE = COINIT.COINIT_DISABLE_OLE1DDE; +pub const COINIT_SPEED_OVER_MEMORY = COINIT.COINIT_SPEED_OVER_MEMORY; +pub const COINIT = extern enum { + COINIT_APARTMENTTHREADED = 2, + COINIT_MULTITHREADED = 0, + COINIT_DISABLE_OLE1DDE = 4, + COINIT_SPEED_OVER_MEMORY = 8, +}; diff --git a/std/os/windows/shell32.zig b/std/os/windows/shell32.zig new file mode 100644 index 0000000000..f10466add3 --- /dev/null +++ b/std/os/windows/shell32.zig @@ -0,0 +1,4 @@ +use @import("index.zig"); + +pub extern "shell32.dll" stdcallcc fn SHGetKnownFolderPath(rfid: *const KNOWNFOLDERID, dwFlags: DWORD, hToken: ?HANDLE, ppszPath: *[*]WCHAR) HRESULT; + diff --git a/std/os/windows/shlwapi.zig b/std/os/windows/shlwapi.zig new file mode 100644 index 0000000000..6bccefaf98 --- /dev/null +++ b/std/os/windows/shlwapi.zig @@ -0,0 +1,4 @@ +use @import("index.zig"); + +pub extern "shlwapi" stdcallcc fn PathFileExistsA(pszPath: ?LPCTSTR) BOOL; + diff --git a/std/os/windows/user32.zig b/std/os/windows/user32.zig new file mode 100644 index 0000000000..37f9f6f3b8 --- /dev/null +++ b/std/os/windows/user32.zig @@ -0,0 +1,4 @@ +use @import("index.zig"); + +pub extern "user32" stdcallcc fn MessageBoxA(hWnd: ?HANDLE, lpText: ?LPCTSTR, lpCaption: ?LPCTSTR, uType: UINT) c_int; + -- cgit v1.2.3 From 29e19ace362e7a1910b9f105257f2bce2491e32b Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 24 Jul 2018 10:13:40 -0400 Subject: fix logic for determining whether param requires comptime closes #778 closes #1213 --- src/analyze.cpp | 14 +++++++++----- test/compile_errors.zig | 11 +++++++++++ 2 files changed, 20 insertions(+), 5 deletions(-) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index 6bbe5f6037..f399ab8305 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -1585,10 +1585,6 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c case TypeTableEntryIdBlock: case TypeTableEntryIdBoundFn: case TypeTableEntryIdMetaType: - add_node_error(g, param_node->data.param_decl.type, - buf_sprintf("parameter of type '%s' must be declared comptime", - buf_ptr(&type_entry->name))); - return g->builtin_types.entry_invalid; case TypeTableEntryIdVoid: case TypeTableEntryIdBool: case TypeTableEntryIdInt: @@ -1603,6 +1599,13 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c case 
TypeTableEntryIdUnion: case TypeTableEntryIdFn: case TypeTableEntryIdPromise: + type_ensure_zero_bits_known(g, type_entry); + if (type_requires_comptime(type_entry)) { + add_node_error(g, param_node->data.param_decl.type, + buf_sprintf("parameter of type '%s' must be declared comptime", + buf_ptr(&type_entry->name))); + return g->builtin_types.entry_invalid; + } break; } FnTypeParamInfo *param_info = &fn_type_id.param_info[fn_type_id.next_param_index]; @@ -5019,9 +5022,10 @@ bool type_requires_comptime(TypeTableEntry *type_entry) { } else { return type_requires_comptime(type_entry->data.pointer.child_type); } + case TypeTableEntryIdFn: + return type_entry->data.fn.is_generic; case TypeTableEntryIdEnum: case TypeTableEntryIdErrorSet: - case TypeTableEntryIdFn: case TypeTableEntryIdBool: case TypeTableEntryIdInt: case TypeTableEntryIdFloat: diff --git a/test/compile_errors.zig b/test/compile_errors.zig index d5582b1584..b7bd39f29e 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -1,6 +1,17 @@ const tests = @import("tests.zig"); pub fn addCases(cases: *tests.CompileErrorContext) void { + cases.add( + "generic fn as parameter without comptime keyword", + \\fn f(_: fn (var) void) void {} + \\fn g(_: var) void {} + \\export fn entry() void { + \\ f(g); + \\} + , + ".tmp_source.zig:1:9: error: parameter of type 'fn(var)var' must be declared comptime", + ); + cases.add( "optional pointer to void in extern struct", \\comptime { -- cgit v1.2.3 From 2ea08561cf69dabc99722ffc24cb0e4327605506 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 24 Jul 2018 14:20:49 -0400 Subject: self-hosted: function types use table lookup --- src-self-hosted/codegen.zig | 3 +- src-self-hosted/compilation.zig | 69 +++++++- src-self-hosted/ir.zig | 8 +- src-self-hosted/type.zig | 338 +++++++++++++++++++++++++++++++++------- src/analyze.cpp | 8 +- 5 files changed, 356 insertions(+), 70 deletions(-) (limited to 'src/analyze.cpp') diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig index ad3dce061e..88293c845e 100644 --- a/src-self-hosted/codegen.zig +++ b/src-self-hosted/codegen.zig @@ -168,6 +168,7 @@ pub fn renderToLlvmModule(ofile: *ObjectFile, fn_val: *Value.Fn, code: *ir.Code) //} const fn_type = fn_val.base.typ.cast(Type.Fn).?; + const fn_type_normal = &fn_type.key.data.Normal; try addLLVMFnAttr(ofile, llvm_fn, "nounwind"); //add_uwtable_attr(g, fn_table_entry->llvm_value); @@ -209,7 +210,7 @@ pub fn renderToLlvmModule(ofile: *ObjectFile, fn_val: *Value.Fn, code: *ir.Code) // addLLVMArgAttr(fn_table_entry->llvm_value, (unsigned)err_ret_trace_arg_index, "nonnull"); //} - const cur_ret_ptr = if (fn_type.return_type.handleIsPtr()) llvm.GetParam(llvm_fn, 0) else null; + const cur_ret_ptr = if (fn_type_normal.return_type.handleIsPtr()) llvm.GetParam(llvm_fn, 0) else null; // build all basic blocks for (code.basic_block_list.toSlice()) |bb| { diff --git a/src-self-hosted/compilation.zig b/src-self-hosted/compilation.zig index 093aab21da..8d41e2439b 100644 --- a/src-self-hosted/compilation.zig +++ b/src-self-hosted/compilation.zig @@ -220,12 +220,14 @@ pub const Compilation = struct { int_type_table: event.Locked(IntTypeTable), array_type_table: event.Locked(ArrayTypeTable), ptr_type_table: event.Locked(PtrTypeTable), + fn_type_table: event.Locked(FnTypeTable), c_int_types: [CInt.list.len]*Type.Int, const IntTypeTable = std.HashMap(*const Type.Int.Key, *Type.Int, Type.Int.Key.hash, Type.Int.Key.eql); const ArrayTypeTable = std.HashMap(*const Type.Array.Key, *Type.Array, 
Type.Array.Key.hash, Type.Array.Key.eql); const PtrTypeTable = std.HashMap(*const Type.Pointer.Key, *Type.Pointer, Type.Pointer.Key.hash, Type.Pointer.Key.eql); + const FnTypeTable = std.HashMap(*const Type.Fn.Key, *Type.Fn, Type.Fn.Key.hash, Type.Fn.Key.eql); const TypeTable = std.HashMap([]const u8, *Type, mem.hash_slice_u8, mem.eql_slice_u8); const CompileErrList = std.ArrayList(*Msg); @@ -384,6 +386,7 @@ pub const Compilation = struct { .int_type_table = event.Locked(IntTypeTable).init(loop, IntTypeTable.init(loop.allocator)), .array_type_table = event.Locked(ArrayTypeTable).init(loop, ArrayTypeTable.init(loop.allocator)), .ptr_type_table = event.Locked(PtrTypeTable).init(loop, PtrTypeTable.init(loop.allocator)), + .fn_type_table = event.Locked(FnTypeTable).init(loop, FnTypeTable.init(loop.allocator)), .c_int_types = undefined, .meta_type = undefined, @@ -414,6 +417,7 @@ pub const Compilation = struct { comp.int_type_table.private_data.deinit(); comp.array_type_table.private_data.deinit(); comp.ptr_type_table.private_data.deinit(); + comp.fn_type_table.private_data.deinit(); comp.arena_allocator.deinit(); comp.loop.allocator.destroy(comp); } @@ -1160,10 +1164,47 @@ async fn generateDeclFn(comp: *Compilation, fn_decl: *Decl.Fn) !void { fn_decl.value = Decl.Fn.Val{ .Fn = fn_val }; symbol_name_consumed = true; + // Define local parameter variables + //for (size_t i = 0; i < fn_type_id->param_count; i += 1) { + // FnTypeParamInfo *param_info = &fn_type_id->param_info[i]; + // AstNode *param_decl_node = get_param_decl_node(fn_table_entry, i); + // Buf *param_name; + // bool is_var_args = param_decl_node && param_decl_node->data.param_decl.is_var_args; + // if (param_decl_node && !is_var_args) { + // param_name = param_decl_node->data.param_decl.name; + // } else { + // param_name = buf_sprintf("arg%" ZIG_PRI_usize "", i); + // } + // if (param_name == nullptr) { + // continue; + // } + + // TypeTableEntry *param_type = param_info->type; + // bool is_noalias = param_info->is_noalias; + + // if (is_noalias && get_codegen_ptr_type(param_type) == nullptr) { + // add_node_error(g, param_decl_node, buf_sprintf("noalias on non-pointer parameter")); + // } + + // VariableTableEntry *var = add_variable(g, param_decl_node, fn_table_entry->child_scope, + // param_name, true, create_const_runtime(param_type), nullptr); + // var->src_arg_index = i; + // fn_table_entry->child_scope = var->child_scope; + // var->shadowable = var->shadowable || is_var_args; + + // if (type_has_bits(param_type)) { + // fn_table_entry->variable_list.append(var); + // } + + // if (fn_type->data.fn.gen_param_info) { + // var->gen_arg_index = fn_type->data.fn.gen_param_info[i].gen_index; + // } + //} + const analyzed_code = try await (async comp.genAndAnalyzeCode( &fndef_scope.base, body_node, - fn_type.return_type, + fn_type.key.data.Normal.return_type, ) catch unreachable); errdefer analyzed_code.destroy(comp.gpa()); @@ -1199,14 +1240,13 @@ async fn analyzeFnType(comp: *Compilation, scope: *Scope, fn_proto: *ast.Node.Fn var params = ArrayList(Type.Fn.Param).init(comp.gpa()); var params_consumed = false; - defer if (params_consumed) { + defer if (!params_consumed) { for (params.toSliceConst()) |param| { param.typ.base.deref(comp); } params.deinit(); }; - const is_var_args = false; { var it = fn_proto.params.iterator(0); while (it.next()) |param_node_ptr| { @@ -1219,8 +1259,29 @@ async fn analyzeFnType(comp: *Compilation, scope: *Scope, fn_proto: *ast.Node.Fn }); } } - const fn_type = try Type.Fn.create(comp, return_type, 
params.toOwnedSlice(), is_var_args); + + const key = Type.Fn.Key{ + .alignment = null, + .data = Type.Fn.Key.Data{ + .Normal = Type.Fn.Normal{ + .return_type = return_type, + .params = params.toOwnedSlice(), + .is_var_args = false, // TODO + .cc = Type.Fn.CallingConvention.Auto, // TODO + }, + }, + }; params_consumed = true; + var key_consumed = false; + defer if (!key_consumed) { + for (key.data.Normal.params) |param| { + param.typ.base.deref(comp); + } + comp.gpa().free(key.data.Normal.params); + }; + + const fn_type = try await (async Type.Fn.get(comp, key) catch unreachable); + key_consumed = true; errdefer fn_type.base.base.deref(comp); return fn_type; diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig index c34f06753d..45355bbf2c 100644 --- a/src-self-hosted/ir.zig +++ b/src-self-hosted/ir.zig @@ -281,11 +281,13 @@ pub const Inst = struct { return error.SemanticAnalysisFailed; }; - if (fn_type.params.len != self.params.args.len) { + const fn_type_param_count = fn_type.paramCount(); + + if (fn_type_param_count != self.params.args.len) { try ira.addCompileError( self.base.span, "expected {} arguments, found {}", - fn_type.params.len, + fn_type_param_count, self.params.args.len, ); return error.SemanticAnalysisFailed; @@ -299,7 +301,7 @@ pub const Inst = struct { .fn_ref = fn_ref, .args = args, }); - new_inst.val = IrVal{ .KnownType = fn_type.return_type }; + new_inst.val = IrVal{ .KnownType = fn_type.key.data.Normal.return_type }; return new_inst; } diff --git a/src-self-hosted/type.zig b/src-self-hosted/type.zig index 217c1d50a7..3b57260447 100644 --- a/src-self-hosted/type.zig +++ b/src-self-hosted/type.zig @@ -221,57 +221,267 @@ pub const Type = struct { pub const Fn = struct { base: Type, - return_type: *Type, - params: []Param, - is_var_args: bool, + key: Key, + garbage_node: std.atomic.Stack(*Fn).Node, + + pub const Key = struct { + data: Data, + alignment: ?u32, + + pub const Data = union(enum) { + Generic: Generic, + Normal: Normal, + }; + + pub fn hash(self: *const Key) u32 { + var result: u32 = 0; + result +%= hashAny(self.alignment, 0); + switch (self.data) { + Data.Generic => |generic| { + result +%= hashAny(generic.param_count, 1); + switch (generic.cc) { + CallingConvention.Async => |allocator_type| result +%= hashAny(allocator_type, 2), + else => result +%= hashAny(CallingConvention(generic.cc), 3), + } + }, + Data.Normal => |normal| { + result +%= hashAny(normal.return_type, 4); + result +%= hashAny(normal.is_var_args, 5); + result +%= hashAny(normal.cc, 6); + for (normal.params) |param| { + result +%= hashAny(param.is_noalias, 7); + result +%= hashAny(param.typ, 8); + } + }, + } + return result; + } + + pub fn eql(self: *const Key, other: *const Key) bool { + if ((self.alignment == null) != (other.alignment == null)) return false; + if (self.alignment) |self_align| { + if (self_align != other.alignment.?) 
return false; + } + if (@TagType(Data)(self.data) != @TagType(Data)(other.data)) return false; + switch (self.data) { + Data.Generic => |*self_generic| { + const other_generic = &other.data.Generic; + if (self_generic.param_count != other_generic.param_count) return false; + if (CallingConvention(self_generic.cc) != CallingConvention(other_generic.cc)) return false; + switch (self_generic.cc) { + CallingConvention.Async => |self_allocator_type| { + const other_allocator_type = other_generic.cc.Async; + if (self_allocator_type != other_allocator_type) return false; + }, + else => {}, + } + }, + Data.Normal => |*self_normal| { + const other_normal = &other.data.Normal; + if (self_normal.cc != other_normal.cc) return false; + if (self_normal.is_var_args != other_normal.is_var_args) return false; + if (self_normal.return_type != other_normal.return_type) return false; + for (self_normal.params) |*self_param, i| { + const other_param = &other_normal.params[i]; + if (self_param.is_noalias != other_param.is_noalias) return false; + if (self_param.typ != other_param.typ) return false; + } + }, + } + return true; + } + + pub fn deref(key: Key, comp: *Compilation) void { + switch (key.data) { + Key.Data.Generic => |generic| { + switch (generic.cc) { + CallingConvention.Async => |allocator_type| allocator_type.base.deref(comp), + else => {}, + } + }, + Key.Data.Normal => |normal| { + normal.return_type.base.deref(comp); + for (normal.params) |param| { + param.typ.base.deref(comp); + } + }, + } + } + + pub fn ref(key: Key) void { + switch (key.data) { + Key.Data.Generic => |generic| { + switch (generic.cc) { + CallingConvention.Async => |allocator_type| allocator_type.base.ref(), + else => {}, + } + }, + Key.Data.Normal => |normal| { + normal.return_type.base.ref(); + for (normal.params) |param| { + param.typ.base.ref(); + } + }, + } + } + }; + + pub const Normal = struct { + params: []Param, + return_type: *Type, + is_var_args: bool, + cc: CallingConvention, + }; + + pub const Generic = struct { + param_count: usize, + cc: CC, + + pub const CC = union(CallingConvention) { + Auto, + C, + Cold, + Naked, + Stdcall, + Async: *Type, // allocator type + }; + }; + + pub const CallingConvention = enum { + Auto, + C, + Cold, + Naked, + Stdcall, + Async, + }; pub const Param = struct { is_noalias: bool, typ: *Type, }; - pub fn create(comp: *Compilation, return_type: *Type, params: []Param, is_var_args: bool) !*Fn { - const result = try comp.gpa().create(Fn{ + fn ccFnTypeStr(cc: CallingConvention) []const u8 { + return switch (cc) { + CallingConvention.Auto => "", + CallingConvention.C => "extern ", + CallingConvention.Cold => "coldcc ", + CallingConvention.Naked => "nakedcc ", + CallingConvention.Stdcall => "stdcallcc ", + CallingConvention.Async => unreachable, + }; + } + + pub fn paramCount(self: *Fn) usize { + return switch (self.key.data) { + Key.Data.Generic => |generic| generic.param_count, + Key.Data.Normal => |normal| normal.params.len, + }; + } + + /// takes ownership of key.Normal.params on success + pub async fn get(comp: *Compilation, key: Key) !*Fn { + { + const held = await (async comp.fn_type_table.acquire() catch unreachable); + defer held.release(); + + if (held.value.get(&key)) |entry| { + entry.value.base.base.ref(); + return entry.value; + } + } + + key.ref(); + errdefer key.deref(comp); + + const self = try comp.gpa().create(Fn{ .base = undefined, - .return_type = return_type, - .params = params, - .is_var_args = is_var_args, + .key = key, + .garbage_node = undefined, }); - errdefer 
comp.gpa().destroy(result); + errdefer comp.gpa().destroy(self); - result.base.init(comp, Id.Fn, "TODO fn type name"); + var name_buf = try std.Buffer.initSize(comp.gpa(), 0); + defer name_buf.deinit(); + + const name_stream = &std.io.BufferOutStream.init(&name_buf).stream; + + switch (key.data) { + Key.Data.Generic => |generic| { + switch (generic.cc) { + CallingConvention.Async => |async_allocator_type| { + try name_stream.print("async<{}> ", async_allocator_type.name); + }, + else => { + const cc_str = ccFnTypeStr(generic.cc); + try name_stream.write(cc_str); + }, + } + try name_stream.write("fn("); + var param_i: usize = 0; + while (param_i < generic.param_count) : (param_i += 1) { + const arg = if (param_i == 0) "var" else ", var"; + try name_stream.write(arg); + } + try name_stream.write(")"); + if (key.alignment) |alignment| { + try name_stream.print(" align<{}>", alignment); + } + try name_stream.write(" var"); + }, + Key.Data.Normal => |normal| { + const cc_str = ccFnTypeStr(normal.cc); + try name_stream.print("{}fn(", cc_str); + for (normal.params) |param, i| { + if (i != 0) try name_stream.write(", "); + if (param.is_noalias) try name_stream.write("noalias "); + try name_stream.write(param.typ.name); + } + if (normal.is_var_args) { + if (normal.params.len != 0) try name_stream.write(", "); + try name_stream.write("..."); + } + try name_stream.write(")"); + if (key.alignment) |alignment| { + try name_stream.print(" align<{}>", alignment); + } + try name_stream.print(" {}", normal.return_type.name); + }, + } + + self.base.init(comp, Id.Fn, name_buf.toOwnedSlice()); - result.return_type.base.ref(); - for (result.params) |param| { - param.typ.base.ref(); + { + const held = await (async comp.fn_type_table.acquire() catch unreachable); + defer held.release(); + + _ = try held.value.put(&self.key, self); } - return result; + return self; } pub fn destroy(self: *Fn, comp: *Compilation) void { - self.return_type.base.deref(comp); - for (self.params) |param| { - param.typ.base.deref(comp); - } + self.key.deref(comp); comp.gpa().destroy(self); } pub fn getLlvmType(self: *Fn, allocator: *Allocator, llvm_context: llvm.ContextRef) !llvm.TypeRef { - const llvm_return_type = switch (self.return_type.id) { + const normal = &self.key.data.Normal; + const llvm_return_type = switch (normal.return_type.id) { Type.Id.Void => llvm.VoidTypeInContext(llvm_context) orelse return error.OutOfMemory, - else => try self.return_type.getLlvmType(allocator, llvm_context), + else => try normal.return_type.getLlvmType(allocator, llvm_context), }; - const llvm_param_types = try allocator.alloc(llvm.TypeRef, self.params.len); + const llvm_param_types = try allocator.alloc(llvm.TypeRef, normal.params.len); defer allocator.free(llvm_param_types); for (llvm_param_types) |*llvm_param_type, i| { - llvm_param_type.* = try self.params[i].typ.getLlvmType(allocator, llvm_context); + llvm_param_type.* = try normal.params[i].typ.getLlvmType(allocator, llvm_context); } return llvm.FunctionType( llvm_return_type, llvm_param_types.ptr, @intCast(c_uint, llvm_param_types.len), - @boolToInt(self.is_var_args), + @boolToInt(normal.is_var_args), ) orelse error.OutOfMemory; } }; @@ -347,8 +557,10 @@ pub const Type = struct { is_signed: bool, pub fn hash(self: *const Key) u32 { - const rands = [2]u32{ 0xa4ba6498, 0x75fc5af7 }; - return rands[@boolToInt(self.is_signed)] *% self.bit_count; + var result: u32 = 0; + result +%= hashAny(self.is_signed, 0); + result +%= hashAny(self.bit_count, 1); + return result; } pub fn eql(self: *const 
Key, other: *const Key) bool { @@ -443,15 +655,16 @@ pub const Type = struct { alignment: Align, pub fn hash(self: *const Key) u32 { - const align_hash = switch (self.alignment) { + var result: u32 = 0; + result +%= switch (self.alignment) { Align.Abi => 0xf201c090, - Align.Override => |x| x, + Align.Override => |x| hashAny(x, 0), }; - return hash_usize(@ptrToInt(self.child_type)) *% - hash_enum(self.mut) *% - hash_enum(self.vol) *% - hash_enum(self.size) *% - align_hash; + result +%= hashAny(self.child_type, 1); + result +%= hashAny(self.mut, 2); + result +%= hashAny(self.vol, 3); + result +%= hashAny(self.size, 4); + return result; } pub fn eql(self: *const Key, other: *const Key) bool { @@ -605,7 +818,10 @@ pub const Type = struct { len: usize, pub fn hash(self: *const Key) u32 { - return hash_usize(@ptrToInt(self.elem_type)) *% hash_usize(self.len); + var result: u32 = 0; + result +%= hashAny(self.elem_type, 0); + result +%= hashAny(self.len, 1); + return result; } pub fn eql(self: *const Key, other: *const Key) bool { @@ -818,27 +1034,37 @@ pub const Type = struct { }; }; -fn hash_usize(x: usize) u32 { - return switch (@sizeOf(usize)) { - 4 => x, - 8 => @truncate(u32, x *% 0xad44ee2d8e3fc13d), - else => @compileError("implement this hash function"), - }; -} - -fn hash_enum(x: var) u32 { - const rands = []u32{ - 0x85ebf64f, - 0x3fcb3211, - 0x240a4e8e, - 0x40bb0e3c, - 0x78be45af, - 0x1ca98e37, - 0xec56053a, - 0x906adc48, - 0xd4fe9763, - 0x54c80dac, - }; - comptime assert(@memberCount(@typeOf(x)) < rands.len); - return rands[@enumToInt(x)]; +fn hashAny(x: var, comptime seed: u64) u32 { + switch (@typeInfo(@typeOf(x))) { + builtin.TypeId.Int => |info| { + comptime var rng = comptime std.rand.DefaultPrng.init(seed); + const unsigned_x = @bitCast(@IntType(false, info.bits), x); + if (info.bits <= 32) { + return u32(unsigned_x) *% comptime rng.random.scalar(u32); + } else { + return @truncate(u32, unsigned_x *% comptime rng.random.scalar(@typeOf(unsigned_x))); + } + }, + builtin.TypeId.Pointer => |info| { + switch (info.size) { + builtin.TypeInfo.Pointer.Size.One => return hashAny(@ptrToInt(x), seed), + builtin.TypeInfo.Pointer.Size.Many => @compileError("implement hash function"), + builtin.TypeInfo.Pointer.Size.Slice => @compileError("implement hash function"), + } + }, + builtin.TypeId.Enum => return hashAny(@enumToInt(x), seed), + builtin.TypeId.Bool => { + comptime var rng = comptime std.rand.DefaultPrng.init(seed); + const vals = comptime [2]u32{ rng.random.scalar(u32), rng.random.scalar(u32) }; + return vals[@boolToInt(x)]; + }, + builtin.TypeId.Optional => { + if (x) |non_opt| { + return hashAny(non_opt, seed); + } else { + return hashAny(u32(1), seed); + } + }, + else => @compileError("implement hash function for " ++ @typeName(@typeOf(x))), + } } diff --git a/src/analyze.cpp b/src/analyze.cpp index f399ab8305..a4bfff78c3 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -3941,7 +3941,7 @@ AstNode *get_param_decl_node(FnTableEntry *fn_entry, size_t index) { return nullptr; } -static void define_local_param_variables(CodeGen *g, FnTableEntry *fn_table_entry, VariableTableEntry **arg_vars) { +static void define_local_param_variables(CodeGen *g, FnTableEntry *fn_table_entry) { TypeTableEntry *fn_type = fn_table_entry->type_entry; assert(!fn_type->data.fn.is_generic); FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id; @@ -3979,10 +3979,6 @@ static void define_local_param_variables(CodeGen *g, FnTableEntry *fn_table_entr if (fn_type->data.fn.gen_param_info) { var->gen_arg_index = 
fn_type->data.fn.gen_param_info[i].gen_index; } - - if (arg_vars) { - arg_vars[i] = var; - } } } @@ -4082,7 +4078,7 @@ static void analyze_fn_body(CodeGen *g, FnTableEntry *fn_table_entry) { if (!fn_table_entry->child_scope) fn_table_entry->child_scope = &fn_table_entry->fndef_scope->base; - define_local_param_variables(g, fn_table_entry, nullptr); + define_local_param_variables(g, fn_table_entry); TypeTableEntry *fn_type = fn_table_entry->type_entry; assert(!fn_type->data.fn.is_generic); -- cgit v1.2.3 From 2cbad364c1d23b64ae064f8547590c133b4f070a Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 26 Jul 2018 18:29:07 -0400 Subject: add compile error for ignoring return value of while loop bodies closes #1049 --- src/analyze.cpp | 2 +- src/ir.cpp | 12 +++++++++--- src/ir_print.cpp | 4 ++++ test/compile_errors.zig | 22 ++++++++++++++++++++++ 4 files changed, 36 insertions(+), 4 deletions(-) (limited to 'src/analyze.cpp') diff --git a/src/analyze.cpp b/src/analyze.cpp index a4bfff78c3..aadee29fc8 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -4056,7 +4056,7 @@ void analyze_fn_ir(CodeGen *g, FnTableEntry *fn_table_entry, AstNode *return_typ } if (g->verbose_ir) { - fprintf(stderr, "{ // (analyzed)\n"); + fprintf(stderr, "fn %s() { // (analyzed)\n", buf_ptr(&fn_table_entry->symbol_name)); ir_print(g, stderr, &fn_table_entry->analyzed_executable, 4); fprintf(stderr, "}\n"); } diff --git a/src/ir.cpp b/src/ir.cpp index e40c129953..a6007852e0 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -5251,8 +5251,10 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n if (body_result == irb->codegen->invalid_instruction) return body_result; - if (!instr_is_unreachable(body_result)) + if (!instr_is_unreachable(body_result)) { + ir_mark_gen(ir_build_check_statement_is_void(irb, payload_scope, node->data.while_expr.body, body_result)); ir_mark_gen(ir_build_br(irb, payload_scope, node, continue_block, is_comptime)); + } if (continue_expr_node) { ir_set_cursor_at_end_and_append_block(irb, continue_block); @@ -5331,8 +5333,10 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n if (body_result == irb->codegen->invalid_instruction) return body_result; - if (!instr_is_unreachable(body_result)) + if (!instr_is_unreachable(body_result)) { + ir_mark_gen(ir_build_check_statement_is_void(irb, child_scope, node->data.while_expr.body, body_result)); ir_mark_gen(ir_build_br(irb, child_scope, node, continue_block, is_comptime)); + } if (continue_expr_node) { ir_set_cursor_at_end_and_append_block(irb, continue_block); @@ -5392,8 +5396,10 @@ static IrInstruction *ir_gen_while_expr(IrBuilder *irb, Scope *scope, AstNode *n if (body_result == irb->codegen->invalid_instruction) return body_result; - if (!instr_is_unreachable(body_result)) + if (!instr_is_unreachable(body_result)) { + ir_mark_gen(ir_build_check_statement_is_void(irb, scope, node->data.while_expr.body, body_result)); ir_mark_gen(ir_build_br(irb, scope, node, continue_block, is_comptime)); + } if (continue_expr_node) { ir_set_cursor_at_end_and_append_block(irb, continue_block); diff --git a/src/ir_print.cpp b/src/ir_print.cpp index 6182958d0a..127afa94a5 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -45,6 +45,10 @@ static void ir_print_var_instruction(IrPrint *irp, IrInstruction *instruction) { } static void ir_print_other_instruction(IrPrint *irp, IrInstruction *instruction) { + if (instruction == nullptr) { + fprintf(irp->f, "(null)"); + return; + } if (instruction->value.special != 
ConstValSpecialRuntime) { ir_print_const_value(irp, &instruction->value); } else { diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 83bf715f78..2c4c9208eb 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -1,6 +1,28 @@ const tests = @import("tests.zig"); pub fn addCases(cases: *tests.CompileErrorContext) void { + cases.add( + "while loop body expression ignored", + \\fn returns() usize { + \\ return 2; + \\} + \\export fn f1() void { + \\ while (true) returns(); + \\} + \\export fn f2() void { + \\ var x: ?i32 = null; + \\ while (x) |_| returns(); + \\} + \\export fn f3() void { + \\ var x: error!i32 = error.Bad; + \\ while (x) |_| returns() else |_| unreachable; + \\} + , + ".tmp_source.zig:5:25: error: expression value is ignored", + ".tmp_source.zig:9:26: error: expression value is ignored", + ".tmp_source.zig:13:26: error: expression value is ignored", + ); + cases.add( "missing parameter name of generic function", \\fn dump(var) void {} -- cgit v1.2.3 From b3f4182ca1756ccf84fe5bbc88594a91ead617b5 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 26 Jul 2018 22:26:00 -0400 Subject: coroutines have 3 more bits of atomic state --- src/all_types.hpp | 2 +- src/analyze.cpp | 13 ++++++--- src/ir.cpp | 80 ++++++++++++++++++++++++++++++++++++++----------------- 3 files changed, 66 insertions(+), 29 deletions(-) (limited to 'src/analyze.cpp') diff --git a/src/all_types.hpp b/src/all_types.hpp index bcd6a04cc3..70ea629c59 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -3245,7 +3245,7 @@ static const size_t stack_trace_ptr_count = 30; #define RESULT_FIELD_NAME "result" #define ASYNC_ALLOC_FIELD_NAME "allocFn" #define ASYNC_FREE_FIELD_NAME "freeFn" -#define AWAITER_HANDLE_FIELD_NAME "awaiter_handle" +#define ATOMIC_STATE_FIELD_NAME "atomic_state" // these point to data belonging to the awaiter #define ERR_RET_TRACE_PTR_FIELD_NAME "err_ret_trace_ptr" #define RESULT_PTR_FIELD_NAME "result_ptr" diff --git a/src/analyze.cpp b/src/analyze.cpp index aadee29fc8..74d59f966a 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -519,11 +519,11 @@ TypeTableEntry *get_promise_frame_type(CodeGen *g, TypeTableEntry *return_type) return return_type->promise_frame_parent; } - TypeTableEntry *awaiter_handle_type = get_optional_type(g, g->builtin_types.entry_promise); + TypeTableEntry *atomic_state_type = g->builtin_types.entry_usize; TypeTableEntry *result_ptr_type = get_pointer_to_type(g, return_type, false); ZigList field_names = {}; - field_names.append(AWAITER_HANDLE_FIELD_NAME); + field_names.append(ATOMIC_STATE_FIELD_NAME); field_names.append(RESULT_FIELD_NAME); field_names.append(RESULT_PTR_FIELD_NAME); if (g->have_err_ret_tracing) { @@ -533,7 +533,7 @@ TypeTableEntry *get_promise_frame_type(CodeGen *g, TypeTableEntry *return_type) } ZigList field_types = {}; - field_types.append(awaiter_handle_type); + field_types.append(atomic_state_type); field_types.append(return_type); field_types.append(result_ptr_type); if (g->have_err_ret_tracing) { @@ -6228,7 +6228,12 @@ uint32_t get_abi_alignment(CodeGen *g, TypeTableEntry *type_entry) { } else if (type_entry->id == TypeTableEntryIdOpaque) { return 1; } else { - return LLVMABIAlignmentOfType(g->target_data_ref, type_entry->type_ref); + uint32_t llvm_alignment = LLVMABIAlignmentOfType(g->target_data_ref, type_entry->type_ref); + // promises have at least alignment 8 so that we can have 3 extra bits when doing atomicrmw + if (type_entry->id == TypeTableEntryIdPromise && llvm_alignment < 8) { + return 8; + } 
+ return llvm_alignment; } } diff --git a/src/ir.cpp b/src/ir.cpp index a6007852e0..5466e64e55 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -3097,19 +3097,47 @@ static IrInstruction *ir_gen_async_return(IrBuilder *irb, Scope *scope, AstNode return return_inst; } + IrBasicBlock *canceled_block = ir_create_basic_block(irb, scope, "Canceled"); + IrBasicBlock *not_canceled_block = ir_create_basic_block(irb, scope, "NotCanceled"); + IrBasicBlock *suspended_block = ir_create_basic_block(irb, scope, "Suspended"); + IrBasicBlock *not_suspended_block = ir_create_basic_block(irb, scope, "NotSuspended"); + ir_build_store_ptr(irb, scope, node, irb->exec->coro_result_field_ptr, return_value); - IrInstruction *promise_type_val = ir_build_const_type(irb, scope, node, - get_optional_type(irb->codegen, irb->codegen->builtin_types.entry_promise)); - // TODO replace replacement_value with @intToPtr(?promise, 0x1) when it doesn't crash zig - IrInstruction *replacement_value = irb->exec->coro_handle; - IrInstruction *maybe_await_handle = ir_build_atomic_rmw(irb, scope, node, - promise_type_val, irb->exec->coro_awaiter_field_ptr, nullptr, replacement_value, nullptr, - AtomicRmwOp_xchg, AtomicOrderSeqCst); - ir_build_store_ptr(irb, scope, node, irb->exec->await_handle_var_ptr, maybe_await_handle); - IrInstruction *is_non_null = ir_build_test_nonnull(irb, scope, node, maybe_await_handle); + IrInstruction *usize_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_usize); + IrInstruction *replacement_value = ir_build_const_usize(irb, scope, node, 0xa); // 0b1010 + IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, scope, node, + usize_type_val, irb->exec->coro_awaiter_field_ptr, nullptr, replacement_value, nullptr, + AtomicRmwOp_or, AtomicOrderSeqCst); + + IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0); IrInstruction *is_comptime = ir_build_const_bool(irb, scope, node, false); - return ir_build_cond_br(irb, scope, node, is_non_null, irb->exec->coro_normal_final, irb->exec->coro_early_final, - is_comptime); + IrInstruction *is_canceled_mask = ir_build_const_usize(irb, scope, node, 0x1); // 0b001 + IrInstruction *is_canceled_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, is_canceled_mask, false); + IrInstruction *is_canceled_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_canceled_value, zero, false); + ir_build_cond_br(irb, scope, node, is_canceled_bool, canceled_block, not_canceled_block, is_comptime); + + ir_set_cursor_at_end_and_append_block(irb, canceled_block); + ir_mark_gen(ir_build_br(irb, scope, node, irb->exec->coro_final_cleanup_block, is_comptime)); + + ir_set_cursor_at_end_and_append_block(irb, not_canceled_block); + IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, scope, node, 0x7); // 0b111 + IrInstruction *is_suspended_value = ir_build_bin_op(irb, scope, node, IrBinOpBinAnd, prev_atomic_value, inverted_ptr_mask, false); + IrInstruction *is_suspended_bool = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, is_suspended_value, zero, false); + ir_build_cond_br(irb, scope, node, is_suspended_bool, suspended_block, not_suspended_block, is_comptime); + + ir_set_cursor_at_end_and_append_block(irb, suspended_block); + ir_build_unreachable(irb, scope, node); + + ir_set_cursor_at_end_and_append_block(irb, not_suspended_block); + IrInstruction *ptr_mask = ir_build_un_op(irb, scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000 + IrInstruction *await_handle_addr = ir_build_bin_op(irb, scope, 
node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false); + IrInstruction *promise_type_val = ir_build_const_type(irb, scope, node, irb->codegen->builtin_types.entry_promise); + // if we ever add null checking safety to the ptrtoint instruction, it needs to be disabled here + IrInstruction *await_handle = ir_build_int_to_ptr(irb, scope, node, promise_type_val, await_handle_addr); + ir_build_store_ptr(irb, scope, node, irb->exec->await_handle_var_ptr, await_handle); + IrInstruction *is_non_null = ir_build_bin_op(irb, scope, node, IrBinOpCmpNotEq, await_handle_addr, zero, false); + return ir_build_cond_br(irb, scope, node, is_non_null, irb->exec->coro_normal_final, + irb->exec->coro_early_final, is_comptime); // the above blocks are rendered by ir_gen after the rest of codegen } @@ -6708,9 +6736,9 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *parent_scope, Ast ir_build_store_ptr(irb, parent_scope, node, err_ret_trace_ptr_field_ptr, err_ret_trace_ptr); } - Buf *awaiter_handle_field_name = buf_create_from_str(AWAITER_HANDLE_FIELD_NAME); - IrInstruction *awaiter_field_ptr = ir_build_field_ptr(irb, parent_scope, node, coro_promise_ptr, - awaiter_handle_field_name); + Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME); + IrInstruction *atomic_state_ptr = ir_build_field_ptr(irb, parent_scope, node, coro_promise_ptr, + atomic_state_field_name); IrInstruction *const_bool_false = ir_build_const_bool(irb, parent_scope, node, false); VariableTableEntry *result_var = ir_create_var(irb, node, parent_scope, nullptr, @@ -6723,12 +6751,16 @@ static IrInstruction *ir_gen_await_expr(IrBuilder *irb, Scope *parent_scope, Ast IrInstruction *my_result_var_ptr = ir_build_var_ptr(irb, parent_scope, node, result_var); ir_build_store_ptr(irb, parent_scope, node, result_ptr_field_ptr, my_result_var_ptr); IrInstruction *save_token = ir_build_coro_save(irb, parent_scope, node, irb->exec->coro_handle); - IrInstruction *promise_type_val = ir_build_const_type(irb, parent_scope, node, - get_optional_type(irb->codegen, irb->codegen->builtin_types.entry_promise)); - IrInstruction *maybe_await_handle = ir_build_atomic_rmw(irb, parent_scope, node, - promise_type_val, awaiter_field_ptr, nullptr, irb->exec->coro_handle, nullptr, - AtomicRmwOp_xchg, AtomicOrderSeqCst); - IrInstruction *is_non_null = ir_build_test_nonnull(irb, parent_scope, node, maybe_await_handle); + IrInstruction *usize_type_val = ir_build_const_type(irb, parent_scope, node, irb->codegen->builtin_types.entry_usize); + IrInstruction *coro_handle_addr = ir_build_ptr_to_int(irb, parent_scope, node, irb->exec->coro_handle); + IrInstruction *prev_atomic_value = ir_build_atomic_rmw(irb, parent_scope, node, + usize_type_val, atomic_state_ptr, nullptr, coro_handle_addr, nullptr, + AtomicRmwOp_or, AtomicOrderSeqCst); + IrInstruction *zero = ir_build_const_usize(irb, parent_scope, node, 0); + IrInstruction *inverted_ptr_mask = ir_build_const_usize(irb, parent_scope, node, 0x7); // 0b111 + IrInstruction *ptr_mask = ir_build_un_op(irb, parent_scope, node, IrUnOpBinNot, inverted_ptr_mask); // 0b111...000 + IrInstruction *await_handle_addr = ir_build_bin_op(irb, parent_scope, node, IrBinOpBinAnd, prev_atomic_value, ptr_mask, false); + IrInstruction *is_non_null = ir_build_bin_op(irb, parent_scope, node, IrBinOpCmpNotEq, await_handle_addr, zero, false); IrBasicBlock *yes_suspend_block = ir_create_basic_block(irb, parent_scope, "YesSuspend"); IrBasicBlock *no_suspend_block = ir_create_basic_block(irb, parent_scope, "NoSuspend"); 
IrBasicBlock *merge_block = ir_create_basic_block(irb, parent_scope, "MergeSuspend"); @@ -7087,10 +7119,11 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec IrInstruction *coro_mem_ptr = ir_build_ptr_cast(irb, coro_scope, node, u8_ptr_type, maybe_coro_mem_ptr); irb->exec->coro_handle = ir_build_coro_begin(irb, coro_scope, node, coro_id, coro_mem_ptr); - Buf *awaiter_handle_field_name = buf_create_from_str(AWAITER_HANDLE_FIELD_NAME); + Buf *atomic_state_field_name = buf_create_from_str(ATOMIC_STATE_FIELD_NAME); irb->exec->coro_awaiter_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, - awaiter_handle_field_name); - ir_build_store_ptr(irb, scope, node, irb->exec->coro_awaiter_field_ptr, null_value); + atomic_state_field_name); + IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0); + ir_build_store_ptr(irb, scope, node, irb->exec->coro_awaiter_field_ptr, zero); Buf *result_field_name = buf_create_from_str(RESULT_FIELD_NAME); irb->exec->coro_result_field_ptr = ir_build_field_ptr(irb, scope, node, coro_promise_ptr, result_field_name); result_ptr_field_name = buf_create_from_str(RESULT_PTR_FIELD_NAME); @@ -7108,7 +7141,6 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec // coordinate with builtin.zig Buf *index_name = buf_create_from_str("index"); IrInstruction *index_ptr = ir_build_field_ptr(irb, scope, node, err_ret_trace_ptr, index_name); - IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0); ir_build_store_ptr(irb, scope, node, index_ptr, zero); Buf *instruction_addresses_name = buf_create_from_str("instruction_addresses"); -- cgit v1.2.3 From 02c5bda704d30e95e6af23804f9a552e9d8ca2d7 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 27 Jul 2018 17:27:03 -0400 Subject: remove ability to break from suspend blocks closes #803 --- doc/langref.html.in | 2 +- src/all_types.hpp | 2 -- src/analyze.cpp | 1 - src/ir.cpp | 17 ++--------------- src/parser.cpp | 25 ++----------------------- 5 files changed, 5 insertions(+), 42 deletions(-) (limited to 'src/analyze.cpp') diff --git a/doc/langref.html.in b/doc/langref.html.in index 60ba09d391..d91fb6e8fb 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -7336,7 +7336,7 @@ Defer(body) = ("defer" | "deferror") body IfExpression(body) = "if" "(" Expression ")" body option("else" BlockExpression(body)) -SuspendExpression(body) = option(Symbol ":") "suspend" option(("|" Symbol "|" body)) +SuspendExpression(body) = "suspend" option(("|" Symbol "|" body)) IfErrorExpression(body) = "if" "(" Expression ")" option("|" option("*") Symbol "|") body "else" "|" Symbol "|" BlockExpression(body) diff --git a/src/all_types.hpp b/src/all_types.hpp index 3ac7afe474..2f09e70301 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -898,7 +898,6 @@ struct AstNodeAwaitExpr { }; struct AstNodeSuspend { - Buf *name; AstNode *block; AstNode *promise_symbol; }; @@ -1929,7 +1928,6 @@ struct ScopeLoop { struct ScopeSuspend { Scope base; - Buf *name; IrBasicBlock *resume_block; bool reported_err; }; diff --git a/src/analyze.cpp b/src/analyze.cpp index 74d59f966a..03cfa5b67b 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -161,7 +161,6 @@ ScopeSuspend *create_suspend_scope(AstNode *node, Scope *parent) { assert(node->type == NodeTypeSuspend); ScopeSuspend *scope = allocate(1); init_scope(&scope->base, ScopeIdSuspend, node, parent); - scope->name = node->data.suspend.name; return scope; } diff --git a/src/ir.cpp b/src/ir.cpp index 
cd791fb189..799d7e3bc5 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -6186,15 +6186,6 @@ static IrInstruction *ir_gen_return_from_block(IrBuilder *irb, Scope *break_scop return ir_build_br(irb, break_scope, node, dest_block, is_comptime); } -static IrInstruction *ir_gen_break_from_suspend(IrBuilder *irb, Scope *break_scope, AstNode *node, ScopeSuspend *suspend_scope) { - IrInstruction *is_comptime = ir_build_const_bool(irb, break_scope, node, false); - - IrBasicBlock *dest_block = suspend_scope->resume_block; - ir_gen_defers_for_block(irb, break_scope, dest_block->scope, false); - - return ir_build_br(irb, break_scope, node, dest_block, is_comptime); -} - static IrInstruction *ir_gen_break(IrBuilder *irb, Scope *break_scope, AstNode *node) { assert(node->type == NodeTypeBreak); @@ -6235,12 +6226,8 @@ static IrInstruction *ir_gen_break(IrBuilder *irb, Scope *break_scope, AstNode * return ir_gen_return_from_block(irb, break_scope, node, this_block_scope); } } else if (search_scope->id == ScopeIdSuspend) { - ScopeSuspend *this_suspend_scope = (ScopeSuspend *)search_scope; - if (node->data.break_expr.name != nullptr && - (this_suspend_scope->name != nullptr && buf_eql_buf(node->data.break_expr.name, this_suspend_scope->name))) - { - return ir_gen_break_from_suspend(irb, break_scope, node, this_suspend_scope); - } + add_node_error(irb->codegen, node, buf_sprintf("cannot break out of suspend block")); + return irb->codegen->invalid_instruction; } search_scope = search_scope->parent; } diff --git a/src/parser.cpp b/src/parser.cpp index adb1633f5d..a93d8de830 100644 --- a/src/parser.cpp +++ b/src/parser.cpp @@ -648,30 +648,12 @@ static AstNode *ast_parse_asm_expr(ParseContext *pc, size_t *token_index, bool m } /* -SuspendExpression(body) = option(Symbol ":") "suspend" option(("|" Symbol "|" body)) +SuspendExpression(body) = "suspend" option(("|" Symbol "|" body)) */ static AstNode *ast_parse_suspend_block(ParseContext *pc, size_t *token_index, bool mandatory) { size_t orig_token_index = *token_index; - Token *name_token = nullptr; - Token *token = &pc->tokens->at(*token_index); - if (token->id == TokenIdSymbol) { - *token_index += 1; - Token *colon_token = &pc->tokens->at(*token_index); - if (colon_token->id == TokenIdColon) { - *token_index += 1; - name_token = token; - token = &pc->tokens->at(*token_index); - } else if (mandatory) { - ast_expect_token(pc, colon_token, TokenIdColon); - zig_unreachable(); - } else { - *token_index = orig_token_index; - return nullptr; - } - } - - Token *suspend_token = token; + Token *suspend_token = &pc->tokens->at(*token_index); if (suspend_token->id == TokenIdKeywordSuspend) { *token_index += 1; } else if (mandatory) { @@ -693,9 +675,6 @@ static AstNode *ast_parse_suspend_block(ParseContext *pc, size_t *token_index, b } AstNode *node = ast_create_node(pc, NodeTypeSuspend, suspend_token); - if (name_token != nullptr) { - node->data.suspend.name = token_buf(name_token); - } node->data.suspend.promise_symbol = ast_parse_symbol(pc, token_index); ast_eat_token(pc, token_index, TokenIdBinOr); node->data.suspend.block = ast_parse_block(pc, token_index, true); -- cgit v1.2.3
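For context on the suspend change in the last patch above: under the removed grammar a suspend block could be labeled ("option(Symbol \":\") \"suspend\"") and exited with a labeled break that jumped to the suspend scope's resume block; after this patch the parser no longer accepts the label, and ir_gen_break reports an error for any break that crosses a suspend scope. The sketch below is not part of the patch series — it only illustrates the shape of code the new check rejects. The async-fn and |p| capture syntax is an assumption based on the 2018-era coroutine grammar; the error text is taken from the new add_node_error call.

    // Hedged sketch (not from the patch series): a break inside a suspend
    // block is now rejected by ir_gen_break with the new diagnostic.
    // The `async fn` / `suspend |p|` syntax is assumed from the era's grammar.
    async fn func() void {
        suspend |p| {
            // Previously `s: suspend |p| { break :s; }` could jump to the
            // suspend's resume block; that labeled form was removed from
            // the parser in this commit.
            break; // error: cannot break out of suspend block
        }
    }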