From 3178528335cb5efbf237cecb9ea9eb3bfa31b21f Mon Sep 17 00:00:00 2001
From: Jimmi Holst Christensen
Date: Sat, 28 Apr 2018 14:05:08 +0200
Subject: Removed zero-sized error set optimization

fixes #762
fixes #818
---
 src/ir.cpp | 14 ++++----------
 1 file changed, 4 insertions(+), 10 deletions(-)

(limited to 'src/ir.cpp')

diff --git a/src/ir.cpp b/src/ir.cpp
index 86c77758b2..ec7f41d748 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -6166,16 +6166,10 @@ static IrInstruction *ir_gen_err_set_decl(IrBuilder *irb, Scope *parent_scope, A
     buf_init_from_buf(&err_set_type->name, type_name);
     err_set_type->is_copyable = true;
     err_set_type->data.error_set.err_count = err_count;
-
-    if (err_count == 0) {
-        err_set_type->zero_bits = true;
-        err_set_type->di_type = irb->codegen->builtin_types.entry_void->di_type;
-    } else {
-        err_set_type->type_ref = irb->codegen->builtin_types.entry_global_error_set->type_ref;
-        err_set_type->di_type = irb->codegen->builtin_types.entry_global_error_set->di_type;
-        irb->codegen->error_di_types.append(&err_set_type->di_type);
-        err_set_type->data.error_set.errors = allocate<ErrorTableEntry *>(err_count);
-    }
+    err_set_type->type_ref = irb->codegen->builtin_types.entry_global_error_set->type_ref;
+    err_set_type->di_type = irb->codegen->builtin_types.entry_global_error_set->di_type;
+    irb->codegen->error_di_types.append(&err_set_type->di_type);
+    err_set_type->data.error_set.errors = allocate<ErrorTableEntry *>(err_count);
 
     ErrorTableEntry **errors = allocate<ErrorTableEntry *>(irb->codegen->errors_by_index.length + err_count);
--
cgit v1.2.3
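The branch deleted above gave empty error sets (err_count == 0) a zero-bit, void-like representation; after the patch, every error set shares the global error set's LLVM type. Below is a minimal sketch of the kind of code this affects, reusing the foo3 example that appears as context in test/cases/error.zig in the next patch; reading issues #762 and #818 as miscompiles of this shape is an inference from the "fixes" lines, not stated in the patch itself.

    const Error = error{};

    // Error!usize previously mixed a zero-bit error set into an error union;
    // with this patch it lowers like any other error union.
    fn foo3(b: usize) Error!usize {
        return b;
    }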
From 2fc34eaa581cc31827e978fbd973bf36d2c647e2 Mon Sep 17 00:00:00 2001
From: Jimmi Holst Christensen
Date: Sat, 28 Apr 2018 16:27:31 +0200
Subject: Functions with inferred error set can now return literals

fixes #852
---
 src/analyze.cpp      |  1 -
 src/ir.cpp           | 36 +++++++++++++++++++-----------------
 test/cases/error.zig | 39 +++++++++++++++++++++++++++++++++++++++
 3 files changed, 58 insertions(+), 18 deletions(-)

(limited to 'src/ir.cpp')

diff --git a/src/analyze.cpp b/src/analyze.cpp
index a598d7676e..11715220c7 100644
--- a/src/analyze.cpp
+++ b/src/analyze.cpp
@@ -6131,4 +6131,3 @@ bool type_can_fail(TypeTableEntry *type_entry) {
 bool fn_type_can_fail(FnTypeId *fn_type_id) {
     return type_can_fail(fn_type_id->return_type) || fn_type_id->cc == CallingConventionAsync;
 }
-

diff --git a/src/ir.cpp b/src/ir.cpp
index ec7f41d748..d8156b214e 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -8111,7 +8111,7 @@ static void update_errors_helper(CodeGen *g, ErrorTableEntry ***errors, size_t *
     *errors = reallocate(*errors, old_errors_count, *errors_count);
 }
 
-static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_node, IrInstruction **instructions, size_t instruction_count) {
+static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_node, TypeTableEntry *expected_type, IrInstruction **instructions, size_t instruction_count) {
     assert(instruction_count >= 1);
     IrInstruction *prev_inst = instructions[0];
     if (type_is_invalid(prev_inst->value.type)) {
@@ -8158,16 +8158,6 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
             continue;
         }
 
-        if (prev_type->id == TypeTableEntryIdNullLit) {
-            prev_inst = cur_inst;
-            continue;
-        }
-
-        if (cur_type->id == TypeTableEntryIdNullLit) {
-            any_are_null = true;
-            continue;
-        }
-
         if (prev_type->id == TypeTableEntryIdErrorSet) {
             assert(err_set_type != nullptr);
             if (cur_type->id == TypeTableEntryIdErrorSet) {
@@ -8427,6 +8417,16 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
             }
         }
 
+        if (prev_type->id == TypeTableEntryIdNullLit) {
+            prev_inst = cur_inst;
+            continue;
+        }
+
+        if (cur_type->id == TypeTableEntryIdNullLit) {
+            any_are_null = true;
+            continue;
+        }
+
         if (types_match_const_cast_only(ira, prev_type, cur_type, source_node).id == ConstCastResultIdOk) {
             continue;
         }
@@ -8610,6 +8610,10 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
     } else if (err_set_type != nullptr) {
         if (prev_inst->value.type->id == TypeTableEntryIdErrorSet) {
             return err_set_type;
+        } else if (prev_inst->value.type->id == TypeTableEntryIdErrorUnion) {
+            return get_error_union_type(ira->codegen, err_set_type, prev_inst->value.type->data.error_union.payload_type);
+        } else if (expected_type != nullptr && expected_type->id == TypeTableEntryIdErrorUnion) {
+            return get_error_union_type(ira->codegen, err_set_type, expected_type->data.error_union.payload_type);
         } else {
             if (prev_inst->value.type->id == TypeTableEntryIdNumLitInt ||
                 prev_inst->value.type->id == TypeTableEntryIdNumLitFloat)
@@ -8621,8 +8625,6 @@ static TypeTableEntry *ir_resolve_peer_types(IrAnalyze *ira, AstNode *source_nod
                 ir_add_error_node(ira, source_node,
                     buf_sprintf("unable to make error union out of null literal"));
                 return ira->codegen->builtin_types.entry_invalid;
-            } else if (prev_inst->value.type->id == TypeTableEntryIdErrorUnion) {
-                return get_error_union_type(ira->codegen, err_set_type, prev_inst->value.type->data.error_union.payload_type);
             } else {
                 return get_error_union_type(ira->codegen, err_set_type, prev_inst->value.type);
             }
@@ -10645,7 +10647,7 @@ static TypeTableEntry *ir_analyze_bin_op_cmp(IrAnalyze *ira, IrInstructionBinOp
     }
 
     IrInstruction *instructions[] = {op1, op2};
-    TypeTableEntry *resolved_type = ir_resolve_peer_types(ira, source_node, instructions, 2);
+    TypeTableEntry *resolved_type = ir_resolve_peer_types(ira, source_node, nullptr, instructions, 2);
     if (type_is_invalid(resolved_type))
         return resolved_type;
     type_ensure_zero_bits_known(ira->codegen, resolved_type);
@@ -11035,7 +11037,7 @@ static TypeTableEntry *ir_analyze_bin_op_math(IrAnalyze *ira, IrInstructionBinOp
     IrInstruction *op1 = bin_op_instruction->op1->other;
     IrInstruction *op2 = bin_op_instruction->op2->other;
     IrInstruction *instructions[] = {op1, op2};
-    TypeTableEntry *resolved_type = ir_resolve_peer_types(ira, bin_op_instruction->base.source_node, instructions, 2);
+    TypeTableEntry *resolved_type = ir_resolve_peer_types(ira, bin_op_instruction->base.source_node, nullptr, instructions, 2);
     if (type_is_invalid(resolved_type))
         return resolved_type;
     IrBinOp op_id = bin_op_instruction->op_id;
@@ -13004,7 +13006,7 @@ static TypeTableEntry *ir_analyze_instruction_phi(IrAnalyze *ira, IrInstructionP
         return first_value->value.type;
     }
 
-    TypeTableEntry *resolved_type = ir_resolve_peer_types(ira, phi_instruction->base.source_node,
+    TypeTableEntry *resolved_type = ir_resolve_peer_types(ira, phi_instruction->base.source_node, nullptr,
         new_incoming_values.items, new_incoming_values.length);
     if (type_is_invalid(resolved_type))
         return resolved_type;
@@ -18696,7 +18698,7 @@ TypeTableEntry *ir_analyze(CodeGen *codegen, IrExecutable *old_exec, IrExecutabl
     } else if (ira->src_implicit_return_type_list.length == 0) {
         return codegen->builtin_types.entry_unreachable;
     } else {
-        return ir_resolve_peer_types(ira, expected_type_source_node, ira->src_implicit_return_type_list.items,
+        return ir_resolve_peer_types(ira, expected_type_source_node, expected_type, ira->src_implicit_return_type_list.items,
                 ira->src_implicit_return_type_list.length);
     }
 }

diff --git a/test/cases/error.zig b/test/cases/error.zig
index c64c835fc4..2a1433df5b 100644
--- a/test/cases/error.zig
+++ b/test/cases/error.zig
@@ -202,3 +202,42 @@ const Error = error{};
 fn foo3(b: usize) Error!usize {
     return b;
 }
+
+
+test "error: Infer error set from literals" {
+    _ = nullLiteral("n") catch |err| handleErrors(err);
+    _ = floatLiteral("n") catch |err| handleErrors(err);
+    _ = intLiteral("n") catch |err| handleErrors(err);
+    _ = comptime nullLiteral("n") catch |err| handleErrors(err);
+    _ = comptime floatLiteral("n") catch |err| handleErrors(err);
+    _ = comptime intLiteral("n") catch |err| handleErrors(err);
+}
+
+fn handleErrors(err: var) noreturn {
+    switch (err) {
+        error.T => {}
+    }
+
+    unreachable;
+}
+
+fn nullLiteral(str: []const u8) !?i64 {
+    if (str[0] == 'n')
+        return null;
+
+    return error.T;
+}
+
+fn floatLiteral(str: []const u8) !?f64 {
+    if (str[0] == 'n')
+        return 1.0;
+
+    return error.T;
+}
+
+fn intLiteral(str: []const u8) !?i64 {
+    if (str[0] == 'n')
+        return 1;
+
+    return error.T;
+}
--
cgit v1.2.3

From fba0347ec43fb5c06b5ac9bec541b740d95194fe Mon Sep 17 00:00:00 2001
From: Jimmi Holst Christensen
Date: Sat, 28 Apr 2018 17:17:48 +0200
Subject: .ReturnType and @ArgType now emit errors on unresolved types

related: #846
---
 src/ir.cpp              | 19 +++++++++++++++++++
 test/compile_errors.zig | 17 +++++++++++++++++
 2 files changed, 36 insertions(+)

(limited to 'src/ir.cpp')

diff --git a/src/ir.cpp b/src/ir.cpp
index d8156b214e..641b8fc30c 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -13859,6 +13859,15 @@ static TypeTableEntry *ir_analyze_instruction_field_ptr(IrAnalyze *ira, IrInstru
         }
     } else if (child_type->id == TypeTableEntryIdFn) {
         if (buf_eql_str(field_name, "ReturnType")) {
+            if (child_type->data.fn.fn_type_id.return_type == nullptr) {
+                // Return type can only ever be null, if the function is generic
+                assert(child_type->data.fn.is_generic);
+
+                ir_add_error(ira, &field_ptr_instruction->base,
+                    buf_sprintf("ReturnType has not been resolved because '%s' is generic", buf_ptr(&child_type->name)));
+                return ira->codegen->builtin_types.entry_invalid;
+            }
+
             bool ptr_is_const = true;
             bool ptr_is_volatile = false;
             return ir_analyze_const_ptr(ira, &field_ptr_instruction->base,
@@ -17860,6 +17869,16 @@ static TypeTableEntry *ir_analyze_instruction_arg_type(IrAnalyze *ira, IrInstruc
 
     ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base);
     out_val->data.x_type = fn_type_id->param_info[arg_index].type;
+    if (out_val->data.x_type == nullptr) {
+        // Args are only unresolved if our function is generic.
+        assert(fn_type->data.fn.is_generic);
+
+        ir_add_error(ira, arg_index_inst,
+            buf_sprintf("@ArgType could not resolve the type of arg %" ZIG_PRI_usize " because '%s' is generic",
+                arg_index, buf_ptr(&fn_type->name)));
+        return ira->codegen->builtin_types.entry_invalid;
+    }
+
     return ira->codegen->builtin_types.entry_type;
 }
 
diff --git a/test/compile_errors.zig b/test/compile_errors.zig
index f8febc27b8..52e063eb39 100644
--- a/test/compile_errors.zig
+++ b/test/compile_errors.zig
@@ -3209,4 +3209,21 @@ pub fn addCases(cases: &tests.CompileErrorContext) void {
         \\}
     ,
         ".tmp_source.zig:5:42: error: zero-bit field 'val' in struct 'Empty' has no offset");
+
+    cases.add("getting return type of generic function",
+        \\fn generic(a: var) void {}
+        \\comptime {
+        \\    _ = @typeOf(generic).ReturnType;
+        \\}
+    ,
+        ".tmp_source.zig:3:25: error: ReturnType has not been resolved because 'fn(var)var' is generic");
+
+    cases.add("getting @ArgType of generic function",
+        \\fn generic(a: var) void {}
+        \\comptime {
+        \\    _ = @ArgType(@typeOf(generic), 0);
+        \\}
+    ,
+        ".tmp_source.zig:3:36: error: @ArgType could not resolve the type of arg 0 because 'fn(var)var' is generic");
 }
--
cgit v1.2.3

From 341f8c1e8680aa3cfbeba6833f85df00355a95ef Mon Sep 17 00:00:00 2001
From: Jimmi Holst Christensen
Date: Sat, 28 Apr 2018 17:57:47 +0200
Subject: Fixed wrong formatting for arg_index when reporting @ArgType error

---
 src/ir.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'src/ir.cpp')

diff --git a/src/ir.cpp b/src/ir.cpp
index 641b8fc30c..4bf8240472 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -17874,8 +17874,8 @@ static TypeTableEntry *ir_analyze_instruction_arg_type(IrAnalyze *ira, IrInstruc
         assert(fn_type->data.fn.is_generic);
 
         ir_add_error(ira, arg_index_inst,
-            buf_sprintf("@ArgType could not resolve the type of arg %" ZIG_PRI_usize " because '%s' is generic",
-                arg_index, buf_ptr(&fn_type->name)));
+            buf_sprintf("@ArgType could not resolve the type of arg %" ZIG_PRI_u64 " because '%s' is generic",
+                arg_index, buf_ptr(&fn_type->name)));
         return ira->codegen->builtin_types.entry_invalid;
     }
 
--
cgit v1.2.3
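Both new checks only fire for generic signatures, where the parameter or return type is genuinely unresolved at the time of the lookup. For contrast, a small sketch of the resolved case that keeps working, using the same 2018-era builtins the compile-error tests above exercise; the add function is illustrative, not from the patches:

    const assert = @import("std").debug.assert;

    fn add(a: i32, b: i32) i32 {
        return a + b;
    }

    comptime {
        // Fully resolved fn type: both lookups yield types instead of erroring.
        assert(@typeOf(add).ReturnType == i32);
        assert(@ArgType(@typeOf(add), 0) == i32);
    }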
From 96ecb402590df7a02526009f1630f27e14a0e77c Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 28 Apr 2018 17:53:06 -0400
Subject: add fuzz tests for std.atomic.Stack

---
 src/ir.cpp           |  5 +++
 std/atomic/stack.zig | 87 +++++++++++++++++++++++++++++++++++++++++++++++++---
 std/heap.zig         | 64 +++++++++++++++++++++++++++++++++-----
 3 files changed, 143 insertions(+), 13 deletions(-)

(limited to 'src/ir.cpp')

diff --git a/src/ir.cpp b/src/ir.cpp
index 4bf8240472..469900bf07 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -18184,6 +18184,11 @@ static TypeTableEntry *ir_analyze_instruction_atomic_rmw(IrAnalyze *ira, IrInstr
     } else {
         if (!ir_resolve_atomic_order(ira, instruction->ordering->other, &ordering))
             return ira->codegen->builtin_types.entry_invalid;
+        if (ordering == AtomicOrderUnordered) {
+            ir_add_error(ira, instruction->ordering,
+                buf_sprintf("@atomicRmw atomic ordering must not be Unordered"));
+            return ira->codegen->builtin_types.entry_invalid;
+        }
     }
 
     if (instr_is_comptime(casted_operand) && instr_is_comptime(casted_ptr) && casted_ptr->value.data.x_ptr.mut == ConstPtrMutComptimeVar)

diff --git a/std/atomic/stack.zig b/std/atomic/stack.zig
index 4ceecb7b1d..a1e686155c 100644
--- a/std/atomic/stack.zig
+++ b/std/atomic/stack.zig
@@ -1,3 +1,6 @@
+const builtin = @import("builtin");
+const AtomicOrder = builtin.AtomicOrder;
+
 /// Many reader, many writer, non-allocating, thread-safe, lock-free
 pub fn Stack(comptime T: type) type {
     return struct {
@@ -20,26 +23,100 @@ pub fn Stack(comptime T: type) type {
         /// being the first item in the stack, returns the other item that was there.
         pub fn pushFirst(self: &Self, node: &Node) ?&Node {
             node.next = null;
-            return @cmpxchgStrong(?&Node, &self.root, null, node, AtomicOrder.AcqRel, AtomicOrder.AcqRel);
+            return @cmpxchgStrong(?&Node, &self.root, null, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst);
         }
 
         pub fn push(self: &Self, node: &Node) void {
-            var root = @atomicLoad(?&Node, &self.root, AtomicOrder.Acquire);
+            var root = @atomicLoad(?&Node, &self.root, AtomicOrder.SeqCst);
             while (true) {
                 node.next = root;
-                root = @cmpxchgWeak(?&Node, &self.root, root, node, AtomicOrder.Release, AtomicOrder.Acquire) ?? break;
+                root = @cmpxchgWeak(?&Node, &self.root, root, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? break;
             }
         }
 
         pub fn pop(self: &Self) ?&Node {
             var root = @atomicLoad(?&Node, &self.root, AtomicOrder.Acquire);
             while (true) {
-                root = @cmpxchgWeak(?&Node, &self.root, root, (root ?? return null).next, AtomicOrder.Release, AtomicOrder.Acquire) ?? return root;
+                root = @cmpxchgWeak(?&Node, &self.root, root, (root ?? return null).next, AtomicOrder.SeqCst, AtomicOrder.SeqCst) ?? return root;
             }
         }
 
         pub fn isEmpty(self: &Self) bool {
-            return @atomicLoad(?&Node, &self.root, AtomicOrder.Relaxed) == null;
+            return @atomicLoad(?&Node, &self.root, AtomicOrder.SeqCst) == null;
         }
     };
 }
+
+const std = @import("std");
+const Context = struct {
+    allocator: &std.mem.Allocator,
+    stack: &Stack(i32),
+    put_sum: isize,
+    get_sum: isize,
+    puts_done: u8, // TODO make this a bool
+};
+const puts_per_thread = 1000;
+const put_thread_count = 3;
+
+test "std.atomic.stack" {
+    var direct_allocator = std.heap.DirectAllocator.init();
+    defer direct_allocator.deinit();
+
+    var plenty_of_memory = try direct_allocator.allocator.alloc(u8, 64 * 1024 * 1024);
+    defer direct_allocator.allocator.free(plenty_of_memory);
+
+    var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
+    var a = &fixed_buffer_allocator.allocator;
+
+    var stack = Stack(i32).init();
+    var context = Context {
+        .allocator = a,
+        .stack = &stack,
+        .put_sum = 0,
+        .get_sum = 0,
+        .puts_done = 0,
+    };
+
+    var putters: [put_thread_count]&std.os.Thread = undefined;
+    for (putters) |*t| {
+        *t = try std.os.spawnThreadAllocator(a, &context, startPuts);
+    }
+    var getters: [put_thread_count]&std.os.Thread = undefined;
+    for (getters) |*t| {
+        *t = try std.os.spawnThreadAllocator(a, &context, startGets);
+    }
+
+    for (putters) |t| t.wait();
+    _ = @atomicRmw(u8, &context.puts_done, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+    for (getters) |t| t.wait();
+
+    std.debug.assert(context.put_sum == context.get_sum);
+}
+
+fn startPuts(ctx: &Context) u8 {
+    var put_count: usize = puts_per_thread;
+    var r = std.rand.DefaultPrng.init(0xdeadbeef);
+    while (put_count != 0) : (put_count -= 1) {
+        std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
+        const x = @bitCast(i32, r.random.scalar(u32));
+        const node = ctx.allocator.create(Stack(i32).Node) catch unreachable;
+        node.data = x;
+        ctx.stack.push(node);
+        _ = @atomicRmw(isize, &ctx.put_sum, builtin.AtomicRmwOp.Add, x, AtomicOrder.SeqCst);
+    }
+    return 0;
+}
+
+fn startGets(ctx: &Context) u8 {
+    while (true) {
+        while (ctx.stack.pop()) |node| {
+            std.os.time.sleep(0, 1); // let the os scheduler be our fuzz
+            _ = @atomicRmw(isize, &ctx.get_sum, builtin.AtomicRmwOp.Add, node.data, builtin.AtomicOrder.SeqCst);
+        }
+
+        if (@atomicLoad(u8, &ctx.puts_done, builtin.AtomicOrder.SeqCst) == 1) {
+            break;
+        }
+    }
+    return 0;
+}

diff --git a/std/heap.zig b/std/heap.zig
index b3a1e6bf27..d632b44cd1 100644
--- a/std/heap.zig
+++ b/std/heap.zig
@@ -47,13 +47,6 @@ pub const DirectAllocator = struct {
 
     const HeapHandle = if (builtin.os == Os.windows) os.windows.HANDLE else void;
 
-    //pub const canary_bytes = []u8 {48, 239, 128, 46, 18, 49, 147, 9, 195, 59, 203, 3, 245, 54, 9, 122};
-    //pub const want_safety = switch (builtin.mode) {
-    //    builtin.Mode.Debug => true,
-    //    builtin.Mode.ReleaseSafe => true,
-    //    else => false,
-    //};
-
     pub fn init() DirectAllocator {
         return DirectAllocator {
             .allocator = Allocator {
@@ -298,7 +291,7 @@ pub const FixedBufferAllocator = struct {
 
     fn alloc(allocator: &Allocator, n: usize, alignment: u29) ![]u8 {
         const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
-        const addr = @ptrToInt(&self.buffer[self.end_index]);
+        const addr = @ptrToInt(self.buffer.ptr) + self.end_index;
         const rem = @rem(addr, alignment);
         const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
         const adjusted_index = self.end_index + march_forward_bytes;
@@ -325,6 +318,54 @@ pub const FixedBufferAllocator = struct {
     fn free(allocator: &Allocator, bytes: []u8) void { }
 };
 
+/// lock free
+pub const ThreadSafeFixedBufferAllocator = struct {
+    allocator: Allocator,
+    end_index: usize,
+    buffer: []u8,
+
+    pub fn init(buffer: []u8) ThreadSafeFixedBufferAllocator {
+        return ThreadSafeFixedBufferAllocator {
+            .allocator = Allocator {
+                .allocFn = alloc,
+                .reallocFn = realloc,
+                .freeFn = free,
+            },
+            .buffer = buffer,
+            .end_index = 0,
+        };
+    }
+
+    fn alloc(allocator: &Allocator, n: usize, alignment: u29) ![]u8 {
+        const self = @fieldParentPtr(ThreadSafeFixedBufferAllocator, "allocator", allocator);
+        var end_index = @atomicLoad(usize, &self.end_index, builtin.AtomicOrder.SeqCst);
+        while (true) {
+            const addr = @ptrToInt(self.buffer.ptr) + end_index;
+            const rem = @rem(addr, alignment);
+            const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
+            const adjusted_index = end_index + march_forward_bytes;
+            const new_end_index = adjusted_index + n;
+            if (new_end_index > self.buffer.len) {
+                return error.OutOfMemory;
+            }
+            end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index,
+                builtin.AtomicOrder.SeqCst, builtin.AtomicOrder.SeqCst) ?? return self.buffer[adjusted_index .. new_end_index];
+        }
+    }
+
+    fn realloc(allocator: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
+        if (new_size <= old_mem.len) {
+            return old_mem[0..new_size];
+        } else {
+            const result = try alloc(allocator, new_size, alignment);
+            mem.copy(u8, result, old_mem);
+            return result;
+        }
+    }
+
+    fn free(allocator: &Allocator, bytes: []u8) void { }
+};
+
 
 
 test "c_allocator" {
@@ -363,6 +404,13 @@ test "FixedBufferAllocator" {
     try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator);
 }
 
+test "ThreadSafeFixedBufferAllocator" {
+    var fixed_buffer_allocator = ThreadSafeFixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);
+
+    try testAllocator(&fixed_buffer_allocator.allocator);
+    try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator);
+}
+
 fn testAllocator(allocator: &mem.Allocator) !void {
     var slice = try allocator.alloc(&i32, 100);
--
cgit v1.2.3
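To tie the last patch's two std additions together, here is a minimal single-threaded usage sketch in the same 2018-era Zig syntax (&-pointers, ?? rather than today's orelse). It assumes Stack is reachable as std.atomic.Stack, which these diffs do not show; the buffer size and i32 payload are illustrative.

    const std = @import("std");
    const assert = std.debug.assert;

    test "sketch: ThreadSafeFixedBufferAllocator + atomic Stack" {
        // Back the allocator with a plain byte buffer; alloc() atomically
        // bumps end_index with @cmpxchgWeak, and free() is a no-op.
        var buf: [4096]u8 = undefined;
        var fba = std.heap.ThreadSafeFixedBufferAllocator.init(buf[0..]);
        var a = &fba.allocator;

        // The stack never allocates; callers own the nodes.
        var stack = std.atomic.Stack(i32).init();
        const node = try a.create(std.atomic.Stack(i32).Node);
        node.data = 42;
        stack.push(node);

        const popped = stack.pop() ?? unreachable;
        assert(popped.data == 42);
        assert(stack.isEmpty());
    }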