From 19691c0b174f283ffe5b6c3fe8533ef458736064 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 15 Sep 2021 12:37:32 -0700 Subject: stage2: implement `@fence` --- src/codegen/c.zig | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'src/codegen/c.zig') diff --git a/src/codegen/c.zig b/src/codegen/c.zig index ff49b18f7b..a2e2d7b20d 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -842,6 +842,7 @@ fn genBody(o: *Object, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfM .breakpoint => try airBreakpoint(o), .unreach => try airUnreach(o), + .fence => try airFence(o, inst), // TODO use a different strategy for add that communicates to the optimizer // that wrapping is UB. @@ -1439,6 +1440,17 @@ fn airBreakpoint(o: *Object) !CValue { return CValue.none; } +fn airFence(o: *Object, inst: Air.Inst.Index) !CValue { + const atomic_order = o.air.instructions.items(.data)[inst].fence; + const writer = o.writer(); + + try writer.writeAll("zig_fence("); + try writeMemoryOrder(writer, atomic_order); + try writer.writeAll(");\n"); + + return CValue.none; +} + fn airUnreach(o: *Object) !CValue { try o.writer().writeAll("zig_unreachable();\n"); return CValue.none; -- cgit v1.2.3 From b67d1810be3234c363ee2929ffcc91083bfb0ae5 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 15 Sep 2021 18:55:39 -0700 Subject: stage2: implement `@atomicRmw` and `@atomicLoad` * langref: add some more "see also" links for atomics * Add the following AIR instructions - atomic_load - atomic_store_unordered - atomic_store_monotonic - atomic_store_release - atomic_store_seq_cst - atomic_rmw * Implement those AIR instructions in LLVM and C backends. * AstGen: make the `ty` result locations for `@atomicRmw`, `@atomicLoad`, and `@atomicStore` be `coerced_ty` to avoid unnecessary ZIR instructions when Sema will be doing the coercions redundantly. * Sema for `@atomicLoad` and `@atomicRmw` is done, however Sema for `@atomicStore` is not yet implemented. - comptime eval for `@atomicRmw` is not yet implemented. * Sema: flesh out `coerceInMemoryAllowed` a little bit more. It can now handle pointers. --- doc/langref.html.in | 12 ++- src/Air.zig | 60 +++++++++++- src/AstGen.zig | 39 ++++---- src/Liveness.zig | 13 +++ src/Sema.zig | 192 +++++++++++++++++++++++++++++++++++++-- src/codegen.zig | 23 +++++ src/codegen/c.zig | 83 ++++++++++++++++- src/codegen/llvm.zig | 160 ++++++++++++++++++++++++++++++-- src/codegen/llvm/bindings.zig | 63 +++++++++++++ src/link/C/zig.h | 57 ++++++++++-- src/print_air.zig | 36 ++++++++ test/behavior/atomics.zig | 23 +++++ test/behavior/atomics_stage1.zig | 23 ----- 13 files changed, 708 insertions(+), 76 deletions(-) (limited to 'src/codegen/c.zig') diff --git a/doc/langref.html.in b/doc/langref.html.in index 9a3eef2390..3f33123372 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -7216,7 +7216,9 @@ fn func(y: *i32) void { {#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float, an integer or an enum.

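A minimal usage sketch of this builtin (the `counter` global and the choice of `.Monotonic` are illustrative, not taken from the compiler sources):

var counter: u32 = 0;

fn readCounter() u32 {
    // Atomically read the current value; Monotonic is enough when only the
    // value itself matters, not its ordering with other memory operations.
    return @atomicLoad(u32, &counter, .Monotonic);
}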
+ {#see_also|@atomicStore|@atomicRmw|@fence|@cmpxchgWeak|@cmpxchgStrong#} {#header_close#} + {#header_open|@atomicRmw#}
{#syntax#}@atomicRmw(comptime T: type, ptr: *T, comptime op: builtin.AtomicRmwOp, operand: T, comptime ordering: builtin.AtomicOrder) T{#endsyntax#}

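As a rough illustration of the signature above (the names and constants are invented for the example):

var counter: u32 = 0;

fn increment() u32 {
    // Atomically add 1 and return the value the counter held *before* the add.
    return @atomicRmw(u32, &counter, .Add, 1, .SeqCst);
}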
@@ -7242,7 +7244,9 @@ fn func(y: *i32) void {

  • {#syntax#}.Max{#endsyntax#} - stores the operand if it is larger. Supports integers and floats.
  • {#syntax#}.Min{#endsyntax#} - stores the operand if it is smaller. Supports integers and floats.
  + {#see_also|@atomicStore|@atomicLoad|@fence|@cmpxchgWeak|@cmpxchgStrong#} {#header_close#} + {#header_open|@atomicStore#}
    {#syntax#}@atomicStore(comptime T: type, ptr: *T, value: T, comptime ordering: builtin.AtomicOrder) void{#endsyntax#}

    @@ -7252,6 +7256,7 @@ fn func(y: *i32) void { {#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float, an integer or an enum.

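A small sketch of how this builtin is typically called (the `flag` variable is hypothetical):

var flag: bool = false;

fn setReady() void {
    // Atomically set the flag; Release makes earlier writes visible to an
    // Acquire load of the same flag.
    @atomicStore(bool, &flag, true, .Release);
}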
    + {#see_also|@atomicLoad|@atomicRmw|@fence|@cmpxchgWeak|@cmpxchgStrong#} {#header_close#} {#header_open|@bitCast#} @@ -7540,8 +7545,9 @@ fn cmpxchgStrongButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_v an integer or an enum.

    {#syntax#}@typeInfo(@TypeOf(ptr)).Pointer.alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}

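For illustration, a sketch of the strong variant (the `value` global and helper name are invented):

var value: i32 = 1234;

fn tryReplace(expected: i32, new: i32) bool {
    // Returns null on success, otherwise the value currently in memory.
    return @cmpxchgStrong(i32, &value, expected, new, .SeqCst, .SeqCst) == null;
}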
    - {#see_also|Compile Variables|cmpxchgWeak#} + {#see_also|@atomicStore|@atomicLoad|@atomicRmw|@fence|@cmpxchgWeak#} {#header_close#} + {#header_open|@cmpxchgWeak#}
    {#syntax#}@cmpxchgWeak(comptime T: type, ptr: *T, expected_value: T, new_value: T, success_order: AtomicOrder, fail_order: AtomicOrder) ?T{#endsyntax#}

    @@ -7569,7 +7575,7 @@ fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_val an integer or an enum.

    {#syntax#}@typeInfo(@TypeOf(ptr)).Pointer.alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}

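A typical retry loop built on the weak variant might look like this (sketch only; the helper name is invented):

fn addOne(ptr: *u32) void {
    var old = @atomicLoad(u32, ptr, .Monotonic);
    // The weak variant may fail spuriously, so it is normally used in a loop:
    // on failure it returns the value observed in memory, which becomes the
    // expected value for the next attempt.
    while (@cmpxchgWeak(u32, ptr, old, old + 1, .SeqCst, .Monotonic)) |current| {
        old = current;
    }
}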
    - {#see_also|Compile Variables|cmpxchgStrong#} + {#see_also|@atomicStore|@atomicLoad|@atomicRmw|@fence|@cmpxchgStrong#} {#header_close#} {#header_open|@compileError#} @@ -7849,7 +7855,7 @@ export fn @"A function name that is a complete sentence."() void {}

    {#syntax#}AtomicOrder{#endsyntax#} can be found with {#syntax#}@import("std").builtin.AtomicOrder{#endsyntax#}.

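A short publish-style sketch (the variable names are illustrative):

var data: i32 = 0;
var ready: bool = false;

fn publishData() void {
    data = 42;
    // The fence keeps the write to `data` from being reordered past the flag store.
    @fence(.SeqCst);
    @atomicStore(bool, &ready, true, .SeqCst);
}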
    - {#see_also|Compile Variables#} + {#see_also|@atomicStore|@atomicLoad|@atomicRmw|@cmpxchgWeak|@cmpxchgStrong#} {#header_close#} {#header_open|@field#} diff --git a/src/Air.zig b/src/Air.zig index e4289c2826..2834699d69 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -127,14 +127,11 @@ pub const Inst = struct { /// Lowers to a hardware trap instruction, or the next best thing. /// Result type is always void. breakpoint, - /// Lowers to a memory fence instruction. - /// Result type is always void. - /// Uses the `fence` field. - fence, /// Function call. /// Result type is the return type of the function being called. /// Uses the `pl_op` field with the `Call` payload. operand is the callee. call, + /// `<`. Result type is always bool. /// Uses the `bin_op` field. cmp_lt, @@ -153,6 +150,7 @@ pub const Inst = struct { /// `!=`. Result type is always bool. /// Uses the `bin_op` field. cmp_neq, + /// Conditional branch. /// Result type is always noreturn; no instructions in a block follow this one. /// Uses the `pl_op` field. Operand is the condition. Payload is `CondBr`. @@ -313,10 +311,33 @@ pub const Inst = struct { /// Given a pointer to an array, return a slice. /// Uses the `ty_op` field. array_to_slice, + /// Uses the `ty_pl` field with payload `Cmpxchg`. cmpxchg_weak, /// Uses the `ty_pl` field with payload `Cmpxchg`. cmpxchg_strong, + /// Lowers to a memory fence instruction. + /// Result type is always void. + /// Uses the `fence` field. + fence, + /// Atomically load from a pointer. + /// Result type is the element type of the pointer. + /// Uses the `atomic_load` field. + atomic_load, + /// Atomically store through a pointer. + /// Result type is always `void`. + /// Uses the `bin_op` field. LHS is pointer, RHS is element. + atomic_store_unordered, + /// Same as `atomic_store_unordered` but with `AtomicOrder.Monotonic`. + atomic_store_monotonic, + /// Same as `atomic_store_unordered` but with `AtomicOrder.Release`. + atomic_store_release, + /// Same as `atomic_store_unordered` but with `AtomicOrder.SeqCst`. + atomic_store_seq_cst, + /// Atomically read-modify-write via a pointer. + /// Result type is the element type of the pointer. + /// Uses the `pl_op` field with payload `AtomicRmw`. Operand is `ptr`. + atomic_rmw, pub fn fromCmpOp(op: std.math.CompareOperator) Tag { return switch (op) { @@ -385,6 +406,10 @@ pub const Inst = struct { column: u32, }, fence: std.builtin.AtomicOrder, + atomic_load: struct { + ptr: Ref, + order: std.builtin.AtomicOrder, + }, // Make sure we don't accidentally add a field to make this union // bigger than expected. 
Note that in Debug builds, Zig is allowed @@ -469,6 +494,21 @@ pub const Cmpxchg = struct { } }; +pub const AtomicRmw = struct { + operand: Inst.Ref, + /// 0b00000000000000000000000000000XXX - ordering + /// 0b0000000000000000000000000XXXX000 - op + flags: u32, + + pub fn ordering(self: AtomicRmw) std.builtin.AtomicOrder { + return @intToEnum(std.builtin.AtomicOrder, @truncate(u3, self.flags)); + } + + pub fn op(self: AtomicRmw) std.builtin.AtomicRmwOp { + return @intToEnum(std.builtin.AtomicRmwOp, @truncate(u4, self.flags >> 3)); + } +}; + pub fn getMainBody(air: Air) []const Air.Inst.Index { const body_index = air.extra[@enumToInt(ExtraIndex.main_block)]; const extra = air.extraData(Block, body_index); @@ -572,6 +612,10 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .dbg_stmt, .store, .fence, + .atomic_store_unordered, + .atomic_store_monotonic, + .atomic_store_release, + .atomic_store_seq_cst, => return Type.initTag(.void), .ptrtoint, @@ -594,6 +638,14 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { const inner_ptr_ty = outer_ptr_ty.elemType(); return inner_ptr_ty.elemType(); }, + .atomic_load => { + const ptr_ty = air.typeOf(datas[inst].atomic_load.ptr); + return ptr_ty.elemType(); + }, + .atomic_rmw => { + const ptr_ty = air.typeOf(datas[inst].pl_op.operand); + return ptr_ty.elemType(); + }, } } diff --git a/src/AstGen.zig b/src/AstGen.zig index ac4c807027..b176136ba4 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -7316,6 +7316,7 @@ fn builtinCall( .atomic_load => { const int_type = try typeExpr(gz, scope, params[0]); + // TODO allow this pointer type to be volatile const ptr_type = try gz.add(.{ .tag = .ptr_type_simple, .data = .{ .ptr_type_simple = .{ .is_allowzero = false, @@ -7325,16 +7326,17 @@ fn builtinCall( .elem_type = int_type, }, } }); - const ptr = try expr(gz, scope, .{ .ty = ptr_type }, params[1]); - const ordering = try expr(gz, scope, .{ .ty = .atomic_order_type }, params[2]); const result = try gz.addPlNode(.atomic_load, node, Zir.Inst.Bin{ - .lhs = ptr, - .rhs = ordering, + // zig fmt: off + .lhs = try expr(gz, scope, .{ .coerced_ty = ptr_type }, params[1]), + .rhs = try expr(gz, scope, .{ .coerced_ty = .atomic_order_type }, params[2]), + // zig fmt: on }); return rvalue(gz, rl, result, node); }, .atomic_rmw => { const int_type = try typeExpr(gz, scope, params[0]); + // TODO allow this pointer type to be volatile const ptr_type = try gz.add(.{ .tag = .ptr_type_simple, .data = .{ .ptr_type_simple = .{ .is_allowzero = false, @@ -7344,20 +7346,19 @@ fn builtinCall( .elem_type = int_type, }, } }); - const ptr = try expr(gz, scope, .{ .ty = ptr_type }, params[1]); - const operation = try expr(gz, scope, .{ .ty = .atomic_rmw_op_type }, params[2]); - const operand = try expr(gz, scope, .{ .ty = int_type }, params[3]); - const ordering = try expr(gz, scope, .{ .ty = .atomic_order_type }, params[4]); const result = try gz.addPlNode(.atomic_rmw, node, Zir.Inst.AtomicRmw{ - .ptr = ptr, - .operation = operation, - .operand = operand, - .ordering = ordering, + // zig fmt: off + .ptr = try expr(gz, scope, .{ .coerced_ty = ptr_type }, params[1]), + .operation = try expr(gz, scope, .{ .coerced_ty = .atomic_rmw_op_type }, params[2]), + .operand = try expr(gz, scope, .{ .coerced_ty = int_type }, params[3]), + .ordering = try expr(gz, scope, .{ .coerced_ty = .atomic_order_type }, params[4]), + // zig fmt: on }); return rvalue(gz, rl, result, node); }, .atomic_store => { const int_type = try typeExpr(gz, scope, params[0]); + // TODO allow this pointer 
type to be volatile const ptr_type = try gz.add(.{ .tag = .ptr_type_simple, .data = .{ .ptr_type_simple = .{ .is_allowzero = false, @@ -7367,13 +7368,12 @@ fn builtinCall( .elem_type = int_type, }, } }); - const ptr = try expr(gz, scope, .{ .ty = ptr_type }, params[1]); - const operand = try expr(gz, scope, .{ .ty = int_type }, params[2]); - const ordering = try expr(gz, scope, .{ .ty = .atomic_order_type }, params[3]); const result = try gz.addPlNode(.atomic_store, node, Zir.Inst.AtomicStore{ - .ptr = ptr, - .operand = operand, - .ordering = ordering, + // zig fmt: off + .ptr = try expr(gz, scope, .{ .coerced_ty = ptr_type }, params[1]), + .operand = try expr(gz, scope, .{ .coerced_ty = int_type }, params[2]), + .ordering = try expr(gz, scope, .{ .coerced_ty = .atomic_order_type }, params[3]), + // zig fmt: on }); return rvalue(gz, rl, result, node); }, @@ -7456,12 +7456,11 @@ fn builtinCall( }, .Vector => { const result = try gz.addPlNode(.vector_type, node, Zir.Inst.Bin{ - .lhs = try comptimeExpr(gz, scope, .{.ty = .u32_type}, params[0]), + .lhs = try comptimeExpr(gz, scope, .{ .ty = .u32_type }, params[0]), .rhs = try typeExpr(gz, scope, params[1]), }); return rvalue(gz, rl, result, node); }, - } // zig fmt: on } diff --git a/src/Liveness.zig b/src/Liveness.zig index a7519a33ee..599507500e 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -252,6 +252,10 @@ fn analyzeInst( .ptr_ptr_elem_val, .shl, .shr, + .atomic_store_unordered, + .atomic_store_monotonic, + .atomic_store_release, + .atomic_store_seq_cst, => { const o = inst_datas[inst].bin_op; return trackOperands(a, new_set, inst, main_tomb, .{ o.lhs, o.rhs, .none }); @@ -345,6 +349,15 @@ fn analyzeInst( const extra = a.air.extraData(Air.Cmpxchg, inst_datas[inst].ty_pl.payload).data; return trackOperands(a, new_set, inst, main_tomb, .{ extra.ptr, extra.expected_value, extra.new_value }); }, + .atomic_load => { + const ptr = inst_datas[inst].atomic_load.ptr; + return trackOperands(a, new_set, inst, main_tomb, .{ ptr, .none, .none }); + }, + .atomic_rmw => { + const pl_op = inst_datas[inst].pl_op; + const extra = a.air.extraData(Air.AtomicRmw, pl_op.payload).data; + return trackOperands(a, new_set, inst, main_tomb, .{ pl_op.operand, extra.operand, .none }); + }, .br => { const br = inst_datas[inst].br; return trackOperands(a, new_set, inst, main_tomb, .{ br.operand, .none, .none }); diff --git a/src/Sema.zig b/src/Sema.zig index e679f03fcc..c163178890 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -7549,6 +7549,19 @@ fn resolveAtomicOrder( return val.toEnum(std.builtin.AtomicOrder); } +fn resolveAtomicRmwOp( + sema: *Sema, + block: *Scope.Block, + src: LazySrcLoc, + zir_ref: Zir.Inst.Ref, +) CompileError!std.builtin.AtomicRmwOp { + const atomic_rmw_op_ty = try sema.getBuiltinType(block, src, "AtomicRmwOp"); + const air_ref = sema.resolveInst(zir_ref); + const coerced = try sema.coerce(block, atomic_rmw_op_ty, air_ref, src); + const val = try sema.resolveConstValue(block, src, coerced); + return val.toEnum(std.builtin.AtomicRmwOp); +} + fn zirCmpxchg( sema: *Sema, block: *Scope.Block, @@ -7664,14 +7677,108 @@ fn zirSelect(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErro fn zirAtomicLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; - const src = inst_data.src(); - return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicLoad", .{}); + const extra = sema.code.extraData(Zir.Inst.Bin, 
inst_data.payload_index).data; + // zig fmt: off + const elem_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; + const ptr_src : LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; + const order_src : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node }; + // zig fmt: on + const ptr = sema.resolveInst(extra.lhs); + const elem_ty = sema.typeOf(ptr).elemType(); + try sema.checkAtomicOperandType(block, elem_ty_src, elem_ty); + const order = try sema.resolveAtomicOrder(block, order_src, extra.rhs); + + switch (order) { + .Release, .AcqRel => { + return sema.mod.fail( + &block.base, + order_src, + "@atomicLoad atomic ordering must not be Release or AcqRel", + .{}, + ); + }, + else => {}, + } + + if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| { + if (try ptr_val.pointerDeref(sema.arena)) |elem_val| { + return sema.addConstant(elem_ty, elem_val); + } + } + + try sema.requireRuntimeBlock(block, ptr_src); + return block.addInst(.{ + .tag = .atomic_load, + .data = .{ .atomic_load = .{ + .ptr = ptr, + .order = order, + } }, + }); } fn zirAtomicRmw(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { + const mod = sema.mod; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; + const extra = sema.code.extraData(Zir.Inst.AtomicRmw, inst_data.payload_index).data; const src = inst_data.src(); - return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicRmw", .{}); + // zig fmt: off + const operand_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; + const ptr_src : LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; + const op_src : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node }; + const operand_src : LazySrcLoc = .{ .node_offset_builtin_call_arg3 = inst_data.src_node }; + const order_src : LazySrcLoc = .{ .node_offset_builtin_call_arg4 = inst_data.src_node }; + // zig fmt: on + const ptr = sema.resolveInst(extra.ptr); + const operand_ty = sema.typeOf(ptr).elemType(); + try sema.checkAtomicOperandType(block, operand_ty_src, operand_ty); + const op = try sema.resolveAtomicRmwOp(block, op_src, extra.operation); + + switch (operand_ty.zigTypeTag()) { + .Enum => if (op != .Xchg) { + return mod.fail(&block.base, op_src, "@atomicRmw with enum only allowed with .Xchg", .{}); + }, + .Bool => if (op != .Xchg) { + return mod.fail(&block.base, op_src, "@atomicRmw with bool only allowed with .Xchg", .{}); + }, + .Float => switch (op) { + .Xchg, .Add, .Sub => {}, + else => return mod.fail(&block.base, op_src, "@atomicRmw with float only allowed with .Xchg, .Add, and .Sub", .{}), + }, + else => {}, + } + const operand = try sema.coerce(block, operand_ty, sema.resolveInst(extra.operand), operand_src); + const order = try sema.resolveAtomicOrder(block, order_src, extra.ordering); + + if (order == .Unordered) { + return mod.fail(&block.base, order_src, "@atomicRmw atomic ordering must not be Unordered", .{}); + } + + // special case zero bit types + if (try sema.typeHasOnePossibleValue(block, operand_ty_src, operand_ty)) |val| { + return sema.addConstant(operand_ty, val); + } + + const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: { + if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |operand_val| { + _ = ptr_val; + _ = operand_val; + return mod.fail(&block.base, src, "TODO implement Sema for @atomicRmw at comptime", .{}); + } else break :rs operand_src; + } else ptr_src; 
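+    // The payload is packed exactly as documented on `Air.AtomicRmw`:
+    // bits 0..2 carry the atomic ordering and bits 3..6 carry the rmw op,
+    // so `AtomicRmw.ordering()` and `AtomicRmw.op()` can recover them from
+    // `flags` and `flags >> 3` with `@truncate`.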
+ + const flags: u32 = @as(u32, @enumToInt(order)) | (@as(u32, @enumToInt(op)) << 3); + + try sema.requireRuntimeBlock(block, runtime_src); + return block.addInst(.{ + .tag = .atomic_rmw, + .data = .{ .pl_op = .{ + .operand = ptr, + .payload = try sema.addExtra(Air.AtomicRmw{ + .operand = operand, + .flags = flags, + }), + } }, + }); } fn zirAtomicStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -8848,7 +8955,7 @@ fn coerce( if (dest_type.eql(inst_ty)) return inst; - const in_memory_result = coerceInMemoryAllowed(dest_type, inst_ty); + const in_memory_result = coerceInMemoryAllowed(dest_type, inst_ty, false); if (in_memory_result == .ok) { return sema.bitcast(block, dest_type, inst, inst_src); } @@ -8890,11 +8997,12 @@ fn coerce( const array_type = inst_ty.elemType(); if (array_type.zigTypeTag() != .Array) break :src_array_ptr; const array_elem_type = array_type.elemType(); - if (inst_ty.isConstPtr() and !dest_type.isConstPtr()) break :src_array_ptr; + const dest_is_mut = !dest_type.isConstPtr(); + if (inst_ty.isConstPtr() and dest_is_mut) break :src_array_ptr; if (inst_ty.isVolatilePtr() and !dest_type.isVolatilePtr()) break :src_array_ptr; const dst_elem_type = dest_type.elemType(); - switch (coerceInMemoryAllowed(dst_elem_type, array_elem_type)) { + switch (coerceInMemoryAllowed(dst_elem_type, array_elem_type, dest_is_mut)) { .ok => {}, .no_match => break :src_array_ptr, } @@ -9001,10 +9109,80 @@ const InMemoryCoercionResult = enum { no_match, }; -fn coerceInMemoryAllowed(dest_type: Type, src_type: Type) InMemoryCoercionResult { +/// If pointers have the same representation in runtime memory, a bitcast AIR instruction +/// may be used for the coercion. +/// * `const` attribute can be gained +/// * `volatile` attribute can be gained +/// * `allowzero` attribute can be gained (whether from explicit attribute, C pointer, or optional pointer) but only if !dest_is_mut +/// * alignment can be decreased +/// * bit offset attributes must match exactly +/// * `*`/`[*]` must match exactly, but `[*c]` matches either one +/// * sentinel-terminated pointers can coerce into `[*]` +/// TODO improve this function to report recursive compile errors like it does in stage1. 
+/// look at the function types_match_const_cast_only +fn coerceInMemoryAllowed(dest_type: Type, src_type: Type, dest_is_mut: bool) InMemoryCoercionResult { if (dest_type.eql(src_type)) return .ok; + if (dest_type.zigTypeTag() == .Pointer and + src_type.zigTypeTag() == .Pointer) + { + const dest_info = dest_type.ptrInfo().data; + const src_info = src_type.ptrInfo().data; + + const child = coerceInMemoryAllowed(dest_info.pointee_type, src_info.pointee_type, dest_info.mutable); + if (child == .no_match) { + return child; + } + + const ok_sent = dest_info.sentinel == null or src_info.size == .C or + (src_info.sentinel != null and + dest_info.sentinel.?.eql(src_info.sentinel.?, dest_info.pointee_type)); + if (!ok_sent) { + return .no_match; + } + + const ok_ptr_size = src_info.size == dest_info.size or + src_info.size == .C or dest_info.size == .C; + if (!ok_ptr_size) { + return .no_match; + } + + const ok_cv_qualifiers = + (src_info.mutable or !dest_info.mutable) and + (!src_info.@"volatile" or dest_info.@"volatile"); + + if (!ok_cv_qualifiers) { + return .no_match; + } + + const ok_allows_zero = (dest_info.@"allowzero" and + (src_info.@"allowzero" or !dest_is_mut)) or + (!dest_info.@"allowzero" and !src_info.@"allowzero"); + if (!ok_allows_zero) { + return .no_match; + } + + if (dest_type.hasCodeGenBits() != src_type.hasCodeGenBits()) { + return .no_match; + } + + if (src_info.host_size != dest_info.host_size or + src_info.bit_offset != dest_info.bit_offset) + { + return .no_match; + } + + assert(src_info.@"align" != 0); + assert(dest_info.@"align" != 0); + + if (dest_info.@"align" > src_info.@"align") { + return .no_match; + } + + return .ok; + } + // TODO: implement more of this function return .no_match; diff --git a/src/codegen.zig b/src/codegen.zig index 75e7a56b15..08ee358bff 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -860,6 +860,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .array_to_slice => try self.airArrayToSlice(inst), .cmpxchg_strong => try self.airCmpxchg(inst), .cmpxchg_weak => try self.airCmpxchg(inst), + .atomic_rmw => try self.airAtomicRmw(inst), + .atomic_load => try self.airAtomicLoad(inst), + + .atomic_store_unordered => try self.airAtomicStore(inst, .Unordered), + .atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic), + .atomic_store_release => try self.airAtomicStore(inst, .Release), + .atomic_store_seq_cst => try self.airAtomicStore(inst, .SeqCst), .struct_field_ptr_index_0 => try self.airStructFieldPtrIndex(inst, 0), .struct_field_ptr_index_1 => try self.airStructFieldPtrIndex(inst, 1), @@ -4773,6 +4780,22 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return self.finishAir(inst, result, .{ extra.ptr, extra.expected_value, extra.new_value }); } + fn airAtomicRmw(self: *Self, inst: Air.Inst.Index) !void { + _ = inst; + return self.fail("TODO implement airCmpxchg for {}", .{self.target.cpu.arch}); + } + + fn airAtomicLoad(self: *Self, inst: Air.Inst.Index) !void { + _ = inst; + return self.fail("TODO implement airAtomicLoad for {}", .{self.target.cpu.arch}); + } + + fn airAtomicStore(self: *Self, inst: Air.Inst.Index, order: std.builtin.AtomicOrder) !void { + _ = inst; + _ = order; + return self.fail("TODO implement airAtomicStore for {}", .{self.target.cpu.arch}); + } + fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { // First section of indexes correspond to a set number of constant values. 
const ref_int = @enumToInt(inst); diff --git a/src/codegen/c.zig b/src/codegen/c.zig index a2e2d7b20d..5eb4388a9e 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -914,6 +914,13 @@ fn genBody(o: *Object, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfM .array_to_slice => try airArrayToSlice(o, inst), .cmpxchg_weak => try airCmpxchg(o, inst, "weak"), .cmpxchg_strong => try airCmpxchg(o, inst, "strong"), + .atomic_rmw => try airAtomicRmw(o, inst), + .atomic_load => try airAtomicLoad(o, inst), + + .atomic_store_unordered => try airAtomicStore(o, inst, toMemoryOrder(.Unordered)), + .atomic_store_monotonic => try airAtomicStore(o, inst, toMemoryOrder(.Monotonic)), + .atomic_store_release => try airAtomicStore(o, inst, toMemoryOrder(.Release)), + .atomic_store_seq_cst => try airAtomicStore(o, inst, toMemoryOrder(.SeqCst)), .struct_field_ptr_index_0 => try airStructFieldPtrIndex(o, inst, 0), .struct_field_ptr_index_1 => try airStructFieldPtrIndex(o, inst, 1), @@ -1917,8 +1924,61 @@ fn airCmpxchg(o: *Object, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue { return local; } -fn writeMemoryOrder(w: anytype, order: std.builtin.AtomicOrder) !void { - const str = switch (order) { +fn airAtomicRmw(o: *Object, inst: Air.Inst.Index) !CValue { + const pl_op = o.air.instructions.items(.data)[inst].pl_op; + const extra = o.air.extraData(Air.AtomicRmw, pl_op.payload).data; + const inst_ty = o.air.typeOfIndex(inst); + const ptr = try o.resolveInst(pl_op.operand); + const operand = try o.resolveInst(extra.operand); + const local = try o.allocLocal(inst_ty, .Const); + const writer = o.writer(); + + try writer.print(" = zig_atomicrmw_{s}(", .{toAtomicRmwSuffix(extra.op())}); + try o.writeCValue(writer, ptr); + try writer.writeAll(", "); + try o.writeCValue(writer, operand); + try writer.writeAll(", "); + try writeMemoryOrder(writer, extra.ordering()); + try writer.writeAll(");\n"); + + return local; +} + +fn airAtomicLoad(o: *Object, inst: Air.Inst.Index) !CValue { + const atomic_load = o.air.instructions.items(.data)[inst].atomic_load; + const inst_ty = o.air.typeOfIndex(inst); + const ptr = try o.resolveInst(atomic_load.ptr); + const local = try o.allocLocal(inst_ty, .Const); + const writer = o.writer(); + + try writer.writeAll(" = zig_atomic_load("); + try o.writeCValue(writer, ptr); + try writer.writeAll(", "); + try writeMemoryOrder(writer, atomic_load.order); + try writer.writeAll(");\n"); + + return local; +} + +fn airAtomicStore(o: *Object, inst: Air.Inst.Index, order: [*:0]const u8) !CValue { + const bin_op = o.air.instructions.items(.data)[inst].bin_op; + const ptr = try o.resolveInst(bin_op.lhs); + const element = try o.resolveInst(bin_op.rhs); + const inst_ty = o.air.typeOfIndex(inst); + const local = try o.allocLocal(inst_ty, .Const); + const writer = o.writer(); + + try writer.writeAll(" = zig_atomic_store("); + try o.writeCValue(writer, ptr); + try writer.writeAll(", "); + try o.writeCValue(writer, element); + try writer.print(", {s});\n", .{order}); + + return local; +} + +fn toMemoryOrder(order: std.builtin.AtomicOrder) [:0]const u8 { + return switch (order) { .Unordered => "memory_order_relaxed", .Monotonic => "memory_order_consume", .Acquire => "memory_order_acquire", @@ -1926,7 +1986,24 @@ fn writeMemoryOrder(w: anytype, order: std.builtin.AtomicOrder) !void { .AcqRel => "memory_order_acq_rel", .SeqCst => "memory_order_seq_cst", }; - return w.writeAll(str); +} + +fn writeMemoryOrder(w: anytype, order: std.builtin.AtomicOrder) !void { + return 
w.writeAll(toMemoryOrder(order)); +} + +fn toAtomicRmwSuffix(order: std.builtin.AtomicRmwOp) []const u8 { + return switch (order) { + .Xchg => "xchg", + .Add => "add", + .Sub => "sub", + .And => "and", + .Nand => "nand", + .Or => "or", + .Xor => "xor", + .Max => "max", + .Min => "min", + }; } fn IndentWriter(comptime UnderlyingWriter: type) type { diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 569f857caa..af25ae51b1 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1060,6 +1060,13 @@ pub const FuncGen = struct { .cmpxchg_weak => try self.airCmpxchg(inst, true), .cmpxchg_strong => try self.airCmpxchg(inst, false), .fence => try self.airFence(inst), + .atomic_rmw => try self.airAtomicRmw(inst), + .atomic_load => try self.airAtomicLoad(inst), + + .atomic_store_unordered => try self.airAtomicStore(inst, .Unordered), + .atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic), + .atomic_store_release => try self.airAtomicStore(inst, .Release), + .atomic_store_seq_cst => try self.airAtomicStore(inst, .SequentiallyConsistent), .struct_field_ptr => try self.airStructFieldPtr(inst), .struct_field_val => try self.airStructFieldVal(inst), @@ -1983,20 +1990,19 @@ pub const FuncGen = struct { fn airStore(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const dest_ptr = try self.resolveInst(bin_op.lhs); + const ptr_ty = self.air.typeOf(bin_op.lhs); const src_operand = try self.resolveInst(bin_op.rhs); - // TODO set volatile on this store properly - _ = self.builder.buildStore(src_operand, dest_ptr); + _ = self.store(dest_ptr, ptr_ty, src_operand); return null; } fn airLoad(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr(); - if (!is_volatile and self.liveness.isUnused(inst)) + const ptr_ty = self.air.typeOf(ty_op.operand); + if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null; const ptr = try self.resolveInst(ty_op.operand); - // TODO set volatile on this load properly - return self.builder.buildLoad(ptr, ""); + return self.load(ptr, ptr_ty); } fn airBreakpoint(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { @@ -2033,14 +2039,12 @@ pub const FuncGen = struct { new_value = self.builder.buildZExt(new_value, abi_ty, ""); } } - const success_order = toLlvmAtomicOrdering(extra.successOrder()); - const failure_order = toLlvmAtomicOrdering(extra.failureOrder()); const result = self.builder.buildCmpXchg( ptr, expected_value, new_value, - success_order, - failure_order, + toLlvmAtomicOrdering(extra.successOrder()), + toLlvmAtomicOrdering(extra.failureOrder()), is_weak, self.single_threaded, ); @@ -2066,6 +2070,109 @@ pub const FuncGen = struct { return self.builder.buildInsertValue(partial, non_null_bit, 1, ""); } + fn airAtomicRmw(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + const pl_op = self.air.instructions.items(.data)[inst].pl_op; + const extra = self.air.extraData(Air.AtomicRmw, pl_op.payload).data; + const ptr = try self.resolveInst(pl_op.operand); + const ptr_ty = self.air.typeOf(pl_op.operand); + const operand_ty = ptr_ty.elemType(); + const operand = try self.resolveInst(extra.operand); + const is_signed_int = operand_ty.isSignedInt(); + const is_float = operand_ty.isFloat(); + const op = toLlvmAtomicRmwBinOp(extra.op(), is_signed_int, is_float); + const ordering = 
toLlvmAtomicOrdering(extra.ordering()); + const single_threaded = llvm.Bool.fromBool(self.single_threaded); + const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, op == .Xchg); + if (opt_abi_ty) |abi_ty| { + // operand needs widening and truncating or bitcasting. + const casted_ptr = self.builder.buildBitCast(ptr, abi_ty.pointerType(0), ""); + const casted_operand = if (is_float) + self.builder.buildBitCast(operand, abi_ty, "") + else if (is_signed_int) + self.builder.buildSExt(operand, abi_ty, "") + else + self.builder.buildZExt(operand, abi_ty, ""); + + const uncasted_result = self.builder.buildAtomicRmw( + op, + casted_ptr, + casted_operand, + ordering, + single_threaded, + ); + const operand_llvm_ty = try self.dg.llvmType(operand_ty); + if (is_float) { + return self.builder.buildBitCast(uncasted_result, operand_llvm_ty, ""); + } else { + return self.builder.buildTrunc(uncasted_result, operand_llvm_ty, ""); + } + } + + if (operand.typeOf().getTypeKind() != .Pointer) { + return self.builder.buildAtomicRmw(op, ptr, operand, ordering, single_threaded); + } + + // It's a pointer but we need to treat it as an int. + const usize_llvm_ty = try self.dg.llvmType(Type.initTag(.usize)); + const casted_ptr = self.builder.buildBitCast(ptr, usize_llvm_ty.pointerType(0), ""); + const casted_operand = self.builder.buildPtrToInt(operand, usize_llvm_ty, ""); + const uncasted_result = self.builder.buildAtomicRmw( + op, + casted_ptr, + casted_operand, + ordering, + single_threaded, + ); + const operand_llvm_ty = try self.dg.llvmType(operand_ty); + return self.builder.buildIntToPtr(uncasted_result, operand_llvm_ty, ""); + } + + fn airAtomicLoad(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + const atomic_load = self.air.instructions.items(.data)[inst].atomic_load; + const ptr = try self.resolveInst(atomic_load.ptr); + const ptr_ty = self.air.typeOf(atomic_load.ptr); + const ordering = toLlvmAtomicOrdering(atomic_load.order); + const operand_ty = ptr_ty.elemType(); + const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false); + + if (opt_abi_ty) |abi_ty| { + // operand needs widening and truncating + const casted_ptr = self.builder.buildBitCast(ptr, abi_ty.pointerType(0), ""); + const load_inst = self.load(casted_ptr, ptr_ty); + load_inst.setOrdering(ordering); + return self.builder.buildTrunc(load_inst, try self.dg.llvmType(operand_ty), ""); + } + const load_inst = self.load(ptr, ptr_ty); + load_inst.setOrdering(ordering); + return load_inst; + } + + fn airAtomicStore( + self: *FuncGen, + inst: Air.Inst.Index, + ordering: llvm.AtomicOrdering, + ) !?*const llvm.Value { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + var ptr = try self.resolveInst(bin_op.lhs); + const ptr_ty = self.air.typeOf(bin_op.lhs); + var element = try self.resolveInst(bin_op.rhs); + const operand_ty = ptr_ty.elemType(); + const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false); + + if (opt_abi_ty) |abi_ty| { + // operand needs widening + ptr = self.builder.buildBitCast(ptr, abi_ty.pointerType(0), ""); + if (operand_ty.isSignedInt()) { + element = self.builder.buildSExt(element, abi_ty, ""); + } else { + element = self.builder.buildZExt(element, abi_ty, ""); + } + } + const store_inst = self.store(ptr, ptr_ty, element); + store_inst.setOrdering(ordering); + return null; + } + fn getIntrinsic(self: *FuncGen, name: []const u8) *const llvm.Value { const id = llvm.lookupIntrinsicID(name.ptr, name.len); assert(id != 0); @@ -2074,6 +2181,21 @@ pub const FuncGen = struct { // 
`getIntrinsicDeclaration` return self.llvmModule().getIntrinsicDeclaration(id, null, 0); } + + fn load(self: *FuncGen, ptr: *const llvm.Value, ptr_ty: Type) *const llvm.Value { + _ = ptr_ty; // TODO set volatile and alignment on this load properly + return self.builder.buildLoad(ptr, ""); + } + + fn store( + self: *FuncGen, + ptr: *const llvm.Value, + ptr_ty: Type, + elem: *const llvm.Value, + ) *const llvm.Value { + _ = ptr_ty; // TODO set volatile and alignment on this store properly + return self.builder.buildStore(elem, ptr); + } }; fn initializeLLVMTarget(arch: std.Target.Cpu.Arch) void { @@ -2227,3 +2349,21 @@ fn toLlvmAtomicOrdering(atomic_order: std.builtin.AtomicOrder) llvm.AtomicOrderi .SeqCst => .SequentiallyConsistent, }; } + +fn toLlvmAtomicRmwBinOp( + op: std.builtin.AtomicRmwOp, + is_signed: bool, + is_float: bool, +) llvm.AtomicRMWBinOp { + return switch (op) { + .Xchg => .Xchg, + .Add => if (is_float) llvm.AtomicRMWBinOp.FAdd else return .Add, + .Sub => if (is_float) llvm.AtomicRMWBinOp.FSub else return .Sub, + .And => .And, + .Nand => .Nand, + .Or => .Or, + .Xor => .Xor, + .Max => if (is_signed) llvm.AtomicRMWBinOp.Max else return .UMax, + .Min => if (is_signed) llvm.AtomicRMWBinOp.Min else return .UMin, + }; +} diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig index 3fed3ca879..3bbc24e174 100644 --- a/src/codegen/llvm/bindings.zig +++ b/src/codegen/llvm/bindings.zig @@ -133,6 +133,9 @@ pub const Value = opaque { pub const constIntToPtr = LLVMConstIntToPtr; extern fn LLVMConstIntToPtr(ConstantVal: *const Value, ToType: *const Type) *const Value; + + pub const setOrdering = LLVMSetOrdering; + extern fn LLVMSetOrdering(MemoryAccessInst: *const Value, Ordering: AtomicOrdering) void; }; pub const Type = opaque { @@ -167,6 +170,9 @@ pub const Type = opaque { ElementCount: c_uint, Packed: Bool, ) void; + + pub const getTypeKind = LLVMGetTypeKind; + extern fn LLVMGetTypeKind(Ty: *const Type) TypeKind; }; pub const Module = opaque { @@ -477,6 +483,14 @@ pub const Builder = opaque { Name: [*:0]const u8, ) *const Value; + pub const buildIntToPtr = LLVMBuildIntToPtr; + extern fn LLVMBuildIntToPtr( + *const Builder, + Val: *const Value, + DestTy: *const Type, + Name: [*:0]const u8, + ) *const Value; + pub const buildStructGEP = LLVMBuildStructGEP; extern fn LLVMBuildStructGEP( B: *const Builder, @@ -530,6 +544,16 @@ pub const Builder = opaque { singleThread: Bool, Name: [*:0]const u8, ) *const Value; + + pub const buildAtomicRmw = LLVMBuildAtomicRMW; + extern fn LLVMBuildAtomicRMW( + B: *const Builder, + op: AtomicRMWBinOp, + PTR: *const Value, + Val: *const Value, + ordering: AtomicOrdering, + singleThread: Bool, + ) *const Value; }; pub const IntPredicate = enum(c_uint) { @@ -901,3 +925,42 @@ pub const AtomicOrdering = enum(c_uint) { AcquireRelease = 6, SequentiallyConsistent = 7, }; + +pub const AtomicRMWBinOp = enum(c_int) { + Xchg, + Add, + Sub, + And, + Nand, + Or, + Xor, + Max, + Min, + UMax, + UMin, + FAdd, + FSub, +}; + +pub const TypeKind = enum(c_int) { + Void, + Half, + Float, + Double, + X86_FP80, + FP128, + PPC_FP128, + Label, + Integer, + Function, + Struct, + Array, + Pointer, + Vector, + Metadata, + X86_MMX, + Token, + ScalableVector, + BFloat, + X86_AMX, +}; diff --git a/src/link/C/zig.h b/src/link/C/zig.h index 28d6f2dd17..e19a138c1b 100644 --- a/src/link/C/zig.h +++ b/src/link/C/zig.h @@ -62,16 +62,61 @@ #if __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__) #include -#define zig_cmpxchg_strong(obj, expected, desired, succ, fail) 
atomic_compare_exchange_strong_explicit(obj, expected, desired, succ, fail) -#define zig_cmpxchg_weak(obj, expected, desired, succ, fail) atomic_compare_exchange_weak_explicit(obj, expected, desired, succ, fail) +#define zig_cmpxchg_strong(obj, expected, desired, succ, fail) atomic_compare_exchange_strong_explicit(obj, &(expected), desired, succ, fail) +#define zig_cmpxchg_weak (obj, expected, desired, succ, fail) atomic_compare_exchange_weak_explicit (obj, &(expected), desired, succ, fail) +#define zig_atomicrmw_xchg(obj, arg, order) atomic_exchange_explicit (obj, arg, order) +#define zig_atomicrmw_add (obj, arg, order) atomic_fetch_add_explicit (obj, arg, order) +#define zig_atomicrmw_sub (obj, arg, order) atomic_fetch_sub_explicit (obj, arg, order) +#define zig_atomicrmw_or (obj, arg, order) atomic_fetch_or_explicit (obj, arg, order) +#define zig_atomicrmw_xor (obj, arg, order) atomic_fetch_xor_explicit (obj, arg, order) +#define zig_atomicrmw_and (obj, arg, order) atomic_fetch_and_explicit (obj, arg, order) +#define zig_atomicrmw_nand(obj, arg, order) atomic_fetch_nand_explicit(obj, arg, order) +#define zig_atomicrmw_min (obj, arg, order) atomic_fetch_min_explicit (obj, arg, order) +#define zig_atomicrmw_max (obj, arg, order) atomic_fetch_max_explicit (obj, arg, order) +#define zig_atomic_store (obj, arg, order) atomic_store_explicit (obj, arg, order) +#define zig_atomic_load (obj, order) atomic_load_explicit (obj, order) #define zig_fence(order) atomic_thread_fence(order) #elif __GNUC__ -#define zig_cmpxchg_strong(obj, expected, desired, succ, fail) __sync_val_compare_and_swap(obj, expected, desired) -#define zig_cmpxchg_weak(obj, expected, desired, succ, fail) __sync_val_compare_and_swap(obj, expected, desired) -#define zig_fence(order) __sync_synchronize(order) +#define memory_order_relaxed __ATOMIC_RELAXED +#define memory_order_consume __ATOMIC_CONSUME +#define memory_order_acquire __ATOMIC_ACQUIRE +#define memory_order_release __ATOMIC_RELEASE +#define memory_order_acq_rel __ATOMIC_ACQ_REL +#define memory_order_seq_cst __ATOMIC_SEQ_CST +#define zig_cmpxchg_strong(obj, expected, desired, succ, fail) __atomic_compare_exchange_n(obj, &(expected), desired, false, succ, fail) +#define zig_cmpxchg_weak (obj, expected, desired, succ, fail) __atomic_compare_exchange_n(obj, &(expected), desired, true , succ, fail) +#define zig_atomicrmw_xchg(obj, arg, order) __atomic_exchange_n(obj, arg, order) +#define zig_atomicrmw_add (obj, arg, order) __atomic_fetch_add (obj, arg, order) +#define zig_atomicrmw_sub (obj, arg, order) __atomic_fetch_sub (obj, arg, order) +#define zig_atomicrmw_or (obj, arg, order) __atomic_fetch_or (obj, arg, order) +#define zig_atomicrmw_xor (obj, arg, order) __atomic_fetch_xor (obj, arg, order) +#define zig_atomicrmw_and (obj, arg, order) __atomic_fetch_and (obj, arg, order) +#define zig_atomicrmw_nand(obj, arg, order) __atomic_fetch_nand(obj, arg, order) +#define zig_atomicrmw_min (obj, arg, order) __atomic_fetch_min (obj, arg, order) +#define zig_atomicrmw_max (obj, arg, order) __atomic_fetch_max (obj, arg, order) +#define zig_atomic_store (obj, arg, order) __atomic_store (obj, arg, order) +#define zig_atomic_load (obj, order) __atomic_load (obj, order) +#define zig_fence(order) __atomic_thread_fence(order) #else +#define memory_order_relaxed 0 +#define memory_order_consume 1 +#define memory_order_acquire 2 +#define memory_order_release 3 +#define memory_order_acq_rel 4 +#define memory_order_seq_cst 5 #define zig_cmpxchg_strong(obj, expected, desired, succ, fail) 
zig_unimplemented() -#define zig_cmpxchg_weak(obj, expected, desired, succ, fail) zig_unimplemented() +#define zig_cmpxchg_weak (obj, expected, desired, succ, fail) zig_unimplemented() +#define zig_atomicrmw_xchg(obj, arg, order) zig_unimplemented() +#define zig_atomicrmw_add (obj, arg, order) zig_unimplemented() +#define zig_atomicrmw_sub (obj, arg, order) zig_unimplemented() +#define zig_atomicrmw_or (obj, arg, order) zig_unimplemented() +#define zig_atomicrmw_xor (obj, arg, order) zig_unimplemented() +#define zig_atomicrmw_and (obj, arg, order) zig_unimplemented() +#define zig_atomicrmw_nand(obj, arg, order) zig_unimplemented() +#define zig_atomicrmw_min (obj, arg, order) zig_unimplemented() +#define zig_atomicrmw_max (obj, arg, order) zig_unimplemented() +#define zig_atomic_store (obj, arg, order) zig_unimplemented() +#define zig_atomic_load (obj, order) zig_unimplemented() #define zig_fence(order) zig_unimplemented() #endif diff --git a/src/print_air.zig b/src/print_air.zig index 82068188fd..39ae4251fa 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -193,6 +193,12 @@ const Writer = struct { .switch_br => try w.writeSwitchBr(s, inst), .cmpxchg_weak, .cmpxchg_strong => try w.writeCmpxchg(s, inst), .fence => try w.writeFence(s, inst), + .atomic_load => try w.writeAtomicLoad(s, inst), + .atomic_store_unordered => try w.writeAtomicStore(s, inst, .Unordered), + .atomic_store_monotonic => try w.writeAtomicStore(s, inst, .Monotonic), + .atomic_store_release => try w.writeAtomicStore(s, inst, .Release), + .atomic_store_seq_cst => try w.writeAtomicStore(s, inst, .SeqCst), + .atomic_rmw => try w.writeAtomicRmw(s, inst), } } @@ -283,6 +289,36 @@ const Writer = struct { try s.print("{s}", .{@tagName(atomic_order)}); } + fn writeAtomicLoad(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + const atomic_load = w.air.instructions.items(.data)[inst].atomic_load; + + try w.writeOperand(s, inst, 0, atomic_load.ptr); + try s.print(", {s}", .{@tagName(atomic_load.order)}); + } + + fn writeAtomicStore( + w: *Writer, + s: anytype, + inst: Air.Inst.Index, + order: std.builtin.AtomicOrder, + ) @TypeOf(s).Error!void { + const bin_op = w.air.instructions.items(.data)[inst].bin_op; + try w.writeOperand(s, inst, 0, bin_op.lhs); + try s.writeAll(", "); + try w.writeOperand(s, inst, 1, bin_op.rhs); + try s.print(", {s}", .{@tagName(order)}); + } + + fn writeAtomicRmw(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + const pl_op = w.air.instructions.items(.data)[inst].pl_op; + const extra = w.air.extraData(Air.AtomicRmw, pl_op.payload).data; + + try w.writeOperand(s, inst, 0, pl_op.operand); + try s.writeAll(", "); + try w.writeOperand(s, inst, 1, extra.operand); + try s.print(", {s}, {s}", .{ @tagName(extra.op()), @tagName(extra.ordering()) }); + } + fn writeConstant(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const ty_pl = w.air.instructions.items(.data)[inst].ty_pl; const val = w.air.values[ty_pl.payload]; diff --git a/test/behavior/atomics.zig b/test/behavior/atomics.zig index b29f9c9c6c..01ec767253 100644 --- a/test/behavior/atomics.zig +++ b/test/behavior/atomics.zig @@ -30,3 +30,26 @@ test "fence" { @fence(.SeqCst); x = 5678; } + +test "atomicrmw and atomicload" { + var data: u8 = 200; + try testAtomicRmw(&data); + try expect(data == 42); + try testAtomicLoad(&data); +} + +fn testAtomicRmw(ptr: *u8) !void { + const prev_value = @atomicRmw(u8, ptr, .Xchg, 42, .SeqCst); + try expect(prev_value == 200); + comptime { + var x: i32 = 
1234; + const y: i32 = 12345; + try expect(@atomicLoad(i32, &x, .SeqCst) == 1234); + try expect(@atomicLoad(i32, &y, .SeqCst) == 12345); + } +} + +fn testAtomicLoad(ptr: *u8) !void { + const x = @atomicLoad(u8, ptr, .SeqCst); + try expect(x == 42); +} diff --git a/test/behavior/atomics_stage1.zig b/test/behavior/atomics_stage1.zig index e9de7dac6c..b13e3a62c6 100644 --- a/test/behavior/atomics_stage1.zig +++ b/test/behavior/atomics_stage1.zig @@ -3,29 +3,6 @@ const expect = std.testing.expect; const expectEqual = std.testing.expectEqual; const builtin = @import("builtin"); -test "atomicrmw and atomicload" { - var data: u8 = 200; - try testAtomicRmw(&data); - try expect(data == 42); - try testAtomicLoad(&data); -} - -fn testAtomicRmw(ptr: *u8) !void { - const prev_value = @atomicRmw(u8, ptr, .Xchg, 42, .SeqCst); - try expect(prev_value == 200); - comptime { - var x: i32 = 1234; - const y: i32 = 12345; - try expect(@atomicLoad(i32, &x, .SeqCst) == 1234); - try expect(@atomicLoad(i32, &y, .SeqCst) == 12345); - } -} - -fn testAtomicLoad(ptr: *u8) !void { - const x = @atomicLoad(u8, ptr, .SeqCst); - try expect(x == 42); -} - test "cmpxchg with ptr" { var data1: i32 = 1234; var data2: i32 = 5678; -- cgit v1.2.3 From ea393b2bca7587955df81d149caecc5522944d15 Mon Sep 17 00:00:00 2001 From: Robin Voetter Date: Sun, 29 Aug 2021 06:08:19 +0200 Subject: Address Spaces: Implement in LLVM codegen --- src/Module.zig | 2 +- src/Sema.zig | 2 +- src/codegen.zig | 2 +- src/codegen/c.zig | 2 +- src/codegen/llvm.zig | 40 ++++++++++++++++++++++++++++-------- src/codegen/llvm/bindings.zig | 8 ++++++++ src/type.zig | 48 +++++++++++++++++++++++++++++++++++-------- src/zig_llvm.cpp | 5 +++++ src/zig_llvm.h | 3 +++ 9 files changed, 91 insertions(+), 21 deletions(-) (limited to 'src/codegen/c.zig') diff --git a/src/Module.zig b/src/Module.zig index b2cef0792a..1f9b5abcb9 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -4761,7 +4761,7 @@ pub fn populateTestFunctions(mod: *Module) !void { const builtin_file = (mod.importPkg(builtin_pkg) catch unreachable).file; const builtin_namespace = builtin_file.root_decl.?.namespace; const decl = builtin_namespace.decls.get("test_functions").?; - var buf: Type.Payload.ElemType = undefined; + var buf: Type.SlicePtrFieldTypeBuffer = undefined; const tmp_test_fn_ty = decl.ty.slicePtrFieldType(&buf).elemType(); const array_decl = d: { diff --git a/src/Sema.zig b/src/Sema.zig index 4b75cc4c43..d6e926e604 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -8390,7 +8390,7 @@ fn fieldVal( .Pointer => switch (object_ty.ptrSize()) { .Slice => { if (mem.eql(u8, field_name, "ptr")) { - const buf = try arena.create(Type.Payload.ElemType); + const buf = try arena.create(Type.SlicePtrFieldTypeBuffer); const result_ty = object_ty.slicePtrFieldType(buf); if (try sema.resolveMaybeUndefVal(block, object_src, object)) |val| { if (val.isUndef()) return sema.addConstUndef(result_ty); diff --git a/src/codegen.zig b/src/codegen.zig index e0047de1f7..cbee79f139 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -4873,7 +4873,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { switch (typed_value.ty.zigTypeTag()) { .Pointer => switch (typed_value.ty.ptrSize()) { .Slice => { - var buf: Type.Payload.ElemType = undefined; + var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_type = typed_value.ty.slicePtrFieldType(&buf); const ptr_mcv = try self.genTypedValue(.{ .ty = ptr_type, .val = typed_value.val }); const slice_len = typed_value.val.sliceLen(); diff --git a/src/codegen/c.zig 
b/src/codegen/c.zig index 5eb4388a9e..7a2dc343f7 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -251,7 +251,7 @@ pub const DeclGen = struct { try writer.writeByte('('); try dg.renderType(writer, t); try writer.writeAll("){"); - var buf: Type.Payload.ElemType = undefined; + var buf: Type.SlicePtrFieldTypeBuffer = undefined; try dg.renderValue(writer, t.slicePtrFieldType(&buf), val); try writer.writeAll(", "); try writer.print("{d}", .{val.sliceLen()}); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 53e57ee219..8b7282160e 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -554,7 +554,8 @@ pub const DeclGen = struct { @intCast(c_uint, fn_param_len), .False, ); - const llvm_fn = self.llvmModule().addFunction(decl.name, fn_type); + const llvm_addrspace = self.llvmAddressSpace(decl.@"addrspace"); + const llvm_fn = self.llvmModule().addFunctionInAddressSpace(decl.name, fn_type, llvm_addrspace); const is_extern = decl.val.tag() == .extern_fn; if (!is_extern) { @@ -576,7 +577,27 @@ pub const DeclGen = struct { if (llvm_module.getNamedGlobal(decl.name)) |val| return val; // TODO: remove this redundant `llvmType`, it is also called in `genTypedValue`. const llvm_type = try self.llvmType(decl.ty); - return llvm_module.addGlobal(llvm_type, decl.name); + const llvm_addrspace = self.llvmAddressSpace(decl.@"addrspace"); + return llvm_module.addGlobalInAddressSpace(llvm_type, decl.name, llvm_addrspace); + } + + fn llvmAddressSpace(self: DeclGen, address_space: std.builtin.AddressSpace) c_uint { + const target = self.module.getTarget(); + return switch (address_space) { + .generic => llvm.address_space.default, + .gs => switch (target.cpu.arch) { + .i386, .x86_64 => llvm.address_space.x86.gs, + else => unreachable, + }, + .fs => switch (target.cpu.arch) { + .i386, .x86_64 => llvm.address_space.x86.fs, + else => unreachable, + }, + .ss => switch (target.cpu.arch) { + .i386, .x86_64 => llvm.address_space.x86.ss, + else => unreachable, + }, + }; } fn llvmType(self: *DeclGen, t: Type) error{ OutOfMemory, CodegenFail }!*const llvm.Type { @@ -605,7 +626,7 @@ pub const DeclGen = struct { .Bool => return self.context.intType(1), .Pointer => { if (t.isSlice()) { - var buf: Type.Payload.ElemType = undefined; + var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_type = t.slicePtrFieldType(&buf); const fields: [2]*const llvm.Type = .{ @@ -615,7 +636,8 @@ pub const DeclGen = struct { return self.context.structType(&fields, fields.len, .False); } else { const elem_type = try self.llvmType(t.elemType()); - return elem_type.pointerType(0); + const llvm_addrspace = self.llvmAddressSpace(t.ptrAddressSpace()); + return elem_type.pointerType(llvm_addrspace); } }, .Array => { @@ -681,7 +703,8 @@ pub const DeclGen = struct { @intCast(c_uint, llvm_params.len), llvm.Bool.fromBool(is_var_args), ); - return llvm_fn_ty.pointerType(0); + const llvm_addrspace = self.llvmAddressSpace(t.fnAddressSpace()); + return llvm_fn_ty.pointerType(llvm_addrspace); }, .ComptimeInt => unreachable, .ComptimeFloat => unreachable, @@ -749,7 +772,7 @@ pub const DeclGen = struct { .Pointer => switch (tv.val.tag()) { .decl_ref => { if (tv.ty.isSlice()) { - var buf: Type.Payload.ElemType = undefined; + var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = tv.ty.slicePtrFieldType(&buf); var slice_len: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, @@ -779,12 +802,13 @@ pub const DeclGen = struct { decl.alive = true; const val = try self.resolveGlobalDecl(decl); const llvm_var_type = try 
self.llvmType(tv.ty); - const llvm_type = llvm_var_type.pointerType(0); + const llvm_addrspace = self.llvmAddressSpace(decl.@"addrspace"); + const llvm_type = llvm_var_type.pointerType(llvm_addrspace); return val.constBitCast(llvm_type); }, .slice => { const slice = tv.val.castTag(.slice).?.data; - var buf: Type.Payload.ElemType = undefined; + var buf: Type.SlicePtrFieldTypeBuffer = undefined; const fields: [2]*const llvm.Value = .{ try self.genTypedValue(.{ .ty = tv.ty.slicePtrFieldType(&buf), diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig index 67b39784b1..e50589dee1 100644 --- a/src/codegen/llvm/bindings.zig +++ b/src/codegen/llvm/bindings.zig @@ -197,6 +197,9 @@ pub const Module = opaque { pub const addFunction = LLVMAddFunction; extern fn LLVMAddFunction(*const Module, Name: [*:0]const u8, FunctionTy: *const Type) *const Value; + pub const addFunctionInAddressSpace = ZigLLVMAddFunctionInAddressSpace; + extern fn ZigLLVMAddFunctionInAddressSpace(*const Module, Name: [*:0]const u8, FunctionTy: *const Type, AddressSpace: c_uint) *const Value; + pub const getNamedFunction = LLVMGetNamedFunction; extern fn LLVMGetNamedFunction(*const Module, Name: [*:0]const u8) ?*const Value; @@ -209,6 +212,9 @@ pub const Module = opaque { pub const addGlobal = LLVMAddGlobal; extern fn LLVMAddGlobal(M: *const Module, Ty: *const Type, Name: [*:0]const u8) *const Value; + pub const addGlobalInAddressSpace = LLVMAddGlobalInAddressSpace; + extern fn LLVMAddGlobalInAddressSpace(M: *const Module, Ty: *const Type, Name: [*:0]const u8, AddressSpace: c_uint) *const Value; + pub const getNamedGlobal = LLVMGetNamedGlobal; extern fn LLVMGetNamedGlobal(M: *const Module, Name: [*:0]const u8) ?*const Value; @@ -975,6 +981,8 @@ pub const TypeKind = enum(c_int) { }; pub const address_space = struct { + pub const default = 0; + // See llvm/lib/Target/X86/X86.h pub const x86_64 = x86; pub const x86 = struct { diff --git a/src/type.zig b/src/type.zig index b15026b595..9cda4aacf7 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2161,42 +2161,72 @@ pub const Type = extern union { }; } - pub fn slicePtrFieldType(self: Type, buffer: *Payload.ElemType) Type { + pub const SlicePtrFieldTypeBuffer = union { + elem_type: Payload.ElemType, + pointer: Payload.Pointer, + }; + + pub fn slicePtrFieldType(self: Type, buffer: *SlicePtrFieldTypeBuffer) Type { switch (self.tag()) { .const_slice_u8 => return Type.initTag(.manyptr_const_u8), .const_slice => { const elem_type = self.castTag(.const_slice).?.data; - buffer.* = .{ + buffer.elem_type = .{ .base = .{ .tag = .many_const_pointer }, .data = elem_type, }; - return Type.initPayload(&buffer.base); + return Type.initPayload(&buffer.elem_type.base); }, .mut_slice => { const elem_type = self.castTag(.mut_slice).?.data; - buffer.* = .{ + buffer.elem_type = .{ .base = .{ .tag = .many_mut_pointer }, .data = elem_type, }; - return Type.initPayload(&buffer.base); + return Type.initPayload(&buffer.elem_type.base); }, .pointer => { const payload = self.castTag(.pointer).?.data; assert(payload.size == .Slice); - if (payload.mutable) { - buffer.* = .{ + + if (payload.sentinel != null or + payload.@"align" != 0 or + payload.@"addrspace" != .generic or + payload.bit_offset != 0 or + payload.host_size != 0 or + payload.@"allowzero" or + payload.@"volatile" + ) { + buffer.pointer = .{ + .data = .{ + .pointee_type = payload.pointee_type, + .sentinel = payload.sentinel, + .@"align" = payload.@"align", + .@"addrspace" = payload.@"addrspace", + .bit_offset = payload.bit_offset, + 
.host_size = payload.host_size, + .@"allowzero" = payload.@"allowzero", + .mutable = payload.mutable, + .@"volatile" = payload.@"volatile", + .size = .Many + }, + }; + return Type.initPayload(&buffer.pointer.base); + } else if (payload.mutable) { + buffer.elem_type = .{ .base = .{ .tag = .many_mut_pointer }, .data = payload.pointee_type, }; + return Type.initPayload(&buffer.elem_type.base); } else { - buffer.* = .{ + buffer.elem_type = .{ .base = .{ .tag = .many_const_pointer }, .data = payload.pointee_type, }; + return Type.initPayload(&buffer.elem_type.base); } - return Type.initPayload(&buffer.base); }, else => unreachable, diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp index e1ab74f423..6e136161a6 100644 --- a/src/zig_llvm.cpp +++ b/src/zig_llvm.cpp @@ -416,6 +416,11 @@ ZIG_EXTERN_C LLVMTypeRef ZigLLVMTokenTypeInContext(LLVMContextRef context_ref) { return wrap(Type::getTokenTy(*unwrap(context_ref))); } +LLVMValueRef ZigLLVMAddFunctionInAddressSpace(LLVMModuleRef M, const char *Name, LLVMTypeRef FunctionTy, unsigned AddressSpace) { + Function* func = Function::Create(unwrap(FunctionTy), GlobalValue::ExternalLinkage, AddressSpace, Name, unwrap(M)); + return wrap(func); +} + LLVMValueRef ZigLLVMBuildCall(LLVMBuilderRef B, LLVMValueRef Fn, LLVMValueRef *Args, unsigned NumArgs, ZigLLVM_CallingConv CC, ZigLLVM_CallAttr attr, const char *Name) { diff --git a/src/zig_llvm.h b/src/zig_llvm.h index be279d86e1..49a4c0e8fd 100644 --- a/src/zig_llvm.h +++ b/src/zig_llvm.h @@ -65,6 +65,9 @@ ZIG_EXTERN_C LLVMTargetMachineRef ZigLLVMCreateTargetMachine(LLVMTargetRef T, co ZIG_EXTERN_C LLVMTypeRef ZigLLVMTokenTypeInContext(LLVMContextRef context_ref); +ZIG_EXTERN_C LLVMValueRef ZigLLVMAddFunctionInAddressSpace(LLVMModuleRef M, const char *Name, + LLVMTypeRef FunctionTy, unsigned AddressSpace); + enum ZigLLVM_CallingConv { ZigLLVM_C = 0, ZigLLVM_Fast = 8, -- cgit v1.2.3 From b9d3527e0ed53c4796ab64b4df7daf0909739807 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 20 Sep 2021 14:13:33 -0700 Subject: stage2: implement comptime `@atomicRmw` * introduce float_to_int and int_to_float AIR instructionts and implement for the LLVM backend and C backend. * Sema: implement `zirIntToFloat`. * Sema: implement `@atomicRmw` comptime evaluation - introduce `storePtrVal` for when one needs to store a Value to a pointer which is a Value, and assert it happens at comptime. * Value: introduce new functionality: - intToFloat - numberAddWrap - numberSubWrap - numberMax - numberMin - bitwiseAnd - bitwiseNand (not implemented yet) - bitwiseOr - bitwiseXor * Sema: hook up `zirBitwise` to the new Value bitwise implementations * Type: rename `isFloat` to `isRuntimeFloat` because it returns `false` for `comptime_float`. --- src/Air.zig | 8 ++ src/Liveness.zig | 2 + src/Sema.zig | 240 ++++++++++++++++++++++++++------------- src/codegen.zig | 22 ++++ src/codegen/c.zig | 20 ++++ src/codegen/llvm.zig | 48 +++++++- src/codegen/llvm/bindings.zig | 32 ++++++ src/print_air.zig | 2 + src/type.zig | 45 +++++--- src/value.zig | 224 ++++++++++++++++++++++++++++++++++++ test/behavior/atomics.zig | 29 +++++ test/behavior/atomics_stage1.zig | 29 ----- 12 files changed, 573 insertions(+), 128 deletions(-) (limited to 'src/codegen/c.zig') diff --git a/src/Air.zig b/src/Air.zig index 2834699d69..ad95200001 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -311,6 +311,12 @@ pub const Inst = struct { /// Given a pointer to an array, return a slice. /// Uses the `ty_op` field. 
array_to_slice, + /// Given a float operand, return the integer with the closest mathematical meaning. + /// Uses the `ty_op` field. + float_to_int, + /// Given an integer operand, return the float with the closest mathematical meaning. + /// Uses the `ty_op` field. + int_to_float, /// Uses the `ty_pl` field with payload `Cmpxchg`. cmpxchg_weak, @@ -598,6 +604,8 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .struct_field_ptr_index_2, .struct_field_ptr_index_3, .array_to_slice, + .float_to_int, + .int_to_float, => return air.getRefType(datas[inst].ty_op.ty), .loop, diff --git a/src/Liveness.zig b/src/Liveness.zig index 6e6a3ccf1f..1d34da091d 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -293,6 +293,8 @@ fn analyzeInst( .struct_field_ptr_index_2, .struct_field_ptr_index_3, .array_to_slice, + .float_to_int, + .int_to_float, => { const o = inst_datas[inst].ty_op; return trackOperands(a, new_set, inst, main_tomb, .{ o.operand, .none, .none }); diff --git a/src/Sema.zig b/src/Sema.zig index 5471795317..72dc25a58a 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -4830,8 +4830,8 @@ fn analyzeSwitch( var arena = std.heap.ArenaAllocator.init(gpa); defer arena.deinit(); - const min_int = try operand_ty.minInt(&arena, mod.getTarget()); - const max_int = try operand_ty.maxInt(&arena, mod.getTarget()); + const min_int = try operand_ty.minInt(&arena.allocator, mod.getTarget()); + const max_int = try operand_ty.maxInt(&arena.allocator, mod.getTarget()); if (try range_set.spans(min_int, max_int, operand_ty)) { if (special_prong == .@"else") { return mod.fail( @@ -5671,10 +5671,13 @@ fn zirBitwise( if (try sema.resolveMaybeUndefVal(block, lhs_src, casted_lhs)) |lhs_val| { if (try sema.resolveMaybeUndefVal(block, rhs_src, casted_rhs)) |rhs_val| { - if (lhs_val.isUndef() or rhs_val.isUndef()) { - return sema.addConstUndef(resolved_type); - } - return sema.mod.fail(&block.base, src, "TODO implement comptime bitwise operations", .{}); + const result_val = switch (air_tag) { + .bit_and => try lhs_val.bitwiseAnd(rhs_val, sema.arena), + .bit_or => try lhs_val.bitwiseOr(rhs_val, sema.arena), + .xor => try lhs_val.bitwiseXor(rhs_val, sema.arena), + else => unreachable, + }; + return sema.addConstant(scalar_type, result_val); } } @@ -6028,8 +6031,8 @@ fn analyzeArithmetic( } if (zir_tag == .mod_rem) { - const dirty_lhs = lhs_ty.isSignedInt() or lhs_ty.isFloat(); - const dirty_rhs = rhs_ty.isSignedInt() or rhs_ty.isFloat(); + const dirty_lhs = lhs_ty.isSignedInt() or lhs_ty.isRuntimeFloat(); + const dirty_rhs = rhs_ty.isSignedInt() or rhs_ty.isRuntimeFloat(); if (dirty_lhs or dirty_rhs) { return sema.mod.fail(&block.base, src, "remainder division with '{}' and '{}': signed integers and floats must use @rem or @mod", .{ lhs_ty, rhs_ty }); } @@ -7298,13 +7301,30 @@ fn zirFrameSize(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileE fn zirFloatToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); + // TODO don't forget the safety check! 
return sema.mod.fail(&block.base, src, "TODO: Sema.zirFloatToInt", .{}); } fn zirIntToFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; - const src = inst_data.src(); - return sema.mod.fail(&block.base, src, "TODO: Sema.zirIntToFloat", .{}); + const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; + const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; + const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; + const dest_ty = try sema.resolveType(block, ty_src, extra.lhs); + const operand = sema.resolveInst(extra.rhs); + const operand_ty = sema.typeOf(operand); + + try sema.checkIntType(block, ty_src, dest_ty); + try sema.checkFloatType(block, operand_src, operand_ty); + + if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| { + const target = sema.mod.getTarget(); + const result_val = try val.intToFloat(sema.arena, dest_ty, target); + return sema.addConstant(dest_ty, result_val); + } + + try sema.requireRuntimeBlock(block, operand_src); + return block.addTyOp(.int_to_float, dest_ty, operand); } fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -7542,6 +7562,34 @@ fn zirOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr return sema.mod.fail(&block.base, src, "TODO: Sema.zirOffsetOf", .{}); } +fn checkIntType( + sema: *Sema, + block: *Scope.Block, + ty_src: LazySrcLoc, + ty: Type, +) CompileError!void { + switch (ty.zigTypeTag()) { + .ComptimeInt, .Int => {}, + else => return sema.mod.fail(&block.base, ty_src, "expected integer type, found '{}'", .{ + ty, + }), + } +} + +fn checkFloatType( + sema: *Sema, + block: *Scope.Block, + ty_src: LazySrcLoc, + ty: Type, +) CompileError!void { + switch (ty.zigTypeTag()) { + .ComptimeFloat, .Float => {}, + else => return sema.mod.fail(&block.base, ty_src, "expected float type, found '{}'", .{ + ty, + }), + } +} + fn checkAtomicOperandType( sema: *Sema, block: *Scope.Block, @@ -7815,9 +7863,23 @@ fn zirAtomicRmw(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileE const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: { if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |operand_val| { - _ = ptr_val; - _ = operand_val; - return mod.fail(&block.base, src, "TODO implement Sema for @atomicRmw at comptime", .{}); + const target = sema.mod.getTarget(); + const stored_val = (try ptr_val.pointerDeref(sema.arena)) orelse break :rs ptr_src; + const new_val = switch (op) { + // zig fmt: off + .Xchg => operand_val, + .Add => try stored_val.numberAddWrap(operand_val, operand_ty, sema.arena, target), + .Sub => try stored_val.numberSubWrap(operand_val, operand_ty, sema.arena, target), + .And => try stored_val.bitwiseAnd (operand_val, sema.arena), + .Nand => try stored_val.bitwiseNand (operand_val, operand_ty, sema.arena), + .Or => try stored_val.bitwiseOr (operand_val, sema.arena), + .Xor => try stored_val.bitwiseXor (operand_val, sema.arena), + .Max => try stored_val.numberMax (operand_val, sema.arena), + .Min => try stored_val.numberMin (operand_val, sema.arena), + // zig fmt: on + }; + try sema.storePtrVal(block, src, ptr_val, new_val, operand_ty); + return sema.addConstant(operand_ty, stored_val); } else break :rs operand_src; } else ptr_src; @@ -9298,33 +9360,38 @@ fn coerceNum( const target = sema.mod.getTarget(); - 
if (dst_zig_tag == .ComptimeInt or dst_zig_tag == .Int) { - if (src_zig_tag == .Float or src_zig_tag == .ComptimeFloat) { - if (val.floatHasFraction()) { - return sema.mod.fail(&block.base, inst_src, "fractional component prevents float value {} from being casted to type '{}'", .{ val, inst_ty }); + switch (dst_zig_tag) { + .ComptimeInt, .Int => { + if (src_zig_tag == .Float or src_zig_tag == .ComptimeFloat) { + if (val.floatHasFraction()) { + return sema.mod.fail(&block.base, inst_src, "fractional component prevents float value {} from being casted to type '{}'", .{ val, inst_ty }); + } + return sema.mod.fail(&block.base, inst_src, "TODO float to int", .{}); + } else if (src_zig_tag == .Int or src_zig_tag == .ComptimeInt) { + if (!val.intFitsInType(dest_type, target)) { + return sema.mod.fail(&block.base, inst_src, "type {} cannot represent integer value {}", .{ dest_type, val }); + } + return try sema.addConstant(dest_type, val); } - return sema.mod.fail(&block.base, inst_src, "TODO float to int", .{}); - } else if (src_zig_tag == .Int or src_zig_tag == .ComptimeInt) { - if (!val.intFitsInType(dest_type, target)) { - return sema.mod.fail(&block.base, inst_src, "type {} cannot represent integer value {}", .{ dest_type, val }); + }, + .ComptimeFloat, .Float => { + if (src_zig_tag == .Float or src_zig_tag == .ComptimeFloat) { + const res = val.floatCast(sema.arena, dest_type) catch |err| switch (err) { + error.Overflow => return sema.mod.fail( + &block.base, + inst_src, + "cast of value {} to type '{}' loses information", + .{ val, dest_type }, + ), + error.OutOfMemory => return error.OutOfMemory, + }; + return try sema.addConstant(dest_type, res); + } else if (src_zig_tag == .Int or src_zig_tag == .ComptimeInt) { + const result_val = try val.intToFloat(sema.arena, dest_type, target); + return try sema.addConstant(dest_type, result_val); } - return try sema.addConstant(dest_type, val); - } - } else if (dst_zig_tag == .ComptimeFloat or dst_zig_tag == .Float) { - if (src_zig_tag == .Float or src_zig_tag == .ComptimeFloat) { - const res = val.floatCast(sema.arena, dest_type) catch |err| switch (err) { - error.Overflow => return sema.mod.fail( - &block.base, - inst_src, - "cast of value {} to type '{}' loses information", - .{ val, dest_type }, - ), - error.OutOfMemory => return error.OutOfMemory, - }; - return try sema.addConstant(dest_type, res); - } else if (src_zig_tag == .Int or src_zig_tag == .ComptimeInt) { - return sema.mod.fail(&block.base, inst_src, "TODO int to float", .{}); - } + }, + else => {}, } return null; } @@ -9375,42 +9442,10 @@ fn storePtr2( return; const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: { - if (ptr_val.castTag(.decl_ref_mut)) |decl_ref_mut| { - const const_val = (try sema.resolveMaybeUndefVal(block, operand_src, operand)) orelse - return sema.mod.fail(&block.base, src, "cannot store runtime value in compile time variable", .{}); - - if (decl_ref_mut.data.runtime_index < block.runtime_index) { - if (block.runtime_cond) |cond_src| { - const msg = msg: { - const msg = try sema.mod.errMsg(&block.base, src, "store to comptime variable depends on runtime condition", .{}); - errdefer msg.destroy(sema.gpa); - try sema.mod.errNote(&block.base, cond_src, msg, "runtime condition here", .{}); - break :msg msg; - }; - return sema.mod.failWithOwnedErrorMsg(&block.base, msg); - } - if (block.runtime_loop) |loop_src| { - const msg = msg: { - const msg = try sema.mod.errMsg(&block.base, src, "cannot store to comptime variable in non-inline 
loop", .{}); - errdefer msg.destroy(sema.gpa); - try sema.mod.errNote(&block.base, loop_src, msg, "non-inline loop here", .{}); - break :msg msg; - }; - return sema.mod.failWithOwnedErrorMsg(&block.base, msg); - } - unreachable; - } - var new_arena = std.heap.ArenaAllocator.init(sema.gpa); - errdefer new_arena.deinit(); - const new_ty = try elem_ty.copy(&new_arena.allocator); - const new_val = try const_val.copy(&new_arena.allocator); - const decl = decl_ref_mut.data.decl; - var old_arena = decl.value_arena.?.promote(sema.gpa); - decl.value_arena = null; - try decl.finalizeNewArena(&new_arena); - decl.ty = new_ty; - decl.val = new_val; - old_arena.deinit(); + const operand_val = (try sema.resolveMaybeUndefVal(block, operand_src, operand)) orelse + return sema.mod.fail(&block.base, src, "cannot store runtime value in compile time variable", .{}); + if (ptr_val.tag() == .decl_ref_mut) { + try sema.storePtrVal(block, src, ptr_val, operand_val, elem_ty); return; } break :rs operand_src; @@ -9422,6 +9457,53 @@ fn storePtr2( _ = try block.addBinOp(air_tag, ptr, operand); } +/// Call when you have Value objects rather than Air instructions, and you want to +/// assert the store must be done at comptime. +fn storePtrVal( + sema: *Sema, + block: *Scope.Block, + src: LazySrcLoc, + ptr_val: Value, + operand_val: Value, + operand_ty: Type, +) !void { + if (ptr_val.castTag(.decl_ref_mut)) |decl_ref_mut| { + if (decl_ref_mut.data.runtime_index < block.runtime_index) { + if (block.runtime_cond) |cond_src| { + const msg = msg: { + const msg = try sema.mod.errMsg(&block.base, src, "store to comptime variable depends on runtime condition", .{}); + errdefer msg.destroy(sema.gpa); + try sema.mod.errNote(&block.base, cond_src, msg, "runtime condition here", .{}); + break :msg msg; + }; + return sema.mod.failWithOwnedErrorMsg(&block.base, msg); + } + if (block.runtime_loop) |loop_src| { + const msg = msg: { + const msg = try sema.mod.errMsg(&block.base, src, "cannot store to comptime variable in non-inline loop", .{}); + errdefer msg.destroy(sema.gpa); + try sema.mod.errNote(&block.base, loop_src, msg, "non-inline loop here", .{}); + break :msg msg; + }; + return sema.mod.failWithOwnedErrorMsg(&block.base, msg); + } + unreachable; + } + var new_arena = std.heap.ArenaAllocator.init(sema.gpa); + errdefer new_arena.deinit(); + const new_ty = try operand_ty.copy(&new_arena.allocator); + const new_val = try operand_val.copy(&new_arena.allocator); + const decl = decl_ref_mut.data.decl; + var old_arena = decl.value_arena.?.promote(sema.gpa); + decl.value_arena = null; + try decl.finalizeNewArena(&new_arena); + decl.ty = new_ty; + decl.val = new_val; + old_arena.deinit(); + return; + } +} + fn bitcast( sema: *Sema, block: *Scope.Block, @@ -9801,11 +9883,11 @@ fn cmpNumeric( const lhs_is_signed = if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| lhs_val.compareWithZero(.lt) else - (lhs_ty.isFloat() or lhs_ty.isSignedInt()); + (lhs_ty.isRuntimeFloat() or lhs_ty.isSignedInt()); const rhs_is_signed = if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val| rhs_val.compareWithZero(.lt) else - (rhs_ty.isFloat() or rhs_ty.isSignedInt()); + (rhs_ty.isRuntimeFloat() or rhs_ty.isSignedInt()); const dest_int_is_signed = lhs_is_signed or rhs_is_signed; var dest_float_type: ?Type = null; @@ -10031,7 +10113,7 @@ fn resolvePeerTypes( } continue; } - if (chosen_ty.isFloat() and candidate_ty.isFloat()) { + if (chosen_ty.isRuntimeFloat() and candidate_ty.isRuntimeFloat()) { if (chosen_ty.floatBits(target) < 
candidate_ty.floatBits(target)) { chosen = candidate; chosen_i = candidate_i + 1; @@ -10049,13 +10131,13 @@ fn resolvePeerTypes( continue; } - if (chosen_ty.zigTypeTag() == .ComptimeFloat and candidate_ty.isFloat()) { + if (chosen_ty.zigTypeTag() == .ComptimeFloat and candidate_ty.isRuntimeFloat()) { chosen = candidate; chosen_i = candidate_i + 1; continue; } - if (chosen_ty.isFloat() and candidate_ty.zigTypeTag() == .ComptimeFloat) { + if (chosen_ty.isRuntimeFloat() and candidate_ty.zigTypeTag() == .ComptimeFloat) { continue; } diff --git a/src/codegen.zig b/src/codegen.zig index e0047de1f7..2a33795d0f 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -858,6 +858,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .struct_field_ptr=> try self.airStructFieldPtr(inst), .struct_field_val=> try self.airStructFieldVal(inst), .array_to_slice => try self.airArrayToSlice(inst), + .int_to_float => try self.airIntToFloat(inst), + .float_to_int => try self.airFloatToInt(inst), .cmpxchg_strong => try self.airCmpxchg(inst), .cmpxchg_weak => try self.airCmpxchg(inst), .atomic_rmw => try self.airAtomicRmw(inst), @@ -4769,6 +4771,26 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } + fn airIntToFloat(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { + else => return self.fail("TODO implement airIntToFloat for {}", .{ + self.target.cpu.arch, + }), + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + } + + fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { + else => return self.fail("TODO implement airFloatToInt for {}", .{ + self.target.cpu.arch, + }), + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + } + fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Block, ty_pl.payload); diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 5eb4388a9e..6da791cb46 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -917,6 +917,8 @@ fn genBody(o: *Object, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfM .atomic_rmw => try airAtomicRmw(o, inst), .atomic_load => try airAtomicLoad(o, inst), + .int_to_float, .float_to_int => try airSimpleCast(o, inst), + .atomic_store_unordered => try airAtomicStore(o, inst, toMemoryOrder(.Unordered)), .atomic_store_monotonic => try airAtomicStore(o, inst, toMemoryOrder(.Monotonic)), .atomic_store_release => try airAtomicStore(o, inst, toMemoryOrder(.Release)), @@ -1899,6 +1901,24 @@ fn airArrayToSlice(o: *Object, inst: Air.Inst.Index) !CValue { return local; } +/// Emits a local variable with the result type and initializes it +/// with the operand. 
+fn airSimpleCast(o: *Object, inst: Air.Inst.Index) !CValue { + if (o.liveness.isUnused(inst)) + return CValue.none; + + const inst_ty = o.air.typeOfIndex(inst); + const local = try o.allocLocal(inst_ty, .Const); + const ty_op = o.air.instructions.items(.data)[inst].ty_op; + const writer = o.writer(); + const operand = try o.resolveInst(ty_op.operand); + + try writer.writeAll(" = "); + try o.writeCValue(writer, operand); + try writer.writeAll(";\n"); + return local; +} + fn airCmpxchg(o: *Object, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue { const ty_pl = o.air.instructions.items(.data)[inst].ty_pl; const extra = o.air.extraData(Air.Cmpxchg, ty_pl.payload).data; diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 53e57ee219..2e835260af 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1122,6 +1122,8 @@ pub const FuncGen = struct { .slice_ptr => try self.airSliceField(inst, 0), .slice_len => try self.airSliceField(inst, 1), .array_to_slice => try self.airArrayToSlice(inst), + .float_to_int => try self.airFloatToInt(inst), + .int_to_float => try self.airIntToFloat(inst), .cmpxchg_weak => try self.airCmpxchg(inst, true), .cmpxchg_strong => try self.airCmpxchg(inst, false), .fence => try self.airFence(inst), @@ -1372,6 +1374,40 @@ pub const FuncGen = struct { return self.builder.buildInsertValue(partial, len, 1, ""); } + fn airIntToFloat(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; + + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const operand = try self.resolveInst(ty_op.operand); + const dest_ty = self.air.typeOfIndex(inst); + const dest_llvm_ty = try self.dg.llvmType(dest_ty); + + if (dest_ty.isSignedInt()) { + return self.builder.buildSIToFP(operand, dest_llvm_ty, ""); + } else { + return self.builder.buildUIToFP(operand, dest_llvm_ty, ""); + } + } + + fn airFloatToInt(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; + + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const operand = try self.resolveInst(ty_op.operand); + const dest_ty = self.air.typeOfIndex(inst); + const dest_llvm_ty = try self.dg.llvmType(dest_ty); + + // TODO set fast math flag + + if (dest_ty.isSignedInt()) { + return self.builder.buildFPToSI(operand, dest_llvm_ty, ""); + } else { + return self.builder.buildFPToUI(operand, dest_llvm_ty, ""); + } + } + fn airSliceField(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*const llvm.Value { if (self.liveness.isUnused(inst)) return null; @@ -1818,7 +1854,7 @@ pub const FuncGen = struct { const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); - if (inst_ty.isFloat()) return self.builder.buildFAdd(lhs, rhs, ""); + if (inst_ty.isRuntimeFloat()) return self.builder.buildFAdd(lhs, rhs, ""); if (wrap) return self.builder.buildAdd(lhs, rhs, ""); if (inst_ty.isSignedInt()) return self.builder.buildNSWAdd(lhs, rhs, ""); return self.builder.buildNUWAdd(lhs, rhs, ""); @@ -1833,7 +1869,7 @@ pub const FuncGen = struct { const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); - if (inst_ty.isFloat()) return self.builder.buildFSub(lhs, rhs, ""); + if (inst_ty.isRuntimeFloat()) return self.builder.buildFSub(lhs, rhs, ""); if (wrap) return self.builder.buildSub(lhs, rhs, ""); if (inst_ty.isSignedInt()) return self.builder.buildNSWSub(lhs, rhs, ""); return self.builder.buildNUWSub(lhs, rhs, ""); @@ -1848,7 +1884,7 @@ pub 
const FuncGen = struct { const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); - if (inst_ty.isFloat()) return self.builder.buildFMul(lhs, rhs, ""); + if (inst_ty.isRuntimeFloat()) return self.builder.buildFMul(lhs, rhs, ""); if (wrap) return self.builder.buildMul(lhs, rhs, ""); if (inst_ty.isSignedInt()) return self.builder.buildNSWMul(lhs, rhs, ""); return self.builder.buildNUWMul(lhs, rhs, ""); @@ -1863,7 +1899,7 @@ pub const FuncGen = struct { const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); - if (inst_ty.isFloat()) return self.builder.buildFDiv(lhs, rhs, ""); + if (inst_ty.isRuntimeFloat()) return self.builder.buildFDiv(lhs, rhs, ""); if (inst_ty.isSignedInt()) return self.builder.buildSDiv(lhs, rhs, ""); return self.builder.buildUDiv(lhs, rhs, ""); } @@ -1876,7 +1912,7 @@ pub const FuncGen = struct { const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); - if (inst_ty.isFloat()) return self.builder.buildFRem(lhs, rhs, ""); + if (inst_ty.isRuntimeFloat()) return self.builder.buildFRem(lhs, rhs, ""); if (inst_ty.isSignedInt()) return self.builder.buildSRem(lhs, rhs, ""); return self.builder.buildURem(lhs, rhs, ""); } @@ -2165,7 +2201,7 @@ pub const FuncGen = struct { const operand_ty = ptr_ty.elemType(); const operand = try self.resolveInst(extra.operand); const is_signed_int = operand_ty.isSignedInt(); - const is_float = operand_ty.isFloat(); + const is_float = operand_ty.isRuntimeFloat(); const op = toLlvmAtomicRmwBinOp(extra.op(), is_signed_int, is_float); const ordering = toLlvmAtomicOrdering(extra.ordering()); const single_threaded = llvm.Bool.fromBool(self.single_threaded); diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig index db1dcd22f2..16445fa2d1 100644 --- a/src/codegen/llvm/bindings.zig +++ b/src/codegen/llvm/bindings.zig @@ -563,6 +563,38 @@ pub const Builder = opaque { ordering: AtomicOrdering, singleThread: Bool, ) *const Value; + + pub const buildFPToUI = LLVMBuildFPToUI; + extern fn LLVMBuildFPToUI( + *const Builder, + Val: *const Value, + DestTy: *const Type, + Name: [*:0]const u8, + ) *const Value; + + pub const buildFPToSI = LLVMBuildFPToSI; + extern fn LLVMBuildFPToSI( + *const Builder, + Val: *const Value, + DestTy: *const Type, + Name: [*:0]const u8, + ) *const Value; + + pub const buildUIToFP = LLVMBuildUIToFP; + extern fn LLVMBuildUIToFP( + *const Builder, + Val: *const Value, + DestTy: *const Type, + Name: [*:0]const u8, + ) *const Value; + + pub const buildSIToFP = LLVMBuildSIToFP; + extern fn LLVMBuildSIToFP( + *const Builder, + Val: *const Value, + DestTy: *const Type, + Name: [*:0]const u8, + ) *const Value; }; pub const IntPredicate = enum(c_uint) { diff --git a/src/print_air.zig b/src/print_air.zig index 39ae4251fa..3d13fa688f 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -175,6 +175,8 @@ const Writer = struct { .struct_field_ptr_index_2, .struct_field_ptr_index_3, .array_to_slice, + .int_to_float, + .float_to_int, => try w.writeTyOp(s, inst), .block, diff --git a/src/type.zig b/src/type.zig index ec73ae1196..122faefbc7 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2523,7 +2523,8 @@ pub const Type = extern union { }; } - pub fn isFloat(self: Type) bool { + /// Returns `false` for `comptime_float`. + pub fn isRuntimeFloat(self: Type) bool { return switch (self.tag()) { .f16, .f32, @@ -2536,13 +2537,29 @@ pub const Type = extern union { }; } - /// Asserts the type is a fixed-size float. 
+ /// Returns `true` for `comptime_float`. + pub fn isAnyFloat(self: Type) bool { + return switch (self.tag()) { + .f16, + .f32, + .f64, + .f128, + .c_longdouble, + .comptime_float, + => true, + + else => false, + }; + } + + /// Asserts the type is a fixed-size float or comptime_float. + /// Returns 128 for comptime_float types. pub fn floatBits(self: Type, target: Target) u16 { return switch (self.tag()) { .f16 => 16, .f32 => 32, .f64 => 64, - .f128 => 128, + .f128, .comptime_float => 128, .c_longdouble => CType.longdouble.sizeInBits(target), else => unreachable, @@ -2879,7 +2896,7 @@ pub const Type = extern union { } /// Asserts that self.zigTypeTag() == .Int. - pub fn minInt(self: Type, arena: *std.heap.ArenaAllocator, target: Target) !Value { + pub fn minInt(self: Type, arena: *Allocator, target: Target) !Value { assert(self.zigTypeTag() == .Int); const info = self.intInfo(target); @@ -2889,35 +2906,35 @@ pub const Type = extern union { if ((info.bits - 1) <= std.math.maxInt(u6)) { const n: i64 = -(@as(i64, 1) << @truncate(u6, info.bits - 1)); - return Value.Tag.int_i64.create(&arena.allocator, n); + return Value.Tag.int_i64.create(arena, n); } - var res = try std.math.big.int.Managed.initSet(&arena.allocator, 1); + var res = try std.math.big.int.Managed.initSet(arena, 1); try res.shiftLeft(res, info.bits - 1); res.negate(); const res_const = res.toConst(); if (res_const.positive) { - return Value.Tag.int_big_positive.create(&arena.allocator, res_const.limbs); + return Value.Tag.int_big_positive.create(arena, res_const.limbs); } else { - return Value.Tag.int_big_negative.create(&arena.allocator, res_const.limbs); + return Value.Tag.int_big_negative.create(arena, res_const.limbs); } } /// Asserts that self.zigTypeTag() == .Int. - pub fn maxInt(self: Type, arena: *std.heap.ArenaAllocator, target: Target) !Value { + pub fn maxInt(self: Type, arena: *Allocator, target: Target) !Value { assert(self.zigTypeTag() == .Int); const info = self.intInfo(target); if (info.signedness == .signed and (info.bits - 1) <= std.math.maxInt(u6)) { const n: i64 = (@as(i64, 1) << @truncate(u6, info.bits - 1)) - 1; - return Value.Tag.int_i64.create(&arena.allocator, n); + return Value.Tag.int_i64.create(arena, n); } else if (info.signedness == .signed and info.bits <= std.math.maxInt(u6)) { const n: u64 = (@as(u64, 1) << @truncate(u6, info.bits)) - 1; - return Value.Tag.int_u64.create(&arena.allocator, n); + return Value.Tag.int_u64.create(arena, n); } - var res = try std.math.big.int.Managed.initSet(&arena.allocator, 1); + var res = try std.math.big.int.Managed.initSet(arena, 1); try res.shiftLeft(res, info.bits - @boolToInt(info.signedness == .signed)); const one = std.math.big.int.Const{ .limbs = &[_]std.math.big.Limb{1}, @@ -2927,9 +2944,9 @@ pub const Type = extern union { const res_const = res.toConst(); if (res_const.positive) { - return Value.Tag.int_big_positive.create(&arena.allocator, res_const.limbs); + return Value.Tag.int_big_positive.create(arena, res_const.limbs); } else { - return Value.Tag.int_big_negative.create(&arena.allocator, res_const.limbs); + return Value.Tag.int_big_negative.create(arena, res_const.limbs); } } diff --git a/src/value.zig b/src/value.zig index 88d0d04086..177359d652 100644 --- a/src/value.zig +++ b/src/value.zig @@ -1524,6 +1524,230 @@ pub const Value = extern union { }; } + pub fn intToFloat(val: Value, allocator: *Allocator, dest_ty: Type, target: Target) !Value { + switch (val.tag()) { + .undef, .zero, .one => return val, + .int_u64 => { + return 
intToFloatInner(val.castTag(.int_u64).?.data, allocator, dest_ty, target); + }, + .int_i64 => { + return intToFloatInner(val.castTag(.int_i64).?.data, allocator, dest_ty, target); + }, + .int_big_positive, .int_big_negative => @panic("big int to float"), + else => unreachable, + } + } + + fn intToFloatInner(x: anytype, arena: *Allocator, dest_ty: Type, target: Target) !Value { + switch (dest_ty.floatBits(target)) { + 16 => return Value.Tag.float_16.create(arena, @intToFloat(f16, x)), + 32 => return Value.Tag.float_32.create(arena, @intToFloat(f32, x)), + 64 => return Value.Tag.float_64.create(arena, @intToFloat(f64, x)), + 128 => return Value.Tag.float_128.create(arena, @intToFloat(f128, x)), + else => unreachable, + } + } + + /// Supports both floats and ints; handles undefined. + pub fn numberAddWrap( + lhs: Value, + rhs: Value, + ty: Type, + arena: *Allocator, + target: Target, + ) !Value { + if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); + + if (ty.isAnyFloat()) { + return floatAdd(lhs, rhs, ty, arena); + } + const result = try intAdd(lhs, rhs, arena); + + const max = try ty.maxInt(arena, target); + if (compare(result, .gt, max, ty)) { + @panic("TODO comptime wrapping integer addition"); + } + + const min = try ty.minInt(arena, target); + if (compare(result, .lt, min, ty)) { + @panic("TODO comptime wrapping integer addition"); + } + + return result; + } + + /// Supports both floats and ints; handles undefined. + pub fn numberSubWrap( + lhs: Value, + rhs: Value, + ty: Type, + arena: *Allocator, + target: Target, + ) !Value { + if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); + + if (ty.isAnyFloat()) { + return floatSub(lhs, rhs, ty, arena); + } + const result = try intSub(lhs, rhs, arena); + + const max = try ty.maxInt(arena, target); + if (compare(result, .gt, max, ty)) { + @panic("TODO comptime wrapping integer subtraction"); + } + + const min = try ty.minInt(arena, target); + if (compare(result, .lt, min, ty)) { + @panic("TODO comptime wrapping integer subtraction"); + } + + return result; + } + + /// Supports both floats and ints; handles undefined. + pub fn numberMax(lhs: Value, rhs: Value, arena: *Allocator) !Value { + if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); + + // TODO is this a performance issue? maybe we should try the operation without + // resorting to BigInt first. + var lhs_space: Value.BigIntSpace = undefined; + var rhs_space: Value.BigIntSpace = undefined; + const lhs_bigint = lhs.toBigInt(&lhs_space); + const rhs_bigint = rhs.toBigInt(&rhs_space); + const limbs = try arena.alloc( + std.math.big.Limb, + std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len), + ); + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + + switch (lhs_bigint.order(rhs_bigint)) { + .lt => result_bigint.copy(rhs_bigint), + .gt, .eq => result_bigint.copy(lhs_bigint), + } + + const result_limbs = result_bigint.limbs[0..result_bigint.len]; + + if (result_bigint.positive) { + return Value.Tag.int_big_positive.create(arena, result_limbs); + } else { + return Value.Tag.int_big_negative.create(arena, result_limbs); + } + } + + /// Supports both floats and ints; handles undefined. + pub fn numberMin(lhs: Value, rhs: Value, arena: *Allocator) !Value { + if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); + + // TODO is this a performance issue? maybe we should try the operation without + // resorting to BigInt first. 
+ var lhs_space: Value.BigIntSpace = undefined; + var rhs_space: Value.BigIntSpace = undefined; + const lhs_bigint = lhs.toBigInt(&lhs_space); + const rhs_bigint = rhs.toBigInt(&rhs_space); + const limbs = try arena.alloc( + std.math.big.Limb, + std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len), + ); + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + + switch (lhs_bigint.order(rhs_bigint)) { + .lt => result_bigint.copy(lhs_bigint), + .gt, .eq => result_bigint.copy(rhs_bigint), + } + + const result_limbs = result_bigint.limbs[0..result_bigint.len]; + + if (result_bigint.positive) { + return Value.Tag.int_big_positive.create(arena, result_limbs); + } else { + return Value.Tag.int_big_negative.create(arena, result_limbs); + } + } + + /// operands must be integers; handles undefined. + pub fn bitwiseAnd(lhs: Value, rhs: Value, arena: *Allocator) !Value { + if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); + + // TODO is this a performance issue? maybe we should try the operation without + // resorting to BigInt first. + var lhs_space: Value.BigIntSpace = undefined; + var rhs_space: Value.BigIntSpace = undefined; + const lhs_bigint = lhs.toBigInt(&lhs_space); + const rhs_bigint = rhs.toBigInt(&rhs_space); + const limbs = try arena.alloc( + std.math.big.Limb, + std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len), + ); + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + result_bigint.bitAnd(lhs_bigint, rhs_bigint); + const result_limbs = result_bigint.limbs[0..result_bigint.len]; + + if (result_bigint.positive) { + return Value.Tag.int_big_positive.create(arena, result_limbs); + } else { + return Value.Tag.int_big_negative.create(arena, result_limbs); + } + } + + /// operands must be integers; handles undefined. + pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: *Allocator) !Value { + if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); + + _ = ty; + _ = arena; + @panic("TODO comptime bitwise NAND"); + } + + /// operands must be integers; handles undefined. + pub fn bitwiseOr(lhs: Value, rhs: Value, arena: *Allocator) !Value { + if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); + + // TODO is this a performance issue? maybe we should try the operation without + // resorting to BigInt first. + var lhs_space: Value.BigIntSpace = undefined; + var rhs_space: Value.BigIntSpace = undefined; + const lhs_bigint = lhs.toBigInt(&lhs_space); + const rhs_bigint = rhs.toBigInt(&rhs_space); + const limbs = try arena.alloc( + std.math.big.Limb, + std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len), + ); + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + result_bigint.bitOr(lhs_bigint, rhs_bigint); + const result_limbs = result_bigint.limbs[0..result_bigint.len]; + + if (result_bigint.positive) { + return Value.Tag.int_big_positive.create(arena, result_limbs); + } else { + return Value.Tag.int_big_negative.create(arena, result_limbs); + } + } + + /// operands must be integers; handles undefined. + pub fn bitwiseXor(lhs: Value, rhs: Value, arena: *Allocator) !Value { + if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); + + // TODO is this a performance issue? maybe we should try the operation without + // resorting to BigInt first. 
+ var lhs_space: Value.BigIntSpace = undefined; + var rhs_space: Value.BigIntSpace = undefined; + const lhs_bigint = lhs.toBigInt(&lhs_space); + const rhs_bigint = rhs.toBigInt(&rhs_space); + const limbs = try arena.alloc( + std.math.big.Limb, + std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len), + ); + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + result_bigint.bitXor(lhs_bigint, rhs_bigint); + const result_limbs = result_bigint.limbs[0..result_bigint.len]; + + if (result_bigint.positive) { + return Value.Tag.int_big_positive.create(arena, result_limbs); + } else { + return Value.Tag.int_big_negative.create(arena, result_limbs); + } + } + pub fn intAdd(lhs: Value, rhs: Value, allocator: *Allocator) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. diff --git a/test/behavior/atomics.zig b/test/behavior/atomics.zig index 75e33477bc..311fb4b3b2 100644 --- a/test/behavior/atomics.zig +++ b/test/behavior/atomics.zig @@ -138,3 +138,32 @@ test "atomic store" { @atomicStore(u32, &x, 12345678, .SeqCst); try expect(@atomicLoad(u32, &x, .SeqCst) == 12345678); } + +test "atomic store comptime" { + comptime try testAtomicStore(); + try testAtomicStore(); +} + +fn testAtomicStore() !void { + var x: u32 = 0; + @atomicStore(u32, &x, 1, .SeqCst); + try expect(@atomicLoad(u32, &x, .SeqCst) == 1); + @atomicStore(u32, &x, 12345678, .SeqCst); + try expect(@atomicLoad(u32, &x, .SeqCst) == 12345678); +} + +test "atomicrmw with floats" { + try testAtomicRmwFloat(); + comptime try testAtomicRmwFloat(); +} + +fn testAtomicRmwFloat() !void { + var x: f32 = 0; + try expect(x == 0); + _ = @atomicRmw(f32, &x, .Xchg, 1, .SeqCst); + try expect(x == 1); + _ = @atomicRmw(f32, &x, .Add, 5, .SeqCst); + try expect(x == 6); + _ = @atomicRmw(f32, &x, .Sub, 2, .SeqCst); + try expect(x == 4); +} diff --git a/test/behavior/atomics_stage1.zig b/test/behavior/atomics_stage1.zig index 22e3ae5939..424f33b403 100644 --- a/test/behavior/atomics_stage1.zig +++ b/test/behavior/atomics_stage1.zig @@ -3,35 +3,6 @@ const expect = std.testing.expect; const expectEqual = std.testing.expectEqual; const builtin = @import("builtin"); -test "atomic store comptime" { - comptime try testAtomicStore(); - try testAtomicStore(); -} - -fn testAtomicStore() !void { - var x: u32 = 0; - @atomicStore(u32, &x, 1, .SeqCst); - try expect(@atomicLoad(u32, &x, .SeqCst) == 1); - @atomicStore(u32, &x, 12345678, .SeqCst); - try expect(@atomicLoad(u32, &x, .SeqCst) == 12345678); -} - -test "atomicrmw with floats" { - try testAtomicRmwFloat(); - comptime try testAtomicRmwFloat(); -} - -fn testAtomicRmwFloat() !void { - var x: f32 = 0; - try expect(x == 0); - _ = @atomicRmw(f32, &x, .Xchg, 1, .SeqCst); - try expect(x == 1); - _ = @atomicRmw(f32, &x, .Add, 5, .SeqCst); - try expect(x == 6); - _ = @atomicRmw(f32, &x, .Sub, 2, .SeqCst); - try expect(x == 4); -} - test "atomicrmw with ints" { try testAtomicRmwInt(); comptime try testAtomicRmwInt(); -- cgit v1.2.3 From 5913140b6bf96e168a0167906a78e2d4aac5bd9d Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 21 Sep 2021 15:08:32 -0700 Subject: stage2: free Sema's arena after generating machine code Previously, linker backends or machine code backends were able to hold on to references to inside Sema's temporary arena. However there can be large objects stored there that we want to free after machine code is generated. 
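
The fix, roughly sketched (a simplified view of the Compilation.zig call site below; error handling is elided and names may differ slightly):

    // One arena per analyzed function body; everything Sema allocates into it
    // only needs to outlive machine code generation for that function.
    var tmp_arena = std.heap.ArenaAllocator.init(gpa);
    defer tmp_arena.deinit();
    const sema_arena = &tmp_arena.allocator;

    // Backends consume the resulting Air during updateFunc and must copy
    // anything they keep (e.g. into the Decl arena); nothing may still point
    // into sema_arena once this scope ends.
    var air = try module.analyzeFnBody(decl, func, sema_arena);
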
The primary change in this commit is to use a temporary arena for Sema of function bodies that gets freed after machine code backend finishes handling `updateFunc` (at the same time that Air and Liveness get freed). The other changes in this commit are fixing issues that fell out from the primary change. * The C linker backend is rewritten to handle updateDecl and updateFunc separately. Also, all Decl updates get access to typedefs and fwd_decls, not only functions. * The C linker backend is updated to the new API that does not depend on allocateDeclIndexes and does not have to handle garbage collected decls. * The C linker backend uses an arena for Type/Value objects that `typedefs` references. These can be garbage collected every so often after flush(), however that garbage collection code is not implemented at this time. It will be pretty simple, just allocate a new arena, copy all the Type objects to it, update the keys of the hash map, free the old arena. * Sema: fix a handful of instances of not copying Type/Value objects from the temporary arena into the appropriate Decl arena. * Type: fix some function types not reporting hasCodeGenBits() correctly. --- src/Compilation.zig | 14 +- src/Module.zig | 32 +- src/Sema.zig | 16 +- src/codegen/c.zig | 1095 ++++++++++++++++++++++++++------------------------- src/link.zig | 10 +- src/link/C.zig | 258 +++++++----- src/type.zig | 10 +- 7 files changed, 773 insertions(+), 662 deletions(-) (limited to 'src/codegen/c.zig') diff --git a/src/Compilation.zig b/src/Compilation.zig index 53e643acb8..48c907c759 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2145,7 +2145,11 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor const module = self.bin_file.options.module.?; const decl = func.owner_decl; - var air = module.analyzeFnBody(decl, func) catch |err| switch (err) { + var tmp_arena = std.heap.ArenaAllocator.init(gpa); + defer tmp_arena.deinit(); + const sema_arena = &tmp_arena.allocator; + + var air = module.analyzeFnBody(decl, func, sema_arena) catch |err| switch (err) { error.AnalysisFail => { assert(func.state != .in_progress); continue; @@ -2207,16 +2211,20 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor const decl_emit_h = decl.getEmitH(module); const fwd_decl = &decl_emit_h.fwd_decl; fwd_decl.shrinkRetainingCapacity(0); + var typedefs_arena = std.heap.ArenaAllocator.init(gpa); + defer typedefs_arena.deinit(); var dg: c_codegen.DeclGen = .{ + .gpa = gpa, .module = module, .error_msg = null, .decl = decl, .fwd_decl = fwd_decl.toManaged(gpa), - // we don't want to emit optionals and error unions to headers since they have no ABI - .typedefs = undefined, + .typedefs = c_codegen.TypedefMap.init(gpa), + .typedefs_arena = &typedefs_arena.allocator, }; defer dg.fwd_decl.deinit(); + defer dg.typedefs.deinit(); c_codegen.genHeader(&dg) catch |err| switch (err) { error.AnalysisFail => { diff --git a/src/Module.zig b/src/Module.zig index fd275b507f..88817efc26 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -610,7 +610,7 @@ pub const Decl = struct { /// If the Decl has a value and it is a function, return it, /// otherwise null. 
- pub fn getFunction(decl: *Decl) ?*Fn { + pub fn getFunction(decl: *const Decl) ?*Fn { if (!decl.owns_tv) return null; const func = (decl.val.castTag(.function) orelse return null).data; assert(func.owner_decl == decl); @@ -3789,7 +3789,7 @@ pub fn clearDecl( .elf => .{ .elf = link.File.Elf.TextBlock.empty }, .macho => .{ .macho = link.File.MachO.TextBlock.empty }, .plan9 => .{ .plan9 = link.File.Plan9.DeclBlock.empty }, - .c => .{ .c = link.File.C.DeclBlock.empty }, + .c => .{ .c = {} }, .wasm => .{ .wasm = link.File.Wasm.DeclBlock.empty }, .spirv => .{ .spirv = {} }, }; @@ -3798,7 +3798,7 @@ pub fn clearDecl( .elf => .{ .elf = link.File.Elf.SrcFn.empty }, .macho => .{ .macho = link.File.MachO.SrcFn.empty }, .plan9 => .{ .plan9 = {} }, - .c => .{ .c = link.File.C.FnBlock.empty }, + .c => .{ .c = {} }, .wasm => .{ .wasm = link.File.Wasm.FnData.empty }, .spirv => .{ .spirv = .{} }, }; @@ -3828,10 +3828,13 @@ pub fn deleteUnusedDecl(mod: *Module, decl: *Decl) void { // about the Decl in the first place. // Until then, we did call `allocateDeclIndexes` on this anonymous Decl and so we // must call `freeDecl` in the linker backend now. - if (decl.has_tv) { - if (decl.ty.hasCodeGenBits()) { - mod.comp.bin_file.freeDecl(decl); - } + switch (mod.comp.bin_file.tag) { + .c => {}, // this linker backend has already migrated to the new API + else => if (decl.has_tv) { + if (decl.ty.hasCodeGenBits()) { + mod.comp.bin_file.freeDecl(decl); + } + }, } const dependants = decl.dependants.keys(); @@ -3893,22 +3896,16 @@ fn deleteDeclExports(mod: *Module, decl: *Decl) void { mod.gpa.free(kv.value); } -pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) SemaError!Air { +pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn, arena: *Allocator) SemaError!Air { const tracy = trace(@src()); defer tracy.end(); const gpa = mod.gpa; - // Use the Decl's arena for function memory. - var arena = decl.value_arena.?.promote(gpa); - defer decl.value_arena.?.* = arena.state; - - const fn_ty = decl.ty; - var sema: Sema = .{ .mod = mod, .gpa = gpa, - .arena = &arena.allocator, + .arena = arena, .code = decl.namespace.file_scope.zir, .owner_decl = decl, .namespace = decl.namespace, @@ -3942,6 +3939,7 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) SemaError!Air { // This could be a generic function instantiation, however, in which case we need to // map the comptime parameters to constant values and only emit arg AIR instructions // for the runtime ones. + const fn_ty = decl.ty; const runtime_params_len = @intCast(u32, fn_ty.fnParamLen()); try inner_block.instructions.ensureTotalCapacity(gpa, runtime_params_len); try sema.air_instructions.ensureUnusedCapacity(gpa, fn_info.total_params_len * 2); // * 2 for the `addType` @@ -4072,7 +4070,7 @@ pub fn allocateNewDecl(mod: *Module, namespace: *Scope.Namespace, src_node: Ast. .elf => .{ .elf = link.File.Elf.TextBlock.empty }, .macho => .{ .macho = link.File.MachO.TextBlock.empty }, .plan9 => .{ .plan9 = link.File.Plan9.DeclBlock.empty }, - .c => .{ .c = link.File.C.DeclBlock.empty }, + .c => .{ .c = {} }, .wasm => .{ .wasm = link.File.Wasm.DeclBlock.empty }, .spirv => .{ .spirv = {} }, }, @@ -4081,7 +4079,7 @@ pub fn allocateNewDecl(mod: *Module, namespace: *Scope.Namespace, src_node: Ast. 
.elf => .{ .elf = link.File.Elf.SrcFn.empty }, .macho => .{ .macho = link.File.MachO.SrcFn.empty }, .plan9 => .{ .plan9 = {} }, - .c => .{ .c = link.File.C.FnBlock.empty }, + .c => .{ .c = {} }, .wasm => .{ .wasm = link.File.Wasm.FnData.empty }, .spirv => .{ .spirv = .{} }, }, diff --git a/src/Sema.zig b/src/Sema.zig index 786db21b4a..f6bea69129 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2999,6 +2999,8 @@ fn analyzeCall( // TODO: check whether any external comptime memory was mutated by the // comptime function call. If so, then do not memoize the call here. + // TODO: re-evaluate whether memoized_calls needs its own arena. I think + // it should be fine to use the Decl arena for the function. { var arena_allocator = std.heap.ArenaAllocator.init(gpa); errdefer arena_allocator.deinit(); @@ -3009,7 +3011,7 @@ fn analyzeCall( } try mod.memoized_calls.put(gpa, memoized_call_key, .{ - .val = result_val, + .val = try result_val.copy(arena), .arena = arena_allocator.state, }); delete_memoized_call_key = false; @@ -5876,10 +5878,7 @@ fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr else try Type.Tag.array.create(anon_decl.arena(), .{ .len = final_len, .elem_type = lhs_info.elem_type }); const val = try Value.Tag.array.create(anon_decl.arena(), buf); - return sema.analyzeDeclRef(try anon_decl.finish( - ty, - val, - )); + return sema.analyzeDeclRef(try anon_decl.finish(ty, val)); } return sema.mod.fail(&block.base, lhs_src, "TODO array_cat more types of Values", .{}); } else { @@ -5941,10 +5940,7 @@ fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr } } const val = try Value.Tag.array.create(anon_decl.arena(), buf); - return sema.analyzeDeclRef(try anon_decl.finish( - final_ty, - val, - )); + return sema.analyzeDeclRef(try anon_decl.finish(final_ty, val)); } return sema.mod.fail(&block.base, lhs_src, "TODO array_mul more types of Values", .{}); } @@ -9979,7 +9975,7 @@ fn analyzeRef( var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( - operand_ty, + try operand_ty.copy(anon_decl.arena()), try val.copy(anon_decl.arena()), )); } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index fb8211f6b8..c4e1d48062 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -91,55 +91,76 @@ pub fn fmtIdent(ident: []const u8) std.fmt.Formatter(formatIdent) { return .{ .data = ident }; } -/// This data is available when outputting .c code for a Module. +/// This data is available when outputting .c code for a `*Module.Fn`. /// It is not available when generating .h file. -pub const Object = struct { - dg: DeclGen, +pub const Function = struct { air: Air, liveness: Liveness, - gpa: *mem.Allocator, - code: std.ArrayList(u8), value_map: CValueMap, blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{}, next_arg_index: usize = 0, next_local_index: usize = 0, next_block_index: usize = 0, - indent_writer: IndentWriter(std.ArrayList(u8).Writer), + object: Object, + func: *Module.Fn, - fn resolveInst(o: *Object, inst: Air.Inst.Ref) !CValue { - if (o.air.value(inst)) |_| { + fn resolveInst(f: *Function, inst: Air.Inst.Ref) !CValue { + if (f.air.value(inst)) |_| { return CValue{ .constant = inst }; } const index = Air.refToIndex(inst).?; - return o.value_map.get(index).?; // Assertion means instruction does not dominate usage. + return f.value_map.get(index).?; // Assertion means instruction does not dominate usage. 
} - fn allocLocalValue(o: *Object) CValue { - const result = o.next_local_index; - o.next_local_index += 1; + fn allocLocalValue(f: *Function) CValue { + const result = f.next_local_index; + f.next_local_index += 1; return .{ .local = result }; } - fn allocLocal(o: *Object, ty: Type, mutability: Mutability) !CValue { - const local_value = o.allocLocalValue(); - try o.renderTypeAndName(o.writer(), ty, local_value, mutability); + fn allocLocal(f: *Function, ty: Type, mutability: Mutability) !CValue { + const local_value = f.allocLocalValue(); + try f.object.renderTypeAndName(f.object.writer(), ty, local_value, mutability); return local_value; } + fn writeCValue(f: *Function, w: anytype, c_value: CValue) !void { + switch (c_value) { + .constant => |inst| { + const ty = f.air.typeOf(inst); + const val = f.air.value(inst).?; + return f.object.dg.renderValue(w, ty, val); + }, + else => return Object.writeCValue(w, c_value), + } + } + + fn fail(f: *Function, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } { + return f.object.dg.fail(format, args); + } + + fn renderType(f: *Function, w: anytype, t: Type) !void { + return f.object.dg.renderType(w, t); + } +}; + +/// This data is available when outputting .c code for a `Module`. +/// It is not available when generating .h file. +pub const Object = struct { + dg: DeclGen, + code: std.ArrayList(u8), + indent_writer: IndentWriter(std.ArrayList(u8).Writer), + fn writer(o: *Object) IndentWriter(std.ArrayList(u8).Writer).Writer { return o.indent_writer.writer(); } - fn writeCValue(o: *Object, w: anytype, c_value: CValue) !void { + fn writeCValue(w: anytype, c_value: CValue) !void { switch (c_value) { .none => unreachable, .local => |i| return w.print("t{d}", .{i}), .local_ref => |i| return w.print("&t{d}", .{i}), - .constant => |inst| { - const ty = o.air.typeOf(inst); - const val = o.air.value(inst).?; - return o.dg.renderValue(w, ty, val); - }, + .constant => unreachable, .arg => |i| return w.print("a{d}", .{i}), .decl => |decl| return w.writeAll(mem.span(decl.name)), .decl_ref => |decl| return w.print("&{s}", .{decl.name}), @@ -153,7 +174,7 @@ pub const Object = struct { name: CValue, mutability: Mutability, ) error{ OutOfMemory, AnalysisFail }!void { - var suffix = std.ArrayList(u8).init(o.gpa); + var suffix = std.ArrayList(u8).init(o.dg.gpa); defer suffix.deinit(); var render_ty = ty; @@ -177,7 +198,7 @@ pub const Object = struct { .Const => try w.writeAll("const "), .Mut => {}, } - try o.writeCValue(w, name); + try writeCValue(w, name); try w.writeAll(")("); const param_len = render_ty.fnParamLen(); const is_var_args = render_ty.fnIsVarArgs(); @@ -205,7 +226,7 @@ pub const Object = struct { .Mut => "", }; try w.print(" {s}", .{const_prefix}); - try o.writeCValue(w, name); + try writeCValue(w, name); } try w.writeAll(suffix.items); } @@ -213,11 +234,14 @@ pub const Object = struct { /// This data is available both when outputting .c code and when outputting an .h file. pub const DeclGen = struct { + gpa: *std.mem.Allocator, module: *Module, decl: *Decl, fwd_decl: std.ArrayList(u8), error_msg: ?*Module.ErrorMsg, + /// The key of this map is Type which has references to typedefs_arena. 
typedefs: TypedefMap, + typedefs_arena: *std.mem.Allocator, fn fail(dg: *DeclGen, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } { @setCold(true); @@ -545,7 +569,10 @@ pub const DeclGen = struct { try dg.typedefs.ensureUnusedCapacity(1); try w.writeAll(name); - dg.typedefs.putAssumeCapacityNoClobber(t, .{ .name = name, .rendered = rendered }); + dg.typedefs.putAssumeCapacityNoClobber( + try t.copy(dg.typedefs_arena), + .{ .name = name, .rendered = rendered }, + ); } else { try dg.renderType(w, t.elemType()); try w.writeAll(" *"); @@ -586,7 +613,10 @@ pub const DeclGen = struct { try dg.typedefs.ensureUnusedCapacity(1); try w.writeAll(name); - dg.typedefs.putAssumeCapacityNoClobber(t, .{ .name = name, .rendered = rendered }); + dg.typedefs.putAssumeCapacityNoClobber( + try t.copy(dg.typedefs_arena), + .{ .name = name, .rendered = rendered }, + ); }, .ErrorSet => { comptime std.debug.assert(Type.initTag(.anyerror).abiSize(std.Target.current) == 2); @@ -626,7 +656,10 @@ pub const DeclGen = struct { try dg.typedefs.ensureUnusedCapacity(1); try w.writeAll(name); - dg.typedefs.putAssumeCapacityNoClobber(t, .{ .name = name, .rendered = rendered }); + dg.typedefs.putAssumeCapacityNoClobber( + try t.copy(dg.typedefs_arena), + .{ .name = name, .rendered = rendered }, + ); }, .Struct => { if (dg.typedefs.get(t)) |some| { @@ -659,7 +692,10 @@ pub const DeclGen = struct { try dg.typedefs.ensureUnusedCapacity(1); try w.writeAll(name); - dg.typedefs.putAssumeCapacityNoClobber(t, .{ .name = name, .rendered = rendered }); + dg.typedefs.putAssumeCapacityNoClobber( + try t.copy(dg.typedefs_arena), + .{ .name = name, .rendered = rendered }, + ); }, .Enum => { // For enums, we simply use the integer tag type. @@ -724,6 +760,29 @@ pub const DeclGen = struct { } }; +pub fn genFunc(f: *Function) !void { + const tracy = trace(@src()); + defer tracy.end(); + + const o = &f.object; + const is_global = o.dg.module.decl_exports.contains(f.func.owner_decl); + const fwd_decl_writer = o.dg.fwd_decl.writer(); + if (is_global) { + try fwd_decl_writer.writeAll("ZIG_EXTERN_C "); + } + try o.dg.renderFunctionSignature(fwd_decl_writer, is_global); + try fwd_decl_writer.writeAll(";\n"); + + try o.indent_writer.insertNewline(); + try o.dg.renderFunctionSignature(o.writer(), is_global); + + try o.writer().writeByte(' '); + const main_body = f.air.getMainBody(); + try genBody(f, main_body); + + try o.indent_writer.insertNewline(); +} + pub fn genDecl(o: *Object) !void { const tracy = trace(@src()); defer tracy.end(); @@ -732,28 +791,6 @@ pub fn genDecl(o: *Object) !void { .ty = o.dg.decl.ty, .val = o.dg.decl.val, }; - if (tv.val.castTag(.function)) |func_payload| { - const func: *Module.Fn = func_payload.data; - if (func.owner_decl == o.dg.decl) { - const is_global = o.dg.declIsGlobal(tv); - const fwd_decl_writer = o.dg.fwd_decl.writer(); - if (is_global) { - try fwd_decl_writer.writeAll("ZIG_EXTERN_C "); - } - try o.dg.renderFunctionSignature(fwd_decl_writer, is_global); - try fwd_decl_writer.writeAll(";\n"); - - try o.indent_writer.insertNewline(); - try o.dg.renderFunctionSignature(o.writer(), is_global); - - try o.writer().writeByte(' '); - const main_body = o.air.getMainBody(); - try genBody(o, main_body); - - try o.indent_writer.insertNewline(); - return; - } - } if (tv.val.tag() == .extern_fn) { const writer = o.writer(); try writer.writeAll("ZIG_EXTERN_C "); @@ -821,250 +858,250 @@ pub fn genHeader(dg: *DeclGen) error{ AnalysisFail, OutOfMemory }!void { } } -fn genBody(o: *Object, body: 
[]const Air.Inst.Index) error{ AnalysisFail, OutOfMemory }!void { - const writer = o.writer(); +fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfMemory }!void { + const writer = f.object.writer(); if (body.len == 0) { try writer.writeAll("{}"); return; } try writer.writeAll("{\n"); - o.indent_writer.pushIndent(); + f.object.indent_writer.pushIndent(); - const air_tags = o.air.instructions.items(.tag); + const air_tags = f.air.instructions.items(.tag); for (body) |inst| { const result_value = switch (air_tags[inst]) { // zig fmt: off .constant => unreachable, // excluded from function bodies .const_ty => unreachable, // excluded from function bodies - .arg => airArg(o), + .arg => airArg(f), - .breakpoint => try airBreakpoint(o), - .unreach => try airUnreach(o), - .fence => try airFence(o, inst), + .breakpoint => try airBreakpoint(f), + .unreach => try airUnreach(f), + .fence => try airFence(f, inst), // TODO use a different strategy for add that communicates to the optimizer // that wrapping is UB. - .add, .ptr_add => try airBinOp( o, inst, " + "), - .addwrap => try airWrapOp(o, inst, " + ", "addw_"), + .add, .ptr_add => try airBinOp( f, inst, " + "), + .addwrap => try airWrapOp(f, inst, " + ", "addw_"), // TODO use a different strategy for sub that communicates to the optimizer // that wrapping is UB. - .sub, .ptr_sub => try airBinOp( o, inst, " - "), - .subwrap => try airWrapOp(o, inst, " - ", "subw_"), + .sub, .ptr_sub => try airBinOp( f, inst, " - "), + .subwrap => try airWrapOp(f, inst, " - ", "subw_"), // TODO use a different strategy for mul that communicates to the optimizer // that wrapping is UB. - .mul => try airBinOp( o, inst, " * "), - .mulwrap => try airWrapOp(o, inst, " * ", "mulw_"), + .mul => try airBinOp( f, inst, " * "), + .mulwrap => try airWrapOp(f, inst, " * ", "mulw_"), // TODO use a different strategy for div that communicates to the optimizer // that wrapping is UB. 
- .div => try airBinOp( o, inst, " / "), - .rem => try airBinOp( o, inst, " % "), + .div => try airBinOp( f, inst, " / "), + .rem => try airBinOp( f, inst, " % "), - .cmp_eq => try airBinOp(o, inst, " == "), - .cmp_gt => try airBinOp(o, inst, " > "), - .cmp_gte => try airBinOp(o, inst, " >= "), - .cmp_lt => try airBinOp(o, inst, " < "), - .cmp_lte => try airBinOp(o, inst, " <= "), - .cmp_neq => try airBinOp(o, inst, " != "), + .cmp_eq => try airBinOp(f, inst, " == "), + .cmp_gt => try airBinOp(f, inst, " > "), + .cmp_gte => try airBinOp(f, inst, " >= "), + .cmp_lt => try airBinOp(f, inst, " < "), + .cmp_lte => try airBinOp(f, inst, " <= "), + .cmp_neq => try airBinOp(f, inst, " != "), // bool_and and bool_or are non-short-circuit operations - .bool_and => try airBinOp(o, inst, " & "), - .bool_or => try airBinOp(o, inst, " | "), - .bit_and => try airBinOp(o, inst, " & "), - .bit_or => try airBinOp(o, inst, " | "), - .xor => try airBinOp(o, inst, " ^ "), - - .shr => try airBinOp(o, inst, " >> "), - .shl => try airBinOp(o, inst, " << "), - - .not => try airNot( o, inst), - - .optional_payload => try airOptionalPayload(o, inst), - .optional_payload_ptr => try airOptionalPayload(o, inst), - - .is_err => try airIsErr(o, inst, "", ".", "!="), - .is_non_err => try airIsErr(o, inst, "", ".", "=="), - .is_err_ptr => try airIsErr(o, inst, "*", "->", "!="), - .is_non_err_ptr => try airIsErr(o, inst, "*", "->", "=="), - - .is_null => try airIsNull(o, inst, "==", ""), - .is_non_null => try airIsNull(o, inst, "!=", ""), - .is_null_ptr => try airIsNull(o, inst, "==", "[0]"), - .is_non_null_ptr => try airIsNull(o, inst, "!=", "[0]"), - - .alloc => try airAlloc(o, inst), - .assembly => try airAsm(o, inst), - .block => try airBlock(o, inst), - .bitcast => try airBitcast(o, inst), - .call => try airCall(o, inst), - .dbg_stmt => try airDbgStmt(o, inst), - .intcast => try airIntCast(o, inst), - .trunc => try airTrunc(o, inst), - .bool_to_int => try airBoolToInt(o, inst), - .load => try airLoad(o, inst), - .ret => try airRet(o, inst), - .store => try airStore(o, inst), - .loop => try airLoop(o, inst), - .cond_br => try airCondBr(o, inst), - .br => try airBr(o, inst), - .switch_br => try airSwitchBr(o, inst), - .wrap_optional => try airWrapOptional(o, inst), - .struct_field_ptr => try airStructFieldPtr(o, inst), - .array_to_slice => try airArrayToSlice(o, inst), - .cmpxchg_weak => try airCmpxchg(o, inst, "weak"), - .cmpxchg_strong => try airCmpxchg(o, inst, "strong"), - .atomic_rmw => try airAtomicRmw(o, inst), - .atomic_load => try airAtomicLoad(o, inst), - - .int_to_float, .float_to_int => try airSimpleCast(o, inst), - - .atomic_store_unordered => try airAtomicStore(o, inst, toMemoryOrder(.Unordered)), - .atomic_store_monotonic => try airAtomicStore(o, inst, toMemoryOrder(.Monotonic)), - .atomic_store_release => try airAtomicStore(o, inst, toMemoryOrder(.Release)), - .atomic_store_seq_cst => try airAtomicStore(o, inst, toMemoryOrder(.SeqCst)), - - .struct_field_ptr_index_0 => try airStructFieldPtrIndex(o, inst, 0), - .struct_field_ptr_index_1 => try airStructFieldPtrIndex(o, inst, 1), - .struct_field_ptr_index_2 => try airStructFieldPtrIndex(o, inst, 2), - .struct_field_ptr_index_3 => try airStructFieldPtrIndex(o, inst, 3), - - .struct_field_val => try airStructFieldVal(o, inst), - .slice_ptr => try airSliceField(o, inst, ".ptr;\n"), - .slice_len => try airSliceField(o, inst, ".len;\n"), - - .ptr_elem_val => try airPtrElemVal(o, inst, "["), - .ptr_ptr_elem_val => try airPtrElemVal(o, inst, "[0]["), - 
.ptr_elem_ptr => try airPtrElemPtr(o, inst), - .slice_elem_val => try airSliceElemVal(o, inst, "["), - .ptr_slice_elem_val => try airSliceElemVal(o, inst, "[0]["), - - .unwrap_errunion_payload => try airUnwrapErrUnionPay(o, inst), - .unwrap_errunion_err => try airUnwrapErrUnionErr(o, inst), - .unwrap_errunion_payload_ptr => try airUnwrapErrUnionPay(o, inst), - .unwrap_errunion_err_ptr => try airUnwrapErrUnionErr(o, inst), - .wrap_errunion_payload => try airWrapErrUnionPay(o, inst), - .wrap_errunion_err => try airWrapErrUnionErr(o, inst), - - .ptrtoint => return o.dg.fail("TODO: C backend: implement codegen for ptrtoint", .{}), - .floatcast => return o.dg.fail("TODO: C backend: implement codegen for floatcast", .{}), + .bool_and => try airBinOp(f, inst, " & "), + .bool_or => try airBinOp(f, inst, " | "), + .bit_and => try airBinOp(f, inst, " & "), + .bit_or => try airBinOp(f, inst, " | "), + .xor => try airBinOp(f, inst, " ^ "), + + .shr => try airBinOp(f, inst, " >> "), + .shl => try airBinOp(f, inst, " << "), + + .not => try airNot( f, inst), + + .optional_payload => try airOptionalPayload(f, inst), + .optional_payload_ptr => try airOptionalPayload(f, inst), + + .is_err => try airIsErr(f, inst, "", ".", "!="), + .is_non_err => try airIsErr(f, inst, "", ".", "=="), + .is_err_ptr => try airIsErr(f, inst, "*", "->", "!="), + .is_non_err_ptr => try airIsErr(f, inst, "*", "->", "=="), + + .is_null => try airIsNull(f, inst, "==", ""), + .is_non_null => try airIsNull(f, inst, "!=", ""), + .is_null_ptr => try airIsNull(f, inst, "==", "[0]"), + .is_non_null_ptr => try airIsNull(f, inst, "!=", "[0]"), + + .alloc => try airAlloc(f, inst), + .assembly => try airAsm(f, inst), + .block => try airBlock(f, inst), + .bitcast => try airBitcast(f, inst), + .call => try airCall(f, inst), + .dbg_stmt => try airDbgStmt(f, inst), + .intcast => try airIntCast(f, inst), + .trunc => try airTrunc(f, inst), + .bool_to_int => try airBoolToInt(f, inst), + .load => try airLoad(f, inst), + .ret => try airRet(f, inst), + .store => try airStore(f, inst), + .loop => try airLoop(f, inst), + .cond_br => try airCondBr(f, inst), + .br => try airBr(f, inst), + .switch_br => try airSwitchBr(f, inst), + .wrap_optional => try airWrapOptional(f, inst), + .struct_field_ptr => try airStructFieldPtr(f, inst), + .array_to_slice => try airArrayToSlice(f, inst), + .cmpxchg_weak => try airCmpxchg(f, inst, "weak"), + .cmpxchg_strong => try airCmpxchg(f, inst, "strong"), + .atomic_rmw => try airAtomicRmw(f, inst), + .atomic_load => try airAtomicLoad(f, inst), + + .int_to_float, .float_to_int => try airSimpleCast(f, inst), + + .atomic_store_unordered => try airAtomicStore(f, inst, toMemoryOrder(.Unordered)), + .atomic_store_monotonic => try airAtomicStore(f, inst, toMemoryOrder(.Monotonic)), + .atomic_store_release => try airAtomicStore(f, inst, toMemoryOrder(.Release)), + .atomic_store_seq_cst => try airAtomicStore(f, inst, toMemoryOrder(.SeqCst)), + + .struct_field_ptr_index_0 => try airStructFieldPtrIndex(f, inst, 0), + .struct_field_ptr_index_1 => try airStructFieldPtrIndex(f, inst, 1), + .struct_field_ptr_index_2 => try airStructFieldPtrIndex(f, inst, 2), + .struct_field_ptr_index_3 => try airStructFieldPtrIndex(f, inst, 3), + + .struct_field_val => try airStructFieldVal(f, inst), + .slice_ptr => try airSliceField(f, inst, ".ptr;\n"), + .slice_len => try airSliceField(f, inst, ".len;\n"), + + .ptr_elem_val => try airPtrElemVal(f, inst, "["), + .ptr_ptr_elem_val => try airPtrElemVal(f, inst, "[0]["), + .ptr_elem_ptr => try 
airPtrElemPtr(f, inst), + .slice_elem_val => try airSliceElemVal(f, inst, "["), + .ptr_slice_elem_val => try airSliceElemVal(f, inst, "[0]["), + + .unwrap_errunion_payload => try airUnwrapErrUnionPay(f, inst), + .unwrap_errunion_err => try airUnwrapErrUnionErr(f, inst), + .unwrap_errunion_payload_ptr => try airUnwrapErrUnionPay(f, inst), + .unwrap_errunion_err_ptr => try airUnwrapErrUnionErr(f, inst), + .wrap_errunion_payload => try airWrapErrUnionPay(f, inst), + .wrap_errunion_err => try airWrapErrUnionErr(f, inst), + + .ptrtoint => return f.fail("TODO: C backend: implement codegen for ptrtoint", .{}), + .floatcast => return f.fail("TODO: C backend: implement codegen for floatcast", .{}), // zig fmt: on }; switch (result_value) { .none => {}, - else => try o.value_map.putNoClobber(inst, result_value), + else => try f.value_map.putNoClobber(inst, result_value), } } - o.indent_writer.popIndent(); + f.object.indent_writer.popIndent(); try writer.writeAll("}"); } -fn airSliceField(o: *Object, inst: Air.Inst.Index, suffix: []const u8) !CValue { - if (o.liveness.isUnused(inst)) +fn airSliceField(f: *Function, inst: Air.Inst.Index, suffix: []const u8) !CValue { + if (f.liveness.isUnused(inst)) return CValue.none; - const ty_op = o.air.instructions.items(.data)[inst].ty_op; - const operand = try o.resolveInst(ty_op.operand); - const writer = o.writer(); - const local = try o.allocLocal(Type.initTag(.usize), .Const); + const ty_op = f.air.instructions.items(.data)[inst].ty_op; + const operand = try f.resolveInst(ty_op.operand); + const writer = f.object.writer(); + const local = try f.allocLocal(Type.initTag(.usize), .Const); try writer.writeAll(" = "); - try o.writeCValue(writer, operand); + try f.writeCValue(writer, operand); try writer.writeAll(suffix); return local; } -fn airPtrElemVal(o: *Object, inst: Air.Inst.Index, prefix: []const u8) !CValue { +fn airPtrElemVal(f: *Function, inst: Air.Inst.Index, prefix: []const u8) !CValue { const is_volatile = false; // TODO - if (!is_volatile and o.liveness.isUnused(inst)) + if (!is_volatile and f.liveness.isUnused(inst)) return CValue.none; _ = prefix; - return o.dg.fail("TODO: C backend: airPtrElemVal", .{}); + return f.fail("TODO: C backend: airPtrElemVal", .{}); } -fn airPtrElemPtr(o: *Object, inst: Air.Inst.Index) !CValue { - if (o.liveness.isUnused(inst)) +fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { + if (f.liveness.isUnused(inst)) return CValue.none; - return o.dg.fail("TODO: C backend: airPtrElemPtr", .{}); + return f.fail("TODO: C backend: airPtrElemPtr", .{}); } -fn airSliceElemVal(o: *Object, inst: Air.Inst.Index, prefix: []const u8) !CValue { +fn airSliceElemVal(f: *Function, inst: Air.Inst.Index, prefix: []const u8) !CValue { const is_volatile = false; // TODO - if (!is_volatile and o.liveness.isUnused(inst)) + if (!is_volatile and f.liveness.isUnused(inst)) return CValue.none; - const bin_op = o.air.instructions.items(.data)[inst].bin_op; - const slice = try o.resolveInst(bin_op.lhs); - const index = try o.resolveInst(bin_op.rhs); - const writer = o.writer(); - const local = try o.allocLocal(o.air.typeOfIndex(inst), .Const); + const bin_op = f.air.instructions.items(.data)[inst].bin_op; + const slice = try f.resolveInst(bin_op.lhs); + const index = try f.resolveInst(bin_op.rhs); + const writer = f.object.writer(); + const local = try f.allocLocal(f.air.typeOfIndex(inst), .Const); try writer.writeAll(" = "); - try o.writeCValue(writer, slice); + try f.writeCValue(writer, slice); try writer.writeAll(prefix); - try 
o.writeCValue(writer, index); + try f.writeCValue(writer, index); try writer.writeAll("];\n"); return local; } -fn airAlloc(o: *Object, inst: Air.Inst.Index) !CValue { - const writer = o.writer(); - const inst_ty = o.air.typeOfIndex(inst); +fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { + const writer = f.object.writer(); + const inst_ty = f.air.typeOfIndex(inst); // First line: the variable used as data storage. const elem_type = inst_ty.elemType(); const mutability: Mutability = if (inst_ty.isConstPtr()) .Const else .Mut; - const local = try o.allocLocal(elem_type, mutability); + const local = try f.allocLocal(elem_type, mutability); try writer.writeAll(";\n"); return CValue{ .local_ref = local.local }; } -fn airArg(o: *Object) CValue { - const i = o.next_arg_index; - o.next_arg_index += 1; +fn airArg(f: *Function) CValue { + const i = f.next_arg_index; + f.next_arg_index += 1; return .{ .arg = i }; } -fn airLoad(o: *Object, inst: Air.Inst.Index) !CValue { - const ty_op = o.air.instructions.items(.data)[inst].ty_op; - const is_volatile = o.air.typeOf(ty_op.operand).isVolatilePtr(); - if (!is_volatile and o.liveness.isUnused(inst)) +fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { + const ty_op = f.air.instructions.items(.data)[inst].ty_op; + const is_volatile = f.air.typeOf(ty_op.operand).isVolatilePtr(); + if (!is_volatile and f.liveness.isUnused(inst)) return CValue.none; - const inst_ty = o.air.typeOfIndex(inst); - const operand = try o.resolveInst(ty_op.operand); - const writer = o.writer(); - const local = try o.allocLocal(inst_ty, .Const); + const inst_ty = f.air.typeOfIndex(inst); + const operand = try f.resolveInst(ty_op.operand); + const writer = f.object.writer(); + const local = try f.allocLocal(inst_ty, .Const); switch (operand) { .local_ref => |i| { const wrapped: CValue = .{ .local = i }; try writer.writeAll(" = "); - try o.writeCValue(writer, wrapped); + try f.writeCValue(writer, wrapped); try writer.writeAll(";\n"); }, .decl_ref => |decl| { const wrapped: CValue = .{ .decl = decl }; try writer.writeAll(" = "); - try o.writeCValue(writer, wrapped); + try f.writeCValue(writer, wrapped); try writer.writeAll(";\n"); }, else => { try writer.writeAll(" = *"); - try o.writeCValue(writer, operand); + try f.writeCValue(writer, operand); try writer.writeAll(";\n"); }, } return local; } -fn airRet(o: *Object, inst: Air.Inst.Index) !CValue { - const un_op = o.air.instructions.items(.data)[inst].un_op; - const writer = o.writer(); - if (o.air.typeOf(un_op).hasCodeGenBits()) { - const operand = try o.resolveInst(un_op); +fn airRet(f: *Function, inst: Air.Inst.Index) !CValue { + const un_op = f.air.instructions.items(.data)[inst].un_op; + const writer = f.object.writer(); + if (f.air.typeOf(un_op).hasCodeGenBits()) { + const operand = try f.resolveInst(un_op); try writer.writeAll("return "); - try o.writeCValue(writer, operand); + try f.writeCValue(writer, operand); try writer.writeAll(";\n"); } else { try writer.writeAll("return;\n"); @@ -1072,75 +1109,75 @@ fn airRet(o: *Object, inst: Air.Inst.Index) !CValue { return CValue.none; } -fn airIntCast(o: *Object, inst: Air.Inst.Index) !CValue { - if (o.liveness.isUnused(inst)) +fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue { + if (f.liveness.isUnused(inst)) return CValue.none; - const ty_op = o.air.instructions.items(.data)[inst].ty_op; - const operand = try o.resolveInst(ty_op.operand); + const ty_op = f.air.instructions.items(.data)[inst].ty_op; + const operand = try f.resolveInst(ty_op.operand); - const 
writer = o.writer(); - const inst_ty = o.air.typeOfIndex(inst); - const local = try o.allocLocal(inst_ty, .Const); + const writer = f.object.writer(); + const inst_ty = f.air.typeOfIndex(inst); + const local = try f.allocLocal(inst_ty, .Const); try writer.writeAll(" = ("); - try o.dg.renderType(writer, inst_ty); + try f.renderType(writer, inst_ty); try writer.writeAll(")"); - try o.writeCValue(writer, operand); + try f.writeCValue(writer, operand); try writer.writeAll(";\n"); return local; } -fn airTrunc(o: *Object, inst: Air.Inst.Index) !CValue { - if (o.liveness.isUnused(inst)) +fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { + if (f.liveness.isUnused(inst)) return CValue.none; - const ty_op = o.air.instructions.items(.data)[inst].ty_op; - const operand = try o.resolveInst(ty_op.operand); + const ty_op = f.air.instructions.items(.data)[inst].ty_op; + const operand = try f.resolveInst(ty_op.operand); _ = operand; - return o.dg.fail("TODO: C backend: airTrunc", .{}); + return f.fail("TODO: C backend: airTrunc", .{}); } -fn airBoolToInt(o: *Object, inst: Air.Inst.Index) !CValue { - if (o.liveness.isUnused(inst)) +fn airBoolToInt(f: *Function, inst: Air.Inst.Index) !CValue { + if (f.liveness.isUnused(inst)) return CValue.none; - const un_op = o.air.instructions.items(.data)[inst].un_op; - const writer = o.writer(); - const inst_ty = o.air.typeOfIndex(inst); - const operand = try o.resolveInst(un_op); - const local = try o.allocLocal(inst_ty, .Const); + const un_op = f.air.instructions.items(.data)[inst].un_op; + const writer = f.object.writer(); + const inst_ty = f.air.typeOfIndex(inst); + const operand = try f.resolveInst(un_op); + const local = try f.allocLocal(inst_ty, .Const); try writer.writeAll(" = "); - try o.writeCValue(writer, operand); + try f.writeCValue(writer, operand); try writer.writeAll(";\n"); return local; } -fn airStore(o: *Object, inst: Air.Inst.Index) !CValue { +fn airStore(f: *Function, inst: Air.Inst.Index) !CValue { // *a = b; - const bin_op = o.air.instructions.items(.data)[inst].bin_op; - const dest_ptr = try o.resolveInst(bin_op.lhs); - const src_val = try o.resolveInst(bin_op.rhs); + const bin_op = f.air.instructions.items(.data)[inst].bin_op; + const dest_ptr = try f.resolveInst(bin_op.lhs); + const src_val = try f.resolveInst(bin_op.rhs); - const writer = o.writer(); + const writer = f.object.writer(); switch (dest_ptr) { .local_ref => |i| { const dest: CValue = .{ .local = i }; - try o.writeCValue(writer, dest); + try f.writeCValue(writer, dest); try writer.writeAll(" = "); - try o.writeCValue(writer, src_val); + try f.writeCValue(writer, src_val); try writer.writeAll(";\n"); }, .decl_ref => |decl| { const dest: CValue = .{ .decl = decl }; - try o.writeCValue(writer, dest); + try f.writeCValue(writer, dest); try writer.writeAll(" = "); - try o.writeCValue(writer, src_val); + try f.writeCValue(writer, src_val); try writer.writeAll(";\n"); }, else => { try writer.writeAll("*"); - try o.writeCValue(writer, dest_ptr); + try f.writeCValue(writer, dest_ptr); try writer.writeAll(" = "); - try o.writeCValue(writer, src_val); + try f.writeCValue(writer, src_val); try writer.writeAll(";\n"); }, } @@ -1148,17 +1185,17 @@ fn airStore(o: *Object, inst: Air.Inst.Index) !CValue { } fn airWrapOp( - o: *Object, + f: *Function, inst: Air.Inst.Index, str_op: [*:0]const u8, fn_op: [*:0]const u8, ) !CValue { - if (o.liveness.isUnused(inst)) + if (f.liveness.isUnused(inst)) return CValue.none; - const bin_op = o.air.instructions.items(.data)[inst].bin_op; - const inst_ty = 
o.air.typeOfIndex(inst); - const int_info = inst_ty.intInfo(o.dg.module.getTarget()); + const bin_op = f.air.instructions.items(.data)[inst].bin_op; + const inst_ty = f.air.typeOfIndex(inst); + const int_info = inst_ty.intInfo(f.object.dg.module.getTarget()); const bits = int_info.bits; // if it's an unsigned int with non-arbitrary bit size then we can just add @@ -1168,12 +1205,12 @@ fn airWrapOp( else => false, }; if (ok_bits or inst_ty.tag() != .int_unsigned) { - return try airBinOp(o, inst, str_op); + return try airBinOp(f, inst, str_op); } } if (bits > 64) { - return o.dg.fail("TODO: C backend: airWrapOp for large integers", .{}); + return f.fail("TODO: C backend: airWrapOp for large integers", .{}); } var min_buf: [80]u8 = undefined; @@ -1220,11 +1257,11 @@ fn airWrapOp( }, }; - const lhs = try o.resolveInst(bin_op.lhs); - const rhs = try o.resolveInst(bin_op.rhs); - const w = o.writer(); + const lhs = try f.resolveInst(bin_op.lhs); + const rhs = try f.resolveInst(bin_op.rhs); + const w = f.object.writer(); - const ret = try o.allocLocal(inst_ty, .Mut); + const ret = try f.allocLocal(inst_ty, .Mut); try w.print(" = zig_{s}", .{fn_op}); switch (inst_ty.tag()) { @@ -1250,71 +1287,71 @@ fn airWrapOp( } try w.writeByte('('); - try o.writeCValue(w, lhs); + try f.writeCValue(w, lhs); try w.writeAll(", "); - try o.writeCValue(w, rhs); + try f.writeCValue(w, rhs); if (int_info.signedness == .signed) { try w.print(", {s}", .{min}); } try w.print(", {s});", .{max}); - try o.indent_writer.insertNewline(); + try f.object.indent_writer.insertNewline(); return ret; } -fn airNot(o: *Object, inst: Air.Inst.Index) !CValue { - if (o.liveness.isUnused(inst)) +fn airNot(f: *Function, inst: Air.Inst.Index) !CValue { + if (f.liveness.isUnused(inst)) return CValue.none; - const ty_op = o.air.instructions.items(.data)[inst].ty_op; - const op = try o.resolveInst(ty_op.operand); + const ty_op = f.air.instructions.items(.data)[inst].ty_op; + const op = try f.resolveInst(ty_op.operand); - const writer = o.writer(); - const inst_ty = o.air.typeOfIndex(inst); - const local = try o.allocLocal(inst_ty, .Const); + const writer = f.object.writer(); + const inst_ty = f.air.typeOfIndex(inst); + const local = try f.allocLocal(inst_ty, .Const); try writer.writeAll(" = "); if (inst_ty.zigTypeTag() == .Bool) try writer.writeAll("!") else try writer.writeAll("~"); - try o.writeCValue(writer, op); + try f.writeCValue(writer, op); try writer.writeAll(";\n"); return local; } -fn airBinOp(o: *Object, inst: Air.Inst.Index, operator: [*:0]const u8) !CValue { - if (o.liveness.isUnused(inst)) +fn airBinOp(f: *Function, inst: Air.Inst.Index, operator: [*:0]const u8) !CValue { + if (f.liveness.isUnused(inst)) return CValue.none; - const bin_op = o.air.instructions.items(.data)[inst].bin_op; - const lhs = try o.resolveInst(bin_op.lhs); - const rhs = try o.resolveInst(bin_op.rhs); + const bin_op = f.air.instructions.items(.data)[inst].bin_op; + const lhs = try f.resolveInst(bin_op.lhs); + const rhs = try f.resolveInst(bin_op.rhs); - const writer = o.writer(); - const inst_ty = o.air.typeOfIndex(inst); - const local = try o.allocLocal(inst_ty, .Const); + const writer = f.object.writer(); + const inst_ty = f.air.typeOfIndex(inst); + const local = try f.allocLocal(inst_ty, .Const); try writer.writeAll(" = "); - try o.writeCValue(writer, lhs); + try f.writeCValue(writer, lhs); try writer.print("{s}", .{operator}); - try o.writeCValue(writer, rhs); + try f.writeCValue(writer, rhs); try writer.writeAll(";\n"); return local; } -fn airCall(o: 
*Object, inst: Air.Inst.Index) !CValue { - const pl_op = o.air.instructions.items(.data)[inst].pl_op; - const extra = o.air.extraData(Air.Call, pl_op.payload); - const args = @bitCast([]const Air.Inst.Ref, o.air.extra[extra.end..][0..extra.data.args_len]); - const fn_ty = o.air.typeOf(pl_op.operand); +fn airCall(f: *Function, inst: Air.Inst.Index) !CValue { + const pl_op = f.air.instructions.items(.data)[inst].pl_op; + const extra = f.air.extraData(Air.Call, pl_op.payload); + const args = @bitCast([]const Air.Inst.Ref, f.air.extra[extra.end..][0..extra.data.args_len]); + const fn_ty = f.air.typeOf(pl_op.operand); const ret_ty = fn_ty.fnReturnType(); - const unused_result = o.liveness.isUnused(inst); - const writer = o.writer(); + const unused_result = f.liveness.isUnused(inst); + const writer = f.object.writer(); var result_local: CValue = .none; if (unused_result) { @@ -1322,11 +1359,11 @@ fn airCall(o: *Object, inst: Air.Inst.Index) !CValue { try writer.print("(void)", .{}); } } else { - result_local = try o.allocLocal(ret_ty, .Const); + result_local = try f.allocLocal(ret_ty, .Const); try writer.writeAll(" = "); } - if (o.air.value(pl_op.operand)) |func_val| { + if (f.air.value(pl_op.operand)) |func_val| { const fn_decl = if (func_val.castTag(.extern_fn)) |extern_fn| extern_fn.data else if (func_val.castTag(.function)) |func_payload| @@ -1336,8 +1373,8 @@ fn airCall(o: *Object, inst: Air.Inst.Index) !CValue { try writer.writeAll(mem.spanZ(fn_decl.name)); } else { - const callee = try o.resolveInst(pl_op.operand); - try o.writeCValue(writer, callee); + const callee = try f.resolveInst(pl_op.operand); + try f.writeCValue(writer, callee); } try writer.writeAll("("); @@ -1345,113 +1382,113 @@ fn airCall(o: *Object, inst: Air.Inst.Index) !CValue { if (i != 0) { try writer.writeAll(", "); } - if (o.air.value(arg)) |val| { - try o.dg.renderValue(writer, o.air.typeOf(arg), val); + if (f.air.value(arg)) |val| { + try f.object.dg.renderValue(writer, f.air.typeOf(arg), val); } else { - const val = try o.resolveInst(arg); - try o.writeCValue(writer, val); + const val = try f.resolveInst(arg); + try f.writeCValue(writer, val); } } try writer.writeAll(");\n"); return result_local; } -fn airDbgStmt(o: *Object, inst: Air.Inst.Index) !CValue { - const dbg_stmt = o.air.instructions.items(.data)[inst].dbg_stmt; - const writer = o.writer(); +fn airDbgStmt(f: *Function, inst: Air.Inst.Index) !CValue { + const dbg_stmt = f.air.instructions.items(.data)[inst].dbg_stmt; + const writer = f.object.writer(); try writer.print("#line {d}\n", .{dbg_stmt.line + 1}); return CValue.none; } -fn airBlock(o: *Object, inst: Air.Inst.Index) !CValue { - const ty_pl = o.air.instructions.items(.data)[inst].ty_pl; - const extra = o.air.extraData(Air.Block, ty_pl.payload); - const body = o.air.extra[extra.end..][0..extra.data.body_len]; +fn airBlock(f: *Function, inst: Air.Inst.Index) !CValue { + const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; + const extra = f.air.extraData(Air.Block, ty_pl.payload); + const body = f.air.extra[extra.end..][0..extra.data.body_len]; - const block_id: usize = o.next_block_index; - o.next_block_index += 1; - const writer = o.writer(); + const block_id: usize = f.next_block_index; + f.next_block_index += 1; + const writer = f.object.writer(); - const inst_ty = o.air.typeOfIndex(inst); - const result = if (inst_ty.tag() != .void and !o.liveness.isUnused(inst)) blk: { + const inst_ty = f.air.typeOfIndex(inst); + const result = if (inst_ty.tag() != .void and !f.liveness.isUnused(inst)) blk: 
{ // allocate a location for the result - const local = try o.allocLocal(inst_ty, .Mut); + const local = try f.allocLocal(inst_ty, .Mut); try writer.writeAll(";\n"); break :blk local; } else CValue{ .none = {} }; - try o.blocks.putNoClobber(o.gpa, inst, .{ + try f.blocks.putNoClobber(f.object.dg.gpa, inst, .{ .block_id = block_id, .result = result, }); - try genBody(o, body); - try o.indent_writer.insertNewline(); + try genBody(f, body); + try f.object.indent_writer.insertNewline(); // label must be followed by an expression, add an empty one. try writer.print("zig_block_{d}:;\n", .{block_id}); return result; } -fn airBr(o: *Object, inst: Air.Inst.Index) !CValue { - const branch = o.air.instructions.items(.data)[inst].br; - const block = o.blocks.get(branch.block_inst).?; +fn airBr(f: *Function, inst: Air.Inst.Index) !CValue { + const branch = f.air.instructions.items(.data)[inst].br; + const block = f.blocks.get(branch.block_inst).?; const result = block.result; - const writer = o.writer(); + const writer = f.object.writer(); // If result is .none then the value of the block is unused. if (result != .none) { - const operand = try o.resolveInst(branch.operand); - try o.writeCValue(writer, result); + const operand = try f.resolveInst(branch.operand); + try f.writeCValue(writer, result); try writer.writeAll(" = "); - try o.writeCValue(writer, operand); + try f.writeCValue(writer, operand); try writer.writeAll(";\n"); } - try o.writer().print("goto zig_block_{d};\n", .{block.block_id}); + try f.object.writer().print("goto zig_block_{d};\n", .{block.block_id}); return CValue.none; } -fn airBitcast(o: *Object, inst: Air.Inst.Index) !CValue { - const ty_op = o.air.instructions.items(.data)[inst].ty_op; - const operand = try o.resolveInst(ty_op.operand); +fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue { + const ty_op = f.air.instructions.items(.data)[inst].ty_op; + const operand = try f.resolveInst(ty_op.operand); - const writer = o.writer(); - const inst_ty = o.air.typeOfIndex(inst); + const writer = f.object.writer(); + const inst_ty = f.air.typeOfIndex(inst); if (inst_ty.zigTypeTag() == .Pointer and - o.air.typeOf(ty_op.operand).zigTypeTag() == .Pointer) + f.air.typeOf(ty_op.operand).zigTypeTag() == .Pointer) { - const local = try o.allocLocal(inst_ty, .Const); + const local = try f.allocLocal(inst_ty, .Const); try writer.writeAll(" = ("); - try o.dg.renderType(writer, inst_ty); + try f.renderType(writer, inst_ty); try writer.writeAll(")"); - try o.writeCValue(writer, operand); + try f.writeCValue(writer, operand); try writer.writeAll(";\n"); return local; } - const local = try o.allocLocal(inst_ty, .Mut); + const local = try f.allocLocal(inst_ty, .Mut); try writer.writeAll(";\n"); try writer.writeAll("memcpy(&"); - try o.writeCValue(writer, local); + try f.writeCValue(writer, local); try writer.writeAll(", &"); - try o.writeCValue(writer, operand); + try f.writeCValue(writer, operand); try writer.writeAll(", sizeof "); - try o.writeCValue(writer, local); + try f.writeCValue(writer, local); try writer.writeAll(");\n"); return local; } -fn airBreakpoint(o: *Object) !CValue { - try o.writer().writeAll("zig_breakpoint();\n"); +fn airBreakpoint(f: *Function) !CValue { + try f.object.writer().writeAll("zig_breakpoint();\n"); return CValue.none; } -fn airFence(o: *Object, inst: Air.Inst.Index) !CValue { - const atomic_order = o.air.instructions.items(.data)[inst].fence; - const writer = o.writer(); +fn airFence(f: *Function, inst: Air.Inst.Index) !CValue { + const atomic_order = 
f.air.instructions.items(.data)[inst].fence; + const writer = f.object.writer(); try writer.writeAll("zig_fence("); try writeMemoryOrder(writer, atomic_order); @@ -1460,85 +1497,85 @@ fn airFence(o: *Object, inst: Air.Inst.Index) !CValue { return CValue.none; } -fn airUnreach(o: *Object) !CValue { - try o.writer().writeAll("zig_unreachable();\n"); +fn airUnreach(f: *Function) !CValue { + try f.object.writer().writeAll("zig_unreachable();\n"); return CValue.none; } -fn airLoop(o: *Object, inst: Air.Inst.Index) !CValue { - const ty_pl = o.air.instructions.items(.data)[inst].ty_pl; - const loop = o.air.extraData(Air.Block, ty_pl.payload); - const body = o.air.extra[loop.end..][0..loop.data.body_len]; - try o.writer().writeAll("while (true) "); - try genBody(o, body); - try o.indent_writer.insertNewline(); +fn airLoop(f: *Function, inst: Air.Inst.Index) !CValue { + const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; + const loop = f.air.extraData(Air.Block, ty_pl.payload); + const body = f.air.extra[loop.end..][0..loop.data.body_len]; + try f.object.writer().writeAll("while (true) "); + try genBody(f, body); + try f.object.indent_writer.insertNewline(); return CValue.none; } -fn airCondBr(o: *Object, inst: Air.Inst.Index) !CValue { - const pl_op = o.air.instructions.items(.data)[inst].pl_op; - const cond = try o.resolveInst(pl_op.operand); - const extra = o.air.extraData(Air.CondBr, pl_op.payload); - const then_body = o.air.extra[extra.end..][0..extra.data.then_body_len]; - const else_body = o.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; - const writer = o.writer(); +fn airCondBr(f: *Function, inst: Air.Inst.Index) !CValue { + const pl_op = f.air.instructions.items(.data)[inst].pl_op; + const cond = try f.resolveInst(pl_op.operand); + const extra = f.air.extraData(Air.CondBr, pl_op.payload); + const then_body = f.air.extra[extra.end..][0..extra.data.then_body_len]; + const else_body = f.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; + const writer = f.object.writer(); try writer.writeAll("if ("); - try o.writeCValue(writer, cond); + try f.writeCValue(writer, cond); try writer.writeAll(") "); - try genBody(o, then_body); + try genBody(f, then_body); try writer.writeAll(" else "); - try genBody(o, else_body); - try o.indent_writer.insertNewline(); + try genBody(f, else_body); + try f.object.indent_writer.insertNewline(); return CValue.none; } -fn airSwitchBr(o: *Object, inst: Air.Inst.Index) !CValue { - const pl_op = o.air.instructions.items(.data)[inst].pl_op; - const condition = try o.resolveInst(pl_op.operand); - const condition_ty = o.air.typeOf(pl_op.operand); - const switch_br = o.air.extraData(Air.SwitchBr, pl_op.payload); - const writer = o.writer(); +fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { + const pl_op = f.air.instructions.items(.data)[inst].pl_op; + const condition = try f.resolveInst(pl_op.operand); + const condition_ty = f.air.typeOf(pl_op.operand); + const switch_br = f.air.extraData(Air.SwitchBr, pl_op.payload); + const writer = f.object.writer(); try writer.writeAll("switch ("); - try o.writeCValue(writer, condition); + try f.writeCValue(writer, condition); try writer.writeAll(") {"); - o.indent_writer.pushIndent(); + f.object.indent_writer.pushIndent(); var extra_index: usize = switch_br.end; var case_i: u32 = 0; while (case_i < switch_br.data.cases_len) : (case_i += 1) { - const case = o.air.extraData(Air.SwitchBr.Case, extra_index); - const items = @bitCast([]const Air.Inst.Ref, 
o.air.extra[case.end..][0..case.data.items_len]); - const case_body = o.air.extra[case.end + items.len ..][0..case.data.body_len]; + const case = f.air.extraData(Air.SwitchBr.Case, extra_index); + const items = @bitCast([]const Air.Inst.Ref, f.air.extra[case.end..][0..case.data.items_len]); + const case_body = f.air.extra[case.end + items.len ..][0..case.data.body_len]; extra_index = case.end + case.data.items_len + case_body.len; for (items) |item| { - try o.indent_writer.insertNewline(); + try f.object.indent_writer.insertNewline(); try writer.writeAll("case "); - try o.dg.renderValue(writer, condition_ty, o.air.value(item).?); + try f.object.dg.renderValue(writer, condition_ty, f.air.value(item).?); try writer.writeAll(": "); } // The case body must be noreturn so we don't need to insert a break. - try genBody(o, case_body); + try genBody(f, case_body); } - const else_body = o.air.extra[extra_index..][0..switch_br.data.else_body_len]; - try o.indent_writer.insertNewline(); + const else_body = f.air.extra[extra_index..][0..switch_br.data.else_body_len]; + try f.object.indent_writer.insertNewline(); try writer.writeAll("default: "); - try genBody(o, else_body); - try o.indent_writer.insertNewline(); + try genBody(f, else_body); + try f.object.indent_writer.insertNewline(); - o.indent_writer.popIndent(); + f.object.indent_writer.popIndent(); try writer.writeAll("}\n"); return CValue.none; } -fn airAsm(o: *Object, inst: Air.Inst.Index) !CValue { - const air_datas = o.air.instructions.items(.data); - const air_extra = o.air.extraData(Air.Asm, air_datas[inst].ty_pl.payload); - const zir = o.dg.decl.namespace.file_scope.zir; +fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { + const air_datas = f.air.instructions.items(.data); + const air_extra = f.air.extraData(Air.Asm, air_datas[inst].ty_pl.payload); + const zir = f.object.dg.decl.namespace.file_scope.zir; const extended = zir.instructions.items(.data)[air_extra.data.zir_index].extended; const zir_extra = zir.extraData(Zir.Inst.Asm, extended.operand); const asm_source = zir.nullTerminatedString(zir_extra.data.asm_source); @@ -1547,14 +1584,14 @@ fn airAsm(o: *Object, inst: Air.Inst.Index) !CValue { const clobbers_len = @truncate(u5, extended.small >> 10); _ = clobbers_len; // TODO honor these const is_volatile = @truncate(u1, extended.small >> 15) != 0; - const outputs = @bitCast([]const Air.Inst.Ref, o.air.extra[air_extra.end..][0..outputs_len]); - const args = @bitCast([]const Air.Inst.Ref, o.air.extra[air_extra.end + outputs.len ..][0..args_len]); + const outputs = @bitCast([]const Air.Inst.Ref, f.air.extra[air_extra.end..][0..outputs_len]); + const args = @bitCast([]const Air.Inst.Ref, f.air.extra[air_extra.end + outputs.len ..][0..args_len]); if (outputs_len > 1) { - return o.dg.fail("TODO implement codegen for asm with more than 1 output", .{}); + return f.fail("TODO implement codegen for asm with more than 1 output", .{}); } - if (o.liveness.isUnused(inst) and !is_volatile) + if (f.liveness.isUnused(inst) and !is_volatile) return CValue.none; var extra_i: usize = zir_extra.end; @@ -1569,28 +1606,28 @@ fn airAsm(o: *Object, inst: Air.Inst.Index) !CValue { }; const args_extra_begin = extra_i; - const writer = o.writer(); + const writer = f.object.writer(); for (args) |arg| { const input = zir.extraData(Zir.Inst.Asm.Input, extra_i); extra_i = input.end; const constraint = zir.nullTerminatedString(input.data.constraint); if (constraint[0] == '{' and constraint[constraint.len - 1] == '}') { const reg = constraint[1 .. 
constraint.len - 1]; - const arg_c_value = try o.resolveInst(arg); + const arg_c_value = try f.resolveInst(arg); try writer.writeAll("register "); - try o.dg.renderType(writer, o.air.typeOf(arg)); + try f.renderType(writer, f.air.typeOf(arg)); try writer.print(" {s}_constant __asm__(\"{s}\") = ", .{ reg, reg }); - try o.writeCValue(writer, arg_c_value); + try f.writeCValue(writer, arg_c_value); try writer.writeAll(";\n"); } else { - return o.dg.fail("TODO non-explicit inline asm regs", .{}); + return f.fail("TODO non-explicit inline asm regs", .{}); } } const volatile_string: []const u8 = if (is_volatile) "volatile " else ""; try writer.print("__asm {s}(\"{s}\"", .{ volatile_string, asm_source }); if (output_constraint) |_| { - return o.dg.fail("TODO: CBE inline asm output", .{}); + return f.fail("TODO: CBE inline asm output", .{}); } if (args.len > 0) { if (output_constraint == null) { @@ -1616,30 +1653,30 @@ fn airAsm(o: *Object, inst: Air.Inst.Index) !CValue { } try writer.writeAll(");\n"); - if (o.liveness.isUnused(inst)) + if (f.liveness.isUnused(inst)) return CValue.none; - return o.dg.fail("TODO: C backend: inline asm expression result used", .{}); + return f.fail("TODO: C backend: inline asm expression result used", .{}); } fn airIsNull( - o: *Object, + f: *Function, inst: Air.Inst.Index, operator: [*:0]const u8, deref_suffix: [*:0]const u8, ) !CValue { - if (o.liveness.isUnused(inst)) + if (f.liveness.isUnused(inst)) return CValue.none; - const un_op = o.air.instructions.items(.data)[inst].un_op; - const writer = o.writer(); - const operand = try o.resolveInst(un_op); + const un_op = f.air.instructions.items(.data)[inst].un_op; + const writer = f.object.writer(); + const operand = try f.resolveInst(un_op); - const local = try o.allocLocal(Type.initTag(.bool), .Const); + const local = try f.allocLocal(Type.initTag(.bool), .Const); try writer.writeAll(" = ("); - try o.writeCValue(writer, operand); + try f.writeCValue(writer, operand); - if (o.air.typeOf(un_op).isPtrLikeOptional()) { + if (f.air.typeOf(un_op).isPtrLikeOptional()) { // operand is a regular pointer, test `operand !=/== NULL` try writer.print("){s} {s} NULL;\n", .{ deref_suffix, operator }); } else { @@ -1648,14 +1685,14 @@ fn airIsNull( return local; } -fn airOptionalPayload(o: *Object, inst: Air.Inst.Index) !CValue { - if (o.liveness.isUnused(inst)) +fn airOptionalPayload(f: *Function, inst: Air.Inst.Index) !CValue { + if (f.liveness.isUnused(inst)) return CValue.none; - const ty_op = o.air.instructions.items(.data)[inst].ty_op; - const writer = o.writer(); - const operand = try o.resolveInst(ty_op.operand); - const operand_ty = o.air.typeOf(ty_op.operand); + const ty_op = f.air.instructions.items(.data)[inst].ty_op; + const writer = f.object.writer(); + const operand = try f.resolveInst(ty_op.operand); + const operand_ty = f.air.typeOf(ty_op.operand); const opt_ty = if (operand_ty.zigTypeTag() == .Pointer) operand_ty.elemType() @@ -1668,98 +1705,98 @@ fn airOptionalPayload(o: *Object, inst: Air.Inst.Index) !CValue { return operand; } - const inst_ty = o.air.typeOfIndex(inst); + const inst_ty = f.air.typeOfIndex(inst); const maybe_deref = if (operand_ty.zigTypeTag() == .Pointer) "->" else "."; const maybe_addrof = if (inst_ty.zigTypeTag() == .Pointer) "&" else ""; - const local = try o.allocLocal(inst_ty, .Const); + const local = try f.allocLocal(inst_ty, .Const); try writer.print(" = {s}(", .{maybe_addrof}); - try o.writeCValue(writer, operand); + try f.writeCValue(writer, operand); try writer.print("){s}payload;\n", 
.{maybe_deref}); return local; } -fn airStructFieldPtr(o: *Object, inst: Air.Inst.Index) !CValue { - if (o.liveness.isUnused(inst)) +fn airStructFieldPtr(f: *Function, inst: Air.Inst.Index) !CValue { + if (f.liveness.isUnused(inst)) // TODO this @as is needed because of a stage1 bug return @as(CValue, CValue.none); - const ty_pl = o.air.instructions.items(.data)[inst].ty_pl; - const extra = o.air.extraData(Air.StructField, ty_pl.payload).data; - const struct_ptr = try o.resolveInst(extra.struct_operand); - const struct_ptr_ty = o.air.typeOf(extra.struct_operand); - return structFieldPtr(o, inst, struct_ptr_ty, struct_ptr, extra.field_index); + const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; + const extra = f.air.extraData(Air.StructField, ty_pl.payload).data; + const struct_ptr = try f.resolveInst(extra.struct_operand); + const struct_ptr_ty = f.air.typeOf(extra.struct_operand); + return structFieldPtr(f, inst, struct_ptr_ty, struct_ptr, extra.field_index); } -fn airStructFieldPtrIndex(o: *Object, inst: Air.Inst.Index, index: u8) !CValue { - if (o.liveness.isUnused(inst)) +fn airStructFieldPtrIndex(f: *Function, inst: Air.Inst.Index, index: u8) !CValue { + if (f.liveness.isUnused(inst)) // TODO this @as is needed because of a stage1 bug return @as(CValue, CValue.none); - const ty_op = o.air.instructions.items(.data)[inst].ty_op; - const struct_ptr = try o.resolveInst(ty_op.operand); - const struct_ptr_ty = o.air.typeOf(ty_op.operand); - return structFieldPtr(o, inst, struct_ptr_ty, struct_ptr, index); + const ty_op = f.air.instructions.items(.data)[inst].ty_op; + const struct_ptr = try f.resolveInst(ty_op.operand); + const struct_ptr_ty = f.air.typeOf(ty_op.operand); + return structFieldPtr(f, inst, struct_ptr_ty, struct_ptr, index); } -fn structFieldPtr(o: *Object, inst: Air.Inst.Index, struct_ptr_ty: Type, struct_ptr: CValue, index: u32) !CValue { - const writer = o.writer(); +fn structFieldPtr(f: *Function, inst: Air.Inst.Index, struct_ptr_ty: Type, struct_ptr: CValue, index: u32) !CValue { + const writer = f.object.writer(); const struct_obj = struct_ptr_ty.elemType().castTag(.@"struct").?.data; const field_name = struct_obj.fields.keys()[index]; - const inst_ty = o.air.typeOfIndex(inst); - const local = try o.allocLocal(inst_ty, .Const); + const inst_ty = f.air.typeOfIndex(inst); + const local = try f.allocLocal(inst_ty, .Const); switch (struct_ptr) { .local_ref => |i| { try writer.print(" = &t{d}.{};\n", .{ i, fmtIdent(field_name) }); }, else => { try writer.writeAll(" = &"); - try o.writeCValue(writer, struct_ptr); + try f.writeCValue(writer, struct_ptr); try writer.print("->{};\n", .{fmtIdent(field_name)}); }, } return local; } -fn airStructFieldVal(o: *Object, inst: Air.Inst.Index) !CValue { - if (o.liveness.isUnused(inst)) +fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { + if (f.liveness.isUnused(inst)) return CValue.none; - const ty_pl = o.air.instructions.items(.data)[inst].ty_pl; - const extra = o.air.extraData(Air.StructField, ty_pl.payload).data; - const writer = o.writer(); - const struct_byval = try o.resolveInst(extra.struct_operand); - const struct_ty = o.air.typeOf(extra.struct_operand); + const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; + const extra = f.air.extraData(Air.StructField, ty_pl.payload).data; + const writer = f.object.writer(); + const struct_byval = try f.resolveInst(extra.struct_operand); + const struct_ty = f.air.typeOf(extra.struct_operand); const struct_obj = struct_ty.castTag(.@"struct").?.data; const 
field_name = struct_obj.fields.keys()[extra.field_index]; - const inst_ty = o.air.typeOfIndex(inst); - const local = try o.allocLocal(inst_ty, .Const); + const inst_ty = f.air.typeOfIndex(inst); + const local = try f.allocLocal(inst_ty, .Const); try writer.writeAll(" = "); - try o.writeCValue(writer, struct_byval); + try f.writeCValue(writer, struct_byval); try writer.print(".{};\n", .{fmtIdent(field_name)}); return local; } // *(E!T) -> E NOT *E -fn airUnwrapErrUnionErr(o: *Object, inst: Air.Inst.Index) !CValue { - if (o.liveness.isUnused(inst)) +fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { + if (f.liveness.isUnused(inst)) return CValue.none; - const ty_op = o.air.instructions.items(.data)[inst].ty_op; - const inst_ty = o.air.typeOfIndex(inst); - const writer = o.writer(); - const operand = try o.resolveInst(ty_op.operand); - const operand_ty = o.air.typeOf(ty_op.operand); + const ty_op = f.air.instructions.items(.data)[inst].ty_op; + const inst_ty = f.air.typeOfIndex(inst); + const writer = f.object.writer(); + const operand = try f.resolveInst(ty_op.operand); + const operand_ty = f.air.typeOf(ty_op.operand); const payload_ty = operand_ty.errorUnionPayload(); if (!payload_ty.hasCodeGenBits()) { if (operand_ty.zigTypeTag() == .Pointer) { - const local = try o.allocLocal(inst_ty, .Const); + const local = try f.allocLocal(inst_ty, .Const); try writer.writeAll(" = *"); - try o.writeCValue(writer, operand); + try f.writeCValue(writer, operand); try writer.writeAll(";\n"); return local; } else { @@ -1769,172 +1806,172 @@ fn airUnwrapErrUnionErr(o: *Object, inst: Air.Inst.Index) !CValue { const maybe_deref = if (operand_ty.zigTypeTag() == .Pointer) "->" else "."; - const local = try o.allocLocal(inst_ty, .Const); + const local = try f.allocLocal(inst_ty, .Const); try writer.writeAll(" = ("); - try o.writeCValue(writer, operand); + try f.writeCValue(writer, operand); try writer.print("){s}error;\n", .{maybe_deref}); return local; } -fn airUnwrapErrUnionPay(o: *Object, inst: Air.Inst.Index) !CValue { - if (o.liveness.isUnused(inst)) +fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue { + if (f.liveness.isUnused(inst)) return CValue.none; - const ty_op = o.air.instructions.items(.data)[inst].ty_op; - const writer = o.writer(); - const operand = try o.resolveInst(ty_op.operand); - const operand_ty = o.air.typeOf(ty_op.operand); + const ty_op = f.air.instructions.items(.data)[inst].ty_op; + const writer = f.object.writer(); + const operand = try f.resolveInst(ty_op.operand); + const operand_ty = f.air.typeOf(ty_op.operand); const payload_ty = operand_ty.errorUnionPayload(); if (!payload_ty.hasCodeGenBits()) { return CValue.none; } - const inst_ty = o.air.typeOfIndex(inst); + const inst_ty = f.air.typeOfIndex(inst); const maybe_deref = if (operand_ty.zigTypeTag() == .Pointer) "->" else "."; const maybe_addrof = if (inst_ty.zigTypeTag() == .Pointer) "&" else ""; - const local = try o.allocLocal(inst_ty, .Const); + const local = try f.allocLocal(inst_ty, .Const); try writer.print(" = {s}(", .{maybe_addrof}); - try o.writeCValue(writer, operand); + try f.writeCValue(writer, operand); try writer.print("){s}payload;\n", .{maybe_deref}); return local; } -fn airWrapOptional(o: *Object, inst: Air.Inst.Index) !CValue { - if (o.liveness.isUnused(inst)) +fn airWrapOptional(f: *Function, inst: Air.Inst.Index) !CValue { + if (f.liveness.isUnused(inst)) return CValue.none; - const ty_op = o.air.instructions.items(.data)[inst].ty_op; - const writer = o.writer(); - const 
operand = try o.resolveInst(ty_op.operand); + const ty_op = f.air.instructions.items(.data)[inst].ty_op; + const writer = f.object.writer(); + const operand = try f.resolveInst(ty_op.operand); - const inst_ty = o.air.typeOfIndex(inst); + const inst_ty = f.air.typeOfIndex(inst); if (inst_ty.isPtrLikeOptional()) { // the operand is just a regular pointer, no need to do anything special. return operand; } // .wrap_optional is used to convert non-optionals into optionals so it can never be null. - const local = try o.allocLocal(inst_ty, .Const); + const local = try f.allocLocal(inst_ty, .Const); try writer.writeAll(" = { .is_null = false, .payload ="); - try o.writeCValue(writer, operand); + try f.writeCValue(writer, operand); try writer.writeAll("};\n"); return local; } -fn airWrapErrUnionErr(o: *Object, inst: Air.Inst.Index) !CValue { - if (o.liveness.isUnused(inst)) +fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { + if (f.liveness.isUnused(inst)) return CValue.none; - const writer = o.writer(); - const ty_op = o.air.instructions.items(.data)[inst].ty_op; - const operand = try o.resolveInst(ty_op.operand); + const writer = f.object.writer(); + const ty_op = f.air.instructions.items(.data)[inst].ty_op; + const operand = try f.resolveInst(ty_op.operand); - const inst_ty = o.air.typeOfIndex(inst); - const local = try o.allocLocal(inst_ty, .Const); + const inst_ty = f.air.typeOfIndex(inst); + const local = try f.allocLocal(inst_ty, .Const); try writer.writeAll(" = { .error = "); - try o.writeCValue(writer, operand); + try f.writeCValue(writer, operand); try writer.writeAll(" };\n"); return local; } -fn airWrapErrUnionPay(o: *Object, inst: Air.Inst.Index) !CValue { - if (o.liveness.isUnused(inst)) +fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue { + if (f.liveness.isUnused(inst)) return CValue.none; - const ty_op = o.air.instructions.items(.data)[inst].ty_op; - const writer = o.writer(); - const operand = try o.resolveInst(ty_op.operand); + const ty_op = f.air.instructions.items(.data)[inst].ty_op; + const writer = f.object.writer(); + const operand = try f.resolveInst(ty_op.operand); - const inst_ty = o.air.typeOfIndex(inst); - const local = try o.allocLocal(inst_ty, .Const); + const inst_ty = f.air.typeOfIndex(inst); + const local = try f.allocLocal(inst_ty, .Const); try writer.writeAll(" = { .error = 0, .payload = "); - try o.writeCValue(writer, operand); + try f.writeCValue(writer, operand); try writer.writeAll(" };\n"); return local; } fn airIsErr( - o: *Object, + f: *Function, inst: Air.Inst.Index, deref_prefix: [*:0]const u8, deref_suffix: [*:0]const u8, op_str: [*:0]const u8, ) !CValue { - if (o.liveness.isUnused(inst)) + if (f.liveness.isUnused(inst)) return CValue.none; - const un_op = o.air.instructions.items(.data)[inst].un_op; - const writer = o.writer(); - const operand = try o.resolveInst(un_op); - const operand_ty = o.air.typeOf(un_op); - const local = try o.allocLocal(Type.initTag(.bool), .Const); + const un_op = f.air.instructions.items(.data)[inst].un_op; + const writer = f.object.writer(); + const operand = try f.resolveInst(un_op); + const operand_ty = f.air.typeOf(un_op); + const local = try f.allocLocal(Type.initTag(.bool), .Const); const payload_ty = operand_ty.errorUnionPayload(); if (!payload_ty.hasCodeGenBits()) { try writer.print(" = {s}", .{deref_prefix}); - try o.writeCValue(writer, operand); + try f.writeCValue(writer, operand); try writer.print(" {s} 0;\n", .{op_str}); } else { try writer.writeAll(" = "); - try 
o.writeCValue(writer, operand); + try f.writeCValue(writer, operand); try writer.print("{s}error {s} 0;\n", .{ deref_suffix, op_str }); } return local; } -fn airArrayToSlice(o: *Object, inst: Air.Inst.Index) !CValue { - if (o.liveness.isUnused(inst)) +fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { + if (f.liveness.isUnused(inst)) return CValue.none; - const inst_ty = o.air.typeOfIndex(inst); - const local = try o.allocLocal(inst_ty, .Const); - const ty_op = o.air.instructions.items(.data)[inst].ty_op; - const writer = o.writer(); - const operand = try o.resolveInst(ty_op.operand); - const array_len = o.air.typeOf(ty_op.operand).elemType().arrayLen(); + const inst_ty = f.air.typeOfIndex(inst); + const local = try f.allocLocal(inst_ty, .Const); + const ty_op = f.air.instructions.items(.data)[inst].ty_op; + const writer = f.object.writer(); + const operand = try f.resolveInst(ty_op.operand); + const array_len = f.air.typeOf(ty_op.operand).elemType().arrayLen(); try writer.writeAll(" = { .ptr = "); - try o.writeCValue(writer, operand); + try f.writeCValue(writer, operand); try writer.print(", .len = {d} }};\n", .{array_len}); return local; } /// Emits a local variable with the result type and initializes it /// with the operand. -fn airSimpleCast(o: *Object, inst: Air.Inst.Index) !CValue { - if (o.liveness.isUnused(inst)) +fn airSimpleCast(f: *Function, inst: Air.Inst.Index) !CValue { + if (f.liveness.isUnused(inst)) return CValue.none; - const inst_ty = o.air.typeOfIndex(inst); - const local = try o.allocLocal(inst_ty, .Const); - const ty_op = o.air.instructions.items(.data)[inst].ty_op; - const writer = o.writer(); - const operand = try o.resolveInst(ty_op.operand); + const inst_ty = f.air.typeOfIndex(inst); + const local = try f.allocLocal(inst_ty, .Const); + const ty_op = f.air.instructions.items(.data)[inst].ty_op; + const writer = f.object.writer(); + const operand = try f.resolveInst(ty_op.operand); try writer.writeAll(" = "); - try o.writeCValue(writer, operand); + try f.writeCValue(writer, operand); try writer.writeAll(";\n"); return local; } -fn airCmpxchg(o: *Object, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue { - const ty_pl = o.air.instructions.items(.data)[inst].ty_pl; - const extra = o.air.extraData(Air.Cmpxchg, ty_pl.payload).data; - const inst_ty = o.air.typeOfIndex(inst); - const ptr = try o.resolveInst(extra.ptr); - const expected_value = try o.resolveInst(extra.expected_value); - const new_value = try o.resolveInst(extra.new_value); - const local = try o.allocLocal(inst_ty, .Const); - const writer = o.writer(); +fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue { + const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; + const extra = f.air.extraData(Air.Cmpxchg, ty_pl.payload).data; + const inst_ty = f.air.typeOfIndex(inst); + const ptr = try f.resolveInst(extra.ptr); + const expected_value = try f.resolveInst(extra.expected_value); + const new_value = try f.resolveInst(extra.new_value); + const local = try f.allocLocal(inst_ty, .Const); + const writer = f.object.writer(); try writer.print(" = zig_cmpxchg_{s}(", .{flavor}); - try o.writeCValue(writer, ptr); + try f.writeCValue(writer, ptr); try writer.writeAll(", "); - try o.writeCValue(writer, expected_value); + try f.writeCValue(writer, expected_value); try writer.writeAll(", "); - try o.writeCValue(writer, new_value); + try f.writeCValue(writer, new_value); try writer.writeAll(", "); try writeMemoryOrder(writer, extra.successOrder()); try writer.writeAll(", 
"); @@ -1944,19 +1981,19 @@ fn airCmpxchg(o: *Object, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue { return local; } -fn airAtomicRmw(o: *Object, inst: Air.Inst.Index) !CValue { - const pl_op = o.air.instructions.items(.data)[inst].pl_op; - const extra = o.air.extraData(Air.AtomicRmw, pl_op.payload).data; - const inst_ty = o.air.typeOfIndex(inst); - const ptr = try o.resolveInst(pl_op.operand); - const operand = try o.resolveInst(extra.operand); - const local = try o.allocLocal(inst_ty, .Const); - const writer = o.writer(); +fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { + const pl_op = f.air.instructions.items(.data)[inst].pl_op; + const extra = f.air.extraData(Air.AtomicRmw, pl_op.payload).data; + const inst_ty = f.air.typeOfIndex(inst); + const ptr = try f.resolveInst(pl_op.operand); + const operand = try f.resolveInst(extra.operand); + const local = try f.allocLocal(inst_ty, .Const); + const writer = f.object.writer(); try writer.print(" = zig_atomicrmw_{s}(", .{toAtomicRmwSuffix(extra.op())}); - try o.writeCValue(writer, ptr); + try f.writeCValue(writer, ptr); try writer.writeAll(", "); - try o.writeCValue(writer, operand); + try f.writeCValue(writer, operand); try writer.writeAll(", "); try writeMemoryOrder(writer, extra.ordering()); try writer.writeAll(");\n"); @@ -1964,15 +2001,15 @@ fn airAtomicRmw(o: *Object, inst: Air.Inst.Index) !CValue { return local; } -fn airAtomicLoad(o: *Object, inst: Air.Inst.Index) !CValue { - const atomic_load = o.air.instructions.items(.data)[inst].atomic_load; - const inst_ty = o.air.typeOfIndex(inst); - const ptr = try o.resolveInst(atomic_load.ptr); - const local = try o.allocLocal(inst_ty, .Const); - const writer = o.writer(); +fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { + const atomic_load = f.air.instructions.items(.data)[inst].atomic_load; + const inst_ty = f.air.typeOfIndex(inst); + const ptr = try f.resolveInst(atomic_load.ptr); + const local = try f.allocLocal(inst_ty, .Const); + const writer = f.object.writer(); try writer.writeAll(" = zig_atomic_load("); - try o.writeCValue(writer, ptr); + try f.writeCValue(writer, ptr); try writer.writeAll(", "); try writeMemoryOrder(writer, atomic_load.order); try writer.writeAll(");\n"); @@ -1980,18 +2017,18 @@ fn airAtomicLoad(o: *Object, inst: Air.Inst.Index) !CValue { return local; } -fn airAtomicStore(o: *Object, inst: Air.Inst.Index, order: [*:0]const u8) !CValue { - const bin_op = o.air.instructions.items(.data)[inst].bin_op; - const ptr = try o.resolveInst(bin_op.lhs); - const element = try o.resolveInst(bin_op.rhs); - const inst_ty = o.air.typeOfIndex(inst); - const local = try o.allocLocal(inst_ty, .Const); - const writer = o.writer(); +fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CValue { + const bin_op = f.air.instructions.items(.data)[inst].bin_op; + const ptr = try f.resolveInst(bin_op.lhs); + const element = try f.resolveInst(bin_op.rhs); + const inst_ty = f.air.typeOfIndex(inst); + const local = try f.allocLocal(inst_ty, .Const); + const writer = f.object.writer(); try writer.writeAll(" = zig_atomic_store("); - try o.writeCValue(writer, ptr); + try f.writeCValue(writer, ptr); try writer.writeAll(", "); - try o.writeCValue(writer, element); + try f.writeCValue(writer, element); try writer.print(", {s});\n", .{order}); return local; diff --git a/src/link.zig b/src/link.zig index e649101f08..4f21a10d18 100644 --- a/src/link.zig +++ b/src/link.zig @@ -149,7 +149,7 @@ pub const File = struct { coff: Coff.TextBlock, macho: 
MachO.TextBlock, plan9: Plan9.DeclBlock, - c: C.DeclBlock, + c: void, wasm: Wasm.DeclBlock, spirv: void, }; @@ -159,7 +159,7 @@ pub const File = struct { coff: Coff.SrcFn, macho: MachO.SrcFn, plan9: void, - c: C.FnBlock, + c: void, wasm: Wasm.FnData, spirv: SpirV.FnData, }; @@ -372,16 +372,18 @@ pub const File = struct { /// Must be called before any call to updateDecl or updateDeclExports for /// any given Decl. + /// TODO we're transitioning to deleting this function and instead having + /// each linker backend notice the first time updateDecl or updateFunc is called, or + /// a callee referenced from AIR. pub fn allocateDeclIndexes(base: *File, decl: *Module.Decl) !void { log.debug("allocateDeclIndexes {*} ({s})", .{ decl, decl.name }); switch (base.tag) { .coff => return @fieldParentPtr(Coff, "base", base).allocateDeclIndexes(decl), .elf => return @fieldParentPtr(Elf, "base", base).allocateDeclIndexes(decl), .macho => return @fieldParentPtr(MachO, "base", base).allocateDeclIndexes(decl), - .c => return @fieldParentPtr(C, "base", base).allocateDeclIndexes(decl), .wasm => return @fieldParentPtr(Wasm, "base", base).allocateDeclIndexes(decl), .plan9 => return @fieldParentPtr(Plan9, "base", base).allocateDeclIndexes(decl), - .spirv => {}, + .c, .spirv => {}, } } diff --git a/src/link/C.zig b/src/link/C.zig index 103cb60901..8689a6859a 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -21,30 +21,34 @@ base: link.File, /// This linker backend does not try to incrementally link output C source code. /// Instead, it tracks all declarations in this table, and iterates over it /// in the flush function, stitching pre-rendered pieces of C code together. -decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, void) = .{}, +decl_table: std.AutoArrayHashMapUnmanaged(*const Module.Decl, DeclBlock) = .{}, +/// Stores Type/Value data for `typedefs` to reference. +/// Accumulates allocations and then there is a periodic garbage collection after flush(). +arena: std.heap.ArenaAllocator, /// Per-declaration data. For functions this is the body, and /// the forward declaration is stored in the FnBlock. -pub const DeclBlock = struct { - code: std.ArrayListUnmanaged(u8), - - pub const empty: DeclBlock = .{ - .code = .{}, - }; -}; - -/// Per-function data. -pub const FnBlock = struct { - fwd_decl: std.ArrayListUnmanaged(u8), - typedefs: codegen.TypedefMap.Unmanaged, - - pub const empty: FnBlock = .{ - .fwd_decl = .{}, - .typedefs = .{}, - }; +const DeclBlock = struct { + code: std.ArrayListUnmanaged(u8) = .{}, + fwd_decl: std.ArrayListUnmanaged(u8) = .{}, + /// Each Decl stores a mapping of Zig Types to corresponding C types, for every + /// Zig Type used by the Decl. In flush(), we iterate over each Decl + /// and emit the typedef code for all types, making sure to not emit the same thing twice. + /// Any arena memory the Type points to lives in the `arena` field of `C`. 
+ typedefs: codegen.TypedefMap.Unmanaged = .{}, + + fn deinit(db: *DeclBlock, gpa: *Allocator) void { + db.code.deinit(gpa); + db.fwd_decl.deinit(gpa); + for (db.typedefs.values()) |typedef| { + gpa.free(typedef.rendered); + } + db.typedefs.deinit(gpa); + db.* = undefined; + } }; -pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*C { +pub fn openPath(gpa: *Allocator, sub_path: []const u8, options: link.Options) !*C { assert(options.object_format == .c); if (options.use_llvm) return error.LLVMHasNoCBackend; @@ -57,15 +61,16 @@ pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Optio }); errdefer file.close(); - var c_file = try allocator.create(C); - errdefer allocator.destroy(c_file); + var c_file = try gpa.create(C); + errdefer gpa.destroy(c_file); c_file.* = C{ + .arena = std.heap.ArenaAllocator.init(gpa), .base = .{ .tag = .c, .options = options, .file = file, - .allocator = allocator, + .allocator = gpa, }, }; @@ -73,38 +78,105 @@ pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Optio } pub fn deinit(self: *C) void { - for (self.decl_table.keys()) |key| { - deinitDecl(self.base.allocator, key); + const gpa = self.base.allocator; + + for (self.decl_table.values()) |*db| { + db.deinit(gpa); } - self.decl_table.deinit(self.base.allocator); -} + self.decl_table.deinit(gpa); -pub fn allocateDeclIndexes(self: *C, decl: *Module.Decl) !void { - _ = self; - _ = decl; + self.arena.deinit(); } pub fn freeDecl(self: *C, decl: *Module.Decl) void { - _ = self.decl_table.swapRemove(decl); - deinitDecl(self.base.allocator, decl); + const gpa = self.base.allocator; + if (self.decl_table.fetchSwapRemove(decl)) |*kv| { + kv.value.deinit(gpa); + } } -fn deinitDecl(gpa: *Allocator, decl: *Module.Decl) void { - decl.link.c.code.deinit(gpa); - decl.fn_link.c.fwd_decl.deinit(gpa); - for (decl.fn_link.c.typedefs.values()) |value| { - gpa.free(value.rendered); +pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + const tracy = trace(@src()); + defer tracy.end(); + + const decl = func.owner_decl; + const gop = try self.decl_table.getOrPut(self.base.allocator, decl); + if (!gop.found_existing) { + gop.value_ptr.* = .{}; + } + const fwd_decl = &gop.value_ptr.fwd_decl; + const typedefs = &gop.value_ptr.typedefs; + const code = &gop.value_ptr.code; + fwd_decl.shrinkRetainingCapacity(0); + { + for (typedefs.values()) |value| { + module.gpa.free(value.rendered); + } + } + typedefs.clearRetainingCapacity(); + code.shrinkRetainingCapacity(0); + + var function: codegen.Function = .{ + .value_map = codegen.CValueMap.init(module.gpa), + .air = air, + .liveness = liveness, + .func = func, + .object = .{ + .dg = .{ + .gpa = module.gpa, + .module = module, + .error_msg = null, + .decl = decl, + .fwd_decl = fwd_decl.toManaged(module.gpa), + .typedefs = typedefs.promote(module.gpa), + .typedefs_arena = &self.arena.allocator, + }, + .code = code.toManaged(module.gpa), + .indent_writer = undefined, // set later so we can get a pointer to object.code + }, + }; + + function.object.indent_writer = .{ .underlying_writer = function.object.code.writer() }; + defer { + function.value_map.deinit(); + function.blocks.deinit(module.gpa); + function.object.code.deinit(); + function.object.dg.fwd_decl.deinit(); + for (function.object.dg.typedefs.values()) |value| { + module.gpa.free(value.rendered); + } + function.object.dg.typedefs.deinit(); } - decl.fn_link.c.typedefs.deinit(gpa); + + 
codegen.genFunc(&function) catch |err| switch (err) { + error.AnalysisFail => { + try module.failed_decls.put(module.gpa, decl, function.object.dg.error_msg.?); + return; + }, + else => |e| return e, + }; + + fwd_decl.* = function.object.dg.fwd_decl.moveToUnmanaged(); + typedefs.* = function.object.dg.typedefs.unmanaged; + function.object.dg.typedefs.unmanaged = .{}; + code.* = function.object.code.moveToUnmanaged(); + + // Free excess allocated memory for this Decl. + fwd_decl.shrinkAndFree(module.gpa, fwd_decl.items.len); + code.shrinkAndFree(module.gpa, code.items.len); } -pub fn finishUpdateDecl(self: *C, module: *Module, decl: *Module.Decl, air: Air, liveness: Liveness) !void { - // Keep track of all decls so we can iterate over them on flush(). - _ = try self.decl_table.getOrPut(self.base.allocator, decl); +pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void { + const tracy = trace(@src()); + defer tracy.end(); - const fwd_decl = &decl.fn_link.c.fwd_decl; - const typedefs = &decl.fn_link.c.typedefs; - const code = &decl.link.c.code; + const gop = try self.decl_table.getOrPut(self.base.allocator, decl); + if (!gop.found_existing) { + gop.value_ptr.* = .{}; + } + const fwd_decl = &gop.value_ptr.fwd_decl; + const typedefs = &gop.value_ptr.typedefs; + const code = &gop.value_ptr.code; fwd_decl.shrinkRetainingCapacity(0); { for (typedefs.values()) |value| { @@ -116,23 +188,19 @@ pub fn finishUpdateDecl(self: *C, module: *Module, decl: *Module.Decl, air: Air, var object: codegen.Object = .{ .dg = .{ + .gpa = module.gpa, .module = module, .error_msg = null, .decl = decl, .fwd_decl = fwd_decl.toManaged(module.gpa), .typedefs = typedefs.promote(module.gpa), + .typedefs_arena = &self.arena.allocator, }, - .gpa = module.gpa, .code = code.toManaged(module.gpa), - .value_map = codegen.CValueMap.init(module.gpa), .indent_writer = undefined, // set later so we can get a pointer to object.code - .air = air, - .liveness = liveness, }; object.indent_writer = .{ .underlying_writer = object.code.writer() }; defer { - object.value_map.deinit(); - object.blocks.deinit(module.gpa); object.code.deinit(); object.dg.fwd_decl.deinit(); for (object.dg.typedefs.values()) |value| { @@ -159,24 +227,12 @@ pub fn finishUpdateDecl(self: *C, module: *Module, decl: *Module.Decl, air: Air, code.shrinkAndFree(module.gpa, code.items.len); } -pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { - const tracy = trace(@src()); - defer tracy.end(); - - return self.finishUpdateDecl(module, func.owner_decl, air, liveness); -} - -pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void { - const tracy = trace(@src()); - defer tracy.end(); - - return self.finishUpdateDecl(module, decl, undefined, undefined); -} - pub fn updateDeclLineNumber(self: *C, module: *Module, decl: *Module.Decl) !void { // The C backend does not have the ability to fix line numbers without re-generating // the entire Decl. - return self.updateDecl(module, decl); + _ = self; + _ = module; + _ = decl; } pub fn flush(self: *C, comp: *Compilation) !void { @@ -223,32 +279,42 @@ pub fn flushModule(self: *C, comp: *Compilation) !void { var typedefs = std.HashMap(Type, void, Type.HashContext64, std.hash_map.default_max_load_percentage).init(comp.gpa); defer typedefs.deinit(); - // Typedefs, forward decls and non-functions first. + // Typedefs, forward decls, and non-functions first. 
// TODO: performance investigation: would keeping a list of Decls that we should // generate, rather than querying here, be faster? - for (self.decl_table.keys()) |decl| { - if (!decl.has_tv) continue; - const buf = buf: { - if (decl.val.castTag(.function)) |_| { - try typedefs.ensureUnusedCapacity(@intCast(u32, decl.fn_link.c.typedefs.count())); - var it = decl.fn_link.c.typedefs.iterator(); - while (it.next()) |new| { - const gop = typedefs.getOrPutAssumeCapacity(new.key_ptr.*); - if (!gop.found_existing) { - try err_typedef_writer.writeAll(new.value_ptr.rendered); - } + const decl_keys = self.decl_table.keys(); + const decl_values = self.decl_table.values(); + for (decl_keys) |decl, i| { + if (!decl.has_tv) continue; // TODO do we really need this branch? + + const decl_block = &decl_values[i]; + + if (decl_block.fwd_decl.items.len != 0) { + try typedefs.ensureUnusedCapacity(@intCast(u32, decl_block.typedefs.count())); + var it = decl_block.typedefs.iterator(); + while (it.next()) |new| { + const gop = typedefs.getOrPutAssumeCapacity(new.key_ptr.*); + if (!gop.found_existing) { + try err_typedef_writer.writeAll(new.value_ptr.rendered); } - fn_count += 1; - break :buf decl.fn_link.c.fwd_decl.items; - } else { - break :buf decl.link.c.code.items; } - }; - all_buffers.appendAssumeCapacity(.{ - .iov_base = buf.ptr, - .iov_len = buf.len, - }); - file_size += buf.len; + const buf = decl_block.fwd_decl.items; + all_buffers.appendAssumeCapacity(.{ + .iov_base = buf.ptr, + .iov_len = buf.len, + }); + file_size += buf.len; + } + if (decl.getFunction() != null) { + fn_count += 1; + } else if (decl_block.code.items.len != 0) { + const buf = decl_block.code.items; + all_buffers.appendAssumeCapacity(.{ + .iov_base = buf.ptr, + .iov_len = buf.len, + }); + file_size += buf.len; + } } err_typedef_item.* = .{ @@ -259,15 +325,17 @@ pub fn flushModule(self: *C, comp: *Compilation) !void { // Now the function bodies. 
try all_buffers.ensureUnusedCapacity(fn_count); - for (self.decl_table.keys()) |decl| { - if (!decl.has_tv) continue; - if (decl.val.castTag(.function)) |_| { - const buf = decl.link.c.code.items; - all_buffers.appendAssumeCapacity(.{ - .iov_base = buf.ptr, - .iov_len = buf.len, - }); - file_size += buf.len; + for (decl_keys) |decl, i| { + if (decl.getFunction() != null) { + const decl_block = &decl_values[i]; + const buf = decl_block.code.items; + if (buf.len != 0) { + all_buffers.appendAssumeCapacity(.{ + .iov_base = buf.ptr, + .iov_len = buf.len, + }); + file_size += buf.len; + } } } diff --git a/src/type.zig b/src/type.zig index 5d184ed2fc..d4993151df 100644 --- a/src/type.zig +++ b/src/type.zig @@ -1366,10 +1366,6 @@ pub const Type = extern union { .f128, .bool, .anyerror, - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, .single_const_pointer_to_comptime_int, .const_slice_u8, .array_u8_sentinel_0, @@ -1397,6 +1393,12 @@ pub const Type = extern union { .function => !self.castTag(.function).?.data.is_generic, + .fn_noreturn_no_args, + .fn_void_no_args, + .fn_naked_noreturn_no_args, + .fn_ccc_void_no_args, + => true, + .@"struct" => { // TODO introduce lazy value mechanism const struct_obj = self.castTag(.@"struct").?.data; -- cgit v1.2.3 From aecebf38acc8835db21eeea7b53e4ee26ec739a8 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 21 Sep 2021 22:33:00 -0700 Subject: stage2: progress towards ability to compile compiler-rt * prepare compiler-rt to support being compiled by stage2 - put in a few minor workarounds that will be removed later, such as using `builtin.stage2_arch` rather than `builtin.cpu.arch`. - only try to export a few symbols for now - we'll move more symbols over to the "working in stage2" section as they become functional and gain test coverage. - use `inline fn` at function declarations rather than `@call` with an always_inline modifier at the callsites, to avoid depending on the anonymous array literal syntax language feature (for now). * AIR: replace floatcast instruction with fptrunc and fpext for shortening and widening floating point values, respectively. * Introduce a new ZIR instruction, `export_value`, which implements `@export` for the case when the thing to be exported is a local comptime value that points to a function. - AstGen: fix `@export` not properly reporting ambiguous decl references. * Sema: handle ExportOptions linkage. The value is now available to all backends. - Implement setting global linkage as appropriate in the LLVM backend. I did not yet inspect the LLVM IR, so this still needs to be audited. There is already a pending task to make sure the alias stuff is working as intended, and this is related. - Sema almost handles section, just a tiny bit more code is needed in `resolveExportOptions`. * Sema: implement float widening and shortening for both `@floatCast` and float coercion. - Implement the LLVM backend code for this as well. 
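
To make the new patterns concrete, here is a small sketch (not part of this
patch) of the compiler-rt export style, the `inline fn` workaround, and float
widening via plain coercion described above. The file name `sketch.zig` and the
symbols `extendImpl` and `__my_extendsfdf2` are made up for illustration only:

    // sketch.zig - illustrative only; mirrors the patterns described above.
    const std = @import("std");
    const builtin = @import("builtin");

    // Weak linkage for normal builds, Internal under the test runner,
    // like the `linkage` constant in compiler_rt.zig.
    const linkage = if (builtin.is_test)
        std.builtin.GlobalLinkage.Internal
    else
        std.builtin.GlobalLinkage.Weak;

    // `inline fn` at the declaration replaces `@call` with an always_inline
    // modifier at each call site, avoiding the anonymous array literal syntax.
    inline fn extendImpl(x: f32) f64 {
        return x; // float widening: f32 coerces to f64 (fpext in AIR terms)
    }

    fn __my_extendsfdf2(x: f32) callconv(.C) f64 {
        return extendImpl(x);
    }

    comptime {
        // Exporting a function reference with explicit linkage; the linkage
        // field of ExportOptions is what Sema now passes through to backends.
        @export(__my_extendsfdf2, .{ .name = "__my_extendsfdf2", .linkage = linkage });
    }

The weak/strong split shown here is the same one the real compiler_rt.zig keeps
via its `linkage` and `strong_linkage` constants.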
--- lib/std/special/compiler_rt.zig | 1164 ++++++++++++++------------- lib/std/special/compiler_rt/extendXfYf2.zig | 12 +- src/Air.zig | 10 +- src/AstGen.zig | 74 +- src/Liveness.zig | 3 +- src/Module.zig | 42 +- src/Sema.zig | 136 +++- src/Zir.zig | 14 +- src/codegen.zig | 15 +- src/codegen/c.zig | 10 +- src/codegen/llvm.zig | 36 +- src/codegen/llvm/bindings.zig | 16 + src/print_air.zig | 3 +- src/print_zir.zig | 12 + src/stage1/codegen.cpp | 1 + src/value.zig | 29 +- test/behavior.zig | 4 +- test/behavior/union.zig | 813 ------------------- test/behavior/union_stage1.zig | 799 ++++++++++++++++++ test/behavior/widening.zig | 43 + test/behavior/widening_stage1.zig | 34 - 21 files changed, 1723 insertions(+), 1547 deletions(-) create mode 100644 test/behavior/union_stage1.zig delete mode 100644 test/behavior/widening_stage1.zig (limited to 'src/codegen/c.zig') diff --git a/lib/std/special/compiler_rt.zig b/lib/std/special/compiler_rt.zig index ed7f9d0c1c..2fb68f85dc 100644 --- a/lib/std/special/compiler_rt.zig +++ b/lib/std/special/compiler_rt.zig @@ -1,171 +1,24 @@ const std = @import("std"); -const builtin = std.builtin; +const builtin = @import("builtin"); const is_test = builtin.is_test; const os_tag = std.Target.current.os.tag; -const arch = std.Target.current.cpu.arch; +const arch = builtin.stage2_arch; const abi = std.Target.current.abi; const is_gnu = abi.isGnu(); const is_mingw = os_tag == .windows and is_gnu; -comptime { - const linkage = if (is_test) builtin.GlobalLinkage.Internal else builtin.GlobalLinkage.Weak; - const strong_linkage = if (is_test) builtin.GlobalLinkage.Internal else builtin.GlobalLinkage.Strong; - - switch (arch) { - .i386, - .x86_64, - => { - const zig_probe_stack = @import("compiler_rt/stack_probe.zig").zig_probe_stack; - @export(zig_probe_stack, .{ - .name = "__zig_probe_stack", - .linkage = linkage, - }); - }, - - else => {}, - } +const linkage = if (is_test) + std.builtin.GlobalLinkage.Internal +else + std.builtin.GlobalLinkage.Weak; - // __clear_cache manages its own logic about whether to be exported or not. 
- _ = @import("compiler_rt/clear_cache.zig").clear_cache; - - const __lesf2 = @import("compiler_rt/compareXf2.zig").__lesf2; - @export(__lesf2, .{ .name = "__lesf2", .linkage = linkage }); - const __ledf2 = @import("compiler_rt/compareXf2.zig").__ledf2; - @export(__ledf2, .{ .name = "__ledf2", .linkage = linkage }); - const __letf2 = @import("compiler_rt/compareXf2.zig").__letf2; - @export(__letf2, .{ .name = "__letf2", .linkage = linkage }); - - const __gesf2 = @import("compiler_rt/compareXf2.zig").__gesf2; - @export(__gesf2, .{ .name = "__gesf2", .linkage = linkage }); - const __gedf2 = @import("compiler_rt/compareXf2.zig").__gedf2; - @export(__gedf2, .{ .name = "__gedf2", .linkage = linkage }); - const __getf2 = @import("compiler_rt/compareXf2.zig").__getf2; - @export(__getf2, .{ .name = "__getf2", .linkage = linkage }); - - if (!is_test) { - @export(__lesf2, .{ .name = "__cmpsf2", .linkage = linkage }); - @export(__ledf2, .{ .name = "__cmpdf2", .linkage = linkage }); - @export(__letf2, .{ .name = "__cmptf2", .linkage = linkage }); - - const __eqsf2 = @import("compiler_rt/compareXf2.zig").__eqsf2; - @export(__eqsf2, .{ .name = "__eqsf2", .linkage = linkage }); - const __eqdf2 = @import("compiler_rt/compareXf2.zig").__eqdf2; - @export(__eqdf2, .{ .name = "__eqdf2", .linkage = linkage }); - @export(__letf2, .{ .name = "__eqtf2", .linkage = linkage }); - - const __ltsf2 = @import("compiler_rt/compareXf2.zig").__ltsf2; - @export(__ltsf2, .{ .name = "__ltsf2", .linkage = linkage }); - const __ltdf2 = @import("compiler_rt/compareXf2.zig").__ltdf2; - @export(__ltdf2, .{ .name = "__ltdf2", .linkage = linkage }); - @export(__letf2, .{ .name = "__lttf2", .linkage = linkage }); - - const __nesf2 = @import("compiler_rt/compareXf2.zig").__nesf2; - @export(__nesf2, .{ .name = "__nesf2", .linkage = linkage }); - const __nedf2 = @import("compiler_rt/compareXf2.zig").__nedf2; - @export(__nedf2, .{ .name = "__nedf2", .linkage = linkage }); - @export(__letf2, .{ .name = "__netf2", .linkage = linkage }); - - const __gtsf2 = @import("compiler_rt/compareXf2.zig").__gtsf2; - @export(__gtsf2, .{ .name = "__gtsf2", .linkage = linkage }); - const __gtdf2 = @import("compiler_rt/compareXf2.zig").__gtdf2; - @export(__gtdf2, .{ .name = "__gtdf2", .linkage = linkage }); - @export(__getf2, .{ .name = "__gttf2", .linkage = linkage }); - - const __extendhfsf2 = @import("compiler_rt/extendXfYf2.zig").__extendhfsf2; - @export(__extendhfsf2, .{ .name = "__gnu_h2f_ieee", .linkage = linkage }); - const __truncsfhf2 = @import("compiler_rt/truncXfYf2.zig").__truncsfhf2; - @export(__truncsfhf2, .{ .name = "__gnu_f2h_ieee", .linkage = linkage }); - } - - const __unordsf2 = @import("compiler_rt/compareXf2.zig").__unordsf2; - @export(__unordsf2, .{ .name = "__unordsf2", .linkage = linkage }); - const __unorddf2 = @import("compiler_rt/compareXf2.zig").__unorddf2; - @export(__unorddf2, .{ .name = "__unorddf2", .linkage = linkage }); - const __unordtf2 = @import("compiler_rt/compareXf2.zig").__unordtf2; - @export(__unordtf2, .{ .name = "__unordtf2", .linkage = linkage }); - - const __addsf3 = @import("compiler_rt/addXf3.zig").__addsf3; - @export(__addsf3, .{ .name = "__addsf3", .linkage = linkage }); - const __adddf3 = @import("compiler_rt/addXf3.zig").__adddf3; - @export(__adddf3, .{ .name = "__adddf3", .linkage = linkage }); - const __addtf3 = @import("compiler_rt/addXf3.zig").__addtf3; - @export(__addtf3, .{ .name = "__addtf3", .linkage = linkage }); - const __subsf3 = @import("compiler_rt/addXf3.zig").__subsf3; - 
@export(__subsf3, .{ .name = "__subsf3", .linkage = linkage }); - const __subdf3 = @import("compiler_rt/addXf3.zig").__subdf3; - @export(__subdf3, .{ .name = "__subdf3", .linkage = linkage }); - const __subtf3 = @import("compiler_rt/addXf3.zig").__subtf3; - @export(__subtf3, .{ .name = "__subtf3", .linkage = linkage }); - - const __mulsf3 = @import("compiler_rt/mulXf3.zig").__mulsf3; - @export(__mulsf3, .{ .name = "__mulsf3", .linkage = linkage }); - const __muldf3 = @import("compiler_rt/mulXf3.zig").__muldf3; - @export(__muldf3, .{ .name = "__muldf3", .linkage = linkage }); - const __multf3 = @import("compiler_rt/mulXf3.zig").__multf3; - @export(__multf3, .{ .name = "__multf3", .linkage = linkage }); - - const __divsf3 = @import("compiler_rt/divsf3.zig").__divsf3; - @export(__divsf3, .{ .name = "__divsf3", .linkage = linkage }); - const __divdf3 = @import("compiler_rt/divdf3.zig").__divdf3; - @export(__divdf3, .{ .name = "__divdf3", .linkage = linkage }); - const __divtf3 = @import("compiler_rt/divtf3.zig").__divtf3; - @export(__divtf3, .{ .name = "__divtf3", .linkage = linkage }); - - const __ashldi3 = @import("compiler_rt/shift.zig").__ashldi3; - @export(__ashldi3, .{ .name = "__ashldi3", .linkage = linkage }); - const __ashlti3 = @import("compiler_rt/shift.zig").__ashlti3; - @export(__ashlti3, .{ .name = "__ashlti3", .linkage = linkage }); - const __ashrdi3 = @import("compiler_rt/shift.zig").__ashrdi3; - @export(__ashrdi3, .{ .name = "__ashrdi3", .linkage = linkage }); - const __ashrti3 = @import("compiler_rt/shift.zig").__ashrti3; - @export(__ashrti3, .{ .name = "__ashrti3", .linkage = linkage }); - const __lshrdi3 = @import("compiler_rt/shift.zig").__lshrdi3; - @export(__lshrdi3, .{ .name = "__lshrdi3", .linkage = linkage }); - const __lshrti3 = @import("compiler_rt/shift.zig").__lshrti3; - @export(__lshrti3, .{ .name = "__lshrti3", .linkage = linkage }); - - const __floatsidf = @import("compiler_rt/floatsiXf.zig").__floatsidf; - @export(__floatsidf, .{ .name = "__floatsidf", .linkage = linkage }); - const __floatsisf = @import("compiler_rt/floatsiXf.zig").__floatsisf; - @export(__floatsisf, .{ .name = "__floatsisf", .linkage = linkage }); - const __floatdidf = @import("compiler_rt/floatdidf.zig").__floatdidf; - @export(__floatdidf, .{ .name = "__floatdidf", .linkage = linkage }); - const __floatsitf = @import("compiler_rt/floatsiXf.zig").__floatsitf; - @export(__floatsitf, .{ .name = "__floatsitf", .linkage = linkage }); - - const __floatunsisf = @import("compiler_rt/floatunsisf.zig").__floatunsisf; - @export(__floatunsisf, .{ .name = "__floatunsisf", .linkage = linkage }); - const __floatundisf = @import("compiler_rt/floatundisf.zig").__floatundisf; - @export(__floatundisf, .{ .name = "__floatundisf", .linkage = linkage }); - const __floatunsidf = @import("compiler_rt/floatunsidf.zig").__floatunsidf; - @export(__floatunsidf, .{ .name = "__floatunsidf", .linkage = linkage }); - const __floatundidf = @import("compiler_rt/floatundidf.zig").__floatundidf; - @export(__floatundidf, .{ .name = "__floatundidf", .linkage = linkage }); - - const __floatditf = @import("compiler_rt/floatditf.zig").__floatditf; - @export(__floatditf, .{ .name = "__floatditf", .linkage = linkage }); - const __floattitf = @import("compiler_rt/floattitf.zig").__floattitf; - @export(__floattitf, .{ .name = "__floattitf", .linkage = linkage }); - const __floattidf = @import("compiler_rt/floattidf.zig").__floattidf; - @export(__floattidf, .{ .name = "__floattidf", .linkage = linkage }); - const __floattisf = 
@import("compiler_rt/floatXisf.zig").__floattisf; - @export(__floattisf, .{ .name = "__floattisf", .linkage = linkage }); - const __floatdisf = @import("compiler_rt/floatXisf.zig").__floatdisf; - @export(__floatdisf, .{ .name = "__floatdisf", .linkage = linkage }); - - const __floatunditf = @import("compiler_rt/floatunditf.zig").__floatunditf; - @export(__floatunditf, .{ .name = "__floatunditf", .linkage = linkage }); - const __floatunsitf = @import("compiler_rt/floatunsitf.zig").__floatunsitf; - @export(__floatunsitf, .{ .name = "__floatunsitf", .linkage = linkage }); - - const __floatuntitf = @import("compiler_rt/floatuntitf.zig").__floatuntitf; - @export(__floatuntitf, .{ .name = "__floatuntitf", .linkage = linkage }); - const __floatuntidf = @import("compiler_rt/floatuntidf.zig").__floatuntidf; - @export(__floatuntidf, .{ .name = "__floatuntidf", .linkage = linkage }); - const __floatuntisf = @import("compiler_rt/floatuntisf.zig").__floatuntisf; - @export(__floatuntisf, .{ .name = "__floatuntisf", .linkage = linkage }); +const strong_linkage = if (is_test) + std.builtin.GlobalLinkage.Internal +else + std.builtin.GlobalLinkage.Strong; +comptime { const __extenddftf2 = @import("compiler_rt/extendXfYf2.zig").__extenddftf2; @export(__extenddftf2, .{ .name = "__extenddftf2", .linkage = linkage }); const __extendsftf2 = @import("compiler_rt/extendXfYf2.zig").__extendsftf2; @@ -175,446 +28,611 @@ comptime { const __extendhftf2 = @import("compiler_rt/extendXfYf2.zig").__extendhftf2; @export(__extendhftf2, .{ .name = "__extendhftf2", .linkage = linkage }); - const __truncsfhf2 = @import("compiler_rt/truncXfYf2.zig").__truncsfhf2; - @export(__truncsfhf2, .{ .name = "__truncsfhf2", .linkage = linkage }); - const __truncdfhf2 = @import("compiler_rt/truncXfYf2.zig").__truncdfhf2; - @export(__truncdfhf2, .{ .name = "__truncdfhf2", .linkage = linkage }); - const __trunctfhf2 = @import("compiler_rt/truncXfYf2.zig").__trunctfhf2; - @export(__trunctfhf2, .{ .name = "__trunctfhf2", .linkage = linkage }); - const __trunctfdf2 = @import("compiler_rt/truncXfYf2.zig").__trunctfdf2; - @export(__trunctfdf2, .{ .name = "__trunctfdf2", .linkage = linkage }); - const __trunctfsf2 = @import("compiler_rt/truncXfYf2.zig").__trunctfsf2; - @export(__trunctfsf2, .{ .name = "__trunctfsf2", .linkage = linkage }); - - const __truncdfsf2 = @import("compiler_rt/truncXfYf2.zig").__truncdfsf2; - @export(__truncdfsf2, .{ .name = "__truncdfsf2", .linkage = linkage }); - - const __extendsfdf2 = @import("compiler_rt/extendXfYf2.zig").__extendsfdf2; - @export(__extendsfdf2, .{ .name = "__extendsfdf2", .linkage = linkage }); - - const __fixunssfsi = @import("compiler_rt/fixunssfsi.zig").__fixunssfsi; - @export(__fixunssfsi, .{ .name = "__fixunssfsi", .linkage = linkage }); - const __fixunssfdi = @import("compiler_rt/fixunssfdi.zig").__fixunssfdi; - @export(__fixunssfdi, .{ .name = "__fixunssfdi", .linkage = linkage }); - const __fixunssfti = @import("compiler_rt/fixunssfti.zig").__fixunssfti; - @export(__fixunssfti, .{ .name = "__fixunssfti", .linkage = linkage }); - - const __fixunsdfsi = @import("compiler_rt/fixunsdfsi.zig").__fixunsdfsi; - @export(__fixunsdfsi, .{ .name = "__fixunsdfsi", .linkage = linkage }); - const __fixunsdfdi = @import("compiler_rt/fixunsdfdi.zig").__fixunsdfdi; - @export(__fixunsdfdi, .{ .name = "__fixunsdfdi", .linkage = linkage }); - const __fixunsdfti = @import("compiler_rt/fixunsdfti.zig").__fixunsdfti; - @export(__fixunsdfti, .{ .name = "__fixunsdfti", .linkage = linkage }); - - const __fixunstfsi = 
@import("compiler_rt/fixunstfsi.zig").__fixunstfsi; - @export(__fixunstfsi, .{ .name = "__fixunstfsi", .linkage = linkage }); - const __fixunstfdi = @import("compiler_rt/fixunstfdi.zig").__fixunstfdi; - @export(__fixunstfdi, .{ .name = "__fixunstfdi", .linkage = linkage }); - const __fixunstfti = @import("compiler_rt/fixunstfti.zig").__fixunstfti; - @export(__fixunstfti, .{ .name = "__fixunstfti", .linkage = linkage }); - - const __fixdfdi = @import("compiler_rt/fixdfdi.zig").__fixdfdi; - @export(__fixdfdi, .{ .name = "__fixdfdi", .linkage = linkage }); - const __fixdfsi = @import("compiler_rt/fixdfsi.zig").__fixdfsi; - @export(__fixdfsi, .{ .name = "__fixdfsi", .linkage = linkage }); - const __fixdfti = @import("compiler_rt/fixdfti.zig").__fixdfti; - @export(__fixdfti, .{ .name = "__fixdfti", .linkage = linkage }); - const __fixsfdi = @import("compiler_rt/fixsfdi.zig").__fixsfdi; - @export(__fixsfdi, .{ .name = "__fixsfdi", .linkage = linkage }); - const __fixsfsi = @import("compiler_rt/fixsfsi.zig").__fixsfsi; - @export(__fixsfsi, .{ .name = "__fixsfsi", .linkage = linkage }); - const __fixsfti = @import("compiler_rt/fixsfti.zig").__fixsfti; - @export(__fixsfti, .{ .name = "__fixsfti", .linkage = linkage }); - const __fixtfdi = @import("compiler_rt/fixtfdi.zig").__fixtfdi; - @export(__fixtfdi, .{ .name = "__fixtfdi", .linkage = linkage }); - const __fixtfsi = @import("compiler_rt/fixtfsi.zig").__fixtfsi; - @export(__fixtfsi, .{ .name = "__fixtfsi", .linkage = linkage }); - const __fixtfti = @import("compiler_rt/fixtfti.zig").__fixtfti; - @export(__fixtfti, .{ .name = "__fixtfti", .linkage = linkage }); - - const __udivmoddi4 = @import("compiler_rt/int.zig").__udivmoddi4; - @export(__udivmoddi4, .{ .name = "__udivmoddi4", .linkage = linkage }); - const __popcountdi2 = @import("compiler_rt/popcountdi2.zig").__popcountdi2; - @export(__popcountdi2, .{ .name = "__popcountdi2", .linkage = linkage }); - - const __mulsi3 = @import("compiler_rt/int.zig").__mulsi3; - @export(__mulsi3, .{ .name = "__mulsi3", .linkage = linkage }); - const __muldi3 = @import("compiler_rt/muldi3.zig").__muldi3; - @export(__muldi3, .{ .name = "__muldi3", .linkage = linkage }); - const __divmoddi4 = @import("compiler_rt/int.zig").__divmoddi4; - @export(__divmoddi4, .{ .name = "__divmoddi4", .linkage = linkage }); - const __divsi3 = @import("compiler_rt/int.zig").__divsi3; - @export(__divsi3, .{ .name = "__divsi3", .linkage = linkage }); - const __divdi3 = @import("compiler_rt/int.zig").__divdi3; - @export(__divdi3, .{ .name = "__divdi3", .linkage = linkage }); - const __udivsi3 = @import("compiler_rt/int.zig").__udivsi3; - @export(__udivsi3, .{ .name = "__udivsi3", .linkage = linkage }); - const __udivdi3 = @import("compiler_rt/int.zig").__udivdi3; - @export(__udivdi3, .{ .name = "__udivdi3", .linkage = linkage }); - const __modsi3 = @import("compiler_rt/int.zig").__modsi3; - @export(__modsi3, .{ .name = "__modsi3", .linkage = linkage }); - const __moddi3 = @import("compiler_rt/int.zig").__moddi3; - @export(__moddi3, .{ .name = "__moddi3", .linkage = linkage }); - const __umodsi3 = @import("compiler_rt/int.zig").__umodsi3; - @export(__umodsi3, .{ .name = "__umodsi3", .linkage = linkage }); - const __umoddi3 = @import("compiler_rt/int.zig").__umoddi3; - @export(__umoddi3, .{ .name = "__umoddi3", .linkage = linkage }); - const __divmodsi4 = @import("compiler_rt/int.zig").__divmodsi4; - @export(__divmodsi4, .{ .name = "__divmodsi4", .linkage = linkage }); - const __udivmodsi4 = @import("compiler_rt/int.zig").__udivmodsi4; 
- @export(__udivmodsi4, .{ .name = "__udivmodsi4", .linkage = linkage }); - - const __negsf2 = @import("compiler_rt/negXf2.zig").__negsf2; - @export(__negsf2, .{ .name = "__negsf2", .linkage = linkage }); - const __negdf2 = @import("compiler_rt/negXf2.zig").__negdf2; - @export(__negdf2, .{ .name = "__negdf2", .linkage = linkage }); - - const __clzsi2 = @import("compiler_rt/count0bits.zig").__clzsi2; - @export(__clzsi2, .{ .name = "__clzsi2", .linkage = linkage }); - const __clzdi2 = @import("compiler_rt/count0bits.zig").__clzdi2; - @export(__clzdi2, .{ .name = "__clzdi2", .linkage = linkage }); - const __clzti2 = @import("compiler_rt/count0bits.zig").__clzti2; - @export(__clzti2, .{ .name = "__clzti2", .linkage = linkage }); - - if (builtin.link_libc and os_tag == .openbsd) { - const __emutls_get_address = @import("compiler_rt/emutls.zig").__emutls_get_address; - @export(__emutls_get_address, .{ .name = "__emutls_get_address", .linkage = linkage }); - } + if (!builtin.zig_is_stage2) { + switch (arch) { + .i386, + .x86_64, + => { + const zig_probe_stack = @import("compiler_rt/stack_probe.zig").zig_probe_stack; + @export(zig_probe_stack, .{ + .name = "__zig_probe_stack", + .linkage = linkage, + }); + }, - if ((arch.isARM() or arch.isThumb()) and !is_test) { - const __aeabi_unwind_cpp_pr0 = @import("compiler_rt/arm.zig").__aeabi_unwind_cpp_pr0; - @export(__aeabi_unwind_cpp_pr0, .{ .name = "__aeabi_unwind_cpp_pr0", .linkage = linkage }); - const __aeabi_unwind_cpp_pr1 = @import("compiler_rt/arm.zig").__aeabi_unwind_cpp_pr1; - @export(__aeabi_unwind_cpp_pr1, .{ .name = "__aeabi_unwind_cpp_pr1", .linkage = linkage }); - const __aeabi_unwind_cpp_pr2 = @import("compiler_rt/arm.zig").__aeabi_unwind_cpp_pr2; - @export(__aeabi_unwind_cpp_pr2, .{ .name = "__aeabi_unwind_cpp_pr2", .linkage = linkage }); - - @export(__muldi3, .{ .name = "__aeabi_lmul", .linkage = linkage }); - - const __aeabi_ldivmod = @import("compiler_rt/arm.zig").__aeabi_ldivmod; - @export(__aeabi_ldivmod, .{ .name = "__aeabi_ldivmod", .linkage = linkage }); - const __aeabi_uldivmod = @import("compiler_rt/arm.zig").__aeabi_uldivmod; - @export(__aeabi_uldivmod, .{ .name = "__aeabi_uldivmod", .linkage = linkage }); - - @export(__divsi3, .{ .name = "__aeabi_idiv", .linkage = linkage }); - const __aeabi_idivmod = @import("compiler_rt/arm.zig").__aeabi_idivmod; - @export(__aeabi_idivmod, .{ .name = "__aeabi_idivmod", .linkage = linkage }); - @export(__udivsi3, .{ .name = "__aeabi_uidiv", .linkage = linkage }); - const __aeabi_uidivmod = @import("compiler_rt/arm.zig").__aeabi_uidivmod; - @export(__aeabi_uidivmod, .{ .name = "__aeabi_uidivmod", .linkage = linkage }); - - const __aeabi_memcpy = @import("compiler_rt/arm.zig").__aeabi_memcpy; - @export(__aeabi_memcpy, .{ .name = "__aeabi_memcpy", .linkage = linkage }); - @export(__aeabi_memcpy, .{ .name = "__aeabi_memcpy4", .linkage = linkage }); - @export(__aeabi_memcpy, .{ .name = "__aeabi_memcpy8", .linkage = linkage }); - - const __aeabi_memmove = @import("compiler_rt/arm.zig").__aeabi_memmove; - @export(__aeabi_memmove, .{ .name = "__aeabi_memmove", .linkage = linkage }); - @export(__aeabi_memmove, .{ .name = "__aeabi_memmove4", .linkage = linkage }); - @export(__aeabi_memmove, .{ .name = "__aeabi_memmove8", .linkage = linkage }); - - const __aeabi_memset = @import("compiler_rt/arm.zig").__aeabi_memset; - @export(__aeabi_memset, .{ .name = "__aeabi_memset", .linkage = linkage }); - @export(__aeabi_memset, .{ .name = "__aeabi_memset4", .linkage = linkage }); - @export(__aeabi_memset, .{ 
.name = "__aeabi_memset8", .linkage = linkage }); - - const __aeabi_memclr = @import("compiler_rt/arm.zig").__aeabi_memclr; - @export(__aeabi_memclr, .{ .name = "__aeabi_memclr", .linkage = linkage }); - @export(__aeabi_memclr, .{ .name = "__aeabi_memclr4", .linkage = linkage }); - @export(__aeabi_memclr, .{ .name = "__aeabi_memclr8", .linkage = linkage }); - - if (os_tag == .linux) { - const __aeabi_read_tp = @import("compiler_rt/arm.zig").__aeabi_read_tp; - @export(__aeabi_read_tp, .{ .name = "__aeabi_read_tp", .linkage = linkage }); + else => {}, } - const __aeabi_f2d = @import("compiler_rt/extendXfYf2.zig").__aeabi_f2d; - @export(__aeabi_f2d, .{ .name = "__aeabi_f2d", .linkage = linkage }); - const __aeabi_i2d = @import("compiler_rt/floatsiXf.zig").__aeabi_i2d; - @export(__aeabi_i2d, .{ .name = "__aeabi_i2d", .linkage = linkage }); - const __aeabi_l2d = @import("compiler_rt/floatdidf.zig").__aeabi_l2d; - @export(__aeabi_l2d, .{ .name = "__aeabi_l2d", .linkage = linkage }); - const __aeabi_l2f = @import("compiler_rt/floatXisf.zig").__aeabi_l2f; - @export(__aeabi_l2f, .{ .name = "__aeabi_l2f", .linkage = linkage }); - const __aeabi_ui2d = @import("compiler_rt/floatunsidf.zig").__aeabi_ui2d; - @export(__aeabi_ui2d, .{ .name = "__aeabi_ui2d", .linkage = linkage }); - const __aeabi_ul2d = @import("compiler_rt/floatundidf.zig").__aeabi_ul2d; - @export(__aeabi_ul2d, .{ .name = "__aeabi_ul2d", .linkage = linkage }); - const __aeabi_ui2f = @import("compiler_rt/floatunsisf.zig").__aeabi_ui2f; - @export(__aeabi_ui2f, .{ .name = "__aeabi_ui2f", .linkage = linkage }); - const __aeabi_ul2f = @import("compiler_rt/floatundisf.zig").__aeabi_ul2f; - @export(__aeabi_ul2f, .{ .name = "__aeabi_ul2f", .linkage = linkage }); - - const __aeabi_fneg = @import("compiler_rt/negXf2.zig").__aeabi_fneg; - @export(__aeabi_fneg, .{ .name = "__aeabi_fneg", .linkage = linkage }); - const __aeabi_dneg = @import("compiler_rt/negXf2.zig").__aeabi_dneg; - @export(__aeabi_dneg, .{ .name = "__aeabi_dneg", .linkage = linkage }); - - const __aeabi_fmul = @import("compiler_rt/mulXf3.zig").__aeabi_fmul; - @export(__aeabi_fmul, .{ .name = "__aeabi_fmul", .linkage = linkage }); - const __aeabi_dmul = @import("compiler_rt/mulXf3.zig").__aeabi_dmul; - @export(__aeabi_dmul, .{ .name = "__aeabi_dmul", .linkage = linkage }); - - const __aeabi_d2h = @import("compiler_rt/truncXfYf2.zig").__aeabi_d2h; - @export(__aeabi_d2h, .{ .name = "__aeabi_d2h", .linkage = linkage }); - - const __aeabi_f2ulz = @import("compiler_rt/fixunssfdi.zig").__aeabi_f2ulz; - @export(__aeabi_f2ulz, .{ .name = "__aeabi_f2ulz", .linkage = linkage }); - const __aeabi_d2ulz = @import("compiler_rt/fixunsdfdi.zig").__aeabi_d2ulz; - @export(__aeabi_d2ulz, .{ .name = "__aeabi_d2ulz", .linkage = linkage }); - - const __aeabi_f2lz = @import("compiler_rt/fixsfdi.zig").__aeabi_f2lz; - @export(__aeabi_f2lz, .{ .name = "__aeabi_f2lz", .linkage = linkage }); - const __aeabi_d2lz = @import("compiler_rt/fixdfdi.zig").__aeabi_d2lz; - @export(__aeabi_d2lz, .{ .name = "__aeabi_d2lz", .linkage = linkage }); - - const __aeabi_d2uiz = @import("compiler_rt/fixunsdfsi.zig").__aeabi_d2uiz; - @export(__aeabi_d2uiz, .{ .name = "__aeabi_d2uiz", .linkage = linkage }); - - const __aeabi_h2f = @import("compiler_rt/extendXfYf2.zig").__aeabi_h2f; - @export(__aeabi_h2f, .{ .name = "__aeabi_h2f", .linkage = linkage }); - const __aeabi_f2h = @import("compiler_rt/truncXfYf2.zig").__aeabi_f2h; - @export(__aeabi_f2h, .{ .name = "__aeabi_f2h", .linkage = linkage }); - - const __aeabi_i2f = 
@import("compiler_rt/floatsiXf.zig").__aeabi_i2f; - @export(__aeabi_i2f, .{ .name = "__aeabi_i2f", .linkage = linkage }); - const __aeabi_d2f = @import("compiler_rt/truncXfYf2.zig").__aeabi_d2f; - @export(__aeabi_d2f, .{ .name = "__aeabi_d2f", .linkage = linkage }); - - const __aeabi_fadd = @import("compiler_rt/addXf3.zig").__aeabi_fadd; - @export(__aeabi_fadd, .{ .name = "__aeabi_fadd", .linkage = linkage }); - const __aeabi_dadd = @import("compiler_rt/addXf3.zig").__aeabi_dadd; - @export(__aeabi_dadd, .{ .name = "__aeabi_dadd", .linkage = linkage }); - const __aeabi_fsub = @import("compiler_rt/addXf3.zig").__aeabi_fsub; - @export(__aeabi_fsub, .{ .name = "__aeabi_fsub", .linkage = linkage }); - const __aeabi_dsub = @import("compiler_rt/addXf3.zig").__aeabi_dsub; - @export(__aeabi_dsub, .{ .name = "__aeabi_dsub", .linkage = linkage }); - - const __aeabi_f2uiz = @import("compiler_rt/fixunssfsi.zig").__aeabi_f2uiz; - @export(__aeabi_f2uiz, .{ .name = "__aeabi_f2uiz", .linkage = linkage }); - - const __aeabi_f2iz = @import("compiler_rt/fixsfsi.zig").__aeabi_f2iz; - @export(__aeabi_f2iz, .{ .name = "__aeabi_f2iz", .linkage = linkage }); - const __aeabi_d2iz = @import("compiler_rt/fixdfsi.zig").__aeabi_d2iz; - @export(__aeabi_d2iz, .{ .name = "__aeabi_d2iz", .linkage = linkage }); - - const __aeabi_fdiv = @import("compiler_rt/divsf3.zig").__aeabi_fdiv; - @export(__aeabi_fdiv, .{ .name = "__aeabi_fdiv", .linkage = linkage }); - const __aeabi_ddiv = @import("compiler_rt/divdf3.zig").__aeabi_ddiv; - @export(__aeabi_ddiv, .{ .name = "__aeabi_ddiv", .linkage = linkage }); - - const __aeabi_llsl = @import("compiler_rt/shift.zig").__aeabi_llsl; - @export(__aeabi_llsl, .{ .name = "__aeabi_llsl", .linkage = linkage }); - const __aeabi_lasr = @import("compiler_rt/shift.zig").__aeabi_lasr; - @export(__aeabi_lasr, .{ .name = "__aeabi_lasr", .linkage = linkage }); - const __aeabi_llsr = @import("compiler_rt/shift.zig").__aeabi_llsr; - @export(__aeabi_llsr, .{ .name = "__aeabi_llsr", .linkage = linkage }); - - const __aeabi_fcmpeq = @import("compiler_rt/compareXf2.zig").__aeabi_fcmpeq; - @export(__aeabi_fcmpeq, .{ .name = "__aeabi_fcmpeq", .linkage = linkage }); - const __aeabi_fcmplt = @import("compiler_rt/compareXf2.zig").__aeabi_fcmplt; - @export(__aeabi_fcmplt, .{ .name = "__aeabi_fcmplt", .linkage = linkage }); - const __aeabi_fcmple = @import("compiler_rt/compareXf2.zig").__aeabi_fcmple; - @export(__aeabi_fcmple, .{ .name = "__aeabi_fcmple", .linkage = linkage }); - const __aeabi_fcmpge = @import("compiler_rt/compareXf2.zig").__aeabi_fcmpge; - @export(__aeabi_fcmpge, .{ .name = "__aeabi_fcmpge", .linkage = linkage }); - const __aeabi_fcmpgt = @import("compiler_rt/compareXf2.zig").__aeabi_fcmpgt; - @export(__aeabi_fcmpgt, .{ .name = "__aeabi_fcmpgt", .linkage = linkage }); - const __aeabi_fcmpun = @import("compiler_rt/compareXf2.zig").__aeabi_fcmpun; - @export(__aeabi_fcmpun, .{ .name = "__aeabi_fcmpun", .linkage = linkage }); - - const __aeabi_dcmpeq = @import("compiler_rt/compareXf2.zig").__aeabi_dcmpeq; - @export(__aeabi_dcmpeq, .{ .name = "__aeabi_dcmpeq", .linkage = linkage }); - const __aeabi_dcmplt = @import("compiler_rt/compareXf2.zig").__aeabi_dcmplt; - @export(__aeabi_dcmplt, .{ .name = "__aeabi_dcmplt", .linkage = linkage }); - const __aeabi_dcmple = @import("compiler_rt/compareXf2.zig").__aeabi_dcmple; - @export(__aeabi_dcmple, .{ .name = "__aeabi_dcmple", .linkage = linkage }); - const __aeabi_dcmpge = @import("compiler_rt/compareXf2.zig").__aeabi_dcmpge; - @export(__aeabi_dcmpge, .{ .name 
= "__aeabi_dcmpge", .linkage = linkage }); - const __aeabi_dcmpgt = @import("compiler_rt/compareXf2.zig").__aeabi_dcmpgt; - @export(__aeabi_dcmpgt, .{ .name = "__aeabi_dcmpgt", .linkage = linkage }); - const __aeabi_dcmpun = @import("compiler_rt/compareXf2.zig").__aeabi_dcmpun; - @export(__aeabi_dcmpun, .{ .name = "__aeabi_dcmpun", .linkage = linkage }); - } + // __clear_cache manages its own logic about whether to be exported or not. + _ = @import("compiler_rt/clear_cache.zig").clear_cache; + + const __lesf2 = @import("compiler_rt/compareXf2.zig").__lesf2; + @export(__lesf2, .{ .name = "__lesf2", .linkage = linkage }); + const __ledf2 = @import("compiler_rt/compareXf2.zig").__ledf2; + @export(__ledf2, .{ .name = "__ledf2", .linkage = linkage }); + const __letf2 = @import("compiler_rt/compareXf2.zig").__letf2; + @export(__letf2, .{ .name = "__letf2", .linkage = linkage }); + + const __gesf2 = @import("compiler_rt/compareXf2.zig").__gesf2; + @export(__gesf2, .{ .name = "__gesf2", .linkage = linkage }); + const __gedf2 = @import("compiler_rt/compareXf2.zig").__gedf2; + @export(__gedf2, .{ .name = "__gedf2", .linkage = linkage }); + const __getf2 = @import("compiler_rt/compareXf2.zig").__getf2; + @export(__getf2, .{ .name = "__getf2", .linkage = linkage }); + + if (!is_test) { + @export(__lesf2, .{ .name = "__cmpsf2", .linkage = linkage }); + @export(__ledf2, .{ .name = "__cmpdf2", .linkage = linkage }); + @export(__letf2, .{ .name = "__cmptf2", .linkage = linkage }); + + const __eqsf2 = @import("compiler_rt/compareXf2.zig").__eqsf2; + @export(__eqsf2, .{ .name = "__eqsf2", .linkage = linkage }); + const __eqdf2 = @import("compiler_rt/compareXf2.zig").__eqdf2; + @export(__eqdf2, .{ .name = "__eqdf2", .linkage = linkage }); + @export(__letf2, .{ .name = "__eqtf2", .linkage = linkage }); + + const __ltsf2 = @import("compiler_rt/compareXf2.zig").__ltsf2; + @export(__ltsf2, .{ .name = "__ltsf2", .linkage = linkage }); + const __ltdf2 = @import("compiler_rt/compareXf2.zig").__ltdf2; + @export(__ltdf2, .{ .name = "__ltdf2", .linkage = linkage }); + @export(__letf2, .{ .name = "__lttf2", .linkage = linkage }); + + const __nesf2 = @import("compiler_rt/compareXf2.zig").__nesf2; + @export(__nesf2, .{ .name = "__nesf2", .linkage = linkage }); + const __nedf2 = @import("compiler_rt/compareXf2.zig").__nedf2; + @export(__nedf2, .{ .name = "__nedf2", .linkage = linkage }); + @export(__letf2, .{ .name = "__netf2", .linkage = linkage }); + + const __gtsf2 = @import("compiler_rt/compareXf2.zig").__gtsf2; + @export(__gtsf2, .{ .name = "__gtsf2", .linkage = linkage }); + const __gtdf2 = @import("compiler_rt/compareXf2.zig").__gtdf2; + @export(__gtdf2, .{ .name = "__gtdf2", .linkage = linkage }); + @export(__getf2, .{ .name = "__gttf2", .linkage = linkage }); + + @export(@import("compiler_rt/extendXfYf2.zig").__extendhfsf2, .{ + .name = "__gnu_h2f_ieee", + .linkage = linkage, + }); + @export(@import("compiler_rt/truncXfYf2.zig").__truncsfhf2, .{ + .name = "__gnu_f2h_ieee", + .linkage = linkage, + }); + } - if (arch == .i386 and abi == .msvc) { - // Don't let LLVM apply the stdcall name mangling on those MSVC builtins - const _alldiv = @import("compiler_rt/aulldiv.zig")._alldiv; - @export(_alldiv, .{ .name = "\x01__alldiv", .linkage = strong_linkage }); - const _aulldiv = @import("compiler_rt/aulldiv.zig")._aulldiv; - @export(_aulldiv, .{ .name = "\x01__aulldiv", .linkage = strong_linkage }); - const _allrem = @import("compiler_rt/aullrem.zig")._allrem; - @export(_allrem, .{ .name = "\x01__allrem", .linkage = 
strong_linkage }); - const _aullrem = @import("compiler_rt/aullrem.zig")._aullrem; - @export(_aullrem, .{ .name = "\x01__aullrem", .linkage = strong_linkage }); - } + const __unordsf2 = @import("compiler_rt/compareXf2.zig").__unordsf2; + @export(__unordsf2, .{ .name = "__unordsf2", .linkage = linkage }); + const __unorddf2 = @import("compiler_rt/compareXf2.zig").__unorddf2; + @export(__unorddf2, .{ .name = "__unorddf2", .linkage = linkage }); + const __unordtf2 = @import("compiler_rt/compareXf2.zig").__unordtf2; + @export(__unordtf2, .{ .name = "__unordtf2", .linkage = linkage }); + + const __addsf3 = @import("compiler_rt/addXf3.zig").__addsf3; + @export(__addsf3, .{ .name = "__addsf3", .linkage = linkage }); + const __adddf3 = @import("compiler_rt/addXf3.zig").__adddf3; + @export(__adddf3, .{ .name = "__adddf3", .linkage = linkage }); + const __addtf3 = @import("compiler_rt/addXf3.zig").__addtf3; + @export(__addtf3, .{ .name = "__addtf3", .linkage = linkage }); + const __subsf3 = @import("compiler_rt/addXf3.zig").__subsf3; + @export(__subsf3, .{ .name = "__subsf3", .linkage = linkage }); + const __subdf3 = @import("compiler_rt/addXf3.zig").__subdf3; + @export(__subdf3, .{ .name = "__subdf3", .linkage = linkage }); + const __subtf3 = @import("compiler_rt/addXf3.zig").__subtf3; + @export(__subtf3, .{ .name = "__subtf3", .linkage = linkage }); + + const __mulsf3 = @import("compiler_rt/mulXf3.zig").__mulsf3; + @export(__mulsf3, .{ .name = "__mulsf3", .linkage = linkage }); + const __muldf3 = @import("compiler_rt/mulXf3.zig").__muldf3; + @export(__muldf3, .{ .name = "__muldf3", .linkage = linkage }); + const __multf3 = @import("compiler_rt/mulXf3.zig").__multf3; + @export(__multf3, .{ .name = "__multf3", .linkage = linkage }); + + const __divsf3 = @import("compiler_rt/divsf3.zig").__divsf3; + @export(__divsf3, .{ .name = "__divsf3", .linkage = linkage }); + const __divdf3 = @import("compiler_rt/divdf3.zig").__divdf3; + @export(__divdf3, .{ .name = "__divdf3", .linkage = linkage }); + const __divtf3 = @import("compiler_rt/divtf3.zig").__divtf3; + @export(__divtf3, .{ .name = "__divtf3", .linkage = linkage }); + + const __ashldi3 = @import("compiler_rt/shift.zig").__ashldi3; + @export(__ashldi3, .{ .name = "__ashldi3", .linkage = linkage }); + const __ashlti3 = @import("compiler_rt/shift.zig").__ashlti3; + @export(__ashlti3, .{ .name = "__ashlti3", .linkage = linkage }); + const __ashrdi3 = @import("compiler_rt/shift.zig").__ashrdi3; + @export(__ashrdi3, .{ .name = "__ashrdi3", .linkage = linkage }); + const __ashrti3 = @import("compiler_rt/shift.zig").__ashrti3; + @export(__ashrti3, .{ .name = "__ashrti3", .linkage = linkage }); + const __lshrdi3 = @import("compiler_rt/shift.zig").__lshrdi3; + @export(__lshrdi3, .{ .name = "__lshrdi3", .linkage = linkage }); + const __lshrti3 = @import("compiler_rt/shift.zig").__lshrti3; + @export(__lshrti3, .{ .name = "__lshrti3", .linkage = linkage }); + + const __floatsidf = @import("compiler_rt/floatsiXf.zig").__floatsidf; + @export(__floatsidf, .{ .name = "__floatsidf", .linkage = linkage }); + const __floatsisf = @import("compiler_rt/floatsiXf.zig").__floatsisf; + @export(__floatsisf, .{ .name = "__floatsisf", .linkage = linkage }); + const __floatdidf = @import("compiler_rt/floatdidf.zig").__floatdidf; + @export(__floatdidf, .{ .name = "__floatdidf", .linkage = linkage }); + const __floatsitf = @import("compiler_rt/floatsiXf.zig").__floatsitf; + @export(__floatsitf, .{ .name = "__floatsitf", .linkage = linkage }); + + const __floatunsisf = 
@import("compiler_rt/floatunsisf.zig").__floatunsisf; + @export(__floatunsisf, .{ .name = "__floatunsisf", .linkage = linkage }); + const __floatundisf = @import("compiler_rt/floatundisf.zig").__floatundisf; + @export(__floatundisf, .{ .name = "__floatundisf", .linkage = linkage }); + const __floatunsidf = @import("compiler_rt/floatunsidf.zig").__floatunsidf; + @export(__floatunsidf, .{ .name = "__floatunsidf", .linkage = linkage }); + const __floatundidf = @import("compiler_rt/floatundidf.zig").__floatundidf; + @export(__floatundidf, .{ .name = "__floatundidf", .linkage = linkage }); + + const __floatditf = @import("compiler_rt/floatditf.zig").__floatditf; + @export(__floatditf, .{ .name = "__floatditf", .linkage = linkage }); + const __floattitf = @import("compiler_rt/floattitf.zig").__floattitf; + @export(__floattitf, .{ .name = "__floattitf", .linkage = linkage }); + const __floattidf = @import("compiler_rt/floattidf.zig").__floattidf; + @export(__floattidf, .{ .name = "__floattidf", .linkage = linkage }); + const __floattisf = @import("compiler_rt/floatXisf.zig").__floattisf; + @export(__floattisf, .{ .name = "__floattisf", .linkage = linkage }); + const __floatdisf = @import("compiler_rt/floatXisf.zig").__floatdisf; + @export(__floatdisf, .{ .name = "__floatdisf", .linkage = linkage }); + + const __floatunditf = @import("compiler_rt/floatunditf.zig").__floatunditf; + @export(__floatunditf, .{ .name = "__floatunditf", .linkage = linkage }); + const __floatunsitf = @import("compiler_rt/floatunsitf.zig").__floatunsitf; + @export(__floatunsitf, .{ .name = "__floatunsitf", .linkage = linkage }); + + const __floatuntitf = @import("compiler_rt/floatuntitf.zig").__floatuntitf; + @export(__floatuntitf, .{ .name = "__floatuntitf", .linkage = linkage }); + const __floatuntidf = @import("compiler_rt/floatuntidf.zig").__floatuntidf; + @export(__floatuntidf, .{ .name = "__floatuntidf", .linkage = linkage }); + const __floatuntisf = @import("compiler_rt/floatuntisf.zig").__floatuntisf; + @export(__floatuntisf, .{ .name = "__floatuntisf", .linkage = linkage }); - if (arch.isSPARC()) { - // SPARC systems use a different naming scheme - const _Qp_add = @import("compiler_rt/sparc.zig")._Qp_add; - @export(_Qp_add, .{ .name = "_Qp_add", .linkage = linkage }); - const _Qp_div = @import("compiler_rt/sparc.zig")._Qp_div; - @export(_Qp_div, .{ .name = "_Qp_div", .linkage = linkage }); - const _Qp_mul = @import("compiler_rt/sparc.zig")._Qp_mul; - @export(_Qp_mul, .{ .name = "_Qp_mul", .linkage = linkage }); - const _Qp_sub = @import("compiler_rt/sparc.zig")._Qp_sub; - @export(_Qp_sub, .{ .name = "_Qp_sub", .linkage = linkage }); - - const _Qp_cmp = @import("compiler_rt/sparc.zig")._Qp_cmp; - @export(_Qp_cmp, .{ .name = "_Qp_cmp", .linkage = linkage }); - const _Qp_feq = @import("compiler_rt/sparc.zig")._Qp_feq; - @export(_Qp_feq, .{ .name = "_Qp_feq", .linkage = linkage }); - const _Qp_fne = @import("compiler_rt/sparc.zig")._Qp_fne; - @export(_Qp_fne, .{ .name = "_Qp_fne", .linkage = linkage }); - const _Qp_flt = @import("compiler_rt/sparc.zig")._Qp_flt; - @export(_Qp_flt, .{ .name = "_Qp_flt", .linkage = linkage }); - const _Qp_fle = @import("compiler_rt/sparc.zig")._Qp_fle; - @export(_Qp_fle, .{ .name = "_Qp_fle", .linkage = linkage }); - const _Qp_fgt = @import("compiler_rt/sparc.zig")._Qp_fgt; - @export(_Qp_fgt, .{ .name = "_Qp_fgt", .linkage = linkage }); - const _Qp_fge = @import("compiler_rt/sparc.zig")._Qp_fge; - @export(_Qp_fge, .{ .name = "_Qp_fge", .linkage = linkage }); - - const _Qp_itoq = 
@import("compiler_rt/sparc.zig")._Qp_itoq; - @export(_Qp_itoq, .{ .name = "_Qp_itoq", .linkage = linkage }); - const _Qp_uitoq = @import("compiler_rt/sparc.zig")._Qp_uitoq; - @export(_Qp_uitoq, .{ .name = "_Qp_uitoq", .linkage = linkage }); - const _Qp_xtoq = @import("compiler_rt/sparc.zig")._Qp_xtoq; - @export(_Qp_xtoq, .{ .name = "_Qp_xtoq", .linkage = linkage }); - const _Qp_uxtoq = @import("compiler_rt/sparc.zig")._Qp_uxtoq; - @export(_Qp_uxtoq, .{ .name = "_Qp_uxtoq", .linkage = linkage }); - const _Qp_stoq = @import("compiler_rt/sparc.zig")._Qp_stoq; - @export(_Qp_stoq, .{ .name = "_Qp_stoq", .linkage = linkage }); - const _Qp_dtoq = @import("compiler_rt/sparc.zig")._Qp_dtoq; - @export(_Qp_dtoq, .{ .name = "_Qp_dtoq", .linkage = linkage }); - const _Qp_qtoi = @import("compiler_rt/sparc.zig")._Qp_qtoi; - @export(_Qp_qtoi, .{ .name = "_Qp_qtoi", .linkage = linkage }); - const _Qp_qtoui = @import("compiler_rt/sparc.zig")._Qp_qtoui; - @export(_Qp_qtoui, .{ .name = "_Qp_qtoui", .linkage = linkage }); - const _Qp_qtox = @import("compiler_rt/sparc.zig")._Qp_qtox; - @export(_Qp_qtox, .{ .name = "_Qp_qtox", .linkage = linkage }); - const _Qp_qtoux = @import("compiler_rt/sparc.zig")._Qp_qtoux; - @export(_Qp_qtoux, .{ .name = "_Qp_qtoux", .linkage = linkage }); - const _Qp_qtos = @import("compiler_rt/sparc.zig")._Qp_qtos; - @export(_Qp_qtos, .{ .name = "_Qp_qtos", .linkage = linkage }); - const _Qp_qtod = @import("compiler_rt/sparc.zig")._Qp_qtod; - @export(_Qp_qtod, .{ .name = "_Qp_qtod", .linkage = linkage }); - } + const __truncsfhf2 = @import("compiler_rt/truncXfYf2.zig").__truncsfhf2; + @export(__truncsfhf2, .{ .name = "__truncsfhf2", .linkage = linkage }); + const __truncdfhf2 = @import("compiler_rt/truncXfYf2.zig").__truncdfhf2; + @export(__truncdfhf2, .{ .name = "__truncdfhf2", .linkage = linkage }); + const __trunctfhf2 = @import("compiler_rt/truncXfYf2.zig").__trunctfhf2; + @export(__trunctfhf2, .{ .name = "__trunctfhf2", .linkage = linkage }); + const __trunctfdf2 = @import("compiler_rt/truncXfYf2.zig").__trunctfdf2; + @export(__trunctfdf2, .{ .name = "__trunctfdf2", .linkage = linkage }); + const __trunctfsf2 = @import("compiler_rt/truncXfYf2.zig").__trunctfsf2; + @export(__trunctfsf2, .{ .name = "__trunctfsf2", .linkage = linkage }); + + const __truncdfsf2 = @import("compiler_rt/truncXfYf2.zig").__truncdfsf2; + @export(__truncdfsf2, .{ .name = "__truncdfsf2", .linkage = linkage }); + + const __extendsfdf2 = @import("compiler_rt/extendXfYf2.zig").__extendsfdf2; + @export(__extendsfdf2, .{ .name = "__extendsfdf2", .linkage = linkage }); + + const __fixunssfsi = @import("compiler_rt/fixunssfsi.zig").__fixunssfsi; + @export(__fixunssfsi, .{ .name = "__fixunssfsi", .linkage = linkage }); + const __fixunssfdi = @import("compiler_rt/fixunssfdi.zig").__fixunssfdi; + @export(__fixunssfdi, .{ .name = "__fixunssfdi", .linkage = linkage }); + const __fixunssfti = @import("compiler_rt/fixunssfti.zig").__fixunssfti; + @export(__fixunssfti, .{ .name = "__fixunssfti", .linkage = linkage }); + + const __fixunsdfsi = @import("compiler_rt/fixunsdfsi.zig").__fixunsdfsi; + @export(__fixunsdfsi, .{ .name = "__fixunsdfsi", .linkage = linkage }); + const __fixunsdfdi = @import("compiler_rt/fixunsdfdi.zig").__fixunsdfdi; + @export(__fixunsdfdi, .{ .name = "__fixunsdfdi", .linkage = linkage }); + const __fixunsdfti = @import("compiler_rt/fixunsdfti.zig").__fixunsdfti; + @export(__fixunsdfti, .{ .name = "__fixunsdfti", .linkage = linkage }); + + const __fixunstfsi = 
@import("compiler_rt/fixunstfsi.zig").__fixunstfsi; + @export(__fixunstfsi, .{ .name = "__fixunstfsi", .linkage = linkage }); + const __fixunstfdi = @import("compiler_rt/fixunstfdi.zig").__fixunstfdi; + @export(__fixunstfdi, .{ .name = "__fixunstfdi", .linkage = linkage }); + const __fixunstfti = @import("compiler_rt/fixunstfti.zig").__fixunstfti; + @export(__fixunstfti, .{ .name = "__fixunstfti", .linkage = linkage }); + + const __fixdfdi = @import("compiler_rt/fixdfdi.zig").__fixdfdi; + @export(__fixdfdi, .{ .name = "__fixdfdi", .linkage = linkage }); + const __fixdfsi = @import("compiler_rt/fixdfsi.zig").__fixdfsi; + @export(__fixdfsi, .{ .name = "__fixdfsi", .linkage = linkage }); + const __fixdfti = @import("compiler_rt/fixdfti.zig").__fixdfti; + @export(__fixdfti, .{ .name = "__fixdfti", .linkage = linkage }); + const __fixsfdi = @import("compiler_rt/fixsfdi.zig").__fixsfdi; + @export(__fixsfdi, .{ .name = "__fixsfdi", .linkage = linkage }); + const __fixsfsi = @import("compiler_rt/fixsfsi.zig").__fixsfsi; + @export(__fixsfsi, .{ .name = "__fixsfsi", .linkage = linkage }); + const __fixsfti = @import("compiler_rt/fixsfti.zig").__fixsfti; + @export(__fixsfti, .{ .name = "__fixsfti", .linkage = linkage }); + const __fixtfdi = @import("compiler_rt/fixtfdi.zig").__fixtfdi; + @export(__fixtfdi, .{ .name = "__fixtfdi", .linkage = linkage }); + const __fixtfsi = @import("compiler_rt/fixtfsi.zig").__fixtfsi; + @export(__fixtfsi, .{ .name = "__fixtfsi", .linkage = linkage }); + const __fixtfti = @import("compiler_rt/fixtfti.zig").__fixtfti; + @export(__fixtfti, .{ .name = "__fixtfti", .linkage = linkage }); + + const __udivmoddi4 = @import("compiler_rt/int.zig").__udivmoddi4; + @export(__udivmoddi4, .{ .name = "__udivmoddi4", .linkage = linkage }); + const __popcountdi2 = @import("compiler_rt/popcountdi2.zig").__popcountdi2; + @export(__popcountdi2, .{ .name = "__popcountdi2", .linkage = linkage }); + + const __mulsi3 = @import("compiler_rt/int.zig").__mulsi3; + @export(__mulsi3, .{ .name = "__mulsi3", .linkage = linkage }); + const __muldi3 = @import("compiler_rt/muldi3.zig").__muldi3; + @export(__muldi3, .{ .name = "__muldi3", .linkage = linkage }); + const __divmoddi4 = @import("compiler_rt/int.zig").__divmoddi4; + @export(__divmoddi4, .{ .name = "__divmoddi4", .linkage = linkage }); + const __divsi3 = @import("compiler_rt/int.zig").__divsi3; + @export(__divsi3, .{ .name = "__divsi3", .linkage = linkage }); + const __divdi3 = @import("compiler_rt/int.zig").__divdi3; + @export(__divdi3, .{ .name = "__divdi3", .linkage = linkage }); + const __udivsi3 = @import("compiler_rt/int.zig").__udivsi3; + @export(__udivsi3, .{ .name = "__udivsi3", .linkage = linkage }); + const __udivdi3 = @import("compiler_rt/int.zig").__udivdi3; + @export(__udivdi3, .{ .name = "__udivdi3", .linkage = linkage }); + const __modsi3 = @import("compiler_rt/int.zig").__modsi3; + @export(__modsi3, .{ .name = "__modsi3", .linkage = linkage }); + const __moddi3 = @import("compiler_rt/int.zig").__moddi3; + @export(__moddi3, .{ .name = "__moddi3", .linkage = linkage }); + const __umodsi3 = @import("compiler_rt/int.zig").__umodsi3; + @export(__umodsi3, .{ .name = "__umodsi3", .linkage = linkage }); + const __umoddi3 = @import("compiler_rt/int.zig").__umoddi3; + @export(__umoddi3, .{ .name = "__umoddi3", .linkage = linkage }); + const __divmodsi4 = @import("compiler_rt/int.zig").__divmodsi4; + @export(__divmodsi4, .{ .name = "__divmodsi4", .linkage = linkage }); + const __udivmodsi4 = @import("compiler_rt/int.zig").__udivmodsi4; 
+ @export(__udivmodsi4, .{ .name = "__udivmodsi4", .linkage = linkage }); + + const __negsf2 = @import("compiler_rt/negXf2.zig").__negsf2; + @export(__negsf2, .{ .name = "__negsf2", .linkage = linkage }); + const __negdf2 = @import("compiler_rt/negXf2.zig").__negdf2; + @export(__negdf2, .{ .name = "__negdf2", .linkage = linkage }); + + const __clzsi2 = @import("compiler_rt/count0bits.zig").__clzsi2; + @export(__clzsi2, .{ .name = "__clzsi2", .linkage = linkage }); + const __clzdi2 = @import("compiler_rt/count0bits.zig").__clzdi2; + @export(__clzdi2, .{ .name = "__clzdi2", .linkage = linkage }); + const __clzti2 = @import("compiler_rt/count0bits.zig").__clzti2; + @export(__clzti2, .{ .name = "__clzti2", .linkage = linkage }); + + if (builtin.link_libc and os_tag == .openbsd) { + const __emutls_get_address = @import("compiler_rt/emutls.zig").__emutls_get_address; + @export(__emutls_get_address, .{ .name = "__emutls_get_address", .linkage = linkage }); + } - if ((arch == .powerpc or arch.isPPC64()) and !is_test) { - @export(__addtf3, .{ .name = "__addkf3", .linkage = linkage }); - @export(__subtf3, .{ .name = "__subkf3", .linkage = linkage }); - @export(__multf3, .{ .name = "__mulkf3", .linkage = linkage }); - @export(__divtf3, .{ .name = "__divkf3", .linkage = linkage }); - @export(__extendsftf2, .{ .name = "__extendsfkf2", .linkage = linkage }); - @export(__extenddftf2, .{ .name = "__extenddfkf2", .linkage = linkage }); - @export(__trunctfsf2, .{ .name = "__trunckfsf2", .linkage = linkage }); - @export(__trunctfdf2, .{ .name = "__trunckfdf2", .linkage = linkage }); - @export(__fixtfdi, .{ .name = "__fixkfdi", .linkage = linkage }); - @export(__fixtfsi, .{ .name = "__fixkfsi", .linkage = linkage }); - @export(__fixunstfsi, .{ .name = "__fixunskfsi", .linkage = linkage }); - @export(__fixunstfdi, .{ .name = "__fixunskfdi", .linkage = linkage }); - @export(__floatsitf, .{ .name = "__floatsikf", .linkage = linkage }); - @export(__floatditf, .{ .name = "__floatdikf", .linkage = linkage }); - @export(__floatunditf, .{ .name = "__floatundikf", .linkage = linkage }); - @export(__floatunsitf, .{ .name = "__floatunsikf", .linkage = linkage }); - - @export(__letf2, .{ .name = "__eqkf2", .linkage = linkage }); - @export(__letf2, .{ .name = "__nekf2", .linkage = linkage }); - @export(__getf2, .{ .name = "__gekf2", .linkage = linkage }); - @export(__letf2, .{ .name = "__ltkf2", .linkage = linkage }); - @export(__letf2, .{ .name = "__lekf2", .linkage = linkage }); - @export(__getf2, .{ .name = "__gtkf2", .linkage = linkage }); - @export(__unordtf2, .{ .name = "__unordkf2", .linkage = linkage }); - } + if ((arch.isARM() or arch.isThumb()) and !is_test) { + const __aeabi_unwind_cpp_pr0 = @import("compiler_rt/arm.zig").__aeabi_unwind_cpp_pr0; + @export(__aeabi_unwind_cpp_pr0, .{ .name = "__aeabi_unwind_cpp_pr0", .linkage = linkage }); + const __aeabi_unwind_cpp_pr1 = @import("compiler_rt/arm.zig").__aeabi_unwind_cpp_pr1; + @export(__aeabi_unwind_cpp_pr1, .{ .name = "__aeabi_unwind_cpp_pr1", .linkage = linkage }); + const __aeabi_unwind_cpp_pr2 = @import("compiler_rt/arm.zig").__aeabi_unwind_cpp_pr2; + @export(__aeabi_unwind_cpp_pr2, .{ .name = "__aeabi_unwind_cpp_pr2", .linkage = linkage }); + + @export(__muldi3, .{ .name = "__aeabi_lmul", .linkage = linkage }); + + const __aeabi_ldivmod = @import("compiler_rt/arm.zig").__aeabi_ldivmod; + @export(__aeabi_ldivmod, .{ .name = "__aeabi_ldivmod", .linkage = linkage }); + const __aeabi_uldivmod = @import("compiler_rt/arm.zig").__aeabi_uldivmod; + 
@export(__aeabi_uldivmod, .{ .name = "__aeabi_uldivmod", .linkage = linkage }); + + @export(__divsi3, .{ .name = "__aeabi_idiv", .linkage = linkage }); + const __aeabi_idivmod = @import("compiler_rt/arm.zig").__aeabi_idivmod; + @export(__aeabi_idivmod, .{ .name = "__aeabi_idivmod", .linkage = linkage }); + @export(__udivsi3, .{ .name = "__aeabi_uidiv", .linkage = linkage }); + const __aeabi_uidivmod = @import("compiler_rt/arm.zig").__aeabi_uidivmod; + @export(__aeabi_uidivmod, .{ .name = "__aeabi_uidivmod", .linkage = linkage }); + + const __aeabi_memcpy = @import("compiler_rt/arm.zig").__aeabi_memcpy; + @export(__aeabi_memcpy, .{ .name = "__aeabi_memcpy", .linkage = linkage }); + @export(__aeabi_memcpy, .{ .name = "__aeabi_memcpy4", .linkage = linkage }); + @export(__aeabi_memcpy, .{ .name = "__aeabi_memcpy8", .linkage = linkage }); + + const __aeabi_memmove = @import("compiler_rt/arm.zig").__aeabi_memmove; + @export(__aeabi_memmove, .{ .name = "__aeabi_memmove", .linkage = linkage }); + @export(__aeabi_memmove, .{ .name = "__aeabi_memmove4", .linkage = linkage }); + @export(__aeabi_memmove, .{ .name = "__aeabi_memmove8", .linkage = linkage }); + + const __aeabi_memset = @import("compiler_rt/arm.zig").__aeabi_memset; + @export(__aeabi_memset, .{ .name = "__aeabi_memset", .linkage = linkage }); + @export(__aeabi_memset, .{ .name = "__aeabi_memset4", .linkage = linkage }); + @export(__aeabi_memset, .{ .name = "__aeabi_memset8", .linkage = linkage }); + + const __aeabi_memclr = @import("compiler_rt/arm.zig").__aeabi_memclr; + @export(__aeabi_memclr, .{ .name = "__aeabi_memclr", .linkage = linkage }); + @export(__aeabi_memclr, .{ .name = "__aeabi_memclr4", .linkage = linkage }); + @export(__aeabi_memclr, .{ .name = "__aeabi_memclr8", .linkage = linkage }); + + if (os_tag == .linux) { + const __aeabi_read_tp = @import("compiler_rt/arm.zig").__aeabi_read_tp; + @export(__aeabi_read_tp, .{ .name = "__aeabi_read_tp", .linkage = linkage }); + } + + const __aeabi_f2d = @import("compiler_rt/extendXfYf2.zig").__aeabi_f2d; + @export(__aeabi_f2d, .{ .name = "__aeabi_f2d", .linkage = linkage }); + const __aeabi_i2d = @import("compiler_rt/floatsiXf.zig").__aeabi_i2d; + @export(__aeabi_i2d, .{ .name = "__aeabi_i2d", .linkage = linkage }); + const __aeabi_l2d = @import("compiler_rt/floatdidf.zig").__aeabi_l2d; + @export(__aeabi_l2d, .{ .name = "__aeabi_l2d", .linkage = linkage }); + const __aeabi_l2f = @import("compiler_rt/floatXisf.zig").__aeabi_l2f; + @export(__aeabi_l2f, .{ .name = "__aeabi_l2f", .linkage = linkage }); + const __aeabi_ui2d = @import("compiler_rt/floatunsidf.zig").__aeabi_ui2d; + @export(__aeabi_ui2d, .{ .name = "__aeabi_ui2d", .linkage = linkage }); + const __aeabi_ul2d = @import("compiler_rt/floatundidf.zig").__aeabi_ul2d; + @export(__aeabi_ul2d, .{ .name = "__aeabi_ul2d", .linkage = linkage }); + const __aeabi_ui2f = @import("compiler_rt/floatunsisf.zig").__aeabi_ui2f; + @export(__aeabi_ui2f, .{ .name = "__aeabi_ui2f", .linkage = linkage }); + const __aeabi_ul2f = @import("compiler_rt/floatundisf.zig").__aeabi_ul2f; + @export(__aeabi_ul2f, .{ .name = "__aeabi_ul2f", .linkage = linkage }); + + const __aeabi_fneg = @import("compiler_rt/negXf2.zig").__aeabi_fneg; + @export(__aeabi_fneg, .{ .name = "__aeabi_fneg", .linkage = linkage }); + const __aeabi_dneg = @import("compiler_rt/negXf2.zig").__aeabi_dneg; + @export(__aeabi_dneg, .{ .name = "__aeabi_dneg", .linkage = linkage }); + + const __aeabi_fmul = @import("compiler_rt/mulXf3.zig").__aeabi_fmul; + @export(__aeabi_fmul, .{ .name = 
"__aeabi_fmul", .linkage = linkage }); + const __aeabi_dmul = @import("compiler_rt/mulXf3.zig").__aeabi_dmul; + @export(__aeabi_dmul, .{ .name = "__aeabi_dmul", .linkage = linkage }); + + const __aeabi_d2h = @import("compiler_rt/truncXfYf2.zig").__aeabi_d2h; + @export(__aeabi_d2h, .{ .name = "__aeabi_d2h", .linkage = linkage }); + + const __aeabi_f2ulz = @import("compiler_rt/fixunssfdi.zig").__aeabi_f2ulz; + @export(__aeabi_f2ulz, .{ .name = "__aeabi_f2ulz", .linkage = linkage }); + const __aeabi_d2ulz = @import("compiler_rt/fixunsdfdi.zig").__aeabi_d2ulz; + @export(__aeabi_d2ulz, .{ .name = "__aeabi_d2ulz", .linkage = linkage }); + + const __aeabi_f2lz = @import("compiler_rt/fixsfdi.zig").__aeabi_f2lz; + @export(__aeabi_f2lz, .{ .name = "__aeabi_f2lz", .linkage = linkage }); + const __aeabi_d2lz = @import("compiler_rt/fixdfdi.zig").__aeabi_d2lz; + @export(__aeabi_d2lz, .{ .name = "__aeabi_d2lz", .linkage = linkage }); + + const __aeabi_d2uiz = @import("compiler_rt/fixunsdfsi.zig").__aeabi_d2uiz; + @export(__aeabi_d2uiz, .{ .name = "__aeabi_d2uiz", .linkage = linkage }); + + const __aeabi_h2f = @import("compiler_rt/extendXfYf2.zig").__aeabi_h2f; + @export(__aeabi_h2f, .{ .name = "__aeabi_h2f", .linkage = linkage }); + const __aeabi_f2h = @import("compiler_rt/truncXfYf2.zig").__aeabi_f2h; + @export(__aeabi_f2h, .{ .name = "__aeabi_f2h", .linkage = linkage }); + + const __aeabi_i2f = @import("compiler_rt/floatsiXf.zig").__aeabi_i2f; + @export(__aeabi_i2f, .{ .name = "__aeabi_i2f", .linkage = linkage }); + const __aeabi_d2f = @import("compiler_rt/truncXfYf2.zig").__aeabi_d2f; + @export(__aeabi_d2f, .{ .name = "__aeabi_d2f", .linkage = linkage }); + + const __aeabi_fadd = @import("compiler_rt/addXf3.zig").__aeabi_fadd; + @export(__aeabi_fadd, .{ .name = "__aeabi_fadd", .linkage = linkage }); + const __aeabi_dadd = @import("compiler_rt/addXf3.zig").__aeabi_dadd; + @export(__aeabi_dadd, .{ .name = "__aeabi_dadd", .linkage = linkage }); + const __aeabi_fsub = @import("compiler_rt/addXf3.zig").__aeabi_fsub; + @export(__aeabi_fsub, .{ .name = "__aeabi_fsub", .linkage = linkage }); + const __aeabi_dsub = @import("compiler_rt/addXf3.zig").__aeabi_dsub; + @export(__aeabi_dsub, .{ .name = "__aeabi_dsub", .linkage = linkage }); + + const __aeabi_f2uiz = @import("compiler_rt/fixunssfsi.zig").__aeabi_f2uiz; + @export(__aeabi_f2uiz, .{ .name = "__aeabi_f2uiz", .linkage = linkage }); + + const __aeabi_f2iz = @import("compiler_rt/fixsfsi.zig").__aeabi_f2iz; + @export(__aeabi_f2iz, .{ .name = "__aeabi_f2iz", .linkage = linkage }); + const __aeabi_d2iz = @import("compiler_rt/fixdfsi.zig").__aeabi_d2iz; + @export(__aeabi_d2iz, .{ .name = "__aeabi_d2iz", .linkage = linkage }); + + const __aeabi_fdiv = @import("compiler_rt/divsf3.zig").__aeabi_fdiv; + @export(__aeabi_fdiv, .{ .name = "__aeabi_fdiv", .linkage = linkage }); + const __aeabi_ddiv = @import("compiler_rt/divdf3.zig").__aeabi_ddiv; + @export(__aeabi_ddiv, .{ .name = "__aeabi_ddiv", .linkage = linkage }); + + const __aeabi_llsl = @import("compiler_rt/shift.zig").__aeabi_llsl; + @export(__aeabi_llsl, .{ .name = "__aeabi_llsl", .linkage = linkage }); + const __aeabi_lasr = @import("compiler_rt/shift.zig").__aeabi_lasr; + @export(__aeabi_lasr, .{ .name = "__aeabi_lasr", .linkage = linkage }); + const __aeabi_llsr = @import("compiler_rt/shift.zig").__aeabi_llsr; + @export(__aeabi_llsr, .{ .name = "__aeabi_llsr", .linkage = linkage }); + + const __aeabi_fcmpeq = @import("compiler_rt/compareXf2.zig").__aeabi_fcmpeq; + @export(__aeabi_fcmpeq, .{ .name = 
"__aeabi_fcmpeq", .linkage = linkage }); + const __aeabi_fcmplt = @import("compiler_rt/compareXf2.zig").__aeabi_fcmplt; + @export(__aeabi_fcmplt, .{ .name = "__aeabi_fcmplt", .linkage = linkage }); + const __aeabi_fcmple = @import("compiler_rt/compareXf2.zig").__aeabi_fcmple; + @export(__aeabi_fcmple, .{ .name = "__aeabi_fcmple", .linkage = linkage }); + const __aeabi_fcmpge = @import("compiler_rt/compareXf2.zig").__aeabi_fcmpge; + @export(__aeabi_fcmpge, .{ .name = "__aeabi_fcmpge", .linkage = linkage }); + const __aeabi_fcmpgt = @import("compiler_rt/compareXf2.zig").__aeabi_fcmpgt; + @export(__aeabi_fcmpgt, .{ .name = "__aeabi_fcmpgt", .linkage = linkage }); + const __aeabi_fcmpun = @import("compiler_rt/compareXf2.zig").__aeabi_fcmpun; + @export(__aeabi_fcmpun, .{ .name = "__aeabi_fcmpun", .linkage = linkage }); + + const __aeabi_dcmpeq = @import("compiler_rt/compareXf2.zig").__aeabi_dcmpeq; + @export(__aeabi_dcmpeq, .{ .name = "__aeabi_dcmpeq", .linkage = linkage }); + const __aeabi_dcmplt = @import("compiler_rt/compareXf2.zig").__aeabi_dcmplt; + @export(__aeabi_dcmplt, .{ .name = "__aeabi_dcmplt", .linkage = linkage }); + const __aeabi_dcmple = @import("compiler_rt/compareXf2.zig").__aeabi_dcmple; + @export(__aeabi_dcmple, .{ .name = "__aeabi_dcmple", .linkage = linkage }); + const __aeabi_dcmpge = @import("compiler_rt/compareXf2.zig").__aeabi_dcmpge; + @export(__aeabi_dcmpge, .{ .name = "__aeabi_dcmpge", .linkage = linkage }); + const __aeabi_dcmpgt = @import("compiler_rt/compareXf2.zig").__aeabi_dcmpgt; + @export(__aeabi_dcmpgt, .{ .name = "__aeabi_dcmpgt", .linkage = linkage }); + const __aeabi_dcmpun = @import("compiler_rt/compareXf2.zig").__aeabi_dcmpun; + @export(__aeabi_dcmpun, .{ .name = "__aeabi_dcmpun", .linkage = linkage }); + } - if (builtin.os.tag == .windows) { - // Default stack-probe functions emitted by LLVM - if (is_mingw) { - const _chkstk = @import("compiler_rt/stack_probe.zig")._chkstk; - @export(_chkstk, .{ .name = "_alloca", .linkage = strong_linkage }); - const ___chkstk_ms = @import("compiler_rt/stack_probe.zig").___chkstk_ms; - @export(___chkstk_ms, .{ .name = "___chkstk_ms", .linkage = strong_linkage }); - } else if (!builtin.link_libc) { - // This symbols are otherwise exported by MSVCRT.lib - const _chkstk = @import("compiler_rt/stack_probe.zig")._chkstk; - @export(_chkstk, .{ .name = "_chkstk", .linkage = strong_linkage }); - const __chkstk = @import("compiler_rt/stack_probe.zig").__chkstk; - @export(__chkstk, .{ .name = "__chkstk", .linkage = strong_linkage }); + if (arch == .i386 and abi == .msvc) { + // Don't let LLVM apply the stdcall name mangling on those MSVC builtins + const _alldiv = @import("compiler_rt/aulldiv.zig")._alldiv; + @export(_alldiv, .{ .name = "\x01__alldiv", .linkage = strong_linkage }); + const _aulldiv = @import("compiler_rt/aulldiv.zig")._aulldiv; + @export(_aulldiv, .{ .name = "\x01__aulldiv", .linkage = strong_linkage }); + const _allrem = @import("compiler_rt/aullrem.zig")._allrem; + @export(_allrem, .{ .name = "\x01__allrem", .linkage = strong_linkage }); + const _aullrem = @import("compiler_rt/aullrem.zig")._aullrem; + @export(_aullrem, .{ .name = "\x01__aullrem", .linkage = strong_linkage }); } - switch (arch) { - .i386 => { - const __divti3 = @import("compiler_rt/divti3.zig").__divti3; - @export(__divti3, .{ .name = "__divti3", .linkage = linkage }); + if (arch.isSPARC()) { + // SPARC systems use a different naming scheme + const _Qp_add = @import("compiler_rt/sparc.zig")._Qp_add; + @export(_Qp_add, .{ .name = "_Qp_add", 
.linkage = linkage }); + const _Qp_div = @import("compiler_rt/sparc.zig")._Qp_div; + @export(_Qp_div, .{ .name = "_Qp_div", .linkage = linkage }); + const _Qp_mul = @import("compiler_rt/sparc.zig")._Qp_mul; + @export(_Qp_mul, .{ .name = "_Qp_mul", .linkage = linkage }); + const _Qp_sub = @import("compiler_rt/sparc.zig")._Qp_sub; + @export(_Qp_sub, .{ .name = "_Qp_sub", .linkage = linkage }); + + const _Qp_cmp = @import("compiler_rt/sparc.zig")._Qp_cmp; + @export(_Qp_cmp, .{ .name = "_Qp_cmp", .linkage = linkage }); + const _Qp_feq = @import("compiler_rt/sparc.zig")._Qp_feq; + @export(_Qp_feq, .{ .name = "_Qp_feq", .linkage = linkage }); + const _Qp_fne = @import("compiler_rt/sparc.zig")._Qp_fne; + @export(_Qp_fne, .{ .name = "_Qp_fne", .linkage = linkage }); + const _Qp_flt = @import("compiler_rt/sparc.zig")._Qp_flt; + @export(_Qp_flt, .{ .name = "_Qp_flt", .linkage = linkage }); + const _Qp_fle = @import("compiler_rt/sparc.zig")._Qp_fle; + @export(_Qp_fle, .{ .name = "_Qp_fle", .linkage = linkage }); + const _Qp_fgt = @import("compiler_rt/sparc.zig")._Qp_fgt; + @export(_Qp_fgt, .{ .name = "_Qp_fgt", .linkage = linkage }); + const _Qp_fge = @import("compiler_rt/sparc.zig")._Qp_fge; + @export(_Qp_fge, .{ .name = "_Qp_fge", .linkage = linkage }); + + const _Qp_itoq = @import("compiler_rt/sparc.zig")._Qp_itoq; + @export(_Qp_itoq, .{ .name = "_Qp_itoq", .linkage = linkage }); + const _Qp_uitoq = @import("compiler_rt/sparc.zig")._Qp_uitoq; + @export(_Qp_uitoq, .{ .name = "_Qp_uitoq", .linkage = linkage }); + const _Qp_xtoq = @import("compiler_rt/sparc.zig")._Qp_xtoq; + @export(_Qp_xtoq, .{ .name = "_Qp_xtoq", .linkage = linkage }); + const _Qp_uxtoq = @import("compiler_rt/sparc.zig")._Qp_uxtoq; + @export(_Qp_uxtoq, .{ .name = "_Qp_uxtoq", .linkage = linkage }); + const _Qp_stoq = @import("compiler_rt/sparc.zig")._Qp_stoq; + @export(_Qp_stoq, .{ .name = "_Qp_stoq", .linkage = linkage }); + const _Qp_dtoq = @import("compiler_rt/sparc.zig")._Qp_dtoq; + @export(_Qp_dtoq, .{ .name = "_Qp_dtoq", .linkage = linkage }); + const _Qp_qtoi = @import("compiler_rt/sparc.zig")._Qp_qtoi; + @export(_Qp_qtoi, .{ .name = "_Qp_qtoi", .linkage = linkage }); + const _Qp_qtoui = @import("compiler_rt/sparc.zig")._Qp_qtoui; + @export(_Qp_qtoui, .{ .name = "_Qp_qtoui", .linkage = linkage }); + const _Qp_qtox = @import("compiler_rt/sparc.zig")._Qp_qtox; + @export(_Qp_qtox, .{ .name = "_Qp_qtox", .linkage = linkage }); + const _Qp_qtoux = @import("compiler_rt/sparc.zig")._Qp_qtoux; + @export(_Qp_qtoux, .{ .name = "_Qp_qtoux", .linkage = linkage }); + const _Qp_qtos = @import("compiler_rt/sparc.zig")._Qp_qtos; + @export(_Qp_qtos, .{ .name = "_Qp_qtos", .linkage = linkage }); + const _Qp_qtod = @import("compiler_rt/sparc.zig")._Qp_qtod; + @export(_Qp_qtod, .{ .name = "_Qp_qtod", .linkage = linkage }); + } + + if ((arch == .powerpc or arch.isPPC64()) and !is_test) { + @export(__addtf3, .{ .name = "__addkf3", .linkage = linkage }); + @export(__subtf3, .{ .name = "__subkf3", .linkage = linkage }); + @export(__multf3, .{ .name = "__mulkf3", .linkage = linkage }); + @export(__divtf3, .{ .name = "__divkf3", .linkage = linkage }); + @export(__extendsftf2, .{ .name = "__extendsfkf2", .linkage = linkage }); + @export(__extenddftf2, .{ .name = "__extenddfkf2", .linkage = linkage }); + @export(__trunctfsf2, .{ .name = "__trunckfsf2", .linkage = linkage }); + @export(__trunctfdf2, .{ .name = "__trunckfdf2", .linkage = linkage }); + @export(__fixtfdi, .{ .name = "__fixkfdi", .linkage = linkage }); + @export(__fixtfsi, .{ .name = 
"__fixkfsi", .linkage = linkage }); + @export(__fixunstfsi, .{ .name = "__fixunskfsi", .linkage = linkage }); + @export(__fixunstfdi, .{ .name = "__fixunskfdi", .linkage = linkage }); + @export(__floatsitf, .{ .name = "__floatsikf", .linkage = linkage }); + @export(__floatditf, .{ .name = "__floatdikf", .linkage = linkage }); + @export(__floatunditf, .{ .name = "__floatundikf", .linkage = linkage }); + @export(__floatunsitf, .{ .name = "__floatunsikf", .linkage = linkage }); + + @export(__letf2, .{ .name = "__eqkf2", .linkage = linkage }); + @export(__letf2, .{ .name = "__nekf2", .linkage = linkage }); + @export(__getf2, .{ .name = "__gekf2", .linkage = linkage }); + @export(__letf2, .{ .name = "__ltkf2", .linkage = linkage }); + @export(__letf2, .{ .name = "__lekf2", .linkage = linkage }); + @export(__getf2, .{ .name = "__gtkf2", .linkage = linkage }); + @export(__unordtf2, .{ .name = "__unordkf2", .linkage = linkage }); + } + + if (builtin.os.tag == .windows) { + // Default stack-probe functions emitted by LLVM + if (is_mingw) { + const _chkstk = @import("compiler_rt/stack_probe.zig")._chkstk; + @export(_chkstk, .{ .name = "_alloca", .linkage = strong_linkage }); + const ___chkstk_ms = @import("compiler_rt/stack_probe.zig").___chkstk_ms; + @export(___chkstk_ms, .{ .name = "___chkstk_ms", .linkage = strong_linkage }); + } else if (!builtin.link_libc) { + // This symbols are otherwise exported by MSVCRT.lib + const _chkstk = @import("compiler_rt/stack_probe.zig")._chkstk; + @export(_chkstk, .{ .name = "_chkstk", .linkage = strong_linkage }); + const __chkstk = @import("compiler_rt/stack_probe.zig").__chkstk; + @export(__chkstk, .{ .name = "__chkstk", .linkage = strong_linkage }); + } + + switch (arch) { + .i386 => { + const __divti3 = @import("compiler_rt/divti3.zig").__divti3; + @export(__divti3, .{ .name = "__divti3", .linkage = linkage }); + const __modti3 = @import("compiler_rt/modti3.zig").__modti3; + @export(__modti3, .{ .name = "__modti3", .linkage = linkage }); + const __multi3 = @import("compiler_rt/multi3.zig").__multi3; + @export(__multi3, .{ .name = "__multi3", .linkage = linkage }); + const __udivti3 = @import("compiler_rt/udivti3.zig").__udivti3; + @export(__udivti3, .{ .name = "__udivti3", .linkage = linkage }); + const __udivmodti4 = @import("compiler_rt/udivmodti4.zig").__udivmodti4; + @export(__udivmodti4, .{ .name = "__udivmodti4", .linkage = linkage }); + const __umodti3 = @import("compiler_rt/umodti3.zig").__umodti3; + @export(__umodti3, .{ .name = "__umodti3", .linkage = linkage }); + }, + .x86_64 => { + // The "ti" functions must use Vector(2, u64) parameter types to adhere to the ABI + // that LLVM expects compiler-rt to have. 
+ const __divti3_windows_x86_64 = @import("compiler_rt/divti3.zig").__divti3_windows_x86_64; + @export(__divti3_windows_x86_64, .{ .name = "__divti3", .linkage = linkage }); + const __modti3_windows_x86_64 = @import("compiler_rt/modti3.zig").__modti3_windows_x86_64; + @export(__modti3_windows_x86_64, .{ .name = "__modti3", .linkage = linkage }); + const __multi3_windows_x86_64 = @import("compiler_rt/multi3.zig").__multi3_windows_x86_64; + @export(__multi3_windows_x86_64, .{ .name = "__multi3", .linkage = linkage }); + const __udivti3_windows_x86_64 = @import("compiler_rt/udivti3.zig").__udivti3_windows_x86_64; + @export(__udivti3_windows_x86_64, .{ .name = "__udivti3", .linkage = linkage }); + const __udivmodti4_windows_x86_64 = @import("compiler_rt/udivmodti4.zig").__udivmodti4_windows_x86_64; + @export(__udivmodti4_windows_x86_64, .{ .name = "__udivmodti4", .linkage = linkage }); + const __umodti3_windows_x86_64 = @import("compiler_rt/umodti3.zig").__umodti3_windows_x86_64; + @export(__umodti3_windows_x86_64, .{ .name = "__umodti3", .linkage = linkage }); + }, + else => {}, + } + if (arch.isAARCH64()) { + const __chkstk = @import("compiler_rt/stack_probe.zig").__chkstk; + @export(__chkstk, .{ .name = "__chkstk", .linkage = strong_linkage }); + const __divti3_windows = @import("compiler_rt/divti3.zig").__divti3; + @export(__divti3_windows, .{ .name = "__divti3", .linkage = linkage }); const __modti3 = @import("compiler_rt/modti3.zig").__modti3; @export(__modti3, .{ .name = "__modti3", .linkage = linkage }); - const __multi3 = @import("compiler_rt/multi3.zig").__multi3; - @export(__multi3, .{ .name = "__multi3", .linkage = linkage }); - const __udivti3 = @import("compiler_rt/udivti3.zig").__udivti3; - @export(__udivti3, .{ .name = "__udivti3", .linkage = linkage }); - const __udivmodti4 = @import("compiler_rt/udivmodti4.zig").__udivmodti4; - @export(__udivmodti4, .{ .name = "__udivmodti4", .linkage = linkage }); + const __udivti3_windows = @import("compiler_rt/udivti3.zig").__udivti3; + @export(__udivti3_windows, .{ .name = "__udivti3", .linkage = linkage }); const __umodti3 = @import("compiler_rt/umodti3.zig").__umodti3; @export(__umodti3, .{ .name = "__umodti3", .linkage = linkage }); - }, - .x86_64 => { - // The "ti" functions must use Vector(2, u64) parameter types to adhere to the ABI - // that LLVM expects compiler-rt to have. 
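(Editorial sketch, not part of the patch hunks: the comment above describes why the Windows x86_64 "ti" helpers take two-element u64 vector parameters; that is the ABI LLVM expects from compiler-rt on this target. The real definitions live in the compiler_rt/*.zig files imported here; the snippet below is only an illustrative shape with made-up names (v2u64, divti3_windows_shape) and the arithmetic inlined instead of calling the generic __divti3.)

    const v2u64 = @import("std").meta.Vector(2, u64);

    // Illustrative only: accept and return the vector ABI representation,
    // bitcasting to i128 to perform the actual signed division.
    fn divti3_windows_shape(a: v2u64, b: v2u64) callconv(.C) v2u64 {
        return @bitCast(v2u64, @divTrunc(@bitCast(i128, a), @bitCast(i128, b)));
    }
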
- const __divti3_windows_x86_64 = @import("compiler_rt/divti3.zig").__divti3_windows_x86_64; - @export(__divti3_windows_x86_64, .{ .name = "__divti3", .linkage = linkage }); - const __modti3_windows_x86_64 = @import("compiler_rt/modti3.zig").__modti3_windows_x86_64; - @export(__modti3_windows_x86_64, .{ .name = "__modti3", .linkage = linkage }); - const __multi3_windows_x86_64 = @import("compiler_rt/multi3.zig").__multi3_windows_x86_64; - @export(__multi3_windows_x86_64, .{ .name = "__multi3", .linkage = linkage }); - const __udivti3_windows_x86_64 = @import("compiler_rt/udivti3.zig").__udivti3_windows_x86_64; - @export(__udivti3_windows_x86_64, .{ .name = "__udivti3", .linkage = linkage }); - const __udivmodti4_windows_x86_64 = @import("compiler_rt/udivmodti4.zig").__udivmodti4_windows_x86_64; - @export(__udivmodti4_windows_x86_64, .{ .name = "__udivmodti4", .linkage = linkage }); - const __umodti3_windows_x86_64 = @import("compiler_rt/umodti3.zig").__umodti3_windows_x86_64; - @export(__umodti3_windows_x86_64, .{ .name = "__umodti3", .linkage = linkage }); - }, - else => {}, - } - if (arch.isAARCH64()) { - const __chkstk = @import("compiler_rt/stack_probe.zig").__chkstk; - @export(__chkstk, .{ .name = "__chkstk", .linkage = strong_linkage }); - const __divti3_windows = @import("compiler_rt/divti3.zig").__divti3; - @export(__divti3_windows, .{ .name = "__divti3", .linkage = linkage }); + } + } else { + const __divti3 = @import("compiler_rt/divti3.zig").__divti3; + @export(__divti3, .{ .name = "__divti3", .linkage = linkage }); const __modti3 = @import("compiler_rt/modti3.zig").__modti3; @export(__modti3, .{ .name = "__modti3", .linkage = linkage }); - const __udivti3_windows = @import("compiler_rt/udivti3.zig").__udivti3; - @export(__udivti3_windows, .{ .name = "__udivti3", .linkage = linkage }); + const __multi3 = @import("compiler_rt/multi3.zig").__multi3; + @export(__multi3, .{ .name = "__multi3", .linkage = linkage }); + const __udivti3 = @import("compiler_rt/udivti3.zig").__udivti3; + @export(__udivti3, .{ .name = "__udivti3", .linkage = linkage }); + const __udivmodti4 = @import("compiler_rt/udivmodti4.zig").__udivmodti4; + @export(__udivmodti4, .{ .name = "__udivmodti4", .linkage = linkage }); const __umodti3 = @import("compiler_rt/umodti3.zig").__umodti3; @export(__umodti3, .{ .name = "__umodti3", .linkage = linkage }); } - } else { - const __divti3 = @import("compiler_rt/divti3.zig").__divti3; - @export(__divti3, .{ .name = "__divti3", .linkage = linkage }); - const __modti3 = @import("compiler_rt/modti3.zig").__modti3; - @export(__modti3, .{ .name = "__modti3", .linkage = linkage }); - const __multi3 = @import("compiler_rt/multi3.zig").__multi3; - @export(__multi3, .{ .name = "__multi3", .linkage = linkage }); - const __udivti3 = @import("compiler_rt/udivti3.zig").__udivti3; - @export(__udivti3, .{ .name = "__udivti3", .linkage = linkage }); - const __udivmodti4 = @import("compiler_rt/udivmodti4.zig").__udivmodti4; - @export(__udivmodti4, .{ .name = "__udivmodti4", .linkage = linkage }); - const __umodti3 = @import("compiler_rt/umodti3.zig").__umodti3; - @export(__umodti3, .{ .name = "__umodti3", .linkage = linkage }); - } - const __muloti4 = @import("compiler_rt/muloti4.zig").__muloti4; - @export(__muloti4, .{ .name = "__muloti4", .linkage = linkage }); - const __mulodi4 = @import("compiler_rt/mulodi4.zig").__mulodi4; - @export(__mulodi4, .{ .name = "__mulodi4", .linkage = linkage }); + const __muloti4 = @import("compiler_rt/muloti4.zig").__muloti4; + @export(__muloti4, .{ .name 
= "__muloti4", .linkage = linkage }); + const __mulodi4 = @import("compiler_rt/mulodi4.zig").__mulodi4; + @export(__mulodi4, .{ .name = "__mulodi4", .linkage = linkage }); - _ = @import("compiler_rt/atomics.zig"); + _ = @import("compiler_rt/atomics.zig"); + } } // Avoid dragging in the runtime safety mechanisms into this .o file, // unless we're trying to test this file. -pub fn panic(msg: []const u8, error_return_trace: ?*builtin.StackTrace) noreturn { +pub fn panic(msg: []const u8, error_return_trace: ?*std.builtin.StackTrace) noreturn { _ = error_return_trace; @setCold(true); + if (builtin.zig_is_stage2) { + while (true) { + @breakpoint(); + } + } if (is_test) { std.debug.panic("{s}", .{msg}); } else { diff --git a/lib/std/special/compiler_rt/extendXfYf2.zig b/lib/std/special/compiler_rt/extendXfYf2.zig index 5571bd9ed3..7afb6b1645 100644 --- a/lib/std/special/compiler_rt/extendXfYf2.zig +++ b/lib/std/special/compiler_rt/extendXfYf2.zig @@ -3,23 +3,23 @@ const builtin = @import("builtin"); const is_test = builtin.is_test; pub fn __extendsfdf2(a: f32) callconv(.C) f64 { - return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f64, f32, @bitCast(u32, a) }); + return extendXfYf2(f64, f32, @bitCast(u32, a)); } pub fn __extenddftf2(a: f64) callconv(.C) f128 { - return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f128, f64, @bitCast(u64, a) }); + return extendXfYf2(f128, f64, @bitCast(u64, a)); } pub fn __extendsftf2(a: f32) callconv(.C) f128 { - return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f128, f32, @bitCast(u32, a) }); + return extendXfYf2(f128, f32, @bitCast(u32, a)); } pub fn __extendhfsf2(a: u16) callconv(.C) f32 { - return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f32, f16, a }); + return extendXfYf2(f32, f16, a); } pub fn __extendhftf2(a: u16) callconv(.C) f128 { - return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f128, f16, a }); + return extendXfYf2(f128, f16, a); } pub fn __aeabi_h2f(arg: u16) callconv(.AAPCS) f32 { @@ -34,7 +34,7 @@ pub fn __aeabi_f2d(arg: f32) callconv(.AAPCS) f64 { const CHAR_BIT = 8; -fn extendXfYf2(comptime dst_t: type, comptime src_t: type, a: std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits)) dst_t { +inline fn extendXfYf2(comptime dst_t: type, comptime src_t: type, a: std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits)) dst_t { @setRuntimeSafety(builtin.is_test); const src_rep_t = std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits); diff --git a/src/Air.zig b/src/Air.zig index ad95200001..c3181fac60 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -227,9 +227,12 @@ pub const Inst = struct { /// Indicates the program counter will never get to this instruction. /// Result type is always noreturn; no instructions in a block follow this one. unreach, - /// Convert from one float type to another. + /// Convert from a float type to a smaller one. /// Uses the `ty_op` field. - floatcast, + fptrunc, + /// Convert from a float type to a wider one. + /// Uses the `ty_op` field. + fpext, /// Returns an integer with a different type than the operand. The new type may have /// fewer, the same, or more bits than the operand type. However, the instruction /// guarantees that the same integer value fits in both types. 
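(Editorial sketch, not part of the patch hunks: the floatcast split above distinguishes the two directions of float conversion. Assuming the Sema changes later in this series, widening is an ordinary coercion and lowers to fpext, while narrowing goes through @floatCast and lowers to fptrunc when the operand is runtime-known. The function names below are made up for illustration.)

    // Widening f32 -> f64: allowed as an implicit coercion; emits `fpext` at runtime.
    fn widen(x: f32) f64 {
        return x;
    }

    // Narrowing f64 -> f32: requires an explicit @floatCast; emits `fptrunc` at runtime
    // (comptime-known operands are simply coerced instead).
    fn narrow(x: f64) f32 {
        return @floatCast(f32, x);
    }
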
@@ -586,7 +589,8 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .not, .bitcast, .load, - .floatcast, + .fpext, + .fptrunc, .intcast, .trunc, .optional_payload, diff --git a/src/AstGen.zig b/src/AstGen.zig index 176203d37f..6a533dff11 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -2166,6 +2166,7 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) Inner .ensure_result_used, .ensure_result_non_error, .@"export", + .export_value, .set_eval_branch_quota, .ensure_err_payload_void, .atomic_store, @@ -7095,32 +7096,55 @@ fn builtinCall( .identifier => { const ident_token = main_tokens[params[0]]; decl_name = try astgen.identAsString(ident_token); - { - var s = scope; - while (true) switch (s.tag) { - .local_val => { - const local_val = s.cast(Scope.LocalVal).?; - if (local_val.name == decl_name) { - local_val.used = true; - break; - } - s = local_val.parent; - }, - .local_ptr => { - const local_ptr = s.cast(Scope.LocalPtr).?; - if (local_ptr.name == decl_name) { - if (!local_ptr.maybe_comptime) - return astgen.failNode(params[0], "unable to export runtime-known value", .{}); - local_ptr.used = true; - break; + + var s = scope; + var found_already: ?Ast.Node.Index = null; // we have found a decl with the same name already + while (true) switch (s.tag) { + .local_val => { + const local_val = s.cast(Scope.LocalVal).?; + if (local_val.name == decl_name) { + local_val.used = true; + _ = try gz.addPlNode(.export_value, node, Zir.Inst.ExportValue{ + .operand = local_val.inst, + .options = try comptimeExpr(gz, scope, .{ .coerced_ty = .export_options_type }, params[1]), + }); + return rvalue(gz, rl, .void_value, node); + } + s = local_val.parent; + }, + .local_ptr => { + const local_ptr = s.cast(Scope.LocalPtr).?; + if (local_ptr.name == decl_name) { + if (!local_ptr.maybe_comptime) + return astgen.failNode(params[0], "unable to export runtime-known value", .{}); + local_ptr.used = true; + const loaded = try gz.addUnNode(.load, local_ptr.ptr, node); + _ = try gz.addPlNode(.export_value, node, Zir.Inst.ExportValue{ + .operand = loaded, + .options = try comptimeExpr(gz, scope, .{ .coerced_ty = .export_options_type }, params[1]), + }); + return rvalue(gz, rl, .void_value, node); + } + s = local_ptr.parent; + }, + .gen_zir => s = s.cast(GenZir).?.parent, + .defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent, + .namespace => { + const ns = s.cast(Scope.Namespace).?; + if (ns.decls.get(decl_name)) |i| { + if (found_already) |f| { + return astgen.failNodeNotes(node, "ambiguous reference", .{}, &.{ + try astgen.errNoteNode(f, "declared here", .{}), + try astgen.errNoteNode(i, "also declared here", .{}), + }); } - s = local_ptr.parent; - }, - .gen_zir => s = s.cast(GenZir).?.parent, - .defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent, - .namespace, .top => break, - }; - } + // We found a match but must continue looking for ambiguous references to decls. 
+ found_already = i; + } + s = ns.parent; + }, + .top => break, + }; }, .field_access => { const namespace_node = node_datas[params[0]].lhs; diff --git a/src/Liveness.zig b/src/Liveness.zig index 1d34da091d..dd0899e745 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -274,7 +274,8 @@ fn analyzeInst( .not, .bitcast, .load, - .floatcast, + .fpext, + .fptrunc, .intcast, .trunc, .optional_payload, diff --git a/src/Module.zig b/src/Module.zig index 88817efc26..70c90d9c01 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -2389,6 +2389,7 @@ pub fn deinit(mod: *Module) void { fn freeExportList(gpa: *Allocator, export_list: []*Export) void { for (export_list) |exp| { gpa.free(exp.options.name); + if (exp.options.section) |s| gpa.free(s); gpa.destroy(exp); } gpa.free(export_list); @@ -3317,7 +3318,8 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool { return mod.fail(&block_scope.base, export_src, "export of inline function", .{}); } // The scope needs to have the decl in it. - try mod.analyzeExport(&block_scope.base, export_src, mem.spanZ(decl.name), decl); + const options: std.builtin.ExportOptions = .{ .name = mem.spanZ(decl.name) }; + try mod.analyzeExport(&block_scope.base, export_src, options, decl); } return type_changed or is_inline != prev_is_inline; } @@ -3376,7 +3378,8 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool { if (decl.is_exported) { const export_src = src; // TODO point to the export token // The scope needs to have the decl in it. - try mod.analyzeExport(&block_scope.base, export_src, mem.spanZ(decl.name), decl); + const options: std.builtin.ExportOptions = .{ .name = mem.spanZ(decl.name) }; + try mod.analyzeExport(&block_scope.base, export_src, options, decl); } return type_changed; @@ -4119,7 +4122,7 @@ pub fn analyzeExport( mod: *Module, scope: *Scope, src: LazySrcLoc, - borrowed_symbol_name: []const u8, + borrowed_options: std.builtin.ExportOptions, exported_decl: *Decl, ) !void { try mod.ensureDeclAnalyzed(exported_decl); @@ -4128,23 +4131,32 @@ pub fn analyzeExport( else => return mod.fail(scope, src, "unable to export type '{}'", .{exported_decl.ty}), } - try mod.decl_exports.ensureUnusedCapacity(mod.gpa, 1); - try mod.export_owners.ensureUnusedCapacity(mod.gpa, 1); + const gpa = mod.gpa; + + try mod.decl_exports.ensureUnusedCapacity(gpa, 1); + try mod.export_owners.ensureUnusedCapacity(gpa, 1); - const new_export = try mod.gpa.create(Export); - errdefer mod.gpa.destroy(new_export); + const new_export = try gpa.create(Export); + errdefer gpa.destroy(new_export); - const symbol_name = try mod.gpa.dupe(u8, borrowed_symbol_name); - errdefer mod.gpa.free(symbol_name); + const symbol_name = try gpa.dupe(u8, borrowed_options.name); + errdefer gpa.free(symbol_name); + + const section: ?[]const u8 = if (borrowed_options.section) |s| try gpa.dupe(u8, s) else null; + errdefer if (section) |s| gpa.free(s); const owner_decl = scope.ownerDecl().?; log.debug("exporting Decl '{s}' as symbol '{s}' from Decl '{s}'", .{ - exported_decl.name, borrowed_symbol_name, owner_decl.name, + exported_decl.name, symbol_name, owner_decl.name, }); new_export.* = .{ - .options = .{ .name = symbol_name }, + .options = .{ + .name = symbol_name, + .linkage = borrowed_options.linkage, + .section = section, + }, .src = src, .link = switch (mod.comp.bin_file.tag) { .coff => .{ .coff = {} }, @@ -4165,18 +4177,18 @@ pub fn analyzeExport( if (!eo_gop.found_existing) { eo_gop.value_ptr.* = &[0]*Export{}; } - eo_gop.value_ptr.* = try mod.gpa.realloc(eo_gop.value_ptr.*, eo_gop.value_ptr.len + 1); + 
eo_gop.value_ptr.* = try gpa.realloc(eo_gop.value_ptr.*, eo_gop.value_ptr.len + 1); eo_gop.value_ptr.*[eo_gop.value_ptr.len - 1] = new_export; - errdefer eo_gop.value_ptr.* = mod.gpa.shrink(eo_gop.value_ptr.*, eo_gop.value_ptr.len - 1); + errdefer eo_gop.value_ptr.* = gpa.shrink(eo_gop.value_ptr.*, eo_gop.value_ptr.len - 1); // Add to exported_decl table. const de_gop = mod.decl_exports.getOrPutAssumeCapacity(exported_decl); if (!de_gop.found_existing) { de_gop.value_ptr.* = &[0]*Export{}; } - de_gop.value_ptr.* = try mod.gpa.realloc(de_gop.value_ptr.*, de_gop.value_ptr.len + 1); + de_gop.value_ptr.* = try gpa.realloc(de_gop.value_ptr.*, de_gop.value_ptr.len + 1); de_gop.value_ptr.*[de_gop.value_ptr.len - 1] = new_export; - errdefer de_gop.value_ptr.* = mod.gpa.shrink(de_gop.value_ptr.*, de_gop.value_ptr.len - 1); + errdefer de_gop.value_ptr.* = gpa.shrink(de_gop.value_ptr.*, de_gop.value_ptr.len - 1); } /// Takes ownership of `name` even if it returns an error. diff --git a/src/Sema.zig b/src/Sema.zig index 91d12b7b31..a0e3250e56 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -458,6 +458,11 @@ pub fn analyzeBody( i += 1; continue; }, + .export_value => { + try sema.zirExportValue(block, inst); + i += 1; + continue; + }, .set_align_stack => { try sema.zirSetAlignStack(block, inst); i += 1; @@ -2392,30 +2397,33 @@ fn zirExport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErro const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Export, inst_data.payload_index).data; const src = inst_data.src(); - const lhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const rhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; + const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; + const options_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const decl_name = sema.code.nullTerminatedString(extra.decl_name); if (extra.namespace != .none) { return sema.mod.fail(&block.base, src, "TODO: implement exporting with field access", .{}); } - const decl = try sema.lookupIdentifier(block, lhs_src, decl_name); - const options = try sema.resolveInstConst(block, rhs_src, extra.options); - const struct_obj = options.ty.castTag(.@"struct").?.data; - const fields = options.val.castTag(.@"struct").?.data[0..struct_obj.fields.count()]; - const name_index = struct_obj.fields.getIndex("name").?; - const linkage_index = struct_obj.fields.getIndex("linkage").?; - const section_index = struct_obj.fields.getIndex("section").?; - const export_name = try fields[name_index].toAllocatedBytes(sema.arena); - const linkage = fields[linkage_index].toEnum(std.builtin.GlobalLinkage); + const decl = try sema.lookupIdentifier(block, operand_src, decl_name); + const options = try sema.resolveExportOptions(block, options_src, extra.options); + try sema.mod.analyzeExport(&block.base, src, options, decl); +} - if (linkage != .Strong) { - return sema.mod.fail(&block.base, src, "TODO: implement exporting with non-strong linkage", .{}); - } - if (!fields[section_index].isNull()) { - return sema.mod.fail(&block.base, src, "TODO: implement exporting with linksection", .{}); - } +fn zirExportValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { + const tracy = trace(@src()); + defer tracy.end(); - try sema.mod.analyzeExport(&block.base, src, export_name, decl); + const inst_data = 
sema.code.instructions.items(.data)[inst].pl_node; + const extra = sema.code.extraData(Zir.Inst.ExportValue, inst_data.payload_index).data; + const src = inst_data.src(); + const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; + const options_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; + const operand = try sema.resolveInstConst(block, operand_src, extra.operand); + const options = try sema.resolveExportOptions(block, options_src, extra.options); + const decl = switch (operand.val.tag()) { + .function => operand.val.castTag(.function).?.data.owner_decl, + else => return sema.mod.fail(&block.base, operand_src, "TODO implement exporting arbitrary Value objects", .{}), // TODO put this Value into an anonymous Decl and then export it. + }; + try sema.mod.analyzeExport(&block.base, src, options, decl); } fn zirSetAlignStack(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { @@ -4516,11 +4524,18 @@ fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileE if (try sema.isComptimeKnown(block, operand_src, operand)) { return sema.coerce(block, dest_type, operand, operand_src); - } else if (dest_is_comptime_float) { + } + if (dest_is_comptime_float) { return sema.mod.fail(&block.base, src, "unable to cast runtime value to 'comptime_float'", .{}); } - - return sema.mod.fail(&block.base, src, "TODO implement analyze widen or shorten float", .{}); + const target = sema.mod.getTarget(); + const src_bits = operand_ty.floatBits(target); + const dst_bits = dest_type.floatBits(target); + if (dst_bits >= src_bits) { + return sema.coerce(block, dest_type, operand, operand_src); + } + try sema.requireRuntimeBlock(block, operand_src); + return block.addTyOp(.fptrunc, dest_type, operand); } fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -7936,6 +7951,31 @@ fn checkAtomicOperandType( } } +fn resolveExportOptions( + sema: *Sema, + block: *Scope.Block, + src: LazySrcLoc, + zir_ref: Zir.Inst.Ref, +) CompileError!std.builtin.ExportOptions { + const export_options_ty = try sema.getBuiltinType(block, src, "ExportOptions"); + const air_ref = sema.resolveInst(zir_ref); + const coerced = try sema.coerce(block, export_options_ty, air_ref, src); + const val = try sema.resolveConstValue(block, src, coerced); + const fields = val.castTag(.@"struct").?.data; + const struct_obj = export_options_ty.castTag(.@"struct").?.data; + const name_index = struct_obj.fields.getIndex("name").?; + const linkage_index = struct_obj.fields.getIndex("linkage").?; + const section_index = struct_obj.fields.getIndex("section").?; + if (!fields[section_index].isNull()) { + return sema.mod.fail(&block.base, src, "TODO: implement exporting with linksection", .{}); + } + return std.builtin.ExportOptions{ + .name = try fields[name_index].toAllocatedBytes(sema.arena), + .linkage = fields[linkage_index].toEnum(std.builtin.GlobalLinkage), + .section = null, // TODO + }; +} + fn resolveAtomicOrder( sema: *Sema, block: *Scope.Block, @@ -9581,7 +9621,7 @@ fn coerce( const dst_bits = dest_type.floatBits(target); if (dst_bits >= src_bits) { try sema.requireRuntimeBlock(block, inst_src); - return block.addTyOp(.floatcast, dest_type, inst); + return block.addTyOp(.fpext, dest_type, inst); } } }, @@ -9729,35 +9769,53 @@ fn coerceNum( const target = sema.mod.getTarget(); switch (dst_zig_tag) { - .ComptimeInt, .Int => { - if (src_zig_tag == .Float or src_zig_tag == .ComptimeFloat) { + .ComptimeInt, 
.Int => switch (src_zig_tag) { + .Float, .ComptimeFloat => { if (val.floatHasFraction()) { - return sema.mod.fail(&block.base, inst_src, "fractional component prevents float value {} from being casted to type '{}'", .{ val, inst_ty }); + return sema.mod.fail(&block.base, inst_src, "fractional component prevents float value {} from coercion to type '{}'", .{ val, dest_type }); } return sema.mod.fail(&block.base, inst_src, "TODO float to int", .{}); - } else if (src_zig_tag == .Int or src_zig_tag == .ComptimeInt) { + }, + .Int, .ComptimeInt => { if (!val.intFitsInType(dest_type, target)) { return sema.mod.fail(&block.base, inst_src, "type {} cannot represent integer value {}", .{ dest_type, val }); } return try sema.addConstant(dest_type, val); - } + }, + else => {}, }, - .ComptimeFloat, .Float => { - if (src_zig_tag == .Float or src_zig_tag == .ComptimeFloat) { - const res = val.floatCast(sema.arena, dest_type) catch |err| switch (err) { - error.Overflow => return sema.mod.fail( + .ComptimeFloat, .Float => switch (src_zig_tag) { + .ComptimeFloat => { + const result_val = try val.floatCast(sema.arena, dest_type); + return try sema.addConstant(dest_type, result_val); + }, + .Float => { + const result_val = try val.floatCast(sema.arena, dest_type); + if (!val.eql(result_val, dest_type)) { + return sema.mod.fail( &block.base, inst_src, - "cast of value {} to type '{}' loses information", - .{ val, dest_type }, - ), - error.OutOfMemory => return error.OutOfMemory, - }; - return try sema.addConstant(dest_type, res); - } else if (src_zig_tag == .Int or src_zig_tag == .ComptimeInt) { + "type {} cannot represent float value {}", + .{ dest_type, val }, + ); + } + return try sema.addConstant(dest_type, result_val); + }, + .Int, .ComptimeInt => { const result_val = try val.intToFloat(sema.arena, dest_type, target); + // TODO implement this compile error + //const int_again_val = try result_val.floatToInt(sema.arena, inst_ty); + //if (!int_again_val.eql(val, inst_ty)) { + // return sema.mod.fail( + // &block.base, + // inst_src, + // "type {} cannot represent integer value {}", + // .{ dest_type, val }, + // ); + //} return try sema.addConstant(dest_type, result_val); - } + }, + else => {}, }, else => {}, } diff --git a/src/Zir.zig b/src/Zir.zig index 7f752bcced..d3b1678d97 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -319,9 +319,13 @@ pub const Inst = struct { /// `error.Foo` syntax. Uses the `str_tok` field of the Data union. error_value, /// Implements the `@export` builtin function, based on either an identifier to a Decl, - /// or field access of a Decl. + /// or field access of a Decl. The thing being exported is the Decl. /// Uses the `pl_node` union field. Payload is `Export`. @"export", + /// Implements the `@export` builtin function, based on a comptime-known value. + /// The thing being exported is the comptime-known value which is the operand. + /// Uses the `pl_node` union field. Payload is `ExportValue`. + export_value, /// Given a pointer to a struct or object that contains virtual fields, returns a pointer /// to the named field. The field name is stored in string_bytes. Used by a.b syntax. /// Uses `pl_node` field. The AST node is the a.b syntax. Payload is Field. 
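(Editorial sketch, not part of the patch hunks: the new export_value instruction documented above is reached when the @export operand names a comptime-known local value rather than a container-level declaration. Per the Sema change earlier in this patch, only function values are handled so far; the exported Decl is the function's owner_decl. The function and symbol names below are made up for illustration; roughly:)

    fn internalImpl() callconv(.C) void {}

    comptime {
        // `f` is a comptime-known local, so AstGen emits `export_value`
        // rather than `export`; Sema then exports the function's owner Decl.
        const f = internalImpl;
        @export(f, .{ .name = "exported_name", .linkage = .Strong });
    }
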
@@ -1010,6 +1014,7 @@ pub const Inst = struct { .ensure_result_used, .ensure_result_non_error, .@"export", + .export_value, .field_ptr, .field_val, .field_ptr_named, @@ -1273,6 +1278,7 @@ pub const Inst = struct { .error_union_type = .pl_node, .error_value = .str_tok, .@"export" = .pl_node, + .export_value = .pl_node, .field_ptr = .pl_node, .field_val = .pl_node, .field_ptr_named = .pl_node, @@ -2843,6 +2849,12 @@ pub const Inst = struct { options: Ref, }; + pub const ExportValue = struct { + /// The comptime value to export. + operand: Ref, + options: Ref, + }; + /// Trailing: `CompileErrors.Item` for each `items_len`. pub const CompileErrors = struct { items_len: u32, diff --git a/src/codegen.zig b/src/codegen.zig index 54c1400211..06b520c9dd 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -859,7 +859,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .call => try self.airCall(inst), .cond_br => try self.airCondBr(inst), .dbg_stmt => try self.airDbgStmt(inst), - .floatcast => try self.airFloatCast(inst), + .fptrunc => try self.airFptrunc(inst), + .fpext => try self.airFpext(inst), .intcast => try self.airIntCast(inst), .trunc => try self.airTrunc(inst), .bool_to_int => try self.airBoolToInt(inst), @@ -1172,10 +1173,18 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return self.finishAir(inst, .{ .ptr_stack_offset = stack_offset }, .{ .none, .none, .none }); } - fn airFloatCast(self: *Self, inst: Air.Inst.Index) !void { + fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { - else => return self.fail("TODO implement floatCast for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement airFptrunc for {}", .{self.target.cpu.arch}), + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + } + + fn airFpext(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { + else => return self.fail("TODO implement airFpext for {}", .{self.target.cpu.arch}), }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index c4e1d48062..a82f0e57f7 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -954,7 +954,12 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO .atomic_rmw => try airAtomicRmw(f, inst), .atomic_load => try airAtomicLoad(f, inst), - .int_to_float, .float_to_int => try airSimpleCast(f, inst), + .int_to_float, + .float_to_int, + .fptrunc, + .fpext, + .ptrtoint, + => try airSimpleCast(f, inst), .atomic_store_unordered => try airAtomicStore(f, inst, toMemoryOrder(.Unordered)), .atomic_store_monotonic => try airAtomicStore(f, inst, toMemoryOrder(.Monotonic)), @@ -982,9 +987,6 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO .unwrap_errunion_err_ptr => try airUnwrapErrUnionErr(f, inst), .wrap_errunion_payload => try airWrapErrUnionPay(f, inst), .wrap_errunion_err => try airWrapErrUnionErr(f, inst), - - .ptrtoint => return f.fail("TODO: C backend: implement codegen for ptrtoint", .{}), - .floatcast => return f.fail("TODO: C backend: implement codegen for floatcast", .{}), // zig fmt: on }; switch (result_value) { diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 3a977bc582..0d3bca2b03 100644 --- 
a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -472,7 +472,18 @@ pub const Object = struct { alias.setAliasee(llvm_fn); } else { const alias = self.llvm_module.addAlias(llvm_fn.typeOf(), llvm_fn, exp_name_z); - _ = alias; + switch (exp.options.linkage) { + .Internal => alias.setLinkage(.Internal), + .Strong => alias.setLinkage(.External), + .Weak => { + if (is_extern) { + alias.setLinkage(.ExternalWeak); + } else { + alias.setLinkage(.WeakODR); + } + }, + .LinkOnce => alias.setLinkage(.LinkOnceODR), + } } } } @@ -1137,7 +1148,8 @@ pub const FuncGen = struct { .cond_br => try self.airCondBr(inst), .intcast => try self.airIntCast(inst), .trunc => try self.airTrunc(inst), - .floatcast => try self.airFloatCast(inst), + .fptrunc => try self.airFptrunc(inst), + .fpext => try self.airFpext(inst), .ptrtoint => try self.airPtrToInt(inst), .load => try self.airLoad(inst), .loop => try self.airLoop(inst), @@ -2060,12 +2072,26 @@ pub const FuncGen = struct { return self.builder.buildTrunc(operand, dest_llvm_ty, ""); } - fn airFloatCast(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + fn airFptrunc(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { if (self.liveness.isUnused(inst)) return null; - // TODO split floatcast AIR into float_widen and float_shorten - return self.todo("implement 'airFloatCast'", .{}); + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const operand = try self.resolveInst(ty_op.operand); + const dest_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst)); + + return self.builder.buildFPTrunc(operand, dest_llvm_ty, ""); + } + + fn airFpext(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; + + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const operand = try self.resolveInst(ty_op.operand); + const dest_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst)); + + return self.builder.buildFPExt(operand, dest_llvm_ty, ""); } fn airPtrToInt(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig index 039232426b..bf951fa67e 100644 --- a/src/codegen/llvm/bindings.zig +++ b/src/codegen/llvm/bindings.zig @@ -601,6 +601,22 @@ pub const Builder = opaque { DestTy: *const Type, Name: [*:0]const u8, ) *const Value; + + pub const buildFPTrunc = LLVMBuildFPTrunc; + extern fn LLVMBuildFPTrunc( + *const Builder, + Val: *const Value, + DestTy: *const Type, + Name: [*:0]const u8, + ) *const Value; + + pub const buildFPExt = LLVMBuildFPExt; + extern fn LLVMBuildFPExt( + *const Builder, + Val: *const Value, + DestTy: *const Type, + Name: [*:0]const u8, + ) *const Value; }; pub const IntPredicate = enum(c_uint) { diff --git a/src/print_air.zig b/src/print_air.zig index 3d13fa688f..a9ad993eb0 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -156,7 +156,8 @@ const Writer = struct { .not, .bitcast, .load, - .floatcast, + .fptrunc, + .fpext, .intcast, .trunc, .optional_payload, diff --git a/src/print_zir.zig b/src/print_zir.zig index 35b3da4479..4527b62262 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -285,6 +285,7 @@ const Writer = struct { => try self.writePlNodeBin(stream, inst), .@"export" => try self.writePlNodeExport(stream, inst), + .export_value => try self.writePlNodeExportValue(stream, inst), .call, .call_chkused, @@ -611,6 +612,17 @@ const Writer = struct { try self.writeSrc(stream, inst_data.src()); } + fn writePlNodeExportValue(self: *Writer, stream: anytype, inst: 
Zir.Inst.Index) !void { + const inst_data = self.code.instructions.items(.data)[inst].pl_node; + const extra = self.code.extraData(Zir.Inst.ExportValue, inst_data.payload_index).data; + + try self.writeInstRef(stream, extra.operand); + try stream.writeAll(", "); + try self.writeInstRef(stream, extra.options); + try stream.writeAll(") "); + try self.writeSrc(stream, inst_data.src()); + } + fn writeStructInit(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void { const inst_data = self.code.instructions.items(.data)[inst].pl_node; const extra = self.code.extraData(Zir.Inst.StructInit, inst_data.payload_index); diff --git a/src/stage1/codegen.cpp b/src/stage1/codegen.cpp index 3b22101df9..1304dcc004 100644 --- a/src/stage1/codegen.cpp +++ b/src/stage1/codegen.cpp @@ -9325,6 +9325,7 @@ Buf *codegen_generate_builtin_source(CodeGen *g) { buf_appendf(contents, "pub const single_threaded = %s;\n", bool_to_str(g->is_single_threaded)); buf_appendf(contents, "pub const abi = std.Target.Abi.%s;\n", cur_abi); buf_appendf(contents, "pub const cpu = std.Target.Cpu.baseline(.%s);\n", cur_arch); + buf_appendf(contents, "pub const stage2_arch: std.Target.Cpu.Arch = .%s;\n", cur_arch); buf_appendf(contents, "pub const os = std.Target.Os.Tag.defaultVersionRange(.%s);\n", cur_os); buf_appendf(contents, "pub const target = std.Target{\n" diff --git a/src/value.zig b/src/value.zig index bdf5b9c37c..cb5d211b1e 100644 --- a/src/value.zig +++ b/src/value.zig @@ -1041,30 +1041,15 @@ pub const Value = extern union { } } - /// Converts an integer or a float to a float. - /// Returns `error.Overflow` if the value does not fit in the new type. - pub fn floatCast(self: Value, allocator: *Allocator, dest_ty: Type) !Value { + /// Converts an integer or a float to a float. May result in a loss of information. + /// Caller can find out by equality checking the result against the operand. 
+ pub fn floatCast(self: Value, arena: *Allocator, dest_ty: Type) !Value { switch (dest_ty.tag()) { - .f16 => { - const res = try Value.Tag.float_16.create(allocator, self.toFloat(f16)); - if (!self.eql(res, dest_ty)) - return error.Overflow; - return res; - }, - .f32 => { - const res = try Value.Tag.float_32.create(allocator, self.toFloat(f32)); - if (!self.eql(res, dest_ty)) - return error.Overflow; - return res; - }, - .f64 => { - const res = try Value.Tag.float_64.create(allocator, self.toFloat(f64)); - if (!self.eql(res, dest_ty)) - return error.Overflow; - return res; - }, + .f16 => return Value.Tag.float_16.create(arena, self.toFloat(f16)), + .f32 => return Value.Tag.float_32.create(arena, self.toFloat(f32)), + .f64 => return Value.Tag.float_64.create(arena, self.toFloat(f64)), .f128, .comptime_float, .c_longdouble => { - return Value.Tag.float_128.create(allocator, self.toFloat(f128)); + return Value.Tag.float_128.create(arena, self.toFloat(f128)); }, else => unreachable, } diff --git a/test/behavior.zig b/test/behavior.zig index f328db968e..7fbba99a8c 100644 --- a/test/behavior.zig +++ b/test/behavior.zig @@ -13,6 +13,7 @@ test { _ = @import("behavior/atomics.zig"); _ = @import("behavior/sizeof_and_typeof.zig"); _ = @import("behavior/translate_c_macros.zig"); + _ = @import("behavior/union.zig"); _ = @import("behavior/widening.zig"); if (builtin.zig_is_stage2) { @@ -149,7 +150,7 @@ test { _ = @import("behavior/typename.zig"); _ = @import("behavior/undefined.zig"); _ = @import("behavior/underscore.zig"); - _ = @import("behavior/union.zig"); + _ = @import("behavior/union_stage1.zig"); _ = @import("behavior/usingnamespace_stage1.zig"); _ = @import("behavior/var_args.zig"); _ = @import("behavior/vector.zig"); @@ -158,7 +159,6 @@ test { _ = @import("behavior/wasm.zig"); } _ = @import("behavior/while.zig"); - _ = @import("behavior/widening_stage1.zig"); _ = @import("behavior/src.zig"); _ = @import("behavior/translate_c_macros_stage1.zig"); } diff --git a/test/behavior/union.zig b/test/behavior/union.zig index 323dd18f4d..14b5e374dd 100644 --- a/test/behavior/union.zig +++ b/test/behavior/union.zig @@ -2,816 +2,3 @@ const std = @import("std"); const expect = std.testing.expect; const expectEqual = std.testing.expectEqual; const Tag = std.meta.Tag; - -const Value = union(enum) { - Int: u64, - Array: [9]u8, -}; - -const Agg = struct { - val1: Value, - val2: Value, -}; - -const v1 = Value{ .Int = 1234 }; -const v2 = Value{ .Array = [_]u8{3} ** 9 }; - -const err = @as(anyerror!Agg, Agg{ - .val1 = v1, - .val2 = v2, -}); - -const array = [_]Value{ - v1, - v2, - v1, - v2, -}; - -test "unions embedded in aggregate types" { - switch (array[1]) { - Value.Array => |arr| try expect(arr[4] == 3), - else => unreachable, - } - switch ((err catch unreachable).val1) { - Value.Int => |x| try expect(x == 1234), - else => unreachable, - } -} - -const Foo = union { - float: f64, - int: i32, -}; - -test "basic unions" { - var foo = Foo{ .int = 1 }; - try expect(foo.int == 1); - foo = Foo{ .float = 12.34 }; - try expect(foo.float == 12.34); -} - -test "comptime union field access" { - comptime { - var foo = Foo{ .int = 0 }; - try expect(foo.int == 0); - - foo = Foo{ .float = 42.42 }; - try expect(foo.float == 42.42); - } -} - -test "init union with runtime value" { - var foo: Foo = undefined; - - setFloat(&foo, 12.34); - try expect(foo.float == 12.34); - - setInt(&foo, 42); - try expect(foo.int == 42); -} - -fn setFloat(foo: *Foo, x: f64) void { - foo.* = Foo{ .float = x }; -} - -fn setInt(foo: *Foo, x: i32) 
void { - foo.* = Foo{ .int = x }; -} - -const FooExtern = extern union { - float: f64, - int: i32, -}; - -test "basic extern unions" { - var foo = FooExtern{ .int = 1 }; - try expect(foo.int == 1); - foo.float = 12.34; - try expect(foo.float == 12.34); -} - -const Letter = enum { - A, - B, - C, -}; -const Payload = union(Letter) { - A: i32, - B: f64, - C: bool, -}; - -test "union with specified enum tag" { - try doTest(); - comptime try doTest(); -} - -fn doTest() error{TestUnexpectedResult}!void { - try expect((try bar(Payload{ .A = 1234 })) == -10); -} - -fn bar(value: Payload) error{TestUnexpectedResult}!i32 { - try expect(@as(Letter, value) == Letter.A); - return switch (value) { - Payload.A => |x| return x - 1244, - Payload.B => |x| if (x == 12.34) @as(i32, 20) else 21, - Payload.C => |x| if (x) @as(i32, 30) else 31, - }; -} - -const MultipleChoice = union(enum(u32)) { - A = 20, - B = 40, - C = 60, - D = 1000, -}; -test "simple union(enum(u32))" { - var x = MultipleChoice.C; - try expect(x == MultipleChoice.C); - try expect(@enumToInt(@as(Tag(MultipleChoice), x)) == 60); -} - -const MultipleChoice2 = union(enum(u32)) { - Unspecified1: i32, - A: f32 = 20, - Unspecified2: void, - B: bool = 40, - Unspecified3: i32, - C: i8 = 60, - Unspecified4: void, - D: void = 1000, - Unspecified5: i32, -}; - -test "union(enum(u32)) with specified and unspecified tag values" { - comptime try expect(Tag(Tag(MultipleChoice2)) == u32); - try testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2{ .C = 123 }); - comptime try testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2{ .C = 123 }); -} - -fn testEnumWithSpecifiedAndUnspecifiedTagValues(x: MultipleChoice2) !void { - try expect(@enumToInt(@as(Tag(MultipleChoice2), x)) == 60); - try expect(1123 == switch (x) { - MultipleChoice2.A => 1, - MultipleChoice2.B => 2, - MultipleChoice2.C => |v| @as(i32, 1000) + v, - MultipleChoice2.D => 4, - MultipleChoice2.Unspecified1 => 5, - MultipleChoice2.Unspecified2 => 6, - MultipleChoice2.Unspecified3 => 7, - MultipleChoice2.Unspecified4 => 8, - MultipleChoice2.Unspecified5 => 9, - }); -} - -const ExternPtrOrInt = extern union { - ptr: *u8, - int: u64, -}; -test "extern union size" { - comptime try expect(@sizeOf(ExternPtrOrInt) == 8); -} - -const PackedPtrOrInt = packed union { - ptr: *u8, - int: u64, -}; -test "extern union size" { - comptime try expect(@sizeOf(PackedPtrOrInt) == 8); -} - -const ZeroBits = union { - OnlyField: void, -}; -test "union with only 1 field which is void should be zero bits" { - comptime try expect(@sizeOf(ZeroBits) == 0); -} - -const TheTag = enum { - A, - B, - C, -}; -const TheUnion = union(TheTag) { - A: i32, - B: i32, - C: i32, -}; -test "union field access gives the enum values" { - try expect(TheUnion.A == TheTag.A); - try expect(TheUnion.B == TheTag.B); - try expect(TheUnion.C == TheTag.C); -} - -test "cast union to tag type of union" { - try testCastUnionToTag(TheUnion{ .B = 1234 }); - comptime try testCastUnionToTag(TheUnion{ .B = 1234 }); -} - -fn testCastUnionToTag(x: TheUnion) !void { - try expect(@as(TheTag, x) == TheTag.B); -} - -test "cast tag type of union to union" { - var x: Value2 = Letter2.B; - try expect(@as(Letter2, x) == Letter2.B); -} -const Letter2 = enum { - A, - B, - C, -}; -const Value2 = union(Letter2) { - A: i32, - B, - C, -}; - -test "implicit cast union to its tag type" { - var x: Value2 = Letter2.B; - try expect(x == Letter2.B); - try giveMeLetterB(x); -} -fn giveMeLetterB(x: Letter2) !void { - try expect(x == Value2.B); -} - -pub const 
PackThis = union(enum) { - Invalid: bool, - StringLiteral: u2, -}; - -test "constant packed union" { - try testConstPackedUnion(&[_]PackThis{PackThis{ .StringLiteral = 1 }}); -} - -fn testConstPackedUnion(expected_tokens: []const PackThis) !void { - try expect(expected_tokens[0].StringLiteral == 1); -} - -test "switch on union with only 1 field" { - var r: PartialInst = undefined; - r = PartialInst.Compiled; - switch (r) { - PartialInst.Compiled => { - var z: PartialInstWithPayload = undefined; - z = PartialInstWithPayload{ .Compiled = 1234 }; - switch (z) { - PartialInstWithPayload.Compiled => |x| { - try expect(x == 1234); - return; - }, - } - }, - } - unreachable; -} - -const PartialInst = union(enum) { - Compiled, -}; - -const PartialInstWithPayload = union(enum) { - Compiled: i32, -}; - -test "access a member of tagged union with conflicting enum tag name" { - const Bar = union(enum) { - A: A, - B: B, - - const A = u8; - const B = void; - }; - - comptime try expect(Bar.A == u8); -} - -test "tagged union initialization with runtime void" { - try expect(testTaggedUnionInit({})); -} - -const TaggedUnionWithAVoid = union(enum) { - A, - B: i32, -}; - -fn testTaggedUnionInit(x: anytype) bool { - const y = TaggedUnionWithAVoid{ .A = x }; - return @as(Tag(TaggedUnionWithAVoid), y) == TaggedUnionWithAVoid.A; -} - -pub const UnionEnumNoPayloads = union(enum) { - A, - B, -}; - -test "tagged union with no payloads" { - const a = UnionEnumNoPayloads{ .B = {} }; - switch (a) { - Tag(UnionEnumNoPayloads).A => @panic("wrong"), - Tag(UnionEnumNoPayloads).B => {}, - } -} - -test "union with only 1 field casted to its enum type" { - const Literal = union(enum) { - Number: f64, - Bool: bool, - }; - - const Expr = union(enum) { - Literal: Literal, - }; - - var e = Expr{ .Literal = Literal{ .Bool = true } }; - const ExprTag = Tag(Expr); - comptime try expect(Tag(ExprTag) == u0); - var t = @as(ExprTag, e); - try expect(t == Expr.Literal); -} - -test "union with only 1 field casted to its enum type which has enum value specified" { - const Literal = union(enum) { - Number: f64, - Bool: bool, - }; - - const ExprTag = enum(comptime_int) { - Literal = 33, - }; - - const Expr = union(ExprTag) { - Literal: Literal, - }; - - var e = Expr{ .Literal = Literal{ .Bool = true } }; - comptime try expect(Tag(ExprTag) == comptime_int); - var t = @as(ExprTag, e); - try expect(t == Expr.Literal); - try expect(@enumToInt(t) == 33); - comptime try expect(@enumToInt(t) == 33); -} - -test "@enumToInt works on unions" { - const Bar = union(enum) { - A: bool, - B: u8, - C, - }; - - const a = Bar{ .A = true }; - var b = Bar{ .B = undefined }; - var c = Bar.C; - try expect(@enumToInt(a) == 0); - try expect(@enumToInt(b) == 1); - try expect(@enumToInt(c) == 2); -} - -const Attribute = union(enum) { - A: bool, - B: u8, -}; - -fn setAttribute(attr: Attribute) void { - _ = attr; -} - -fn Setter(attr: Attribute) type { - return struct { - fn set() void { - setAttribute(attr); - } - }; -} - -test "comptime union field value equality" { - const a0 = Setter(Attribute{ .A = false }); - const a1 = Setter(Attribute{ .A = true }); - const a2 = Setter(Attribute{ .A = false }); - - const b0 = Setter(Attribute{ .B = 5 }); - const b1 = Setter(Attribute{ .B = 9 }); - const b2 = Setter(Attribute{ .B = 5 }); - - try expect(a0 == a0); - try expect(a1 == a1); - try expect(a0 == a2); - - try expect(b0 == b0); - try expect(b1 == b1); - try expect(b0 == b2); - - try expect(a0 != b0); - try expect(a0 != a1); - try expect(b0 != b1); -} - -test "return union 
init with void payload" { - const S = struct { - fn entry() !void { - try expect(func().state == State.one); - } - const Outer = union(enum) { - state: State, - }; - const State = union(enum) { - one: void, - two: u32, - }; - fn func() Outer { - return Outer{ .state = State{ .one = {} } }; - } - }; - try S.entry(); - comptime try S.entry(); -} - -test "@unionInit can modify a union type" { - const UnionInitEnum = union(enum) { - Boolean: bool, - Byte: u8, - }; - - var value: UnionInitEnum = undefined; - - value = @unionInit(UnionInitEnum, "Boolean", true); - try expect(value.Boolean == true); - value.Boolean = false; - try expect(value.Boolean == false); - - value = @unionInit(UnionInitEnum, "Byte", 2); - try expect(value.Byte == 2); - value.Byte = 3; - try expect(value.Byte == 3); -} - -test "@unionInit can modify a pointer value" { - const UnionInitEnum = union(enum) { - Boolean: bool, - Byte: u8, - }; - - var value: UnionInitEnum = undefined; - var value_ptr = &value; - - value_ptr.* = @unionInit(UnionInitEnum, "Boolean", true); - try expect(value.Boolean == true); - - value_ptr.* = @unionInit(UnionInitEnum, "Byte", 2); - try expect(value.Byte == 2); -} - -test "union no tag with struct member" { - const Struct = struct {}; - const Union = union { - s: Struct, - pub fn foo(self: *@This()) void { - _ = self; - } - }; - var u = Union{ .s = Struct{} }; - u.foo(); -} - -fn testComparison() !void { - var x = Payload{ .A = 42 }; - try expect(x == .A); - try expect(x != .B); - try expect(x != .C); - try expect((x == .B) == false); - try expect((x == .C) == false); - try expect((x != .A) == false); -} - -test "comparison between union and enum literal" { - try testComparison(); - comptime try testComparison(); -} - -test "packed union generates correctly aligned LLVM type" { - const U = packed union { - f1: fn () error{TestUnexpectedResult}!void, - f2: u32, - }; - var foo = [_]U{ - U{ .f1 = doTest }, - U{ .f2 = 0 }, - }; - try foo[0].f1(); -} - -test "union with one member defaults to u0 tag type" { - const U0 = union(enum) { - X: u32, - }; - comptime try expect(Tag(Tag(U0)) == u0); -} - -test "union with comptime_int tag" { - const Union = union(enum(comptime_int)) { - X: u32, - Y: u16, - Z: u8, - }; - comptime try expect(Tag(Tag(Union)) == comptime_int); -} - -test "extern union doesn't trigger field check at comptime" { - const U = extern union { - x: u32, - y: u8, - }; - - const x = U{ .x = 0x55AAAA55 }; - comptime try expect(x.y == 0x55); -} - -const Foo1 = union(enum) { - f: struct { - x: usize, - }, -}; -var glbl: Foo1 = undefined; - -test "global union with single field is correctly initialized" { - glbl = Foo1{ - .f = @typeInfo(Foo1).Union.fields[0].field_type{ .x = 123 }, - }; - try expect(glbl.f.x == 123); -} - -pub const FooUnion = union(enum) { - U0: usize, - U1: u8, -}; - -var glbl_array: [2]FooUnion = undefined; - -test "initialize global array of union" { - glbl_array[1] = FooUnion{ .U1 = 2 }; - glbl_array[0] = FooUnion{ .U0 = 1 }; - try expect(glbl_array[0].U0 == 1); - try expect(glbl_array[1].U1 == 2); -} - -test "anonymous union literal syntax" { - const S = struct { - const Number = union { - int: i32, - float: f64, - }; - - fn doTheTest() !void { - var i: Number = .{ .int = 42 }; - var f = makeNumber(); - try expect(i.int == 42); - try expect(f.float == 12.34); - } - - fn makeNumber() Number { - return .{ .float = 12.34 }; - } - }; - try S.doTheTest(); - comptime try S.doTheTest(); -} - -test "update the tag value for zero-sized unions" { - const S = union(enum) { - U0: 
void, - U1: void, - }; - var x = S{ .U0 = {} }; - try expect(x == .U0); - x = S{ .U1 = {} }; - try expect(x == .U1); -} - -test "function call result coerces from tagged union to the tag" { - const S = struct { - const Arch = union(enum) { - One, - Two: usize, - }; - - const ArchTag = Tag(Arch); - - fn doTheTest() !void { - var x: ArchTag = getArch1(); - try expect(x == .One); - - var y: ArchTag = getArch2(); - try expect(y == .Two); - } - - pub fn getArch1() Arch { - return .One; - } - - pub fn getArch2() Arch { - return .{ .Two = 99 }; - } - }; - try S.doTheTest(); - comptime try S.doTheTest(); -} - -test "0-sized extern union definition" { - const U = extern union { - a: void, - const f = 1; - }; - - try expect(U.f == 1); -} - -test "union initializer generates padding only if needed" { - const U = union(enum) { - A: u24, - }; - - var v = U{ .A = 532 }; - try expect(v.A == 532); -} - -test "runtime tag name with single field" { - const U = union(enum) { - A: i32, - }; - - var v = U{ .A = 42 }; - try expect(std.mem.eql(u8, @tagName(v), "A")); -} - -test "cast from anonymous struct to union" { - const S = struct { - const U = union(enum) { - A: u32, - B: []const u8, - C: void, - }; - fn doTheTest() !void { - var y: u32 = 42; - const t0 = .{ .A = 123 }; - const t1 = .{ .B = "foo" }; - const t2 = .{ .C = {} }; - const t3 = .{ .A = y }; - const x0: U = t0; - var x1: U = t1; - const x2: U = t2; - var x3: U = t3; - try expect(x0.A == 123); - try expect(std.mem.eql(u8, x1.B, "foo")); - try expect(x2 == .C); - try expect(x3.A == y); - } - }; - try S.doTheTest(); - comptime try S.doTheTest(); -} - -test "cast from pointer to anonymous struct to pointer to union" { - const S = struct { - const U = union(enum) { - A: u32, - B: []const u8, - C: void, - }; - fn doTheTest() !void { - var y: u32 = 42; - const t0 = &.{ .A = 123 }; - const t1 = &.{ .B = "foo" }; - const t2 = &.{ .C = {} }; - const t3 = &.{ .A = y }; - const x0: *const U = t0; - var x1: *const U = t1; - const x2: *const U = t2; - var x3: *const U = t3; - try expect(x0.A == 123); - try expect(std.mem.eql(u8, x1.B, "foo")); - try expect(x2.* == .C); - try expect(x3.A == y); - } - }; - try S.doTheTest(); - comptime try S.doTheTest(); -} - -test "method call on an empty union" { - const S = struct { - const MyUnion = union(MyUnionTag) { - pub const MyUnionTag = enum { X1, X2 }; - X1: [0]u8, - X2: [0]u8, - - pub fn useIt(self: *@This()) bool { - _ = self; - return true; - } - }; - - fn doTheTest() !void { - var u = MyUnion{ .X1 = [0]u8{} }; - try expect(u.useIt()); - } - }; - try S.doTheTest(); - comptime try S.doTheTest(); -} - -test "switching on non exhaustive union" { - const S = struct { - const E = enum(u8) { - a, - b, - _, - }; - const U = union(E) { - a: i32, - b: u32, - }; - fn doTheTest() !void { - var a = U{ .a = 2 }; - switch (a) { - .a => |val| try expect(val == 2), - .b => unreachable, - } - } - }; - try S.doTheTest(); - comptime try S.doTheTest(); -} - -test "containers with single-field enums" { - const S = struct { - const A = union(enum) { f1 }; - const B = union(enum) { f1: void }; - const C = struct { a: A }; - const D = struct { a: B }; - - fn doTheTest() !void { - var array1 = [1]A{A{ .f1 = {} }}; - var array2 = [1]B{B{ .f1 = {} }}; - try expect(array1[0] == .f1); - try expect(array2[0] == .f1); - - var struct1 = C{ .a = A{ .f1 = {} } }; - var struct2 = D{ .a = B{ .f1 = {} } }; - try expect(struct1.a == .f1); - try expect(struct2.a == .f1); - } - }; - - try S.doTheTest(); - comptime try S.doTheTest(); -} - -test 
"@unionInit on union w/ tag but no fields" { - const S = struct { - const Type = enum(u8) { no_op = 105 }; - - const Data = union(Type) { - no_op: void, - - pub fn decode(buf: []const u8) Data { - _ = buf; - return @unionInit(Data, "no_op", {}); - } - }; - - comptime { - std.debug.assert(@sizeOf(Data) != 0); - } - - fn doTheTest() !void { - var data: Data = .{ .no_op = .{} }; - _ = data; - var o = Data.decode(&[_]u8{}); - try expectEqual(Type.no_op, o); - } - }; - - try S.doTheTest(); - comptime try S.doTheTest(); -} - -test "union enum type gets a separate scope" { - const S = struct { - const U = union(enum) { - a: u8, - const foo = 1; - }; - - fn doTheTest() !void { - try expect(!@hasDecl(Tag(U), "foo")); - } - }; - - try S.doTheTest(); -} -test "anytype union field: issue #9233" { - const Baz = union(enum) { bar: anytype }; - _ = Baz; -} diff --git a/test/behavior/union_stage1.zig b/test/behavior/union_stage1.zig new file mode 100644 index 0000000000..086bd981cd --- /dev/null +++ b/test/behavior/union_stage1.zig @@ -0,0 +1,799 @@ +const std = @import("std"); +const expect = std.testing.expect; +const expectEqual = std.testing.expectEqual; +const Tag = std.meta.Tag; + +const Value = union(enum) { + Int: u64, + Array: [9]u8, +}; + +const Agg = struct { + val1: Value, + val2: Value, +}; + +const v1 = Value{ .Int = 1234 }; +const v2 = Value{ .Array = [_]u8{3} ** 9 }; + +const err = @as(anyerror!Agg, Agg{ + .val1 = v1, + .val2 = v2, +}); + +const array = [_]Value{ v1, v2, v1, v2 }; + +test "unions embedded in aggregate types" { + switch (array[1]) { + Value.Array => |arr| try expect(arr[4] == 3), + else => unreachable, + } + switch ((err catch unreachable).val1) { + Value.Int => |x| try expect(x == 1234), + else => unreachable, + } +} + +const Foo = union { + float: f64, + int: i32, +}; + +test "basic unions" { + var foo = Foo{ .int = 1 }; + try expect(foo.int == 1); + foo = Foo{ .float = 12.34 }; + try expect(foo.float == 12.34); +} + +test "comptime union field access" { + comptime { + var foo = Foo{ .int = 0 }; + try expect(foo.int == 0); + + foo = Foo{ .float = 42.42 }; + try expect(foo.float == 42.42); + } +} + +test "init union with runtime value" { + var foo: Foo = undefined; + + setFloat(&foo, 12.34); + try expect(foo.float == 12.34); + + setInt(&foo, 42); + try expect(foo.int == 42); +} + +fn setFloat(foo: *Foo, x: f64) void { + foo.* = Foo{ .float = x }; +} + +fn setInt(foo: *Foo, x: i32) void { + foo.* = Foo{ .int = x }; +} + +const FooExtern = extern union { + float: f64, + int: i32, +}; + +test "basic extern unions" { + var foo = FooExtern{ .int = 1 }; + try expect(foo.int == 1); + foo.float = 12.34; + try expect(foo.float == 12.34); +} + +const Letter = enum { A, B, C }; +const Payload = union(Letter) { + A: i32, + B: f64, + C: bool, +}; + +test "union with specified enum tag" { + try doTest(); + comptime try doTest(); +} + +fn doTest() error{TestUnexpectedResult}!void { + try expect((try bar(Payload{ .A = 1234 })) == -10); +} + +fn bar(value: Payload) error{TestUnexpectedResult}!i32 { + try expect(@as(Letter, value) == Letter.A); + return switch (value) { + Payload.A => |x| return x - 1244, + Payload.B => |x| if (x == 12.34) @as(i32, 20) else 21, + Payload.C => |x| if (x) @as(i32, 30) else 31, + }; +} + +const MultipleChoice = union(enum(u32)) { + A = 20, + B = 40, + C = 60, + D = 1000, +}; +test "simple union(enum(u32))" { + var x = MultipleChoice.C; + try expect(x == MultipleChoice.C); + try expect(@enumToInt(@as(Tag(MultipleChoice), x)) == 60); +} + +const MultipleChoice2 
= union(enum(u32)) { + Unspecified1: i32, + A: f32 = 20, + Unspecified2: void, + B: bool = 40, + Unspecified3: i32, + C: i8 = 60, + Unspecified4: void, + D: void = 1000, + Unspecified5: i32, +}; + +test "union(enum(u32)) with specified and unspecified tag values" { + comptime try expect(Tag(Tag(MultipleChoice2)) == u32); + try testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2{ .C = 123 }); + comptime try testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2{ .C = 123 }); +} + +fn testEnumWithSpecifiedAndUnspecifiedTagValues(x: MultipleChoice2) !void { + try expect(@enumToInt(@as(Tag(MultipleChoice2), x)) == 60); + try expect(1123 == switch (x) { + MultipleChoice2.A => 1, + MultipleChoice2.B => 2, + MultipleChoice2.C => |v| @as(i32, 1000) + v, + MultipleChoice2.D => 4, + MultipleChoice2.Unspecified1 => 5, + MultipleChoice2.Unspecified2 => 6, + MultipleChoice2.Unspecified3 => 7, + MultipleChoice2.Unspecified4 => 8, + MultipleChoice2.Unspecified5 => 9, + }); +} + +const ExternPtrOrInt = extern union { + ptr: *u8, + int: u64, +}; +test "extern union size" { + comptime try expect(@sizeOf(ExternPtrOrInt) == 8); +} + +const PackedPtrOrInt = packed union { + ptr: *u8, + int: u64, +}; +test "extern union size" { + comptime try expect(@sizeOf(PackedPtrOrInt) == 8); +} + +const ZeroBits = union { + OnlyField: void, +}; +test "union with only 1 field which is void should be zero bits" { + comptime try expect(@sizeOf(ZeroBits) == 0); +} + +const TheTag = enum { A, B, C }; +const TheUnion = union(TheTag) { + A: i32, + B: i32, + C: i32, +}; +test "union field access gives the enum values" { + try expect(TheUnion.A == TheTag.A); + try expect(TheUnion.B == TheTag.B); + try expect(TheUnion.C == TheTag.C); +} + +test "cast union to tag type of union" { + try testCastUnionToTag(TheUnion{ .B = 1234 }); + comptime try testCastUnionToTag(TheUnion{ .B = 1234 }); +} + +fn testCastUnionToTag(x: TheUnion) !void { + try expect(@as(TheTag, x) == TheTag.B); +} + +test "cast tag type of union to union" { + var x: Value2 = Letter2.B; + try expect(@as(Letter2, x) == Letter2.B); +} +const Letter2 = enum { A, B, C }; +const Value2 = union(Letter2) { + A: i32, + B, + C, +}; + +test "implicit cast union to its tag type" { + var x: Value2 = Letter2.B; + try expect(x == Letter2.B); + try giveMeLetterB(x); +} +fn giveMeLetterB(x: Letter2) !void { + try expect(x == Value2.B); +} + +// TODO it looks like this test intended to test packed unions, but this is not a packed +// union. go through git history and find out what happened. 
+pub const PackThis = union(enum) { + Invalid: bool, + StringLiteral: u2, +}; + +test "constant packed union" { + try testConstPackedUnion(&[_]PackThis{PackThis{ .StringLiteral = 1 }}); +} + +fn testConstPackedUnion(expected_tokens: []const PackThis) !void { + try expect(expected_tokens[0].StringLiteral == 1); +} + +test "switch on union with only 1 field" { + var r: PartialInst = undefined; + r = PartialInst.Compiled; + switch (r) { + PartialInst.Compiled => { + var z: PartialInstWithPayload = undefined; + z = PartialInstWithPayload{ .Compiled = 1234 }; + switch (z) { + PartialInstWithPayload.Compiled => |x| { + try expect(x == 1234); + return; + }, + } + }, + } + unreachable; +} + +const PartialInst = union(enum) { + Compiled, +}; + +const PartialInstWithPayload = union(enum) { + Compiled: i32, +}; + +test "access a member of tagged union with conflicting enum tag name" { + const Bar = union(enum) { + A: A, + B: B, + + const A = u8; + const B = void; + }; + + comptime try expect(Bar.A == u8); +} + +test "tagged union initialization with runtime void" { + try expect(testTaggedUnionInit({})); +} + +const TaggedUnionWithAVoid = union(enum) { + A, + B: i32, +}; + +fn testTaggedUnionInit(x: anytype) bool { + const y = TaggedUnionWithAVoid{ .A = x }; + return @as(Tag(TaggedUnionWithAVoid), y) == TaggedUnionWithAVoid.A; +} + +pub const UnionEnumNoPayloads = union(enum) { A, B }; + +test "tagged union with no payloads" { + const a = UnionEnumNoPayloads{ .B = {} }; + switch (a) { + Tag(UnionEnumNoPayloads).A => @panic("wrong"), + Tag(UnionEnumNoPayloads).B => {}, + } +} + +test "union with only 1 field casted to its enum type" { + const Literal = union(enum) { + Number: f64, + Bool: bool, + }; + + const Expr = union(enum) { + Literal: Literal, + }; + + var e = Expr{ .Literal = Literal{ .Bool = true } }; + const ExprTag = Tag(Expr); + comptime try expect(Tag(ExprTag) == u0); + var t = @as(ExprTag, e); + try expect(t == Expr.Literal); +} + +test "union with only 1 field casted to its enum type which has enum value specified" { + const Literal = union(enum) { + Number: f64, + Bool: bool, + }; + + const ExprTag = enum(comptime_int) { + Literal = 33, + }; + + const Expr = union(ExprTag) { + Literal: Literal, + }; + + var e = Expr{ .Literal = Literal{ .Bool = true } }; + comptime try expect(Tag(ExprTag) == comptime_int); + var t = @as(ExprTag, e); + try expect(t == Expr.Literal); + try expect(@enumToInt(t) == 33); + comptime try expect(@enumToInt(t) == 33); +} + +test "@enumToInt works on unions" { + const Bar = union(enum) { + A: bool, + B: u8, + C, + }; + + const a = Bar{ .A = true }; + var b = Bar{ .B = undefined }; + var c = Bar.C; + try expect(@enumToInt(a) == 0); + try expect(@enumToInt(b) == 1); + try expect(@enumToInt(c) == 2); +} + +const Attribute = union(enum) { + A: bool, + B: u8, +}; + +fn setAttribute(attr: Attribute) void { + _ = attr; +} + +fn Setter(attr: Attribute) type { + return struct { + fn set() void { + setAttribute(attr); + } + }; +} + +test "comptime union field value equality" { + const a0 = Setter(Attribute{ .A = false }); + const a1 = Setter(Attribute{ .A = true }); + const a2 = Setter(Attribute{ .A = false }); + + const b0 = Setter(Attribute{ .B = 5 }); + const b1 = Setter(Attribute{ .B = 9 }); + const b2 = Setter(Attribute{ .B = 5 }); + + try expect(a0 == a0); + try expect(a1 == a1); + try expect(a0 == a2); + + try expect(b0 == b0); + try expect(b1 == b1); + try expect(b0 == b2); + + try expect(a0 != b0); + try expect(a0 != a1); + try expect(b0 != b1); +} + +test "return 
union init with void payload" { + const S = struct { + fn entry() !void { + try expect(func().state == State.one); + } + const Outer = union(enum) { + state: State, + }; + const State = union(enum) { + one: void, + two: u32, + }; + fn func() Outer { + return Outer{ .state = State{ .one = {} } }; + } + }; + try S.entry(); + comptime try S.entry(); +} + +test "@unionInit can modify a union type" { + const UnionInitEnum = union(enum) { + Boolean: bool, + Byte: u8, + }; + + var value: UnionInitEnum = undefined; + + value = @unionInit(UnionInitEnum, "Boolean", true); + try expect(value.Boolean == true); + value.Boolean = false; + try expect(value.Boolean == false); + + value = @unionInit(UnionInitEnum, "Byte", 2); + try expect(value.Byte == 2); + value.Byte = 3; + try expect(value.Byte == 3); +} + +test "@unionInit can modify a pointer value" { + const UnionInitEnum = union(enum) { + Boolean: bool, + Byte: u8, + }; + + var value: UnionInitEnum = undefined; + var value_ptr = &value; + + value_ptr.* = @unionInit(UnionInitEnum, "Boolean", true); + try expect(value.Boolean == true); + + value_ptr.* = @unionInit(UnionInitEnum, "Byte", 2); + try expect(value.Byte == 2); +} + +test "union no tag with struct member" { + const Struct = struct {}; + const Union = union { + s: Struct, + pub fn foo(self: *@This()) void { + _ = self; + } + }; + var u = Union{ .s = Struct{} }; + u.foo(); +} + +fn testComparison() !void { + var x = Payload{ .A = 42 }; + try expect(x == .A); + try expect(x != .B); + try expect(x != .C); + try expect((x == .B) == false); + try expect((x == .C) == false); + try expect((x != .A) == false); +} + +test "comparison between union and enum literal" { + try testComparison(); + comptime try testComparison(); +} + +test "packed union generates correctly aligned LLVM type" { + const U = packed union { + f1: fn () error{TestUnexpectedResult}!void, + f2: u32, + }; + var foo = [_]U{ + U{ .f1 = doTest }, + U{ .f2 = 0 }, + }; + try foo[0].f1(); +} + +test "union with one member defaults to u0 tag type" { + const U0 = union(enum) { + X: u32, + }; + comptime try expect(Tag(Tag(U0)) == u0); +} + +test "union with comptime_int tag" { + const Union = union(enum(comptime_int)) { + X: u32, + Y: u16, + Z: u8, + }; + comptime try expect(Tag(Tag(Union)) == comptime_int); +} + +test "extern union doesn't trigger field check at comptime" { + const U = extern union { + x: u32, + y: u8, + }; + + const x = U{ .x = 0x55AAAA55 }; + comptime try expect(x.y == 0x55); +} + +const Foo1 = union(enum) { + f: struct { + x: usize, + }, +}; +var glbl: Foo1 = undefined; + +test "global union with single field is correctly initialized" { + glbl = Foo1{ + .f = @typeInfo(Foo1).Union.fields[0].field_type{ .x = 123 }, + }; + try expect(glbl.f.x == 123); +} + +pub const FooUnion = union(enum) { + U0: usize, + U1: u8, +}; + +var glbl_array: [2]FooUnion = undefined; + +test "initialize global array of union" { + glbl_array[1] = FooUnion{ .U1 = 2 }; + glbl_array[0] = FooUnion{ .U0 = 1 }; + try expect(glbl_array[0].U0 == 1); + try expect(glbl_array[1].U1 == 2); +} + +test "anonymous union literal syntax" { + const S = struct { + const Number = union { + int: i32, + float: f64, + }; + + fn doTheTest() !void { + var i: Number = .{ .int = 42 }; + var f = makeNumber(); + try expect(i.int == 42); + try expect(f.float == 12.34); + } + + fn makeNumber() Number { + return .{ .float = 12.34 }; + } + }; + try S.doTheTest(); + comptime try S.doTheTest(); +} + +test "update the tag value for zero-sized unions" { + const S = union(enum) { + 
U0: void, + U1: void, + }; + var x = S{ .U0 = {} }; + try expect(x == .U0); + x = S{ .U1 = {} }; + try expect(x == .U1); +} + +test "function call result coerces from tagged union to the tag" { + const S = struct { + const Arch = union(enum) { + One, + Two: usize, + }; + + const ArchTag = Tag(Arch); + + fn doTheTest() !void { + var x: ArchTag = getArch1(); + try expect(x == .One); + + var y: ArchTag = getArch2(); + try expect(y == .Two); + } + + pub fn getArch1() Arch { + return .One; + } + + pub fn getArch2() Arch { + return .{ .Two = 99 }; + } + }; + try S.doTheTest(); + comptime try S.doTheTest(); +} + +test "0-sized extern union definition" { + const U = extern union { + a: void, + const f = 1; + }; + + try expect(U.f == 1); +} + +test "union initializer generates padding only if needed" { + const U = union(enum) { + A: u24, + }; + + var v = U{ .A = 532 }; + try expect(v.A == 532); +} + +test "runtime tag name with single field" { + const U = union(enum) { + A: i32, + }; + + var v = U{ .A = 42 }; + try expect(std.mem.eql(u8, @tagName(v), "A")); +} + +test "cast from anonymous struct to union" { + const S = struct { + const U = union(enum) { + A: u32, + B: []const u8, + C: void, + }; + fn doTheTest() !void { + var y: u32 = 42; + const t0 = .{ .A = 123 }; + const t1 = .{ .B = "foo" }; + const t2 = .{ .C = {} }; + const t3 = .{ .A = y }; + const x0: U = t0; + var x1: U = t1; + const x2: U = t2; + var x3: U = t3; + try expect(x0.A == 123); + try expect(std.mem.eql(u8, x1.B, "foo")); + try expect(x2 == .C); + try expect(x3.A == y); + } + }; + try S.doTheTest(); + comptime try S.doTheTest(); +} + +test "cast from pointer to anonymous struct to pointer to union" { + const S = struct { + const U = union(enum) { + A: u32, + B: []const u8, + C: void, + }; + fn doTheTest() !void { + var y: u32 = 42; + const t0 = &.{ .A = 123 }; + const t1 = &.{ .B = "foo" }; + const t2 = &.{ .C = {} }; + const t3 = &.{ .A = y }; + const x0: *const U = t0; + var x1: *const U = t1; + const x2: *const U = t2; + var x3: *const U = t3; + try expect(x0.A == 123); + try expect(std.mem.eql(u8, x1.B, "foo")); + try expect(x2.* == .C); + try expect(x3.A == y); + } + }; + try S.doTheTest(); + comptime try S.doTheTest(); +} + +test "method call on an empty union" { + const S = struct { + const MyUnion = union(MyUnionTag) { + pub const MyUnionTag = enum { X1, X2 }; + X1: [0]u8, + X2: [0]u8, + + pub fn useIt(self: *@This()) bool { + _ = self; + return true; + } + }; + + fn doTheTest() !void { + var u = MyUnion{ .X1 = [0]u8{} }; + try expect(u.useIt()); + } + }; + try S.doTheTest(); + comptime try S.doTheTest(); +} + +test "switching on non exhaustive union" { + const S = struct { + const E = enum(u8) { + a, + b, + _, + }; + const U = union(E) { + a: i32, + b: u32, + }; + fn doTheTest() !void { + var a = U{ .a = 2 }; + switch (a) { + .a => |val| try expect(val == 2), + .b => unreachable, + } + } + }; + try S.doTheTest(); + comptime try S.doTheTest(); +} + +test "containers with single-field enums" { + const S = struct { + const A = union(enum) { f1 }; + const B = union(enum) { f1: void }; + const C = struct { a: A }; + const D = struct { a: B }; + + fn doTheTest() !void { + var array1 = [1]A{A{ .f1 = {} }}; + var array2 = [1]B{B{ .f1 = {} }}; + try expect(array1[0] == .f1); + try expect(array2[0] == .f1); + + var struct1 = C{ .a = A{ .f1 = {} } }; + var struct2 = D{ .a = B{ .f1 = {} } }; + try expect(struct1.a == .f1); + try expect(struct2.a == .f1); + } + }; + + try S.doTheTest(); + comptime try S.doTheTest(); +} + +test 
"@unionInit on union w/ tag but no fields" { + const S = struct { + const Type = enum(u8) { no_op = 105 }; + + const Data = union(Type) { + no_op: void, + + pub fn decode(buf: []const u8) Data { + _ = buf; + return @unionInit(Data, "no_op", {}); + } + }; + + comptime { + std.debug.assert(@sizeOf(Data) != 0); + } + + fn doTheTest() !void { + var data: Data = .{ .no_op = .{} }; + _ = data; + var o = Data.decode(&[_]u8{}); + try expectEqual(Type.no_op, o); + } + }; + + try S.doTheTest(); + comptime try S.doTheTest(); +} + +test "union enum type gets a separate scope" { + const S = struct { + const U = union(enum) { + a: u8, + const foo = 1; + }; + + fn doTheTest() !void { + try expect(!@hasDecl(Tag(U), "foo")); + } + }; + + try S.doTheTest(); +} +test "anytype union field: issue #9233" { + const Baz = union(enum) { bar: anytype }; + _ = Baz; +} diff --git a/test/behavior/widening.zig b/test/behavior/widening.zig index efcbab9883..9c1694b368 100644 --- a/test/behavior/widening.zig +++ b/test/behavior/widening.zig @@ -17,3 +17,46 @@ test "implicit unsigned integer to signed integer" { var b: i16 = a; try expect(b == 250); } + +test "float widening" { + if (@import("builtin").zig_is_stage2) { + // This test is passing but it depends on compiler-rt symbols, which + // cannot yet be built with stage2 due to + // "TODO implement equality comparison between a union's tag value and an enum literal" + return error.SkipZigTest; + } + var a: f16 = 12.34; + var b: f32 = a; + var c: f64 = b; + var d: f128 = c; + try expect(a == b); + try expect(b == c); + try expect(c == d); +} + +test "float widening f16 to f128" { + if (@import("builtin").zig_is_stage2) { + // This test is passing but it depends on compiler-rt symbols, which + // cannot yet be built with stage2 due to + // "TODO implement equality comparison between a union's tag value and an enum literal" + return error.SkipZigTest; + } + // TODO https://github.com/ziglang/zig/issues/3282 + if (@import("builtin").stage2_arch == .aarch64) return error.SkipZigTest; + if (@import("builtin").stage2_arch == .powerpc64le) return error.SkipZigTest; + + var x: f16 = 12.34; + var y: f128 = x; + try expect(x == y); +} + +test "cast small unsigned to larger signed" { + try expect(castSmallUnsignedToLargerSigned1(200) == @as(i16, 200)); + try expect(castSmallUnsignedToLargerSigned2(9999) == @as(i64, 9999)); +} +fn castSmallUnsignedToLargerSigned1(x: u8) i16 { + return x; +} +fn castSmallUnsignedToLargerSigned2(x: u16) i64 { + return x; +} diff --git a/test/behavior/widening_stage1.zig b/test/behavior/widening_stage1.zig deleted file mode 100644 index 0cec3988cb..0000000000 --- a/test/behavior/widening_stage1.zig +++ /dev/null @@ -1,34 +0,0 @@ -const std = @import("std"); -const expect = std.testing.expect; -const mem = std.mem; - -test "float widening" { - var a: f16 = 12.34; - var b: f32 = a; - var c: f64 = b; - var d: f128 = c; - try expect(a == b); - try expect(b == c); - try expect(c == d); -} - -test "float widening f16 to f128" { - // TODO https://github.com/ziglang/zig/issues/3282 - if (@import("builtin").stage2_arch == .aarch64) return error.SkipZigTest; - if (@import("builtin").stage2_arch == .powerpc64le) return error.SkipZigTest; - - var x: f16 = 12.34; - var y: f128 = x; - try expect(x == y); -} - -test "cast small unsigned to larger signed" { - try expect(castSmallUnsignedToLargerSigned1(200) == @as(i16, 200)); - try expect(castSmallUnsignedToLargerSigned2(9999) == @as(i64, 9999)); -} -fn castSmallUnsignedToLargerSigned1(x: u8) i16 { - return x; -} 
-fn castSmallUnsignedToLargerSigned2(x: u16) i64 { - return x; -} -- cgit v1.2.3 From 42aa1ea115eca3dcc704eddf020ce87271a41174 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 24 Sep 2021 17:33:06 -0700 Subject: stage2: implement `@memset` and `@memcpy` builtins --- src/Air.zig | 15 +++++ src/AstGen.zig | 16 +++--- src/Liveness.zig | 5 ++ src/Sema.zig | 126 +++++++++++++++++++++++++++++++++++++++--- src/codegen.zig | 12 ++++ src/codegen/c.zig | 46 ++++++++++++++- src/codegen/llvm.zig | 53 ++++++++++++++++++ src/codegen/llvm/bindings.zig | 19 +++++++ src/link/C/zig.h | 1 + src/print_air.zig | 24 ++++++++ src/print_zir.zig | 30 +++++++++- src/type.zig | 73 +++++++++++++++++++++--- test/behavior/basic.zig | 18 ++++++ test/behavior/misc.zig | 10 ---- 14 files changed, 412 insertions(+), 36 deletions(-) (limited to 'src/codegen/c.zig') diff --git a/src/Air.zig b/src/Air.zig index c3181fac60..4341271f3a 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -321,6 +321,19 @@ pub const Inst = struct { /// Uses the `ty_op` field. int_to_float, + /// Given dest ptr, value, and len, set all elements at dest to value. + /// Result type is always void. + /// Uses the `pl_op` field. Operand is the dest ptr. Payload is `Bin`. `lhs` is the + /// value, `rhs` is the length. + /// The element type may be any type, not just u8. + memset, + /// Given dest ptr, src ptr, and len, copy len elements from src to dest. + /// Result type is always void. + /// Uses the `pl_op` field. Operand is the dest ptr. Payload is `Bin`. `lhs` is the + /// src ptr, `rhs` is the length. + /// The element type may be any type, not just u8. + memcpy, + /// Uses the `ty_pl` field with payload `Cmpxchg`. cmpxchg_weak, /// Uses the `ty_pl` field with payload `Cmpxchg`. @@ -628,6 +641,8 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .atomic_store_monotonic, .atomic_store_release, .atomic_store_seq_cst, + .memset, + .memcpy, => return Type.initTag(.void), .ptrtoint, diff --git a/src/AstGen.zig b/src/AstGen.zig index 416584bee9..469e77037a 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -2149,8 +2149,6 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) Inner .field_ptr_type, .field_parent_ptr, .maximum, - .memcpy, - .memset, .minimum, .builtin_async_call, .c_import, @@ -2204,6 +2202,8 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) Inner .set_float_mode, .set_runtime_safety, .closure_capture, + .memcpy, + .memset, => break :b true, } } else switch (maybe_unused_result) { @@ -7576,17 +7576,17 @@ fn builtinCall( }, .memcpy => { const result = try gz.addPlNode(.memcpy, node, Zir.Inst.Memcpy{ - .dest = try expr(gz, scope, .{ .ty = .manyptr_u8_type }, params[0]), - .source = try expr(gz, scope, .{ .ty = .manyptr_const_u8_type }, params[1]), - .byte_count = try expr(gz, scope, .{ .ty = .usize_type }, params[2]), + .dest = try expr(gz, scope, .{ .coerced_ty = .manyptr_u8_type }, params[0]), + .source = try expr(gz, scope, .{ .coerced_ty = .manyptr_const_u8_type }, params[1]), + .byte_count = try expr(gz, scope, .{ .coerced_ty = .usize_type }, params[2]), }); return rvalue(gz, rl, result, node); }, .memset => { const result = try gz.addPlNode(.memset, node, Zir.Inst.Memset{ - .dest = try expr(gz, scope, .{ .ty = .manyptr_u8_type }, params[0]), - .byte = try expr(gz, scope, .{ .ty = .u8_type }, params[1]), - .byte_count = try expr(gz, scope, .{ .ty = .usize_type }, params[2]), + .dest = try expr(gz, scope, .{ .coerced_ty = .manyptr_u8_type }, params[0]), + .byte = try 
expr(gz, scope, .{ .coerced_ty = .u8_type }, params[1]), + .byte_count = try expr(gz, scope, .{ .coerced_ty = .usize_type }, params[2]), }); return rvalue(gz, rl, result, node); }, diff --git a/src/Liveness.zig b/src/Liveness.zig index 4da5eaa284..42ab1ab351 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -361,6 +361,11 @@ fn analyzeInst( const extra = a.air.extraData(Air.AtomicRmw, pl_op.payload).data; return trackOperands(a, new_set, inst, main_tomb, .{ pl_op.operand, extra.operand, .none }); }, + .memset, .memcpy => { + const pl_op = inst_datas[inst].pl_op; + const extra = a.air.extraData(Air.Bin, pl_op.payload).data; + return trackOperands(a, new_set, inst, main_tomb, .{ pl_op.operand, extra.lhs, extra.rhs }); + }, .br => { const br = inst_datas[inst].br; return trackOperands(a, new_set, inst, main_tomb, .{ br.operand, .none, .none }); diff --git a/src/Sema.zig b/src/Sema.zig index 87cde2ca1a..41fabbfacd 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -341,8 +341,6 @@ pub fn analyzeBody( .field_ptr_type => try sema.zirFieldPtrType(block, inst), .field_parent_ptr => try sema.zirFieldParentPtr(block, inst), .maximum => try sema.zirMaximum(block, inst), - .memcpy => try sema.zirMemcpy(block, inst), - .memset => try sema.zirMemset(block, inst), .minimum => try sema.zirMinimum(block, inst), .builtin_async_call => try sema.zirBuiltinAsyncCall(block, inst), .@"resume" => try sema.zirResume(block, inst), @@ -526,6 +524,16 @@ pub fn analyzeBody( i += 1; continue; }, + .memcpy => { + try sema.zirMemcpy(block, inst); + i += 1; + continue; + }, + .memset => { + try sema.zirMemset(block, inst); + i += 1; + continue; + }, // Special case instructions to handle comptime control flow. .@"break" => { @@ -8422,16 +8430,119 @@ fn zirMaximum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErr return sema.mod.fail(&block.base, src, "TODO: Sema.zirMaximum", .{}); } -fn zirMemcpy(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { +fn zirMemcpy(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; + const extra = sema.code.extraData(Zir.Inst.Memcpy, inst_data.payload_index).data; const src = inst_data.src(); - return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemcpy", .{}); + const dest_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; + const src_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; + const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node }; + const dest_ptr = sema.resolveInst(extra.dest); + const dest_ptr_ty = sema.typeOf(dest_ptr); + + if (dest_ptr_ty.zigTypeTag() != .Pointer) { + return sema.mod.fail(&block.base, dest_src, "expected pointer, found '{}'", .{dest_ptr_ty}); + } + if (dest_ptr_ty.isConstPtr()) { + return sema.mod.fail(&block.base, dest_src, "cannot store through const pointer '{}'", .{dest_ptr_ty}); + } + + const uncasted_src_ptr = sema.resolveInst(extra.source); + const uncasted_src_ptr_ty = sema.typeOf(uncasted_src_ptr); + if (uncasted_src_ptr_ty.zigTypeTag() != .Pointer) { + return sema.mod.fail(&block.base, src_src, "expected pointer, found '{}'", .{ + uncasted_src_ptr_ty, + }); + } + const src_ptr_info = uncasted_src_ptr_ty.ptrInfo().data; + const wanted_src_ptr_ty = try Module.ptrType( + sema.arena, + dest_ptr_ty.elemType2(), + null, + src_ptr_info.@"align", + src_ptr_info.@"addrspace", + 0, + 0, + false, + src_ptr_info.@"allowzero", + 
src_ptr_info.@"volatile", + .Many, + ); + const src_ptr = try sema.coerce(block, wanted_src_ptr_ty, uncasted_src_ptr, src_src); + const len = try sema.coerce(block, Type.initTag(.usize), sema.resolveInst(extra.byte_count), len_src); + + const maybe_dest_ptr_val = try sema.resolveDefinedValue(block, dest_src, dest_ptr); + const maybe_src_ptr_val = try sema.resolveDefinedValue(block, src_src, src_ptr); + const maybe_len_val = try sema.resolveDefinedValue(block, len_src, len); + + const runtime_src = if (maybe_dest_ptr_val) |dest_ptr_val| rs: { + if (maybe_src_ptr_val) |src_ptr_val| { + if (maybe_len_val) |len_val| { + _ = dest_ptr_val; + _ = src_ptr_val; + _ = len_val; + return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemcpy at comptime", .{}); + } else break :rs len_src; + } else break :rs src_src; + } else dest_src; + + try sema.requireRuntimeBlock(block, runtime_src); + _ = try block.addInst(.{ + .tag = .memcpy, + .data = .{ .pl_op = .{ + .operand = dest_ptr, + .payload = try sema.addExtra(Air.Bin{ + .lhs = src_ptr, + .rhs = len, + }), + } }, + }); } -fn zirMemset(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { +fn zirMemset(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; + const extra = sema.code.extraData(Zir.Inst.Memset, inst_data.payload_index).data; const src = inst_data.src(); - return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemset", .{}); + const dest_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; + const value_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; + const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node }; + const dest_ptr = sema.resolveInst(extra.dest); + const dest_ptr_ty = sema.typeOf(dest_ptr); + if (dest_ptr_ty.zigTypeTag() != .Pointer) { + return sema.mod.fail(&block.base, dest_src, "expected pointer, found '{}'", .{dest_ptr_ty}); + } + if (dest_ptr_ty.isConstPtr()) { + return sema.mod.fail(&block.base, dest_src, "cannot store through const pointer '{}'", .{dest_ptr_ty}); + } + const elem_ty = dest_ptr_ty.elemType2(); + const value = try sema.coerce(block, elem_ty, sema.resolveInst(extra.byte), value_src); + const len = try sema.coerce(block, Type.initTag(.usize), sema.resolveInst(extra.byte_count), len_src); + + const maybe_dest_ptr_val = try sema.resolveDefinedValue(block, dest_src, dest_ptr); + const maybe_len_val = try sema.resolveDefinedValue(block, len_src, len); + + const runtime_src = if (maybe_dest_ptr_val) |ptr_val| rs: { + if (maybe_len_val) |len_val| { + if (try sema.resolveMaybeUndefVal(block, value_src, value)) |val| { + _ = ptr_val; + _ = len_val; + _ = val; + return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemset at comptime", .{}); + } else break :rs value_src; + } else break :rs len_src; + } else dest_src; + + try sema.requireRuntimeBlock(block, runtime_src); + _ = try block.addInst(.{ + .tag = .memset, + .data = .{ .pl_op = .{ + .operand = dest_ptr, + .payload = try sema.addExtra(Air.Bin{ + .lhs = value, + .rhs = len, + }), + } }, + }); } fn zirMinimum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -10090,7 +10201,8 @@ fn coerceArrayPtrToMany( // The comptime Value representation is compatible with both types. 
return sema.addConstant(dest_type, val); } - return sema.mod.fail(&block.base, inst_src, "TODO implement coerceArrayPtrToMany runtime instruction", .{}); + try sema.requireRuntimeBlock(block, inst_src); + return sema.bitcast(block, dest_type, inst, inst_src); } fn analyzeDeclVal( diff --git a/src/codegen.zig b/src/codegen.zig index f812cbc5d4..102f8d4985 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -887,6 +887,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .cmpxchg_weak => try self.airCmpxchg(inst), .atomic_rmw => try self.airAtomicRmw(inst), .atomic_load => try self.airAtomicLoad(inst), + .memcpy => try self.airMemcpy(inst), + .memset => try self.airMemset(inst), .atomic_store_unordered => try self.airAtomicStore(inst, .Unordered), .atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic), @@ -4883,6 +4885,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return self.fail("TODO implement airAtomicStore for {}", .{self.target.cpu.arch}); } + fn airMemset(self: *Self, inst: Air.Inst.Index) !void { + _ = inst; + return self.fail("TODO implement airMemset for {}", .{self.target.cpu.arch}); + } + + fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void { + _ = inst; + return self.fail("TODO implement airMemcpy for {}", .{self.target.cpu.arch}); + } + fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { // First section of indexes correspond to a set number of constant values. const ref_int = @enumToInt(inst); diff --git a/src/codegen/c.zig b/src/codegen/c.zig index a82f0e57f7..16b13db292 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -953,6 +953,8 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO .cmpxchg_strong => try airCmpxchg(f, inst, "strong"), .atomic_rmw => try airAtomicRmw(f, inst), .atomic_load => try airAtomicLoad(f, inst), + .memset => try airMemset(f, inst), + .memcpy => try airMemcpy(f, inst), .int_to_float, .float_to_int, @@ -2005,8 +2007,12 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { const atomic_load = f.air.instructions.items(.data)[inst].atomic_load; - const inst_ty = f.air.typeOfIndex(inst); const ptr = try f.resolveInst(atomic_load.ptr); + const ptr_ty = f.air.typeOf(atomic_load.ptr); + if (!ptr_ty.isVolatilePtr() and f.liveness.isUnused(inst)) + return CValue.none; + + const inst_ty = f.air.typeOfIndex(inst); const local = try f.allocLocal(inst_ty, .Const); const writer = f.object.writer(); @@ -2036,6 +2042,44 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa return local; } +fn airMemset(f: *Function, inst: Air.Inst.Index) !CValue { + const pl_op = f.air.instructions.items(.data)[inst].pl_op; + const extra = f.air.extraData(Air.Bin, pl_op.payload).data; + const dest_ptr = try f.resolveInst(pl_op.operand); + const value = try f.resolveInst(extra.lhs); + const len = try f.resolveInst(extra.rhs); + const writer = f.object.writer(); + + try writer.writeAll("memset("); + try f.writeCValue(writer, dest_ptr); + try writer.writeAll(", "); + try f.writeCValue(writer, value); + try writer.writeAll(", "); + try f.writeCValue(writer, len); + try writer.writeAll(");\n"); + + return CValue.none; +} + +fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue { + const pl_op = f.air.instructions.items(.data)[inst].pl_op; + const extra = f.air.extraData(Air.Bin, pl_op.payload).data; + const dest_ptr = try f.resolveInst(pl_op.operand); + const src_ptr = try 
f.resolveInst(extra.lhs); + const len = try f.resolveInst(extra.rhs); + const writer = f.object.writer(); + + try writer.writeAll("memcpy("); + try f.writeCValue(writer, dest_ptr); + try writer.writeAll(", "); + try f.writeCValue(writer, src_ptr); + try writer.writeAll(", "); + try f.writeCValue(writer, len); + try writer.writeAll(");\n"); + + return CValue.none; +} + fn toMemoryOrder(order: std.builtin.AtomicOrder) [:0]const u8 { return switch (order) { .Unordered => "memory_order_relaxed", diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index b2b87b97d8..b15834c963 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1279,6 +1279,8 @@ pub const FuncGen = struct { .fence => try self.airFence(inst), .atomic_rmw => try self.airAtomicRmw(inst), .atomic_load => try self.airAtomicLoad(inst), + .memset => try self.airMemset(inst), + .memcpy => try self.airMemcpy(inst), .atomic_store_unordered => try self.airAtomicStore(inst, .Unordered), .atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic), @@ -2426,6 +2428,8 @@ pub const FuncGen = struct { const atomic_load = self.air.instructions.items(.data)[inst].atomic_load; const ptr = try self.resolveInst(atomic_load.ptr); const ptr_ty = self.air.typeOf(atomic_load.ptr); + if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) + return null; const ordering = toLlvmAtomicOrdering(atomic_load.order); const operand_ty = ptr_ty.elemType(); const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false); @@ -2468,6 +2472,55 @@ pub const FuncGen = struct { return null; } + fn airMemset(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + const pl_op = self.air.instructions.items(.data)[inst].pl_op; + const extra = self.air.extraData(Air.Bin, pl_op.payload).data; + const dest_ptr = try self.resolveInst(pl_op.operand); + const ptr_ty = self.air.typeOf(pl_op.operand); + const value = try self.resolveInst(extra.lhs); + const val_is_undef = if (self.air.value(extra.lhs)) |val| val.isUndef() else false; + const len = try self.resolveInst(extra.rhs); + const u8_llvm_ty = self.context.intType(8); + const ptr_u8_llvm_ty = u8_llvm_ty.pointerType(0); + const dest_ptr_u8 = self.builder.buildBitCast(dest_ptr, ptr_u8_llvm_ty, ""); + const fill_char = if (val_is_undef) u8_llvm_ty.constInt(0xaa, .False) else value; + const target = self.dg.module.getTarget(); + const dest_ptr_align = ptr_ty.ptrAlignment(target); + const memset = self.builder.buildMemSet(dest_ptr_u8, fill_char, len, dest_ptr_align); + memset.setVolatile(llvm.Bool.fromBool(ptr_ty.isVolatilePtr())); + + if (val_is_undef and self.dg.module.comp.bin_file.options.valgrind) { + // TODO generate valgrind client request to mark byte range as undefined + // see gen_valgrind_undef() in codegen.cpp + } + return null; + } + + fn airMemcpy(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + const pl_op = self.air.instructions.items(.data)[inst].pl_op; + const extra = self.air.extraData(Air.Bin, pl_op.payload).data; + const dest_ptr = try self.resolveInst(pl_op.operand); + const dest_ptr_ty = self.air.typeOf(pl_op.operand); + const src_ptr = try self.resolveInst(extra.lhs); + const src_ptr_ty = self.air.typeOf(extra.lhs); + const len = try self.resolveInst(extra.rhs); + const u8_llvm_ty = self.context.intType(8); + const ptr_u8_llvm_ty = u8_llvm_ty.pointerType(0); + const dest_ptr_u8 = self.builder.buildBitCast(dest_ptr, ptr_u8_llvm_ty, ""); + const src_ptr_u8 = self.builder.buildBitCast(src_ptr, ptr_u8_llvm_ty, ""); + const is_volatile = 
src_ptr_ty.isVolatilePtr() or dest_ptr_ty.isVolatilePtr(); + const target = self.dg.module.getTarget(); + const memcpy = self.builder.buildMemCpy( + dest_ptr_u8, + dest_ptr_ty.ptrAlignment(target), + src_ptr_u8, + src_ptr_ty.ptrAlignment(target), + len, + ); + memcpy.setVolatile(llvm.Bool.fromBool(is_volatile)); + return null; + } + fn getIntrinsic(self: *FuncGen, name: []const u8) *const llvm.Value { const id = llvm.lookupIntrinsicID(name.ptr, name.len); assert(id != 0); diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig index c53ac08fdd..9d32682260 100644 --- a/src/codegen/llvm/bindings.zig +++ b/src/codegen/llvm/bindings.zig @@ -632,6 +632,25 @@ pub const Builder = opaque { DestTy: *const Type, Name: [*:0]const u8, ) *const Value; + + pub const buildMemSet = LLVMBuildMemSet; + extern fn LLVMBuildMemSet( + B: *const Builder, + Ptr: *const Value, + Val: *const Value, + Len: *const Value, + Align: c_uint, + ) *const Value; + + pub const buildMemCpy = LLVMBuildMemCpy; + extern fn LLVMBuildMemCpy( + B: *const Builder, + Dst: *const Value, + DstAlign: c_uint, + Src: *const Value, + SrcAlign: c_uint, + Size: *const Value, + ) *const Value; }; pub const IntPredicate = enum(c_uint) { diff --git a/src/link/C/zig.h b/src/link/C/zig.h index e19a138c1b..b34068d1f2 100644 --- a/src/link/C/zig.h +++ b/src/link/C/zig.h @@ -126,6 +126,7 @@ #define int128_t __int128 #define uint128_t unsigned __int128 ZIG_EXTERN_C void *memcpy (void *ZIG_RESTRICT, const void *ZIG_RESTRICT, size_t); +ZIG_EXTERN_C void *memset (void *, int, size_t); static inline uint8_t zig_addw_u8(uint8_t lhs, uint8_t rhs, uint8_t max) { uint8_t thresh = max - rhs; diff --git a/src/print_air.zig b/src/print_air.zig index a9ad993eb0..fa384baae0 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -202,6 +202,8 @@ const Writer = struct { .atomic_store_release => try w.writeAtomicStore(s, inst, .Release), .atomic_store_seq_cst => try w.writeAtomicStore(s, inst, .SeqCst), .atomic_rmw => try w.writeAtomicRmw(s, inst), + .memcpy => try w.writeMemcpy(s, inst), + .memset => try w.writeMemset(s, inst), } } @@ -322,6 +324,28 @@ const Writer = struct { try s.print(", {s}, {s}", .{ @tagName(extra.op()), @tagName(extra.ordering()) }); } + fn writeMemset(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + const pl_op = w.air.instructions.items(.data)[inst].pl_op; + const extra = w.air.extraData(Air.Bin, pl_op.payload).data; + + try w.writeOperand(s, inst, 0, pl_op.operand); + try s.writeAll(", "); + try w.writeOperand(s, inst, 1, extra.lhs); + try s.writeAll(", "); + try w.writeOperand(s, inst, 2, extra.rhs); + } + + fn writeMemcpy(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + const pl_op = w.air.instructions.items(.data)[inst].pl_op; + const extra = w.air.extraData(Air.Bin, pl_op.payload).data; + + try w.writeOperand(s, inst, 0, pl_op.operand); + try s.writeAll(", "); + try w.writeOperand(s, inst, 1, extra.lhs); + try s.writeAll(", "); + try w.writeOperand(s, inst, 2, extra.rhs); + } + fn writeConstant(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const ty_pl = w.air.instructions.items(.data)[inst].ty_pl; const val = w.air.values[ty_pl.payload]; diff --git a/src/print_zir.zig b/src/print_zir.zig index 9350fd0de3..6ae218ed22 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -210,8 +210,6 @@ const Writer = struct { .mul_add, .builtin_call, .field_parent_ptr, - .memcpy, - .memset, .builtin_async_call, => try self.writePlNode(stream, inst), @@ 
-222,6 +220,8 @@ const Writer = struct { .cmpxchg_strong, .cmpxchg_weak => try self.writeCmpxchg(stream, inst), .atomic_store => try self.writeAtomicStore(stream, inst), .atomic_rmw => try self.writeAtomicRmw(stream, inst), + .memcpy => try self.writeMemcpy(stream, inst), + .memset => try self.writeMemset(stream, inst), .struct_init_anon, .struct_init_anon_ref, @@ -692,6 +692,32 @@ const Writer = struct { try self.writeSrc(stream, inst_data.src()); } + fn writeMemcpy(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void { + const inst_data = self.code.instructions.items(.data)[inst].pl_node; + const extra = self.code.extraData(Zir.Inst.Memcpy, inst_data.payload_index).data; + + try self.writeInstRef(stream, extra.dest); + try stream.writeAll(", "); + try self.writeInstRef(stream, extra.source); + try stream.writeAll(", "); + try self.writeInstRef(stream, extra.byte_count); + try stream.writeAll(") "); + try self.writeSrc(stream, inst_data.src()); + } + + fn writeMemset(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void { + const inst_data = self.code.instructions.items(.data)[inst].pl_node; + const extra = self.code.extraData(Zir.Inst.Memset, inst_data.payload_index).data; + + try self.writeInstRef(stream, extra.dest); + try stream.writeAll(", "); + try self.writeInstRef(stream, extra.byte); + try stream.writeAll(", "); + try self.writeInstRef(stream, extra.byte_count); + try stream.writeAll(") "); + try self.writeSrc(stream, inst_data.src()); + } + fn writeStructInitAnon(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void { const inst_data = self.code.instructions.items(.data)[inst].pl_node; const extra = self.code.extraData(Zir.Inst.StructInitAnon, inst_data.payload_index); diff --git a/src/type.zig b/src/type.zig index a2da252f61..0381111345 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2391,12 +2391,11 @@ pub const Type = extern union { }; } - /// Asserts the type is a pointer or array type. - pub fn elemType(self: Type) Type { - return switch (self.tag()) { - .vector => self.castTag(.vector).?.data.elem_type, - .array => self.castTag(.array).?.data.elem_type, - .array_sentinel => self.castTag(.array_sentinel).?.data.elem_type, + pub fn childType(ty: Type) Type { + return switch (ty.tag()) { + .vector => ty.castTag(.vector).?.data.elem_type, + .array => ty.castTag(.array).?.data.elem_type, + .array_sentinel => ty.castTag(.array_sentinel).?.data.elem_type, .single_const_pointer, .single_mut_pointer, .many_const_pointer, @@ -2405,7 +2404,48 @@ pub const Type = extern union { .c_mut_pointer, .const_slice, .mut_slice, - => self.castPointer().?.data, + => ty.castPointer().?.data, + + .array_u8, + .array_u8_sentinel_0, + .const_slice_u8, + .manyptr_u8, + .manyptr_const_u8, + => Type.initTag(.u8), + + .single_const_pointer_to_comptime_int => Type.initTag(.comptime_int), + .pointer => ty.castTag(.pointer).?.data.pointee_type, + + else => unreachable, + }; + } + + /// Asserts the type is a pointer or array type. + /// TODO this is deprecated in favor of `childType`. + pub const elemType = childType; + + /// For *[N]T, returns T. + /// For ?*T, returns T. + /// For ?*[N]T, returns T. + /// For ?[*]T, returns T. + /// For *T, returns T. + /// For [*]T, returns T. 
+ pub fn elemType2(ty: Type) Type { + return switch (ty.tag()) { + .vector => ty.castTag(.vector).?.data.elem_type, + .array => ty.castTag(.array).?.data.elem_type, + .array_sentinel => ty.castTag(.array_sentinel).?.data.elem_type, + .many_const_pointer, + .many_mut_pointer, + .c_const_pointer, + .c_mut_pointer, + .const_slice, + .mut_slice, + => ty.castPointer().?.data, + + .single_const_pointer, + .single_mut_pointer, + => ty.castPointer().?.data.shallowElemType(), .array_u8, .array_u8_sentinel_0, @@ -2415,12 +2455,29 @@ pub const Type = extern union { => Type.initTag(.u8), .single_const_pointer_to_comptime_int => Type.initTag(.comptime_int), - .pointer => self.castTag(.pointer).?.data.pointee_type, + .pointer => { + const info = ty.castTag(.pointer).?.data; + const child_ty = info.pointee_type; + if (info.size == .One) { + return child_ty.shallowElemType(); + } else { + return child_ty; + } + }, + + // TODO handle optionals else => unreachable, }; } + fn shallowElemType(child_ty: Type) Type { + return switch (child_ty.zigTypeTag()) { + .Array, .Vector => child_ty.childType(), + else => child_ty, + }; + } + /// Asserts that the type is an optional. /// Resulting `Type` will have inner memory referencing `buf`. pub fn optionalChild(self: Type, buf: *Payload.ElemType) Type { diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig index 517162c8d4..f6876e29ad 100644 --- a/test/behavior/basic.zig +++ b/test/behavior/basic.zig @@ -170,3 +170,21 @@ test "string concatenation" { test "array mult operator" { try expect(mem.eql(u8, "ab" ** 5, "ababababab")); } + +test "memcpy and memset intrinsics" { + try testMemcpyMemset(); + // TODO add comptime test coverage + //comptime try testMemcpyMemset(); +} + +fn testMemcpyMemset() !void { + var foo: [20]u8 = undefined; + var bar: [20]u8 = undefined; + + @memset(&foo, 'A', foo.len); + @memcpy(&bar, &foo, bar.len); + + try expect(bar[0] == 'A'); + try expect(bar[11] == 'A'); + try expect(bar[19] == 'A'); +} diff --git a/test/behavior/misc.zig b/test/behavior/misc.zig index 5394e6fd14..9b3bf48366 100644 --- a/test/behavior/misc.zig +++ b/test/behavior/misc.zig @@ -5,16 +5,6 @@ const expectEqualStrings = std.testing.expectEqualStrings; const mem = std.mem; const builtin = @import("builtin"); -test "memcpy and memset intrinsics" { - var foo: [20]u8 = undefined; - var bar: [20]u8 = undefined; - - @memset(&foo, 'A', foo.len); - @memcpy(&bar, &foo, bar.len); - - if (bar[11] != 'A') unreachable; -} - test "slicing" { var array: [20]i32 = undefined; -- cgit v1.2.3 From c0aa4a1a42b3e0d312bd274799be67d60a1c0238 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 27 Sep 2021 19:48:42 -0700 Subject: stage2: implement basic unions * AIR instructions struct_field_ptr and related functions now are also emitted by the frontend for unions. Backends must inspect the type of the pointer operand to lower the instructions correctly. - These will be renamed to `agg_field_ptr` (short for "aggregate") in the future. * Introduce the new `set_union_tag` AIR instruction. * Introduce `Module.EnumNumbered` and associated `Type` methods. This is for enums which have no decls, but do have the possibility of overriding the integer tag type and tag values. * Sema: Implement support for union tag types in both the auto-generated and explicitly-provided cases, as well as explicitly provided enum tag values in union declarations. * LLVM backend: implement lowering union types, union field pointer instructions, and the new `set_union_tag` instruction. 
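A minimal sketch of the kind of code this change targets (MyTag, MyUnion, and
the test body below are illustrative only and are not part of this diff):

    const std = @import("std");
    const expect = std.testing.expect;

    // A union with an explicitly provided integer tag type and explicit
    // tag values, the case described above for Sema.
    const MyTag = enum(u8) { int = 10, boolean = 20 };
    const MyUnion = union(MyTag) {
        int: i32,
        boolean: bool,
    };

    test "change the active field of a tagged union at runtime" {
        var runtime_true: bool = true;
        var v = MyUnion{ .int = 42 };
        // Re-initializing with a different, runtime-known payload changes
        // both the payload and the tag.
        v = MyUnion{ .boolean = runtime_true };
        try expect(v == .boolean);
        try expect(@enumToInt(@as(MyTag, v)) == 20);
    }

Which AIR instructions such an assignment actually lowers to (for example a
union field pointer plus `set_union_tag`) depends on the backend and on how
Sema resolves the result location; the sketch only illustrates the
surface-language behavior.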
--- src/Air.zig | 14 +- src/Liveness.zig | 1 + src/Module.zig | 107 +++++++++++++- src/Sema.zig | 320 +++++++++++++++++++++++++++++++++-------- src/codegen.zig | 9 ++ src/codegen/c.zig | 16 +++ src/codegen/llvm.zig | 80 ++++++++++- src/print_air.zig | 1 + src/type.zig | 170 +++++++++++----------- test/behavior/union.zig | 12 ++ test/behavior/union_stage1.zig | 7 - 11 files changed, 576 insertions(+), 161 deletions(-) (limited to 'src/codegen/c.zig') diff --git a/src/Air.zig b/src/Air.zig index 4341271f3a..40070dccfb 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -270,19 +270,26 @@ pub const Inst = struct { /// wrap from E to E!T /// Uses the `ty_op` field. wrap_errunion_err, - /// Given a pointer to a struct and a field index, returns a pointer to the field. + /// Given a pointer to a struct or union and a field index, returns a pointer to the field. /// Uses the `ty_pl` field, payload is `StructField`. + /// TODO rename to `agg_field_ptr`. struct_field_ptr, - /// Given a pointer to a struct, returns a pointer to the field. + /// Given a pointer to a struct or union, returns a pointer to the field. /// The field index is the number at the end of the name. /// Uses `ty_op` field. + /// TODO rename to `agg_field_ptr_index_X` struct_field_ptr_index_0, struct_field_ptr_index_1, struct_field_ptr_index_2, struct_field_ptr_index_3, - /// Given a byval struct and a field index, returns the field byval. + /// Given a byval struct or union and a field index, returns the field byval. /// Uses the `ty_pl` field, payload is `StructField`. + /// TODO rename to `agg_field_val` struct_field_val, + /// Given a pointer to a tagged union, set its tag to the provided value. + /// Result type is always void. + /// Uses the `bin_op` field. LHS is union pointer, RHS is new tag value. + set_union_tag, /// Given a slice value, return the length. /// Result type is always usize. /// Uses the `ty_op` field. @@ -643,6 +650,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .atomic_store_seq_cst, .memset, .memcpy, + .set_union_tag, => return Type.initTag(.void), .ptrtoint, diff --git a/src/Liveness.zig b/src/Liveness.zig index 42ab1ab351..9a7126d135 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -256,6 +256,7 @@ fn analyzeInst( .atomic_store_monotonic, .atomic_store_release, .atomic_store_seq_cst, + .set_union_tag, => { const o = inst_datas[inst].bin_op; return trackOperands(a, new_set, inst, main_tomb, .{ o.lhs, o.rhs, .none }); diff --git a/src/Module.zig b/src/Module.zig index dbece09255..83bbbb6366 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -859,6 +859,36 @@ pub const EnumSimple = struct { } }; +/// Represents the data that an enum declaration provides, when there are no +/// declarations. However an integer tag type is provided, and the enum tag values +/// are explicitly provided. +pub const EnumNumbered = struct { + /// The Decl that corresponds to the enum itself. + owner_decl: *Decl, + /// An integer type which is used for the numerical value of the enum. + /// Whether zig chooses this type or the user specifies it, it is stored here. + tag_ty: Type, + /// Set of field names in declaration order. + fields: NameMap, + /// Maps integer tag value to field index. + /// Entries are in declaration order, same as `fields`. + /// If this hash map is empty, it means the enum tags are auto-numbered. + values: ValueMap, + /// Offset from `owner_decl`, points to the enum decl AST node. 
+ node_offset: i32, + + pub const NameMap = EnumFull.NameMap; + pub const ValueMap = EnumFull.ValueMap; + + pub fn srcLoc(self: EnumNumbered) SrcLoc { + return .{ + .file_scope = self.owner_decl.getFileScope(), + .parent_decl_node = self.owner_decl.src_node, + .lazy = .{ .node_offset = self.node_offset }, + }; + } +}; + /// Represents the data that an enum declaration provides, when there is /// at least one tag value explicitly specified, or at least one declaration. pub const EnumFull = struct { @@ -868,16 +898,17 @@ pub const EnumFull = struct { /// Whether zig chooses this type or the user specifies it, it is stored here. tag_ty: Type, /// Set of field names in declaration order. - fields: std.StringArrayHashMapUnmanaged(void), + fields: NameMap, /// Maps integer tag value to field index. /// Entries are in declaration order, same as `fields`. /// If this hash map is empty, it means the enum tags are auto-numbered. values: ValueMap, - /// Represents the declarations inside this struct. + /// Represents the declarations inside this enum. namespace: Scope.Namespace, /// Offset from `owner_decl`, points to the enum decl AST node. node_offset: i32, + pub const NameMap = std.StringArrayHashMapUnmanaged(void); pub const ValueMap = std.ArrayHashMapUnmanaged(Value, void, Value.ArrayHashContext, false); pub fn srcLoc(self: EnumFull) SrcLoc { @@ -933,6 +964,44 @@ pub const Union = struct { .lazy = .{ .node_offset = self.node_offset }, }; } + + pub fn haveFieldTypes(u: Union) bool { + return switch (u.status) { + .none, + .field_types_wip, + => false, + .have_field_types, + .layout_wip, + .have_layout, + => true, + }; + } + + pub fn onlyTagHasCodegenBits(u: Union) bool { + assert(u.haveFieldTypes()); + for (u.fields.values()) |field| { + if (field.ty.hasCodeGenBits()) return false; + } + return true; + } + + pub fn mostAlignedField(u: Union, target: Target) u32 { + assert(u.haveFieldTypes()); + var most_alignment: u64 = 0; + var most_index: usize = undefined; + for (u.fields.values()) |field, i| { + if (!field.ty.hasCodeGenBits()) continue; + const field_align = if (field.abi_align.tag() == .abi_align_default) + field.ty.abiAlignment(target) + else + field.abi_align.toUnsignedInt(); + if (field_align > most_alignment) { + most_alignment = field_align; + most_index = i; + } + } + return @intCast(u32, most_index); + } }; /// Some Fn struct memory is owned by the Decl's TypedValue.Managed arena allocator. 
@@ -1543,6 +1612,40 @@ pub const Scope = struct { }); } + pub fn addStructFieldPtr( + block: *Block, + struct_ptr: Air.Inst.Ref, + field_index: u32, + ptr_field_ty: Type, + ) !Air.Inst.Ref { + const ty = try block.sema.addType(ptr_field_ty); + const tag: Air.Inst.Tag = switch (field_index) { + 0 => .struct_field_ptr_index_0, + 1 => .struct_field_ptr_index_1, + 2 => .struct_field_ptr_index_2, + 3 => .struct_field_ptr_index_3, + else => { + return block.addInst(.{ + .tag = .struct_field_ptr, + .data = .{ .ty_pl = .{ + .ty = ty, + .payload = try block.sema.addExtra(Air.StructField{ + .struct_operand = struct_ptr, + .field_index = @intCast(u32, field_index), + }), + } }, + }); + }, + }; + return block.addInst(.{ + .tag = tag, + .data = .{ .ty_op = .{ + .ty = ty, + .operand = struct_ptr, + } }, + }); + } + pub fn addInst(block: *Block, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Ref { return Air.indexToRef(try block.addInstAsIndex(inst)); } diff --git a/src/Sema.zig b/src/Sema.zig index 533252d682..f076389797 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1625,7 +1625,7 @@ fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr if (block.is_comptime) { return sema.analyzeComptimeAlloc(block, var_type); } - try sema.validateVarType(block, ty_src, var_type); + try sema.validateVarType(block, ty_src, var_type, false); const ptr_type = try Type.ptr(sema.arena, .{ .pointee_type = var_type, .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local), @@ -1711,7 +1711,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde const peer_inst_list = inferred_alloc.data.stored_inst_list.items; const final_elem_ty = try sema.resolvePeerTypes(block, ty_src, peer_inst_list, .none); if (var_is_mut) { - try sema.validateVarType(block, ty_src, final_elem_ty); + try sema.validateVarType(block, ty_src, final_elem_ty, false); } // Change it to a normal alloc. 
const final_ptr_ty = try Type.ptr(sema.arena, .{ @@ -1730,19 +1730,82 @@ fn zirValidateStructInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Ind const tracy = trace(@src()); defer tracy.end(); - const gpa = sema.gpa; - const mod = sema.mod; const validate_inst = sema.code.instructions.items(.data)[inst].pl_node; - const struct_init_src = validate_inst.src(); + const init_src = validate_inst.src(); const validate_extra = sema.code.extraData(Zir.Inst.Block, validate_inst.payload_index); const instrs = sema.code.extra[validate_extra.end..][0..validate_extra.data.body_len]; + const field_ptr_data = sema.code.instructions.items(.data)[instrs[0]].pl_node; + const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; + const object_ptr = sema.resolveInst(field_ptr_extra.lhs); + const agg_ty = sema.typeOf(object_ptr).elemType(); + switch (agg_ty.zigTypeTag()) { + .Struct => return sema.validateStructInitPtr( + block, + agg_ty.castTag(.@"struct").?.data, + init_src, + instrs, + ), + .Union => return sema.validateUnionInitPtr( + block, + agg_ty.cast(Type.Payload.Union).?.data, + init_src, + instrs, + object_ptr, + ), + else => unreachable, + } +} - const struct_obj: *Module.Struct = s: { - const field_ptr_data = sema.code.instructions.items(.data)[instrs[0]].pl_node; - const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; - const object_ptr = sema.resolveInst(field_ptr_extra.lhs); - break :s sema.typeOf(object_ptr).elemType().castTag(.@"struct").?.data; - }; +fn validateUnionInitPtr( + sema: *Sema, + block: *Scope.Block, + union_obj: *Module.Union, + init_src: LazySrcLoc, + instrs: []const Zir.Inst.Index, + union_ptr: Air.Inst.Ref, +) CompileError!void { + const mod = sema.mod; + + if (instrs.len != 1) { + // TODO add note for other field + // TODO add note for union declared here + return mod.fail(&block.base, init_src, "only one union field can be active at once", .{}); + } + + const field_ptr = instrs[0]; + const field_ptr_data = sema.code.instructions.items(.data)[field_ptr].pl_node; + const field_src: LazySrcLoc = .{ .node_offset_back2tok = field_ptr_data.src_node }; + const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; + const field_name = sema.code.nullTerminatedString(field_ptr_extra.field_name_start); + const field_index_big = union_obj.fields.getIndex(field_name) orelse + return sema.failWithBadUnionFieldAccess(block, union_obj, field_src, field_name); + const field_index = @intCast(u32, field_index_big); + + // TODO here we need to go back and see if we need to convert the union + // to a comptime-known value. This will involve editing the AIR code we have + // generated so far - in particular deleting some runtime pointer bitcast + // instructions which are not actually needed if the initialization expression + // ends up being comptime-known. + + // Otherwise, we set the new union tag now. + const new_tag = try sema.addConstant( + union_obj.tag_ty, + try Value.Tag.enum_field_index.create(sema.arena, field_index), + ); + + try sema.requireRuntimeBlock(block, init_src); + _ = try block.addBinOp(.set_union_tag, union_ptr, new_tag); +} + +fn validateStructInitPtr( + sema: *Sema, + block: *Scope.Block, + struct_obj: *Module.Struct, + init_src: LazySrcLoc, + instrs: []const Zir.Inst.Index, +) CompileError!void { + const gpa = sema.gpa; + const mod = sema.mod; // Maps field index to field_ptr index of where it was already initialized. 
const found_fields = try gpa.alloc(Zir.Inst.Index, struct_obj.fields.count()); @@ -1781,9 +1844,9 @@ fn zirValidateStructInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Ind const template = "missing struct field: {s}"; const args = .{field_name}; if (root_msg) |msg| { - try mod.errNote(&block.base, struct_init_src, msg, template, args); + try mod.errNote(&block.base, init_src, msg, template, args); } else { - root_msg = try mod.errMsg(&block.base, struct_init_src, template, args); + root_msg = try mod.errMsg(&block.base, init_src, template, args); } } if (root_msg) |msg| { @@ -8037,7 +8100,7 @@ fn checkAtomicOperandType( const max_atomic_bits = target_util.largestAtomicBits(target); const int_ty = switch (ty.zigTypeTag()) { .Int => ty, - .Enum => ty.enumTagType(&buffer), + .Enum => ty.intTagType(&buffer), .Float => { const bit_count = ty.floatBits(target); if (bit_count > max_atomic_bits) { @@ -8621,11 +8684,7 @@ fn zirVarExtended( return sema.failWithNeededComptime(block, init_src); } else Value.initTag(.unreachable_value); - if (!var_ty.isValidVarType(small.is_extern)) { - return sema.mod.fail(&block.base, mut_src, "variable of type '{}' must be const", .{ - var_ty, - }); - } + try sema.validateVarType(block, mut_src, var_ty, small.is_extern); if (lib_name != null) { // Look at the sema code for functions which has this logic, it just needs to @@ -8810,9 +8869,54 @@ fn requireIntegerType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, ty: Typ } } -fn validateVarType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, ty: Type) !void { - if (!ty.isValidVarType(false)) { - return sema.mod.fail(&block.base, src, "variable of type '{}' must be const or comptime", .{ty}); +/// Emit a compile error if type cannot be used for a runtime variable. 
+fn validateVarType( + sema: *Sema, + block: *Scope.Block, + src: LazySrcLoc, + var_ty: Type, + is_extern: bool, +) CompileError!void { + var ty = var_ty; + const ok: bool = while (true) switch (ty.zigTypeTag()) { + .Bool, + .Int, + .Float, + .ErrorSet, + .Enum, + .Frame, + .AnyFrame, + => break true, + + .BoundFn, + .ComptimeFloat, + .ComptimeInt, + .EnumLiteral, + .NoReturn, + .Type, + .Void, + .Undefined, + .Null, + => break false, + + .Opaque => break is_extern, + + .Optional => { + var buf: Type.Payload.ElemType = undefined; + const child_ty = ty.optionalChild(&buf); + return validateVarType(sema, block, src, child_ty, is_extern); + }, + .Pointer, .Array, .Vector => ty = ty.elemType(), + .ErrorUnion => ty = ty.errorUnionPayload(), + + .Fn => @panic("TODO fn validateVarType"), + .Struct, .Union => { + const resolved_ty = try sema.resolveTypeFields(block, src, ty); + break !resolved_ty.requiresComptime(); + }, + } else unreachable; // TODO should not need else unreachable + if (!ok) { + return sema.mod.fail(&block.base, src, "variable of type '{}' must be const or comptime", .{var_ty}); } } @@ -9393,8 +9497,9 @@ fn structFieldPtr( const struct_ty = try sema.resolveTypeFields(block, src, unresolved_struct_ty); const struct_obj = struct_ty.castTag(.@"struct").?.data; - const field_index = struct_obj.fields.getIndex(field_name) orelse + const field_index_big = struct_obj.fields.getIndex(field_name) orelse return sema.failWithBadFieldAccess(block, struct_obj, field_name_src, field_name); + const field_index = @intCast(u32, field_index_big); const field = struct_obj.fields.values()[field_index]; const ptr_field_ty = try Type.ptr(arena, .{ .pointee_type = field.ty, @@ -9413,31 +9518,7 @@ fn structFieldPtr( } try sema.requireRuntimeBlock(block, src); - const tag: Air.Inst.Tag = switch (field_index) { - 0 => .struct_field_ptr_index_0, - 1 => .struct_field_ptr_index_1, - 2 => .struct_field_ptr_index_2, - 3 => .struct_field_ptr_index_3, - else => { - return block.addInst(.{ - .tag = .struct_field_ptr, - .data = .{ .ty_pl = .{ - .ty = try sema.addType(ptr_field_ty), - .payload = try sema.addExtra(Air.StructField{ - .struct_operand = struct_ptr, - .field_index = @intCast(u32, field_index), - }), - } }, - }); - }, - }; - return block.addInst(.{ - .tag = tag, - .data = .{ .ty_op = .{ - .ty = try sema.addType(ptr_field_ty), - .operand = struct_ptr, - } }, - }); + return block.addStructFieldPtr(struct_ptr, field_index, ptr_field_ty); } fn structFieldVal( @@ -9487,7 +9568,6 @@ fn unionFieldPtr( field_name_src: LazySrcLoc, unresolved_union_ty: Type, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; const arena = sema.arena; assert(unresolved_union_ty.zigTypeTag() == .Union); @@ -9495,8 +9575,9 @@ fn unionFieldPtr( const union_ty = try sema.resolveTypeFields(block, src, unresolved_union_ty); const union_obj = union_ty.cast(Type.Payload.Union).?.data; - const field_index = union_obj.fields.getIndex(field_name) orelse + const field_index_big = union_obj.fields.getIndex(field_name) orelse return sema.failWithBadUnionFieldAccess(block, union_obj, field_name_src, field_name); + const field_index = @intCast(u32, field_index_big); const field = union_obj.fields.values()[field_index]; const ptr_field_ty = try Type.ptr(arena, .{ @@ -9517,7 +9598,7 @@ fn unionFieldPtr( } try sema.requireRuntimeBlock(block, src); - return mod.fail(&block.base, src, "TODO implement runtime union field access", .{}); + return block.addStructFieldPtr(union_ptr, field_index, ptr_field_ty); } fn unionFieldVal( @@ -11160,6 +11241,28 
@@ fn analyzeUnionFields( if (body.len != 0) { _ = try sema.analyzeBody(block, body); } + var int_tag_ty: Type = undefined; + var enum_field_names: ?*Module.EnumNumbered.NameMap = null; + var enum_value_map: ?*Module.EnumNumbered.ValueMap = null; + if (tag_type_ref != .none) { + const provided_ty = try sema.resolveType(block, src, tag_type_ref); + if (small.auto_enum_tag) { + // The provided type is an integer type and we must construct the enum tag type here. + int_tag_ty = provided_ty; + union_obj.tag_ty = try sema.generateUnionTagTypeNumbered(block, fields_len, provided_ty); + enum_field_names = &union_obj.tag_ty.castTag(.enum_numbered).?.data.fields; + enum_value_map = &union_obj.tag_ty.castTag(.enum_numbered).?.data.values; + } else { + // The provided type is the enum tag type. + union_obj.tag_ty = provided_ty; + } + } else { + // If auto_enum_tag is false, this is an untagged union. However, for semantic analysis + // purposes, we still auto-generate an enum tag type the same way. That the union is + // untagged is represented by the Type tag (union vs union_tagged). + union_obj.tag_ty = try sema.generateUnionTagTypeSimple(block, fields_len); + enum_field_names = &union_obj.tag_ty.castTag(.enum_simple).?.data.fields; + } const bits_per_field = 4; const fields_per_u32 = 32 / bits_per_field; @@ -11198,12 +11301,25 @@ fn analyzeUnionFields( break :blk align_ref; } else .none; - if (has_tag) { + const tag_ref: Zir.Inst.Ref = if (has_tag) blk: { + const tag_ref = @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]); extra_index += 1; + break :blk tag_ref; + } else .none; + + if (enum_value_map) |map| { + const tag_src = src; // TODO better source location + const coerced = try sema.coerce(block, int_tag_ty, tag_ref, tag_src); + const val = try sema.resolveConstValue(block, tag_src, coerced); + map.putAssumeCapacityContext(val, {}, .{ .ty = int_tag_ty }); } // This string needs to outlive the ZIR code. const field_name = try decl_arena.allocator.dupe(u8, field_name_zir); + if (enum_field_names) |set| { + set.putAssumeCapacity(field_name, {}); + } + const field_ty: Type = if (field_type_ref == .none) Type.initTag(.void) else @@ -11225,11 +11341,84 @@ fn analyzeUnionFields( // But only resolve the source location if we need to emit a compile error. 
const abi_align_val = (try sema.resolveInstConst(block, src, align_ref)).val; gop.value_ptr.abi_align = try abi_align_val.copy(&decl_arena.allocator); + } else { + gop.value_ptr.abi_align = Value.initTag(.abi_align_default); } } +} + +fn generateUnionTagTypeNumbered( + sema: *Sema, + block: *Scope.Block, + fields_len: u32, + int_ty: Type, +) !Type { + const mod = sema.mod; + + var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); + errdefer new_decl_arena.deinit(); - // TODO resolve the union tag_type_ref - _ = tag_type_ref; + const enum_obj = try new_decl_arena.allocator.create(Module.EnumNumbered); + const enum_ty_payload = try new_decl_arena.allocator.create(Type.Payload.EnumNumbered); + enum_ty_payload.* = .{ + .base = .{ .tag = .enum_numbered }, + .data = enum_obj, + }; + const enum_ty = Type.initPayload(&enum_ty_payload.base); + const enum_val = try Value.Tag.ty.create(&new_decl_arena.allocator, enum_ty); + // TODO better type name + const new_decl = try mod.createAnonymousDecl(&block.base, .{ + .ty = Type.initTag(.type), + .val = enum_val, + }); + new_decl.owns_tv = true; + errdefer sema.mod.deleteAnonDecl(&block.base, new_decl); + + enum_obj.* = .{ + .owner_decl = new_decl, + .tag_ty = int_ty, + .fields = .{}, + .values = .{}, + .node_offset = 0, + }; + // Here we pre-allocate the maps using the decl arena. + try enum_obj.fields.ensureTotalCapacity(&new_decl_arena.allocator, fields_len); + try enum_obj.values.ensureTotalCapacityContext(&new_decl_arena.allocator, fields_len, .{ .ty = int_ty }); + try new_decl.finalizeNewArena(&new_decl_arena); + return enum_ty; +} + +fn generateUnionTagTypeSimple(sema: *Sema, block: *Scope.Block, fields_len: u32) !Type { + const mod = sema.mod; + + var new_decl_arena = std.heap.ArenaAllocator.init(sema.gpa); + errdefer new_decl_arena.deinit(); + + const enum_obj = try new_decl_arena.allocator.create(Module.EnumSimple); + const enum_ty_payload = try new_decl_arena.allocator.create(Type.Payload.EnumSimple); + enum_ty_payload.* = .{ + .base = .{ .tag = .enum_simple }, + .data = enum_obj, + }; + const enum_ty = Type.initPayload(&enum_ty_payload.base); + const enum_val = try Value.Tag.ty.create(&new_decl_arena.allocator, enum_ty); + // TODO better type name + const new_decl = try mod.createAnonymousDecl(&block.base, .{ + .ty = Type.initTag(.type), + .val = enum_val, + }); + new_decl.owns_tv = true; + errdefer sema.mod.deleteAnonDecl(&block.base, new_decl); + + enum_obj.* = .{ + .owner_decl = new_decl, + .fields = .{}, + .node_offset = 0, + }; + // Here we pre-allocate the maps using the decl arena. 
+ try enum_obj.fields.ensureTotalCapacity(&new_decl_arena.allocator, fields_len); + try new_decl.finalizeNewArena(&new_decl_arena); + return enum_ty; } fn getBuiltin( @@ -11367,11 +11556,28 @@ fn typeHasOnePossibleValue( } return Value.initTag(.empty_struct_value); }, + .enum_numbered => { + const resolved_ty = try sema.resolveTypeFields(block, src, ty); + const enum_obj = resolved_ty.castTag(.enum_numbered).?.data; + if (enum_obj.fields.count() == 1) { + if (enum_obj.values.count() == 0) { + return Value.initTag(.zero); // auto-numbered + } else { + return enum_obj.values.keys()[0]; + } + } else { + return null; + } + }, .enum_full => { const resolved_ty = try sema.resolveTypeFields(block, src, ty); - const enum_full = resolved_ty.castTag(.enum_full).?.data; - if (enum_full.fields.count() == 1) { - return enum_full.values.keys()[0]; + const enum_obj = resolved_ty.castTag(.enum_full).?.data; + if (enum_obj.fields.count() == 1) { + if (enum_obj.values.count() == 0) { + return Value.initTag(.zero); // auto-numbered + } else { + return enum_obj.values.keys()[0]; + } } else { return null; } diff --git a/src/codegen.zig b/src/codegen.zig index 102f8d4985..6a605edca9 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -889,6 +889,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .atomic_load => try self.airAtomicLoad(inst), .memcpy => try self.airMemcpy(inst), .memset => try self.airMemset(inst), + .set_union_tag => try self.airSetUnionTag(inst), .atomic_store_unordered => try self.airAtomicStore(inst, .Unordered), .atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic), @@ -1543,6 +1544,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } + fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const result: MCValue = switch (arch) { + else => return self.fail("TODO implement airSetUnionTag for {}", .{self.target.cpu.arch}), + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); + } + fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_index: Liveness.OperandInt, mcv: MCValue) bool { if (!self.liveness.operandDies(inst, op_index)) return false; diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 16b13db292..fc0c86b8f1 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -955,6 +955,7 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO .atomic_load => try airAtomicLoad(f, inst), .memset => try airMemset(f, inst), .memcpy => try airMemcpy(f, inst), + .set_union_tag => try airSetUnionTag(f, inst), .int_to_float, .float_to_int, @@ -2080,6 +2081,21 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index) !CValue { return CValue.none; } +fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { + const bin_op = f.air.instructions.items(.data)[inst].bin_op; + const union_ptr = try f.resolveInst(bin_op.lhs); + const new_tag = try f.resolveInst(bin_op.rhs); + const writer = f.object.writer(); + + try writer.writeAll("*"); + try f.writeCValue(writer, union_ptr); + try writer.writeAll(" = "); + try f.writeCValue(writer, new_tag); + try writer.writeAll(";\n"); + + return CValue.none; +} + fn toMemoryOrder(order: std.builtin.AtomicOrder) [:0]const u8 { return switch (order) { .Unordered => "memory_order_relaxed", diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index b15834c963..ab164b5d91 100644 --- a/src/codegen/llvm.zig +++ 
b/src/codegen/llvm.zig @@ -735,7 +735,7 @@ pub const DeclGen = struct { }, .Enum => { var buffer: Type.Payload.Bits = undefined; - const int_ty = t.enumTagType(&buffer); + const int_ty = t.intTagType(&buffer); const bit_count = int_ty.intInfo(self.module.getTarget()).bits; return self.context.intType(bit_count); }, @@ -812,6 +812,29 @@ pub const DeclGen = struct { .False, ); }, + .Union => { + const union_obj = t.castTag(.@"union").?.data; + assert(union_obj.haveFieldTypes()); + + const enum_tag_ty = union_obj.tag_ty; + const enum_tag_llvm_ty = try self.llvmType(enum_tag_ty); + + if (union_obj.onlyTagHasCodegenBits()) { + return enum_tag_llvm_ty; + } + + const target = self.module.getTarget(); + const most_aligned_field_index = union_obj.mostAlignedField(target); + const most_aligned_field = union_obj.fields.values()[most_aligned_field_index]; + // TODO handle when the most aligned field is different than the + // biggest sized field. + + const llvm_fields = [_]*const llvm.Type{ + try self.llvmType(most_aligned_field.ty), + enum_tag_llvm_ty, + }; + return self.context.structType(&llvm_fields, llvm_fields.len, .False); + }, .Fn => { const ret_ty = try self.llvmType(t.fnReturnType()); const params_len = t.fnParamLen(); @@ -840,7 +863,6 @@ pub const DeclGen = struct { .BoundFn => @panic("TODO remove BoundFn from the language"), - .Union, .Opaque, .Frame, .AnyFrame, @@ -1131,7 +1153,7 @@ pub const DeclGen = struct { var buffer: Type.Payload.Bits = undefined; const int_ty = switch (ty.zigTypeTag()) { .Int => ty, - .Enum => ty.enumTagType(&buffer), + .Enum => ty.intTagType(&buffer), .Float => { if (!is_rmw_xchg) return null; return dg.context.intType(@intCast(c_uint, ty.abiSize(target) * 8)); @@ -1281,6 +1303,7 @@ pub const FuncGen = struct { .atomic_load => try self.airAtomicLoad(inst), .memset => try self.airMemset(inst), .memcpy => try self.airMemcpy(inst), + .set_union_tag => try self.airSetUnionTag(inst), .atomic_store_unordered => try self.airAtomicStore(inst, .Unordered), .atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic), @@ -1381,7 +1404,7 @@ pub const FuncGen = struct { const int_ty = switch (operand_ty.zigTypeTag()) { .Enum => blk: { var buffer: Type.Payload.Bits = undefined; - const int_ty = operand_ty.enumTagType(&buffer); + const int_ty = operand_ty.intTagType(&buffer); break :blk int_ty; }, .Int, .Bool, .Pointer, .ErrorSet => operand_ty, @@ -1660,8 +1683,9 @@ pub const FuncGen = struct { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data; const struct_ptr = try self.resolveInst(struct_field.struct_operand); + const struct_ptr_ty = self.air.typeOf(struct_field.struct_operand); const field_index = @intCast(c_uint, struct_field.field_index); - return self.builder.buildStructGEP(struct_ptr, field_index, ""); + return self.fieldPtr(inst, struct_ptr, struct_ptr_ty, field_index); } fn airStructFieldPtrIndex(self: *FuncGen, inst: Air.Inst.Index, field_index: c_uint) !?*const llvm.Value { @@ -1670,7 +1694,8 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const struct_ptr = try self.resolveInst(ty_op.operand); - return self.builder.buildStructGEP(struct_ptr, field_index, ""); + const struct_ptr_ty = self.air.typeOf(ty_op.operand); + return self.fieldPtr(inst, struct_ptr, struct_ptr_ty, field_index); } fn airStructFieldVal(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { @@ -2521,6 +2546,49 @@ pub const FuncGen = struct { return 
null; } + fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const union_ptr = try self.resolveInst(bin_op.lhs); + // TODO handle when onlyTagHasCodegenBits() == true + const new_tag = try self.resolveInst(bin_op.rhs); + const tag_field_ptr = self.builder.buildStructGEP(union_ptr, 1, ""); + + _ = self.builder.buildStore(new_tag, tag_field_ptr); + return null; + } + + fn fieldPtr( + self: *FuncGen, + inst: Air.Inst.Index, + struct_ptr: *const llvm.Value, + struct_ptr_ty: Type, + field_index: c_uint, + ) !?*const llvm.Value { + const struct_ty = struct_ptr_ty.childType(); + switch (struct_ty.zigTypeTag()) { + .Struct => return self.builder.buildStructGEP(struct_ptr, field_index, ""), + .Union => return self.unionFieldPtr(inst, struct_ptr, struct_ty, field_index), + else => unreachable, + } + } + + fn unionFieldPtr( + self: *FuncGen, + inst: Air.Inst.Index, + union_ptr: *const llvm.Value, + union_ty: Type, + field_index: c_uint, + ) !?*const llvm.Value { + const union_obj = union_ty.cast(Type.Payload.Union).?.data; + const field = &union_obj.fields.values()[field_index]; + const result_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst)); + if (!field.ty.hasCodeGenBits()) { + return null; + } + const union_field_ptr = self.builder.buildStructGEP(union_ptr, 0, ""); + return self.builder.buildBitCast(union_field_ptr, result_llvm_ty, ""); + } + fn getIntrinsic(self: *FuncGen, name: []const u8) *const llvm.Value { const id = llvm.lookupIntrinsicID(name.ptr, name.len); assert(id != 0); diff --git a/src/print_air.zig b/src/print_air.zig index fa384baae0..e735d03bd3 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -130,6 +130,7 @@ const Writer = struct { .ptr_ptr_elem_val, .shl, .shr, + .set_union_tag, => try w.writeBinOp(s, inst), .is_null, diff --git a/src/type.zig b/src/type.zig index 48c65c1008..bb798959f4 100644 --- a/src/type.zig +++ b/src/type.zig @@ -124,6 +124,7 @@ pub const Type = extern union { .enum_full, .enum_nonexhaustive, .enum_simple, + .enum_numbered, .atomic_order, .atomic_rmw_op, .calling_convention, @@ -874,6 +875,7 @@ pub const Type = extern union { .@"struct" => return self.copyPayloadShallow(allocator, Payload.Struct), .@"union", .union_tagged => return self.copyPayloadShallow(allocator, Payload.Union), .enum_simple => return self.copyPayloadShallow(allocator, Payload.EnumSimple), + .enum_numbered => return self.copyPayloadShallow(allocator, Payload.EnumNumbered), .enum_full, .enum_nonexhaustive => return self.copyPayloadShallow(allocator, Payload.EnumFull), .@"opaque" => return self.copyPayloadShallow(allocator, Payload.Opaque), } @@ -958,6 +960,10 @@ pub const Type = extern union { const enum_simple = ty.castTag(.enum_simple).?.data; return enum_simple.owner_decl.renderFullyQualifiedName(writer); }, + .enum_numbered => { + const enum_numbered = ty.castTag(.enum_numbered).?.data; + return enum_numbered.owner_decl.renderFullyQualifiedName(writer); + }, .@"opaque" => { // TODO use declaration name return writer.writeAll("opaque {}"); @@ -1268,6 +1274,7 @@ pub const Type = extern union { .@"union", .union_tagged, .enum_simple, + .enum_numbered, .enum_full, .enum_nonexhaustive, => false, // TODO some of these should be `true` depending on their child types @@ -1421,7 +1428,7 @@ pub const Type = extern union { const enum_simple = self.castTag(.enum_simple).?.data; return enum_simple.fields.count() >= 2; }, - .enum_nonexhaustive => { + .enum_numbered, .enum_nonexhaustive 
=> { var buffer: Payload.Bits = undefined; const int_tag_ty = self.intTagType(&buffer); return int_tag_ty.hasCodeGenBits(); @@ -1682,7 +1689,7 @@ pub const Type = extern union { assert(biggest != 0); return biggest; }, - .enum_full, .enum_nonexhaustive, .enum_simple => { + .enum_full, .enum_nonexhaustive, .enum_simple, .enum_numbered => { var buffer: Payload.Bits = undefined; const int_tag_ty = self.intTagType(&buffer); return int_tag_ty.abiAlignment(target); @@ -1781,7 +1788,7 @@ pub const Type = extern union { } return size; }, - .enum_simple, .enum_full, .enum_nonexhaustive => { + .enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => { var buffer: Payload.Bits = undefined; const int_tag_ty = self.intTagType(&buffer); return int_tag_ty.abiSize(target); @@ -1948,7 +1955,7 @@ pub const Type = extern union { .@"struct" => { @panic("TODO bitSize struct"); }, - .enum_simple, .enum_full, .enum_nonexhaustive => { + .enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => { var buffer: Payload.Bits = undefined; const int_tag_ty = self.intTagType(&buffer); return int_tag_ty.bitSize(target); @@ -2094,23 +2101,6 @@ pub const Type = extern union { }; } - /// Asserts the type is an enum. - pub fn intTagType(self: Type, buffer: *Payload.Bits) Type { - switch (self.tag()) { - .enum_full, .enum_nonexhaustive => return self.cast(Payload.EnumFull).?.data.tag_ty, - .enum_simple => { - const enum_simple = self.castTag(.enum_simple).?.data; - const bits = std.math.log2_int_ceil(usize, enum_simple.fields.count()); - buffer.* = .{ - .base = .{ .tag = .int_unsigned }, - .data = bits, - }; - return Type.initPayload(&buffer.base); - }, - else => unreachable, - } - } - pub fn isSinglePointer(self: Type) bool { return switch (self.tag()) { .single_const_pointer, @@ -2363,48 +2353,6 @@ pub const Type = extern union { } } - /// Returns if type can be used for a runtime variable - pub fn isValidVarType(self: Type, is_extern: bool) bool { - var ty = self; - while (true) switch (ty.zigTypeTag()) { - .Bool, - .Int, - .Float, - .ErrorSet, - .Enum, - .Frame, - .AnyFrame, - => return true, - - .Opaque => return is_extern, - .BoundFn, - .ComptimeFloat, - .ComptimeInt, - .EnumLiteral, - .NoReturn, - .Type, - .Void, - .Undefined, - .Null, - => return false, - - .Optional => { - var buf: Payload.ElemType = undefined; - return ty.optionalChild(&buf).isValidVarType(is_extern); - }, - .Pointer, .Array, .Vector => ty = ty.elemType(), - .ErrorUnion => ty = ty.errorUnionPayload(), - - .Fn => @panic("TODO fn isValidVarType"), - .Struct => { - // TODO this is not always correct; introduce lazy value mechanism - // and here we need to force a resolve of "type requires comptime". - return true; - }, - .Union => @panic("TODO union isValidVarType"), - }; - } - pub fn childType(ty: Type) Type { return switch (ty.tag()) { .vector => ty.castTag(.vector).?.data.elem_type, @@ -2530,6 +2478,15 @@ pub const Type = extern union { } } + /// Returns the tag type of a union, if the type is a union and it has a tag type. + /// Otherwise, returns `null`. + pub fn unionTagType(ty: Type) ?Type { + return switch (ty.tag()) { + .union_tagged => ty.castTag(.union_tagged).?.data.tag_ty, + else => null, + }; + } + /// Asserts that the type is an error union. 
pub fn errorUnionPayload(self: Type) Type { return switch (self.tag()) { @@ -3000,6 +2957,7 @@ pub const Type = extern union { } }, .enum_nonexhaustive => ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty, + .enum_numbered => ty = ty.castTag(.enum_numbered).?.data.tag_ty, .@"union" => { return null; // TODO }, @@ -3114,31 +3072,21 @@ pub const Type = extern union { } } - /// Returns the integer tag type of the enum. - pub fn enumTagType(ty: Type, buffer: *Payload.Bits) Type { - switch (ty.tag()) { - .enum_full, .enum_nonexhaustive => { - const enum_full = ty.cast(Payload.EnumFull).?.data; - return enum_full.tag_ty; - }, + /// Asserts the type is an enum or a union. + /// TODO support unions + pub fn intTagType(self: Type, buffer: *Payload.Bits) Type { + switch (self.tag()) { + .enum_full, .enum_nonexhaustive => return self.cast(Payload.EnumFull).?.data.tag_ty, + .enum_numbered => return self.castTag(.enum_numbered).?.data.tag_ty, .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; + const enum_simple = self.castTag(.enum_simple).?.data; + const bits = std.math.log2_int_ceil(usize, enum_simple.fields.count()); buffer.* = .{ .base = .{ .tag = .int_unsigned }, - .data = std.math.log2_int_ceil(usize, enum_simple.fields.count()), + .data = bits, }; return Type.initPayload(&buffer.base); }, - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .float_mode, - .reduce_op, - .call_options, - .export_options, - .extern_options, - => @panic("TODO resolve std.builtin types"), - else => unreachable, } } @@ -3156,10 +3104,8 @@ pub const Type = extern union { const enum_full = ty.cast(Payload.EnumFull).?.data; return enum_full.fields.count(); }, - .enum_simple => { - const enum_simple = ty.castTag(.enum_simple).?.data; - return enum_simple.fields.count(); - }, + .enum_simple => return ty.castTag(.enum_simple).?.data.fields.count(), + .enum_numbered => return ty.castTag(.enum_numbered).?.data.fields.count(), .atomic_order, .atomic_rmw_op, .calling_convention, @@ -3185,6 +3131,10 @@ pub const Type = extern union { const enum_simple = ty.castTag(.enum_simple).?.data; return enum_simple.fields.keys()[field_index]; }, + .enum_numbered => { + const enum_numbered = ty.castTag(.enum_numbered).?.data; + return enum_numbered.fields.keys()[field_index]; + }, .atomic_order, .atomic_rmw_op, .calling_convention, @@ -3209,6 +3159,10 @@ pub const Type = extern union { const enum_simple = ty.castTag(.enum_simple).?.data; return enum_simple.fields.getIndex(field_name); }, + .enum_numbered => { + const enum_numbered = ty.castTag(.enum_numbered).?.data; + return enum_numbered.fields.getIndex(field_name); + }, .atomic_order, .atomic_rmw_op, .calling_convention, @@ -3252,6 +3206,15 @@ pub const Type = extern union { return enum_full.values.getIndexContext(enum_tag, .{ .ty = tag_ty }); } }, + .enum_numbered => { + const enum_obj = ty.castTag(.enum_numbered).?.data; + const tag_ty = enum_obj.tag_ty; + if (enum_obj.values.count() == 0) { + return S.fieldWithRange(tag_ty, enum_tag, enum_obj.fields.count()); + } else { + return enum_obj.values.getIndexContext(enum_tag, .{ .ty = tag_ty }); + } + }, .enum_simple => { const enum_simple = ty.castTag(.enum_simple).?.data; const fields_len = enum_simple.fields.count(); @@ -3303,6 +3266,7 @@ pub const Type = extern union { const enum_full = ty.cast(Payload.EnumFull).?.data; return enum_full.srcLoc(); }, + .enum_numbered => return ty.castTag(.enum_numbered).?.data.srcLoc(), .enum_simple => { const enum_simple = ty.castTag(.enum_simple).?.data; return 
enum_simple.srcLoc(); @@ -3340,6 +3304,7 @@ pub const Type = extern union { const enum_full = ty.cast(Payload.EnumFull).?.data; return enum_full.owner_decl; }, + .enum_numbered => return ty.castTag(.enum_numbered).?.data.owner_decl, .enum_simple => { const enum_simple = ty.castTag(.enum_simple).?.data; return enum_simple.owner_decl; @@ -3397,6 +3362,15 @@ pub const Type = extern union { return enum_full.values.containsContext(int, .{ .ty = tag_ty }); } }, + .enum_numbered => { + const enum_obj = ty.castTag(.enum_numbered).?.data; + const tag_ty = enum_obj.tag_ty; + if (enum_obj.values.count() == 0) { + return S.intInRange(tag_ty, int, enum_obj.fields.count()); + } else { + return enum_obj.values.containsContext(int, .{ .ty = tag_ty }); + } + }, .enum_simple => { const enum_simple = ty.castTag(.enum_simple).?.data; const fields_len = enum_simple.fields.count(); @@ -3534,6 +3508,7 @@ pub const Type = extern union { @"union", union_tagged, enum_simple, + enum_numbered, enum_full, enum_nonexhaustive, @@ -3642,6 +3617,7 @@ pub const Type = extern union { .@"union", .union_tagged => Payload.Union, .enum_full, .enum_nonexhaustive => Payload.EnumFull, .enum_simple => Payload.EnumSimple, + .enum_numbered => Payload.EnumNumbered, .empty_struct => Payload.ContainerScope, }; } @@ -3818,6 +3794,11 @@ pub const Type = extern union { base: Payload = .{ .tag = .enum_simple }, data: *Module.EnumSimple, }; + + pub const EnumNumbered = struct { + base: Payload = .{ .tag = .enum_numbered }, + data: *Module.EnumNumbered, + }; }; pub fn ptr(arena: *Allocator, d: Payload.Pointer.Data) !Type { @@ -3850,6 +3831,23 @@ pub const Type = extern union { }; return Type.initPayload(&type_payload.base); } + + pub fn smallestUnsignedInt(arena: *Allocator, max: u64) !Type { + const bits = bits: { + if (max == 0) break :bits 0; + const base = std.math.log2(max); + const upper = (@as(u64, 1) << base) - 1; + break :bits base + @boolToInt(upper < max); + }; + return switch (bits) { + 1 => initTag(.u1), + 8 => initTag(.u8), + 16 => initTag(.u16), + 32 => initTag(.u32), + 64 => initTag(.u64), + else => return Tag.int_unsigned.create(arena, bits), + }; + } }; pub const CType = enum { diff --git a/test/behavior/union.zig b/test/behavior/union.zig index 14b5e374dd..6b8705e044 100644 --- a/test/behavior/union.zig +++ b/test/behavior/union.zig @@ -2,3 +2,15 @@ const std = @import("std"); const expect = std.testing.expect; const expectEqual = std.testing.expectEqual; const Tag = std.meta.Tag; + +const Foo = union { + float: f64, + int: i32, +}; + +test "basic unions" { + var foo = Foo{ .int = 1 }; + try expect(foo.int == 1); + foo = Foo{ .float = 12.34 }; + try expect(foo.float == 12.34); +} diff --git a/test/behavior/union_stage1.zig b/test/behavior/union_stage1.zig index 086bd981cd..5741858d51 100644 --- a/test/behavior/union_stage1.zig +++ b/test/behavior/union_stage1.zig @@ -39,13 +39,6 @@ const Foo = union { int: i32, }; -test "basic unions" { - var foo = Foo{ .int = 1 }; - try expect(foo.int == 1); - foo = Foo{ .float = 12.34 }; - try expect(foo.float == 12.34); -} - test "comptime union field access" { comptime { var foo = Foo{ .int = 0 }; -- cgit v1.2.3 From 09e1f37cb6a8e0df4c521f4b76eab07f0c811852 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 27 Sep 2021 23:11:00 -0700 Subject: stage2: implement union coercion to its own tag * AIR: add `get_union_tag` instruction - implement in LLVM backend * Sema: implement == and != for union and enum literal - Also implement coercion from union to its own tag type * Value: 
implement hashing for union values The motivating example is this snippet: comptime assert(@typeInfo(T) == .Float); This was the next blocker for stage2 building compiler-rt. Now it is switch at compile-time on an integer. --- src/Air.zig | 4 +++ src/Liveness.zig | 1 + src/Sema.zig | 82 ++++++++++++++++++++++++++++++++++++------ src/codegen.zig | 9 +++++ src/codegen/c.zig | 17 +++++++++ src/codegen/llvm.zig | 13 +++++++ src/print_air.zig | 1 + src/type.zig | 8 +++++ src/value.zig | 15 +++++++- test/behavior/union.zig | 18 ++++++++++ test/behavior/union_stage1.zig | 27 +++----------- 11 files changed, 162 insertions(+), 33 deletions(-) (limited to 'src/codegen/c.zig') diff --git a/src/Air.zig b/src/Air.zig index 40070dccfb..b4552f9d7b 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -290,6 +290,9 @@ pub const Inst = struct { /// Result type is always void. /// Uses the `bin_op` field. LHS is union pointer, RHS is new tag value. set_union_tag, + /// Given a tagged union value, get its tag value. + /// Uses the `ty_op` field. + get_union_tag, /// Given a slice value, return the length. /// Result type is always usize. /// Uses the `ty_op` field. @@ -630,6 +633,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .array_to_slice, .float_to_int, .int_to_float, + .get_union_tag, => return air.getRefType(datas[inst].ty_op.ty), .loop, diff --git a/src/Liveness.zig b/src/Liveness.zig index 9a7126d135..a9ff586aeb 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -297,6 +297,7 @@ fn analyzeInst( .array_to_slice, .float_to_int, .int_to_float, + .get_union_tag, => { const o = inst_datas[inst].ty_op; return trackOperands(a, new_set, inst, main_tomb, .{ o.operand, .none, .none }); diff --git a/src/Sema.zig b/src/Sema.zig index f076389797..b669cdb979 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1349,7 +1349,13 @@ fn zirUnionDecl( errdefer new_decl_arena.deinit(); const union_obj = try new_decl_arena.allocator.create(Module.Union); - const union_ty = try Type.Tag.@"union".create(&new_decl_arena.allocator, union_obj); + const type_tag: Type.Tag = if (small.has_tag_type or small.auto_enum_tag) .union_tagged else .@"union"; + const union_payload = try new_decl_arena.allocator.create(Type.Payload.Union); + union_payload.* = .{ + .base = .{ .tag = type_tag }, + .data = union_obj, + }; + const union_ty = Type.initPayload(&union_payload.base); const union_val = try Value.Tag.ty.create(&new_decl_arena.allocator, union_ty); const type_name = try sema.createTypeName(block, small.name_strategy); const new_decl = try sema.mod.createAnonymousDeclNamed(&block.base, .{ @@ -6477,10 +6483,11 @@ fn zirCmpEq( const non_null_type = if (lhs_ty_tag == .Null) rhs_ty else lhs_ty; return mod.fail(&block.base, src, "comparison of '{}' with null", .{non_null_type}); } - if (((lhs_ty_tag == .EnumLiteral and rhs_ty_tag == .Union) or - (rhs_ty_tag == .EnumLiteral and lhs_ty_tag == .Union))) - { - return mod.fail(&block.base, src, "TODO implement equality comparison between a union's tag value and an enum literal", .{}); + if (lhs_ty_tag == .EnumLiteral and rhs_ty_tag == .Union) { + return sema.analyzeCmpUnionTag(block, rhs, rhs_src, lhs, lhs_src, op); + } + if (rhs_ty_tag == .EnumLiteral and lhs_ty_tag == .Union) { + return sema.analyzeCmpUnionTag(block, lhs, lhs_src, rhs, rhs_src, op); } if (lhs_ty_tag == .ErrorSet and rhs_ty_tag == .ErrorSet) { const runtime_src: LazySrcLoc = src: { @@ -6521,6 +6528,28 @@ fn zirCmpEq( return sema.analyzeCmp(block, src, lhs, rhs, op, lhs_src, rhs_src, true); } +fn analyzeCmpUnionTag( 
+ sema: *Sema, + block: *Scope.Block, + un: Air.Inst.Ref, + un_src: LazySrcLoc, + tag: Air.Inst.Ref, + tag_src: LazySrcLoc, + op: std.math.CompareOperator, +) CompileError!Air.Inst.Ref { + const union_ty = sema.typeOf(un); + const union_tag_ty = union_ty.unionTagType() orelse { + // TODO note at declaration site that says "union foo is not tagged" + return sema.mod.fail(&block.base, un_src, "comparison of union and enum literal is only valid for tagged union types", .{}); + }; + // Coerce both the union and the tag to the union's tag type, and then execute the + // enum comparison codepath. + const coerced_tag = try sema.coerce(block, union_tag_ty, tag, tag_src); + const coerced_union = try sema.coerce(block, union_tag_ty, un, un_src); + + return sema.cmpSelf(block, coerced_union, coerced_tag, op, un_src, tag_src); +} + /// Only called for non-equality operators. See also `zirCmpEq`. fn zirCmp( sema: *Sema, @@ -6567,10 +6596,21 @@ fn analyzeCmp( @tagName(op), resolved_type, }); } - const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); + return sema.cmpSelf(block, casted_lhs, casted_rhs, op, lhs_src, rhs_src); +} +fn cmpSelf( + sema: *Sema, + block: *Scope.Block, + casted_lhs: Air.Inst.Ref, + casted_rhs: Air.Inst.Ref, + op: std.math.CompareOperator, + lhs_src: LazySrcLoc, + rhs_src: LazySrcLoc, +) CompileError!Air.Inst.Ref { + const resolved_type = sema.typeOf(casted_lhs); const runtime_src: LazySrcLoc = src: { if (try sema.resolveMaybeUndefVal(block, lhs_src, casted_lhs)) |lhs_val| { if (lhs_val.isUndef()) return sema.addConstUndef(resolved_type); @@ -9919,9 +9959,9 @@ fn coerce( } } }, - .Enum => { - // enum literal to enum - if (inst_ty.zigTypeTag() == .EnumLiteral) { + .Enum => switch (inst_ty.zigTypeTag()) { + .EnumLiteral => { + // enum literal to enum const val = try sema.resolveConstValue(block, inst_src, inst); const bytes = val.castTag(.enum_literal).?.data; const resolved_dest_type = try sema.resolveTypeFields(block, inst_src, dest_type); @@ -9948,7 +9988,15 @@ fn coerce( resolved_dest_type, try Value.Tag.enum_field_index.create(arena, @intCast(u32, field_index)), ); - } + }, + .Union => blk: { + // union to its own tag type + const union_tag_ty = inst_ty.unionTagType() orelse break :blk; + if (union_tag_ty.eql(dest_type)) { + return sema.unionToTag(block, dest_type, inst, inst_src); + } + }, + else => {}, }, .ErrorUnion => { // T to E!T or E to E!T @@ -10802,6 +10850,20 @@ fn wrapErrorUnion( } } +fn unionToTag( + sema: *Sema, + block: *Scope.Block, + dest_type: Type, + un: Air.Inst.Ref, + un_src: LazySrcLoc, +) !Air.Inst.Ref { + if (try sema.resolveMaybeUndefVal(block, un_src, un)) |un_val| { + return sema.addConstant(dest_type, un_val.unionTag()); + } + try sema.requireRuntimeBlock(block, un_src); + return block.addTyOp(.get_union_tag, dest_type, un); +} + fn resolvePeerTypes( sema: *Sema, block: *Scope.Block, diff --git a/src/codegen.zig b/src/codegen.zig index 6a605edca9..4eda3f2594 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -890,6 +890,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .memcpy => try self.airMemcpy(inst), .memset => try self.airMemset(inst), .set_union_tag => try self.airSetUnionTag(inst), + .get_union_tag => try self.airGetUnionTag(inst), .atomic_store_unordered => try self.airAtomicStore(inst, .Unordered), .atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic), @@ -1552,6 +1553,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) 
type { return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } + fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { + else => return self.fail("TODO implement airGetUnionTag for {}", .{self.target.cpu.arch}), + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + } + fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_index: Liveness.OperandInt, mcv: MCValue) bool { if (!self.liveness.operandDies(inst, op_index)) return false; diff --git a/src/codegen/c.zig b/src/codegen/c.zig index fc0c86b8f1..a6534b1eba 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -956,6 +956,7 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO .memset => try airMemset(f, inst), .memcpy => try airMemcpy(f, inst), .set_union_tag => try airSetUnionTag(f, inst), + .get_union_tag => try airGetUnionTag(f, inst), .int_to_float, .float_to_int, @@ -2096,6 +2097,22 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { return CValue.none; } +fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { + if (f.liveness.isUnused(inst)) + return CValue.none; + + const inst_ty = f.air.typeOfIndex(inst); + const local = try f.allocLocal(inst_ty, .Const); + const ty_op = f.air.instructions.items(.data)[inst].ty_op; + const writer = f.object.writer(); + const operand = try f.resolveInst(ty_op.operand); + + try writer.writeAll("get_union_tag("); + try f.writeCValue(writer, operand); + try writer.writeAll(");\n"); + return local; +} + fn toMemoryOrder(order: std.builtin.AtomicOrder) [:0]const u8 { return switch (order) { .Unordered => "memory_order_relaxed", diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index ab164b5d91..4a0d218ead 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1304,6 +1304,7 @@ pub const FuncGen = struct { .memset => try self.airMemset(inst), .memcpy => try self.airMemcpy(inst), .set_union_tag => try self.airSetUnionTag(inst), + .get_union_tag => try self.airGetUnionTag(inst), .atomic_store_unordered => try self.airAtomicStore(inst, .Unordered), .atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic), @@ -2557,6 +2558,18 @@ pub const FuncGen = struct { return null; } + fn airGetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; + + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const un_ty = self.air.typeOf(ty_op.operand); + const un = try self.resolveInst(ty_op.operand); + + _ = un_ty; // TODO handle when onlyTagHasCodegenBits() == true and other union forms + return self.builder.buildExtractValue(un, 1, ""); + } + fn fieldPtr( self: *FuncGen, inst: Air.Inst.Index, diff --git a/src/print_air.zig b/src/print_air.zig index e735d03bd3..2a7538f81a 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -179,6 +179,7 @@ const Writer = struct { .array_to_slice, .int_to_float, .float_to_int, + .get_union_tag, => try w.writeTyOp(s, inst), .block, diff --git a/src/type.zig b/src/type.zig index bb798959f4..781fe74d45 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2487,6 +2487,12 @@ pub const Type = extern union { }; } + pub fn unionFieldType(ty: Type, enum_tag: Value) Type { + const union_obj = ty.cast(Payload.Union).?.data; + const index = union_obj.tag_ty.enumTagFieldIndex(enum_tag).?; + return 
union_obj.fields.values()[index].ty; + } + /// Asserts that the type is an error union. pub fn errorUnionPayload(self: Type) Type { return switch (self.tag()) { @@ -3801,6 +3807,8 @@ pub const Type = extern union { }; }; + pub const @"bool" = initTag(.bool); + pub fn ptr(arena: *Allocator, d: Payload.Pointer.Data) !Type { assert(d.host_size == 0 or d.bit_offset < d.host_size * 8); diff --git a/src/value.zig b/src/value.zig index cb5d211b1e..69f8945e01 100644 --- a/src/value.zig +++ b/src/value.zig @@ -1275,7 +1275,12 @@ pub const Value = extern union { } }, .Union => { - @panic("TODO implement hashing union values"); + const union_obj = val.castTag(.@"union").?.data; + if (ty.unionTagType()) |tag_ty| { + union_obj.tag.hash(tag_ty, hasher); + } + const active_field_ty = ty.unionFieldType(union_obj.tag); + union_obj.val.hash(active_field_ty, hasher); }, .Fn => { @panic("TODO implement hashing function values"); @@ -1431,6 +1436,14 @@ pub const Value = extern union { } } + pub fn unionTag(val: Value) Value { + switch (val.tag()) { + .undef => return val, + .@"union" => return val.castTag(.@"union").?.data.tag, + else => unreachable, + } + } + /// Returns a pointer to the element value at the index. pub fn elemPtr(self: Value, allocator: *Allocator, index: usize) !Value { if (self.castTag(.elem_ptr)) |elem_ptr| { diff --git a/test/behavior/union.zig b/test/behavior/union.zig index 6b8705e044..afefa7cf85 100644 --- a/test/behavior/union.zig +++ b/test/behavior/union.zig @@ -14,3 +14,21 @@ test "basic unions" { foo = Foo{ .float = 12.34 }; try expect(foo.float == 12.34); } + +test "init union with runtime value" { + var foo: Foo = undefined; + + setFloat(&foo, 12.34); + try expect(foo.float == 12.34); + + setInt(&foo, 42); + try expect(foo.int == 42); +} + +fn setFloat(foo: *Foo, x: f64) void { + foo.* = Foo{ .float = x }; +} + +fn setInt(foo: *Foo, x: i32) void { + foo.* = Foo{ .int = x }; +} diff --git a/test/behavior/union_stage1.zig b/test/behavior/union_stage1.zig index 5741858d51..725d7bd028 100644 --- a/test/behavior/union_stage1.zig +++ b/test/behavior/union_stage1.zig @@ -49,24 +49,6 @@ test "comptime union field access" { } } -test "init union with runtime value" { - var foo: Foo = undefined; - - setFloat(&foo, 12.34); - try expect(foo.float == 12.34); - - setInt(&foo, 42); - try expect(foo.int == 42); -} - -fn setFloat(foo: *Foo, x: f64) void { - foo.* = Foo{ .float = x }; -} - -fn setInt(foo: *Foo, x: i32) void { - foo.* = Foo{ .int = x }; -} - const FooExtern = extern union { float: f64, int: i32, @@ -185,12 +167,13 @@ test "union field access gives the enum values" { } test "cast union to tag type of union" { - try testCastUnionToTag(TheUnion{ .B = 1234 }); - comptime try testCastUnionToTag(TheUnion{ .B = 1234 }); + try testCastUnionToTag(); + comptime try testCastUnionToTag(); } -fn testCastUnionToTag(x: TheUnion) !void { - try expect(@as(TheTag, x) == TheTag.B); +fn testCastUnionToTag() !void { + var u = TheUnion{ .B = 1234 }; + try expect(@as(TheTag, u) == TheTag.B); } test "cast tag type of union to union" { -- cgit v1.2.3 From 79bc5891c1c4cde0592fe1b10b6c9a85914155cf Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 28 Sep 2021 15:45:58 -0700 Subject: stage2: more arithmetic support * AIR: add `mod` instruction for modulus division - Implement for LLVM backend * Sema: implement `@mod`, `@rem`, and `%`. 
* Sema: fix comptime switch evaluation * Sema: implement comptime shift left * Sema: fix the logic inside analyzeArithmetic to handle all the nuances between the different mathematical operations. - Implement comptime wrapping operations --- src/Air.zig | 11 +- src/Liveness.zig | 1 + src/Sema.zig | 659 ++++++++++++++++++++++++++------ src/Zir.zig | 22 +- src/codegen.zig | 9 + src/codegen/c.zig | 2 + src/codegen/llvm.zig | 29 ++ src/print_air.zig | 1 + src/value.zig | 138 +++++++ test/behavior.zig | 3 +- test/behavior/math.zig | 848 ----------------------------------------- test/behavior/math_stage1.zig | 855 ++++++++++++++++++++++++++++++++++++++++++ 12 files changed, 1604 insertions(+), 974 deletions(-) create mode 100644 test/behavior/math_stage1.zig (limited to 'src/codegen/c.zig') diff --git a/src/Air.zig b/src/Air.zig index b4552f9d7b..b5d19127a0 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -69,10 +69,16 @@ pub const Inst = struct { /// is the same as both operands. /// Uses the `bin_op` field. div, - /// Integer or float remainder. - /// Both operands are guaranteed to be the same type, and the result type is the same as both operands. + /// Integer or float remainder division. + /// Both operands are guaranteed to be the same type, and the result type + /// is the same as both operands. /// Uses the `bin_op` field. rem, + /// Integer or float modulus division. + /// Both operands are guaranteed to be the same type, and the result type + /// is the same as both operands. + /// Uses the `bin_op` field. + mod, /// Add an offset to a pointer, returning a new pointer. /// The offset is in element type units, not bytes. /// Wrapping is undefined behavior. @@ -568,6 +574,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .mulwrap, .div, .rem, + .mod, .bit_and, .bit_or, .xor, diff --git a/src/Liveness.zig b/src/Liveness.zig index a9ff586aeb..25dd29b0f6 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -232,6 +232,7 @@ fn analyzeInst( .mulwrap, .div, .rem, + .mod, .ptr_add, .ptr_sub, .bit_and, diff --git a/src/Sema.zig b/src/Sema.zig index 35a434eb35..de94a8c6b8 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -319,8 +319,6 @@ pub fn analyzeBody( .div_exact => try sema.zirDivExact(block, inst), .div_floor => try sema.zirDivFloor(block, inst), .div_trunc => try sema.zirDivTrunc(block, inst), - .mod => try sema.zirMod(block, inst), - .rem => try sema.zirRem(block, inst), .shl_exact => try sema.zirShlExact(block, inst), .shr_exact => try sema.zirShrExact(block, inst), .bit_offset_of => try sema.zirBitOffsetOf(block, inst), @@ -363,14 +361,16 @@ pub fn analyzeBody( .error_set_decl_anon => try sema.zirErrorSetDecl(block, inst, .anon), .error_set_decl_func => try sema.zirErrorSetDecl(block, inst, .func), - .add => try sema.zirArithmetic(block, inst), - .addwrap => try sema.zirArithmetic(block, inst), - .div => try sema.zirArithmetic(block, inst), - .mod_rem => try sema.zirArithmetic(block, inst), - .mul => try sema.zirArithmetic(block, inst), - .mulwrap => try sema.zirArithmetic(block, inst), - .sub => try sema.zirArithmetic(block, inst), - .subwrap => try sema.zirArithmetic(block, inst), + .add => try sema.zirArithmetic(block, inst, .add), + .addwrap => try sema.zirArithmetic(block, inst, .addwrap), + .div => try sema.zirArithmetic(block, inst, .div), + .mod_rem => try sema.zirArithmetic(block, inst, .mod_rem), + .mod => try sema.zirArithmetic(block, inst, .mod), + .rem => try sema.zirArithmetic(block, inst, .rem), + .mul => try sema.zirArithmetic(block, inst, .mul), + .mulwrap 
=> try sema.zirArithmetic(block, inst, .mulwrap), + .sub => try sema.zirArithmetic(block, inst, .sub), + .subwrap => try sema.zirArithmetic(block, inst, .subwrap), // Instructions that we know to *always* be noreturn based solely on their tag. // These functions match the return type of analyzeBody so that we can @@ -886,6 +886,14 @@ fn failWithUseOfUndef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc) Compile return sema.mod.fail(&block.base, src, "use of undefined value here causes undefined behavior", .{}); } +fn failWithDivideByZero(sema: *Sema, block: *Scope.Block, src: LazySrcLoc) CompileError { + return sema.mod.fail(&block.base, src, "division by zero here causes undefined behavior", .{}); +} + +fn failWithModRemNegative(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, lhs_ty: Type, rhs_ty: Type) CompileError { + return sema.mod.fail(&block.base, src, "remainder division with '{}' and '{}': signed integers and floats must use @rem or @mod", .{ lhs_ty, rhs_ty }); +} + /// Appropriate to call when the coercion has already been done by result /// location semantics. Asserts the value fits in the provided `Int` type. /// Only supports `Int` types 64 bits or less. @@ -2366,8 +2374,12 @@ fn resolveBlockBody( body: []const Zir.Inst.Index, merges: *Scope.Block.Merges, ) CompileError!Air.Inst.Ref { - _ = try sema.analyzeBody(child_block, body); - return sema.analyzeBlockBody(parent_block, src, child_block, merges); + if (child_block.is_comptime) { + return sema.resolveBody(child_block, body); + } else { + _ = try sema.analyzeBody(child_block, body); + return sema.analyzeBlockBody(parent_block, src, child_block, merges); + } } fn analyzeBlockBody( @@ -5867,23 +5879,36 @@ fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!A defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; - const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = sema.resolveInst(extra.lhs); const rhs = sema.resolveInst(extra.rhs); - if (try sema.resolveMaybeUndefVal(block, lhs_src, lhs)) |lhs_val| { - if (try sema.resolveMaybeUndefVal(block, rhs_src, rhs)) |rhs_val| { - if (lhs_val.isUndef() or rhs_val.isUndef()) { - return sema.addConstUndef(sema.typeOf(lhs)); - } - return sema.mod.fail(&block.base, src, "TODO implement comptime shl", .{}); + const maybe_lhs_val = try sema.resolveMaybeUndefVal(block, lhs_src, lhs); + const maybe_rhs_val = try sema.resolveMaybeUndefVal(block, rhs_src, rhs); + + const runtime_src = if (maybe_lhs_val) |lhs_val| rs: { + const lhs_ty = sema.typeOf(lhs); + + if (lhs_val.isUndef()) return sema.addConstUndef(lhs_ty); + const rhs_val = maybe_rhs_val orelse break :rs rhs_src; + if (rhs_val.isUndef()) return sema.addConstUndef(lhs_ty); + + // If rhs is 0, return lhs without doing any calculations. 
+ if (rhs_val.compareWithZero(.eq)) { + return sema.addConstant(lhs_ty, lhs_val); } - } + const val = try lhs_val.shl(rhs_val, sema.arena); + return sema.addConstant(lhs_ty, val); + } else rs: { + if (maybe_rhs_val) |rhs_val| { + if (rhs_val.isUndef()) return sema.addConstUndef(sema.typeOf(lhs)); + } + break :rs lhs_src; + }; - try sema.requireRuntimeBlock(block, src); + try sema.requireRuntimeBlock(block, runtime_src); return block.addBinOp(.shl, lhs, rhs); } @@ -6141,11 +6166,15 @@ fn zirNegate( return sema.analyzeArithmetic(block, tag_override, lhs, rhs, src, lhs_src, rhs_src); } -fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { +fn zirArithmetic( + sema: *Sema, + block: *Scope.Block, + inst: Zir.Inst.Index, + zir_tag: Zir.Inst.Tag, +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); - const tag_override = block.sema.code.instructions.items(.tag)[inst]; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; sema.src = .{ .node_offset_bin_op = inst_data.src_node }; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; @@ -6154,7 +6183,7 @@ fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Compile const lhs = sema.resolveInst(extra.lhs); const rhs = sema.resolveInst(extra.rhs); - return sema.analyzeArithmetic(block, tag_override, lhs, rhs, sema.src, lhs_src, rhs_src); + return sema.analyzeArithmetic(block, zir_tag, lhs, rhs, sema.src, lhs_src, rhs_src); } fn zirOverflowArithmetic( @@ -6187,6 +6216,7 @@ fn zirSatArithmetic( fn analyzeArithmetic( sema: *Sema, block: *Scope.Block, + /// TODO performance investigation: make this comptime? zir_tag: Zir.Inst.Tag, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, @@ -6204,7 +6234,7 @@ fn analyzeArithmetic( lhs_ty.arrayLen(), rhs_ty.arrayLen(), }); } - return sema.mod.fail(&block.base, src, "TODO implement support for vectors in zirBinOp", .{}); + return sema.mod.fail(&block.base, src, "TODO implement support for vectors in Sema.analyzeArithmetic", .{}); } else if (lhs_zig_ty_tag == .Vector or rhs_zig_ty_tag == .Vector) { return sema.mod.fail(&block.base, src, "mixed scalar and vector operands to binary expression: '{}' and '{}'", .{ lhs_ty, rhs_ty, @@ -6247,7 +6277,9 @@ fn analyzeArithmetic( }; const instructions = &[_]Air.Inst.Ref{ lhs, rhs }; - const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]LazySrcLoc{ lhs_src, rhs_src } }); + const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ + .override = &[_]LazySrcLoc{ lhs_src, rhs_src }, + }); const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); @@ -6267,86 +6299,499 @@ fn analyzeArithmetic( }); } - if (try sema.resolveMaybeUndefVal(block, lhs_src, casted_lhs)) |lhs_val| { - if (try sema.resolveMaybeUndefVal(block, rhs_src, casted_rhs)) |rhs_val| { - if (lhs_val.isUndef() or rhs_val.isUndef()) { - return sema.addConstUndef(resolved_type); - } - // incase rhs is 0, simply return lhs without doing any calculations - // TODO Once division is implemented we should throw an error when dividing by 0. 
- if (rhs_val.compareWithZero(.eq)) { - switch (zir_tag) { - .add, .addwrap, .sub, .subwrap => { - return sema.addConstant(scalar_type, lhs_val); - }, - else => {}, + const target = sema.mod.getTarget(); + const maybe_lhs_val = try sema.resolveMaybeUndefVal(block, lhs_src, casted_lhs); + const maybe_rhs_val = try sema.resolveMaybeUndefVal(block, rhs_src, casted_rhs); + const rs: struct { src: LazySrcLoc, air_tag: Air.Inst.Tag } = rs: { + switch (zir_tag) { + .add => { + // For integers: + // If either of the operands are zero, then the other operand is + // returned, even if it is undefined. + // If either of the operands are undefined, it's a compile error + // because there is a possible value for which the addition would + // overflow (max_int), causing illegal behavior. + // For floats: either operand being undef makes the result undef. + if (maybe_lhs_val) |lhs_val| { + if (!lhs_val.isUndef() and lhs_val.compareWithZero(.eq)) { + return casted_rhs; + } } - } - - const value = switch (zir_tag) { - .add => blk: { - const val = if (is_int) - try lhs_val.intAdd(rhs_val, sema.arena) - else - try lhs_val.floatAdd(rhs_val, scalar_type, sema.arena); - break :blk val; - }, - .sub => blk: { - const val = if (is_int) - try lhs_val.intSub(rhs_val, sema.arena) - else - try lhs_val.floatSub(rhs_val, scalar_type, sema.arena); - break :blk val; - }, - .div => blk: { - const val = if (is_int) - try lhs_val.intDiv(rhs_val, sema.arena) - else - try lhs_val.floatDiv(rhs_val, scalar_type, sema.arena); - break :blk val; - }, - .mul => blk: { - const val = if (is_int) - try lhs_val.intMul(rhs_val, sema.arena) - else - try lhs_val.floatMul(rhs_val, scalar_type, sema.arena); - break :blk val; - }, - else => return sema.mod.fail(&block.base, src, "TODO implement comptime arithmetic for operand '{s}'", .{@tagName(zir_tag)}), - }; - - log.debug("{s}({}, {}) result: {}", .{ @tagName(zir_tag), lhs_val, rhs_val, value }); - - return sema.addConstant(scalar_type, value); - } else { - try sema.requireRuntimeBlock(block, rhs_src); - } - } else { - try sema.requireRuntimeBlock(block, lhs_src); - } + if (maybe_rhs_val) |rhs_val| { + if (rhs_val.isUndef()) { + if (is_int) { + return sema.failWithUseOfUndef(block, rhs_src); + } else { + return sema.addConstUndef(scalar_type); + } + } + if (rhs_val.compareWithZero(.eq)) { + return casted_lhs; + } + } + if (maybe_lhs_val) |lhs_val| { + if (lhs_val.isUndef()) { + if (is_int) { + return sema.failWithUseOfUndef(block, lhs_src); + } else { + return sema.addConstUndef(scalar_type); + } + } + if (maybe_rhs_val) |rhs_val| { + if (is_int) { + return sema.addConstant( + scalar_type, + try lhs_val.intAdd(rhs_val, sema.arena), + ); + } else { + return sema.addConstant( + scalar_type, + try lhs_val.floatAdd(rhs_val, scalar_type, sema.arena), + ); + } + } else break :rs .{ .src = rhs_src, .air_tag = .add }; + } else break :rs .{ .src = lhs_src, .air_tag = .add }; + }, + .addwrap => { + // Integers only; floats are checked above. + // If either of the operands are zero, then the other operand is + // returned, even if it is undefined. + // If either of the operands are undefined, the result is undefined. 
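+ // Wrapping addition follows two's complement semantics, e.g. @as(u8, 250) +% 10 == 4.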
+ if (maybe_lhs_val) |lhs_val| { + if (!lhs_val.isUndef() and lhs_val.compareWithZero(.eq)) { + return casted_rhs; + } + } + if (maybe_rhs_val) |rhs_val| { + if (rhs_val.isUndef()) { + return sema.addConstUndef(scalar_type); + } + if (rhs_val.compareWithZero(.eq)) { + return casted_lhs; + } + if (maybe_lhs_val) |lhs_val| { + return sema.addConstant( + scalar_type, + try lhs_val.numberAddWrap(rhs_val, scalar_type, sema.arena, target), + ); + } else break :rs .{ .src = lhs_src, .air_tag = .addwrap }; + } else break :rs .{ .src = rhs_src, .air_tag = .addwrap }; + }, + .sub => { + // For integers: + // If the rhs is zero, then the other operand is + // returned, even if it is undefined. + // If either of the operands are undefined, it's a compile error + // because there is a possible value for which the subtraction would + // overflow, causing illegal behavior. + // For floats: either operand being undef makes the result undef. + if (maybe_rhs_val) |rhs_val| { + if (rhs_val.isUndef()) { + if (is_int) { + return sema.failWithUseOfUndef(block, rhs_src); + } else { + return sema.addConstUndef(scalar_type); + } + } + if (rhs_val.compareWithZero(.eq)) { + return casted_lhs; + } + } + if (maybe_lhs_val) |lhs_val| { + if (lhs_val.isUndef()) { + if (is_int) { + return sema.failWithUseOfUndef(block, lhs_src); + } else { + return sema.addConstUndef(scalar_type); + } + } + if (maybe_rhs_val) |rhs_val| { + if (is_int) { + return sema.addConstant( + scalar_type, + try lhs_val.intSub(rhs_val, sema.arena), + ); + } else { + return sema.addConstant( + scalar_type, + try lhs_val.floatSub(rhs_val, scalar_type, sema.arena), + ); + } + } else break :rs .{ .src = rhs_src, .air_tag = .sub }; + } else break :rs .{ .src = lhs_src, .air_tag = .sub }; + }, + .subwrap => { + // Integers only; floats are checked above. + // If the RHS is zero, then the other operand is returned, even if it is undefined. + // If either of the operands are undefined, the result is undefined. + if (maybe_rhs_val) |rhs_val| { + if (rhs_val.isUndef()) { + return sema.addConstUndef(scalar_type); + } + if (rhs_val.compareWithZero(.eq)) { + return casted_lhs; + } + } + if (maybe_lhs_val) |lhs_val| { + if (lhs_val.isUndef()) { + return sema.addConstUndef(scalar_type); + } + if (maybe_rhs_val) |rhs_val| { + return sema.addConstant( + scalar_type, + try lhs_val.numberSubWrap(rhs_val, scalar_type, sema.arena, target), + ); + } else break :rs .{ .src = rhs_src, .air_tag = .subwrap }; + } else break :rs .{ .src = lhs_src, .air_tag = .subwrap }; + }, + .div => { + // For integers: + // If the lhs is zero, then zero is returned regardless of rhs. + // If the rhs is zero, compile error for division by zero. + // If the rhs is undefined, compile error because there is a possible + // value (zero) for which the division would be illegal behavior. + // If the lhs is undefined: + // * if lhs type is signed: + // * if rhs is comptime-known and not -1, result is undefined + // * if rhs is -1 or runtime-known, compile error because there is a + // possible value (-min_int * -1) for which division would be + // illegal behavior. + // * if lhs type is unsigned, undef is returned regardless of rhs. + // For floats: + // If the rhs is zero, compile error for division by zero. + // If the rhs is undefined, compile error because there is a possible + // value (zero) for which the division would be illegal behavior. + // If the lhs is undefined, result is undefined. 
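+ // Comptime integer division discards the fractional part (7 / 2 == 3),
+ // while float division keeps it (7.0 / 2.0 == 3.5).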
+ if (maybe_lhs_val) |lhs_val| { + if (!lhs_val.isUndef()) { + if (lhs_val.compareWithZero(.eq)) { + return sema.addConstant(scalar_type, Value.zero); + } + } + } + if (maybe_rhs_val) |rhs_val| { + if (rhs_val.isUndef()) { + return sema.failWithUseOfUndef(block, rhs_src); + } + if (rhs_val.compareWithZero(.eq)) { + return sema.failWithDivideByZero(block, rhs_src); + } + } + if (maybe_lhs_val) |lhs_val| { + if (lhs_val.isUndef()) { + if (lhs_ty.isSignedInt() and rhs_ty.isSignedInt()) { + if (maybe_rhs_val) |rhs_val| { + if (rhs_val.compare(.neq, Value.negative_one, scalar_type)) { + return sema.addConstUndef(scalar_type); + } + } + return sema.failWithUseOfUndef(block, rhs_src); + } + return sema.addConstUndef(scalar_type); + } - if (zir_tag == .mod_rem) { - const dirty_lhs = lhs_ty.isSignedInt() or lhs_ty.isRuntimeFloat(); - const dirty_rhs = rhs_ty.isSignedInt() or rhs_ty.isRuntimeFloat(); - if (dirty_lhs or dirty_rhs) { - return sema.mod.fail(&block.base, src, "remainder division with '{}' and '{}': signed integers and floats must use @rem or @mod", .{ lhs_ty, rhs_ty }); + if (maybe_rhs_val) |rhs_val| { + if (is_int) { + return sema.addConstant( + scalar_type, + try lhs_val.intDiv(rhs_val, sema.arena), + ); + } else { + return sema.addConstant( + scalar_type, + try lhs_val.floatDiv(rhs_val, scalar_type, sema.arena), + ); + } + } else break :rs .{ .src = rhs_src, .air_tag = .div }; + } else break :rs .{ .src = lhs_src, .air_tag = .div }; + }, + .mul => { + // For integers: + // If either of the operands are zero, the result is zero. + // If either of the operands are one, the result is the other + // operand, even if it is undefined. + // If either of the operands are undefined, it's a compile error + // because there is a possible value for which the addition would + // overflow (max_int), causing illegal behavior. + // For floats: either operand being undef makes the result undef. + if (maybe_lhs_val) |lhs_val| { + if (!lhs_val.isUndef()) { + if (lhs_val.compareWithZero(.eq)) { + return sema.addConstant(scalar_type, Value.zero); + } + if (lhs_val.compare(.eq, Value.one, scalar_type)) { + return casted_rhs; + } + } + } + if (maybe_rhs_val) |rhs_val| { + if (rhs_val.isUndef()) { + if (is_int) { + return sema.failWithUseOfUndef(block, rhs_src); + } else { + return sema.addConstUndef(scalar_type); + } + } + if (rhs_val.compareWithZero(.eq)) { + return sema.addConstant(scalar_type, Value.zero); + } + if (rhs_val.compare(.eq, Value.one, scalar_type)) { + return casted_lhs; + } + if (maybe_lhs_val) |lhs_val| { + if (lhs_val.isUndef()) { + if (is_int) { + return sema.failWithUseOfUndef(block, lhs_src); + } else { + return sema.addConstUndef(scalar_type); + } + } + if (is_int) { + return sema.addConstant( + scalar_type, + try lhs_val.intMul(rhs_val, sema.arena), + ); + } else { + return sema.addConstant( + scalar_type, + try lhs_val.floatMul(rhs_val, scalar_type, sema.arena), + ); + } + } else break :rs .{ .src = lhs_src, .air_tag = .mul }; + } else break :rs .{ .src = rhs_src, .air_tag = .mul }; + }, + .mulwrap => { + // Integers only; floats are handled above. + // If either of the operands are zero, the result is zero. + // If either of the operands are one, the result is the other + // operand, even if it is undefined. + // If either of the operands are undefined, the result is undefined. 
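+ // Wrapping multiplication likewise follows two's complement semantics, e.g. @as(u8, 200) *% 2 == 144.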
+ if (maybe_lhs_val) |lhs_val| { + if (!lhs_val.isUndef()) { + if (lhs_val.compareWithZero(.eq)) { + return sema.addConstant(scalar_type, Value.zero); + } + if (lhs_val.compare(.eq, Value.one, scalar_type)) { + return casted_rhs; + } + } + } + if (maybe_rhs_val) |rhs_val| { + if (rhs_val.isUndef()) { + return sema.addConstUndef(scalar_type); + } + if (rhs_val.compareWithZero(.eq)) { + return sema.addConstant(scalar_type, Value.zero); + } + if (rhs_val.compare(.eq, Value.one, scalar_type)) { + return casted_lhs; + } + if (maybe_lhs_val) |lhs_val| { + if (lhs_val.isUndef()) { + return sema.addConstUndef(scalar_type); + } + return sema.addConstant( + scalar_type, + try lhs_val.numberMulWrap(rhs_val, scalar_type, sema.arena, target), + ); + } else break :rs .{ .src = lhs_src, .air_tag = .mulwrap }; + } else break :rs .{ .src = rhs_src, .air_tag = .mulwrap }; + }, + .mod_rem => { + // For integers: + // Either operand being undef is a compile error because there exists + // a possible value (TODO what is it?) that would invoke illegal behavior. + // TODO: can lhs zero be handled better? + // TODO: can lhs undef be handled better? + // + // For floats: + // If the rhs is zero, compile error for division by zero. + // If the rhs is undefined, compile error because there is a possible + // value (zero) for which the division would be illegal behavior. + // If the lhs is undefined, result is undefined. + // + // For either one: if the result would be different between @mod and @rem, + // then emit a compile error saying you have to pick one. + if (is_int) { + if (maybe_lhs_val) |lhs_val| { + if (lhs_val.isUndef()) { + return sema.failWithUseOfUndef(block, lhs_src); + } + if (lhs_val.compareWithZero(.lt)) { + return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty); + } + } else if (lhs_ty.isSignedInt()) { + return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty); + } + if (maybe_rhs_val) |rhs_val| { + if (rhs_val.isUndef()) { + return sema.failWithUseOfUndef(block, rhs_src); + } + if (rhs_val.compareWithZero(.eq)) { + return sema.failWithDivideByZero(block, rhs_src); + } + if (rhs_val.compareWithZero(.lt)) { + return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty); + } + if (maybe_lhs_val) |lhs_val| { + return sema.addConstant( + scalar_type, + try lhs_val.intRem(rhs_val, sema.arena), + ); + } + break :rs .{ .src = lhs_src, .air_tag = .rem }; + } else if (rhs_ty.isSignedInt()) { + return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty); + } else { + break :rs .{ .src = rhs_src, .air_tag = .rem }; + } + } + // float operands + if (maybe_rhs_val) |rhs_val| { + if (rhs_val.isUndef()) { + return sema.failWithUseOfUndef(block, rhs_src); + } + if (rhs_val.compareWithZero(.eq)) { + return sema.failWithDivideByZero(block, rhs_src); + } + if (rhs_val.compareWithZero(.lt)) { + return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty); + } + if (maybe_lhs_val) |lhs_val| { + if (lhs_val.isUndef() or lhs_val.compareWithZero(.lt)) { + return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty); + } + return sema.addConstant( + scalar_type, + try lhs_val.floatRem(rhs_val, sema.arena), + ); + } else { + return sema.failWithModRemNegative(block, lhs_src, lhs_ty, rhs_ty); + } + } else { + return sema.failWithModRemNegative(block, rhs_src, lhs_ty, rhs_ty); + } + }, + .rem => { + // For integers: + // Either operand being undef is a compile error because there exists + // a possible value (TODO what is it?) that would invoke illegal behavior. 
+ // TODO: can lhs zero be handled better? + // TODO: can lhs undef be handled better? + // + // For floats: + // If the rhs is zero, compile error for division by zero. + // If the rhs is undefined, compile error because there is a possible + // value (zero) for which the division would be illegal behavior. + // If the lhs is undefined, result is undefined. + if (is_int) { + if (maybe_lhs_val) |lhs_val| { + if (lhs_val.isUndef()) { + return sema.failWithUseOfUndef(block, lhs_src); + } + } + if (maybe_rhs_val) |rhs_val| { + if (rhs_val.isUndef()) { + return sema.failWithUseOfUndef(block, rhs_src); + } + if (rhs_val.compareWithZero(.eq)) { + return sema.failWithDivideByZero(block, rhs_src); + } + if (maybe_lhs_val) |lhs_val| { + return sema.addConstant( + scalar_type, + try lhs_val.intRem(rhs_val, sema.arena), + ); + } + break :rs .{ .src = lhs_src, .air_tag = .rem }; + } else { + break :rs .{ .src = rhs_src, .air_tag = .rem }; + } + } + // float operands + if (maybe_rhs_val) |rhs_val| { + if (rhs_val.isUndef()) { + return sema.failWithUseOfUndef(block, rhs_src); + } + if (rhs_val.compareWithZero(.eq)) { + return sema.failWithDivideByZero(block, rhs_src); + } + } + if (maybe_lhs_val) |lhs_val| { + if (lhs_val.isUndef()) { + return sema.addConstUndef(scalar_type); + } + if (maybe_rhs_val) |rhs_val| { + return sema.addConstant( + scalar_type, + try lhs_val.floatRem(rhs_val, sema.arena), + ); + } else break :rs .{ .src = rhs_src, .air_tag = .rem }; + } else break :rs .{ .src = lhs_src, .air_tag = .rem }; + }, + .mod => { + // For integers: + // Either operand being undef is a compile error because there exists + // a possible value (TODO what is it?) that would invoke illegal behavior. + // TODO: can lhs zero be handled better? + // TODO: can lhs undef be handled better? + // + // For floats: + // If the rhs is zero, compile error for division by zero. + // If the rhs is undefined, compile error because there is a possible + // value (zero) for which the division would be illegal behavior. + // If the lhs is undefined, result is undefined. 
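+ // @mod takes the sign of the divisor while @rem takes the sign of the dividend,
+ // e.g. @mod(-14, 12) == 10 but @rem(-14, 12) == -2.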
+ if (is_int) { + if (maybe_lhs_val) |lhs_val| { + if (lhs_val.isUndef()) { + return sema.failWithUseOfUndef(block, lhs_src); + } + } + if (maybe_rhs_val) |rhs_val| { + if (rhs_val.isUndef()) { + return sema.failWithUseOfUndef(block, rhs_src); + } + if (rhs_val.compareWithZero(.eq)) { + return sema.failWithDivideByZero(block, rhs_src); + } + if (maybe_lhs_val) |lhs_val| { + return sema.addConstant( + scalar_type, + try lhs_val.intMod(rhs_val, sema.arena), + ); + } + break :rs .{ .src = lhs_src, .air_tag = .mod }; + } else { + break :rs .{ .src = rhs_src, .air_tag = .mod }; + } + } + // float operands + if (maybe_rhs_val) |rhs_val| { + if (rhs_val.isUndef()) { + return sema.failWithUseOfUndef(block, rhs_src); + } + if (rhs_val.compareWithZero(.eq)) { + return sema.failWithDivideByZero(block, rhs_src); + } + } + if (maybe_lhs_val) |lhs_val| { + if (lhs_val.isUndef()) { + return sema.addConstUndef(scalar_type); + } + if (maybe_rhs_val) |rhs_val| { + return sema.addConstant( + scalar_type, + try lhs_val.floatMod(rhs_val, sema.arena), + ); + } else break :rs .{ .src = rhs_src, .air_tag = .mod }; + } else break :rs .{ .src = lhs_src, .air_tag = .mod }; + }, + else => unreachable, } - } - - const air_tag: Air.Inst.Tag = switch (zir_tag) { - .add => .add, - .addwrap => .addwrap, - .sub => .sub, - .subwrap => .subwrap, - .mul => .mul, - .mulwrap => .mulwrap, - .div => .div, - .mod_rem => .rem, - .rem => .rem, - else => return sema.mod.fail(&block.base, src, "TODO implement arithmetic for operand '{s}'", .{@tagName(zir_tag)}), }; - return block.addBinOp(air_tag, casted_lhs, casted_rhs); + try sema.requireRuntimeBlock(block, rs.src); + return block.addBinOp(rs.air_tag, casted_lhs, casted_rhs); } fn zirLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -7401,7 +7846,7 @@ fn analyzeRet( fn floatOpAllowed(tag: Zir.Inst.Tag) bool { // extend this swich as additional operators are implemented return switch (tag) { - .add, .sub, .mul, .div => true, + .add, .sub, .mul, .div, .mod, .rem, .mod_rem => true, else => false, }; } @@ -8068,16 +8513,6 @@ fn zirDivTrunc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivTrunc", .{}); } -fn zirMod(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const inst_data = sema.code.instructions.items(.data)[inst].pl_node; - const src = inst_data.src(); - return sema.mod.fail(&block.base, src, "TODO: Sema.zirMod", .{}); -} - -fn zirRem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - return sema.zirArithmetic(block, inst); -} - fn zirShlExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); diff --git a/src/Zir.zig b/src/Zir.zig index 483880c9b6..7c171e736d 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -395,17 +395,6 @@ pub const Inst = struct { /// Merge two error sets into one, `E1 || E2`. /// Uses the `pl_node` field with payload `Bin`. merge_error_sets, - /// Ambiguously remainder division or modulus. If the computation would possibly have - /// a different value depending on whether the operation is remainder division or modulus, - /// a compile error is emitted. Otherwise the computation is performed. - /// Uses the `pl_node` union field. Payload is `Bin`. - mod_rem, - /// Arithmetic multiplication. Asserts no integer overflow. 
- /// Uses the `pl_node` union field. Payload is `Bin`. - mul, - /// Twos complement wrapping integer multiplication. - /// Uses the `pl_node` union field. Payload is `Bin`. - mulwrap, /// Turns an R-Value into a const L-Value. In other words, it takes a value, /// stores it in a memory location, and returns a const pointer to it. If the value /// is `comptime`, the memory location is global static constant data. Otherwise, @@ -828,6 +817,17 @@ pub const Inst = struct { /// Implements the `@rem` builtin. /// Uses the `pl_node` union field with payload `Bin`. rem, + /// Ambiguously remainder division or modulus. If the computation would possibly have + /// a different value depending on whether the operation is remainder division or modulus, + /// a compile error is emitted. Otherwise the computation is performed. + /// Uses the `pl_node` union field. Payload is `Bin`. + mod_rem, + /// Arithmetic multiplication. Asserts no integer overflow. + /// Uses the `pl_node` union field. Payload is `Bin`. + mul, + /// Twos complement wrapping integer multiplication. + /// Uses the `pl_node` union field. Payload is `Bin`. + mulwrap, /// Integer shift-left. Zeroes are shifted in from the right hand side. /// Uses the `pl_node` union field. Payload is `Bin`. diff --git a/src/codegen.zig b/src/codegen.zig index 4eda3f2594..7c359e90c0 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -832,6 +832,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .mulwrap => try self.airMulWrap(inst), .div => try self.airDiv(inst), .rem => try self.airRem(inst), + .mod => try self.airMod(inst), .cmp_lt => try self.airCmp(inst, .lt), .cmp_lte => try self.airCmp(inst, .lte), @@ -1353,6 +1354,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } + fn airMod(self: *Self, inst: Air.Inst.Index) !void { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { + else => return self.fail("TODO implement mod for {}", .{self.target.cpu.arch}), + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); + } + fn airBitAnd(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index a6534b1eba..4964f17cd3 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -897,6 +897,8 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO // that wrapping is UB. 
.div => try airBinOp( f, inst, " / "), .rem => try airBinOp( f, inst, " % "), + // TODO implement modulus division + .mod => try airBinOp( f, inst, " mod "), .cmp_eq => try airBinOp(f, inst, " == "), .cmp_gt => try airBinOp(f, inst, " > "), diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 4a0d218ead..f65e0f6d72 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1244,6 +1244,7 @@ pub const FuncGen = struct { .mulwrap => try self.airMul(inst, true), .div => try self.airDiv(inst), .rem => try self.airRem(inst), + .mod => try self.airMod(inst), .ptr_add => try self.airPtrAdd(inst), .ptr_sub => try self.airPtrSub(inst), @@ -2095,6 +2096,34 @@ pub const FuncGen = struct { return self.builder.buildURem(lhs, rhs, ""); } + fn airMod(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) return null; + + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try self.resolveInst(bin_op.rhs); + const inst_ty = self.air.typeOfIndex(inst); + const inst_llvm_ty = try self.dg.llvmType(inst_ty); + + if (inst_ty.isRuntimeFloat()) { + const a = self.builder.buildFRem(lhs, rhs, ""); + const b = self.builder.buildFAdd(a, rhs, ""); + const c = self.builder.buildFRem(b, rhs, ""); + const zero = inst_llvm_ty.constNull(); + const ltz = self.builder.buildFCmp(.OLT, lhs, zero, ""); + return self.builder.buildSelect(ltz, c, a, ""); + } + if (inst_ty.isSignedInt()) { + const a = self.builder.buildSRem(lhs, rhs, ""); + const b = self.builder.buildNSWAdd(a, rhs, ""); + const c = self.builder.buildSRem(b, rhs, ""); + const zero = inst_llvm_ty.constNull(); + const ltz = self.builder.buildICmp(.SLT, lhs, zero, ""); + return self.builder.buildSelect(ltz, c, a, ""); + } + return self.builder.buildURem(lhs, rhs, ""); + } + fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { if (self.liveness.isUnused(inst)) return null; diff --git a/src/print_air.zig b/src/print_air.zig index 2a7538f81a..90df06760b 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -110,6 +110,7 @@ const Writer = struct { .mulwrap, .div, .rem, + .mod, .ptr_add, .ptr_sub, .bit_and, diff --git a/src/value.zig b/src/value.zig index 336f5f9cf7..29d8fa8db9 100644 --- a/src/value.zig +++ b/src/value.zig @@ -1616,6 +1616,34 @@ pub const Value = extern union { return result; } + /// Supports both floats and ints; handles undefined. + pub fn numberMulWrap( + lhs: Value, + rhs: Value, + ty: Type, + arena: *Allocator, + target: Target, + ) !Value { + if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); + + if (ty.isAnyFloat()) { + return floatMul(lhs, rhs, ty, arena); + } + const result = try intMul(lhs, rhs, arena); + + const max = try ty.maxInt(arena, target); + if (compare(result, .gt, max, ty)) { + @panic("TODO comptime wrapping integer multiplication"); + } + + const min = try ty.minInt(arena, target); + if (compare(result, .lt, min, ty)) { + @panic("TODO comptime wrapping integer multiplication"); + } + + return result; + } + /// Supports both floats and ints; handles undefined. pub fn numberMax(lhs: Value, rhs: Value, arena: *Allocator) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); @@ -1840,6 +1868,82 @@ pub const Value = extern union { } } + pub fn intRem(lhs: Value, rhs: Value, allocator: *Allocator) !Value { + // TODO is this a performance issue? maybe we should try the operation without + // resorting to BigInt first. 
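+ // divTrunc computes quotient and remainder together; only the remainder is kept,
+ // and its sign follows the dividend, matching @rem semantics.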
+ var lhs_space: Value.BigIntSpace = undefined; + var rhs_space: Value.BigIntSpace = undefined; + const lhs_bigint = lhs.toBigInt(&lhs_space); + const rhs_bigint = rhs.toBigInt(&rhs_space); + const limbs_q = try allocator.alloc( + std.math.big.Limb, + lhs_bigint.limbs.len + rhs_bigint.limbs.len + 1, + ); + const limbs_r = try allocator.alloc( + std.math.big.Limb, + lhs_bigint.limbs.len, + ); + const limbs_buffer = try allocator.alloc( + std.math.big.Limb, + std.math.big.int.calcDivLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len), + ); + var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined }; + var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined }; + result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer, null); + const result_limbs = result_r.limbs[0..result_r.len]; + + if (result_r.positive) { + return Value.Tag.int_big_positive.create(allocator, result_limbs); + } else { + return Value.Tag.int_big_negative.create(allocator, result_limbs); + } + } + + pub fn intMod(lhs: Value, rhs: Value, allocator: *Allocator) !Value { + // TODO is this a performance issue? maybe we should try the operation without + // resorting to BigInt first. + var lhs_space: Value.BigIntSpace = undefined; + var rhs_space: Value.BigIntSpace = undefined; + const lhs_bigint = lhs.toBigInt(&lhs_space); + const rhs_bigint = rhs.toBigInt(&rhs_space); + const limbs_q = try allocator.alloc( + std.math.big.Limb, + lhs_bigint.limbs.len + rhs_bigint.limbs.len + 1, + ); + const limbs_r = try allocator.alloc( + std.math.big.Limb, + lhs_bigint.limbs.len, + ); + const limbs_buffer = try allocator.alloc( + std.math.big.Limb, + std.math.big.int.calcDivLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len), + ); + var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined }; + var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined }; + result_q.divFloor(&result_r, lhs_bigint, rhs_bigint, limbs_buffer, null); + const result_limbs = result_r.limbs[0..result_r.len]; + + if (result_r.positive) { + return Value.Tag.int_big_positive.create(allocator, result_limbs); + } else { + return Value.Tag.int_big_negative.create(allocator, result_limbs); + } + } + + pub fn floatRem(lhs: Value, rhs: Value, allocator: *Allocator) !Value { + _ = lhs; + _ = rhs; + _ = allocator; + @panic("TODO implement Value.floatRem"); + } + + pub fn floatMod(lhs: Value, rhs: Value, allocator: *Allocator) !Value { + _ = lhs; + _ = rhs; + _ = allocator; + @panic("TODO implement Value.floatMod"); + } + pub fn intMul(lhs: Value, rhs: Value, allocator: *Allocator) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. @@ -1875,6 +1979,31 @@ pub const Value = extern union { return Tag.int_u64.create(arena, truncated); } + pub fn shl(lhs: Value, rhs: Value, allocator: *Allocator) !Value { + // TODO is this a performance issue? maybe we should try the operation without + // resorting to BigInt first. 
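+ // The result needs at most one extra limb per limb-width's worth of shift,
+ // plus one limb for carry-out, hence the allocation size below.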
+ var lhs_space: Value.BigIntSpace = undefined; + const lhs_bigint = lhs.toBigInt(&lhs_space); + const shift = rhs.toUnsignedInt(); + const limbs = try allocator.alloc( + std.math.big.Limb, + lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1, + ); + var result_bigint = BigIntMutable{ + .limbs = limbs, + .positive = undefined, + .len = undefined, + }; + result_bigint.shiftLeft(lhs_bigint, shift); + const result_limbs = result_bigint.limbs[0..result_bigint.len]; + + if (result_bigint.positive) { + return Value.Tag.int_big_positive.create(allocator, result_limbs); + } else { + return Value.Tag.int_big_negative.create(allocator, result_limbs); + } + } + pub fn shr(lhs: Value, rhs: Value, allocator: *Allocator) !Value { // TODO is this a performance issue? maybe we should try the operation without // resorting to BigInt first. @@ -2227,4 +2356,13 @@ pub const Value = extern union { /// are possible without using an allocator. limbs: [(@sizeOf(u64) / @sizeOf(std.math.big.Limb)) + 1]std.math.big.Limb, }; + + pub const zero = initTag(.zero); + pub const one = initTag(.one); + pub const negative_one: Value = .{ .ptr_otherwise = &negative_one_payload.base }; +}; + +var negative_one_payload: Value.Payload.I64 = .{ + .base = .{ .tag = .int_i64 }, + .data = -1, }; diff --git a/test/behavior.zig b/test/behavior.zig index 4bfd947fcf..479e1feffc 100644 --- a/test/behavior.zig +++ b/test/behavior.zig @@ -10,6 +10,7 @@ test { _ = @import("behavior/eval.zig"); _ = @import("behavior/generics.zig"); _ = @import("behavior/if.zig"); + _ = @import("behavior/math.zig"); _ = @import("behavior/member_func.zig"); _ = @import("behavior/pointers.zig"); _ = @import("behavior/sizeof_and_typeof.zig"); @@ -119,7 +120,7 @@ test { _ = @import("behavior/incomplete_struct_param_tld.zig"); _ = @import("behavior/inttoptr.zig"); _ = @import("behavior/ir_block_deps.zig"); - _ = @import("behavior/math.zig"); + _ = @import("behavior/math_stage1.zig"); _ = @import("behavior/maximum_minimum.zig"); _ = @import("behavior/merge_error_sets.zig"); _ = @import("behavior/misc.zig"); diff --git a/test/behavior/math.zig b/test/behavior/math.zig index 7a5c31f67a..510cc3d438 100644 --- a/test/behavior/math.zig +++ b/test/behavior/math.zig @@ -6,171 +6,6 @@ const maxInt = std.math.maxInt; const minInt = std.math.minInt; const mem = std.mem; -test "division" { - try testDivision(); - comptime try testDivision(); -} -fn testDivision() !void { - try expect(div(u32, 13, 3) == 4); - try expect(div(f16, 1.0, 2.0) == 0.5); - try expect(div(f32, 1.0, 2.0) == 0.5); - - try expect(divExact(u32, 55, 11) == 5); - try expect(divExact(i32, -55, 11) == -5); - try expect(divExact(f16, 55.0, 11.0) == 5.0); - try expect(divExact(f16, -55.0, 11.0) == -5.0); - try expect(divExact(f32, 55.0, 11.0) == 5.0); - try expect(divExact(f32, -55.0, 11.0) == -5.0); - - try expect(divFloor(i32, 5, 3) == 1); - try expect(divFloor(i32, -5, 3) == -2); - try expect(divFloor(f16, 5.0, 3.0) == 1.0); - try expect(divFloor(f16, -5.0, 3.0) == -2.0); - try expect(divFloor(f32, 5.0, 3.0) == 1.0); - try expect(divFloor(f32, -5.0, 3.0) == -2.0); - try expect(divFloor(i32, -0x80000000, -2) == 0x40000000); - try expect(divFloor(i32, 0, -0x80000000) == 0); - try expect(divFloor(i32, -0x40000001, 0x40000000) == -2); - try expect(divFloor(i32, -0x80000000, 1) == -0x80000000); - try expect(divFloor(i32, 10, 12) == 0); - try expect(divFloor(i32, -14, 12) == -2); - try expect(divFloor(i32, -2, 12) == -1); - - try expect(divTrunc(i32, 5, 3) == 1); - try expect(divTrunc(i32, -5, 
3) == -1); - try expect(divTrunc(f16, 5.0, 3.0) == 1.0); - try expect(divTrunc(f16, -5.0, 3.0) == -1.0); - try expect(divTrunc(f32, 5.0, 3.0) == 1.0); - try expect(divTrunc(f32, -5.0, 3.0) == -1.0); - try expect(divTrunc(f64, 5.0, 3.0) == 1.0); - try expect(divTrunc(f64, -5.0, 3.0) == -1.0); - try expect(divTrunc(i32, 10, 12) == 0); - try expect(divTrunc(i32, -14, 12) == -1); - try expect(divTrunc(i32, -2, 12) == 0); - - try expect(mod(i32, 10, 12) == 10); - try expect(mod(i32, -14, 12) == 10); - try expect(mod(i32, -2, 12) == 10); - - comptime { - try expect( - 1194735857077236777412821811143690633098347576 % 508740759824825164163191790951174292733114988 == 177254337427586449086438229241342047632117600, - ); - try expect( - @rem(-1194735857077236777412821811143690633098347576, 508740759824825164163191790951174292733114988) == -177254337427586449086438229241342047632117600, - ); - try expect( - 1194735857077236777412821811143690633098347576 / 508740759824825164163191790951174292733114988 == 2, - ); - try expect( - @divTrunc(-1194735857077236777412821811143690633098347576, 508740759824825164163191790951174292733114988) == -2, - ); - try expect( - @divTrunc(1194735857077236777412821811143690633098347576, -508740759824825164163191790951174292733114988) == -2, - ); - try expect( - @divTrunc(-1194735857077236777412821811143690633098347576, -508740759824825164163191790951174292733114988) == 2, - ); - try expect( - 4126227191251978491697987544882340798050766755606969681711 % 10 == 1, - ); - } -} -fn div(comptime T: type, a: T, b: T) T { - return a / b; -} -fn divExact(comptime T: type, a: T, b: T) T { - return @divExact(a, b); -} -fn divFloor(comptime T: type, a: T, b: T) T { - return @divFloor(a, b); -} -fn divTrunc(comptime T: type, a: T, b: T) T { - return @divTrunc(a, b); -} -fn mod(comptime T: type, a: T, b: T) T { - return @mod(a, b); -} - -test "@addWithOverflow" { - var result: u8 = undefined; - try expect(@addWithOverflow(u8, 250, 100, &result)); - try expect(!@addWithOverflow(u8, 100, 150, &result)); - try expect(result == 250); -} - -// TODO test mulWithOverflow -// TODO test subWithOverflow - -test "@shlWithOverflow" { - var result: u16 = undefined; - try expect(@shlWithOverflow(u16, 0b0010111111111111, 3, &result)); - try expect(!@shlWithOverflow(u16, 0b0010111111111111, 2, &result)); - try expect(result == 0b1011111111111100); -} - -test "@*WithOverflow with u0 values" { - var result: u0 = undefined; - try expect(!@addWithOverflow(u0, 0, 0, &result)); - try expect(!@subWithOverflow(u0, 0, 0, &result)); - try expect(!@mulWithOverflow(u0, 0, 0, &result)); - try expect(!@shlWithOverflow(u0, 0, 0, &result)); -} - -test "@clz" { - try testClz(); - comptime try testClz(); -} - -fn testClz() !void { - try expect(@clz(u8, 0b10001010) == 0); - try expect(@clz(u8, 0b00001010) == 4); - try expect(@clz(u8, 0b00011010) == 3); - try expect(@clz(u8, 0b00000000) == 8); - try expect(@clz(u128, 0xffffffffffffffff) == 64); - try expect(@clz(u128, 0x10000000000000000) == 63); -} - -test "@clz vectors" { - try testClzVectors(); - comptime try testClzVectors(); -} - -fn testClzVectors() !void { - @setEvalBranchQuota(10_000); - try expectEqual(@clz(u8, @splat(64, @as(u8, 0b10001010))), @splat(64, @as(u4, 0))); - try expectEqual(@clz(u8, @splat(64, @as(u8, 0b00001010))), @splat(64, @as(u4, 4))); - try expectEqual(@clz(u8, @splat(64, @as(u8, 0b00011010))), @splat(64, @as(u4, 3))); - try expectEqual(@clz(u8, @splat(64, @as(u8, 0b00000000))), @splat(64, @as(u4, 8))); - try expectEqual(@clz(u128, @splat(64, 
@as(u128, 0xffffffffffffffff))), @splat(64, @as(u8, 64))); - try expectEqual(@clz(u128, @splat(64, @as(u128, 0x10000000000000000))), @splat(64, @as(u8, 63))); -} - -test "@ctz" { - try testCtz(); - comptime try testCtz(); -} - -fn testCtz() !void { - try expect(@ctz(u8, 0b10100000) == 5); - try expect(@ctz(u8, 0b10001010) == 1); - try expect(@ctz(u8, 0b00000000) == 8); - try expect(@ctz(u16, 0b00000000) == 16); -} - -test "@ctz vectors" { - try testClzVectors(); - comptime try testClzVectors(); -} - -fn testCtzVectors() !void { - @setEvalBranchQuota(10_000); - try expectEqual(@ctz(u8, @splat(64, @as(u8, 0b10100000))), @splat(64, @as(u4, 5))); - try expectEqual(@ctz(u8, @splat(64, @as(u8, 0b10001010))), @splat(64, @as(u4, 1))); - try expectEqual(@ctz(u8, @splat(64, @as(u8, 0b00000000))), @splat(64, @as(u4, 8))); - try expectEqual(@ctz(u16, @splat(64, @as(u16, 0b00000000))), @splat(64, @as(u5, 16))); -} - test "assignment operators" { var i: u32 = 0; i += 5; @@ -218,686 +53,3 @@ fn testThreeExprInARow(f: bool, t: bool) !void { fn assertFalse(b: bool) !void { try expect(!b); } - -test "const number literal" { - const one = 1; - const eleven = ten + one; - - try expect(eleven == 11); -} -const ten = 10; - -test "unsigned wrapping" { - try testUnsignedWrappingEval(maxInt(u32)); - comptime try testUnsignedWrappingEval(maxInt(u32)); -} -fn testUnsignedWrappingEval(x: u32) !void { - const zero = x +% 1; - try expect(zero == 0); - const orig = zero -% 1; - try expect(orig == maxInt(u32)); -} - -test "signed wrapping" { - try testSignedWrappingEval(maxInt(i32)); - comptime try testSignedWrappingEval(maxInt(i32)); -} -fn testSignedWrappingEval(x: i32) !void { - const min_val = x +% 1; - try expect(min_val == minInt(i32)); - const max_val = min_val -% 1; - try expect(max_val == maxInt(i32)); -} - -test "signed negation wrapping" { - try testSignedNegationWrappingEval(minInt(i16)); - comptime try testSignedNegationWrappingEval(minInt(i16)); -} -fn testSignedNegationWrappingEval(x: i16) !void { - try expect(x == -32768); - const neg = -%x; - try expect(neg == -32768); -} - -test "unsigned negation wrapping" { - try testUnsignedNegationWrappingEval(1); - comptime try testUnsignedNegationWrappingEval(1); -} -fn testUnsignedNegationWrappingEval(x: u16) !void { - try expect(x == 1); - const neg = -%x; - try expect(neg == maxInt(u16)); -} - -test "unsigned 64-bit division" { - try test_u64_div(); - comptime try test_u64_div(); -} -fn test_u64_div() !void { - const result = divWithResult(1152921504606846976, 34359738365); - try expect(result.quotient == 33554432); - try expect(result.remainder == 100663296); -} -fn divWithResult(a: u64, b: u64) DivResult { - return DivResult{ - .quotient = a / b, - .remainder = a % b, - }; -} -const DivResult = struct { - quotient: u64, - remainder: u64, -}; - -test "binary not" { - try expect(comptime x: { - break :x ~@as(u16, 0b1010101010101010) == 0b0101010101010101; - }); - try expect(comptime x: { - break :x ~@as(u64, 2147483647) == 18446744071562067968; - }); - try testBinaryNot(0b1010101010101010); -} - -fn testBinaryNot(x: u16) !void { - try expect(~x == 0b0101010101010101); -} - -test "small int addition" { - var x: u2 = 0; - try expect(x == 0); - - x += 1; - try expect(x == 1); - - x += 1; - try expect(x == 2); - - x += 1; - try expect(x == 3); - - var result: @TypeOf(x) = 3; - try expect(@addWithOverflow(@TypeOf(x), x, 1, &result)); - - try expect(result == 0); -} - -test "float equality" { - const x: f64 = 0.012; - const y: f64 = x + 1.0; - - try 
testFloatEqualityImpl(x, y); - comptime try testFloatEqualityImpl(x, y); -} - -fn testFloatEqualityImpl(x: f64, y: f64) !void { - const y2 = x + 1.0; - try expect(y == y2); -} - -test "allow signed integer division/remainder when values are comptime known and positive or exact" { - try expect(5 / 3 == 1); - try expect(-5 / -3 == 1); - try expect(-6 / 3 == -2); - - try expect(5 % 3 == 2); - try expect(-6 % 3 == 0); -} - -test "hex float literal parsing" { - comptime try expect(0x1.0 == 1.0); -} - -test "quad hex float literal parsing in range" { - const a = 0x1.af23456789bbaaab347645365cdep+5; - const b = 0x1.dedafcff354b6ae9758763545432p-9; - const c = 0x1.2f34dd5f437e849b4baab754cdefp+4534; - const d = 0x1.edcbff8ad76ab5bf46463233214fp-435; - if (false) { - a; - b; - c; - d; - } -} - -test "quad hex float literal parsing accurate" { - const a: f128 = 0x1.1111222233334444555566667777p+0; - - // implied 1 is dropped, with an exponent of 0 (0x3fff) after biasing. - const expected: u128 = 0x3fff1111222233334444555566667777; - try expect(@bitCast(u128, a) == expected); - - // non-normalized - const b: f128 = 0x11.111222233334444555566667777p-4; - try expect(@bitCast(u128, b) == expected); - - const S = struct { - fn doTheTest() !void { - { - var f: f128 = 0x1.2eab345678439abcdefea56782346p+5; - try expect(@bitCast(u128, f) == 0x40042eab345678439abcdefea5678234); - } - { - var f: f128 = 0x1.edcb34a235253948765432134674fp-1; - try expect(@bitCast(u128, f) == 0x3ffeedcb34a235253948765432134674); - } - { - var f: f128 = 0x1.353e45674d89abacc3a2ebf3ff4ffp-50; - try expect(@bitCast(u128, f) == 0x3fcd353e45674d89abacc3a2ebf3ff50); - } - { - var f: f128 = 0x1.ed8764648369535adf4be3214567fp-9; - try expect(@bitCast(u128, f) == 0x3ff6ed8764648369535adf4be3214568); - } - const exp2ft = [_]f64{ - 0x1.6a09e667f3bcdp-1, - 0x1.7a11473eb0187p-1, - 0x1.8ace5422aa0dbp-1, - 0x1.9c49182a3f090p-1, - 0x1.ae89f995ad3adp-1, - 0x1.c199bdd85529cp-1, - 0x1.d5818dcfba487p-1, - 0x1.ea4afa2a490dap-1, - 0x1.0000000000000p+0, - 0x1.0b5586cf9890fp+0, - 0x1.172b83c7d517bp+0, - 0x1.2387a6e756238p+0, - 0x1.306fe0a31b715p+0, - 0x1.3dea64c123422p+0, - 0x1.4bfdad5362a27p+0, - 0x1.5ab07dd485429p+0, - 0x1.8p23, - 0x1.62e430p-1, - 0x1.ebfbe0p-3, - 0x1.c6b348p-5, - 0x1.3b2c9cp-7, - 0x1.0p127, - -0x1.0p-149, - }; - - const answers = [_]u64{ - 0x3fe6a09e667f3bcd, - 0x3fe7a11473eb0187, - 0x3fe8ace5422aa0db, - 0x3fe9c49182a3f090, - 0x3feae89f995ad3ad, - 0x3fec199bdd85529c, - 0x3fed5818dcfba487, - 0x3feea4afa2a490da, - 0x3ff0000000000000, - 0x3ff0b5586cf9890f, - 0x3ff172b83c7d517b, - 0x3ff2387a6e756238, - 0x3ff306fe0a31b715, - 0x3ff3dea64c123422, - 0x3ff4bfdad5362a27, - 0x3ff5ab07dd485429, - 0x4168000000000000, - 0x3fe62e4300000000, - 0x3fcebfbe00000000, - 0x3fac6b3480000000, - 0x3f83b2c9c0000000, - 0x47e0000000000000, - 0xb6a0000000000000, - }; - - for (exp2ft) |x, i| { - try expect(@bitCast(u64, x) == answers[i]); - } - } - }; - try S.doTheTest(); - comptime try S.doTheTest(); -} - -test "underscore separator parsing" { - try expect(0_0_0_0 == 0); - try expect(1_234_567 == 1234567); - try expect(001_234_567 == 1234567); - try expect(0_0_1_2_3_4_5_6_7 == 1234567); - - try expect(0b0_0_0_0 == 0); - try expect(0b1010_1010 == 0b10101010); - try expect(0b0000_1010_1010 == 0b10101010); - try expect(0b1_0_1_0_1_0_1_0 == 0b10101010); - - try expect(0o0_0_0_0 == 0); - try expect(0o1010_1010 == 0o10101010); - try expect(0o0000_1010_1010 == 0o10101010); - try expect(0o1_0_1_0_1_0_1_0 == 0o10101010); - - try expect(0x0_0_0_0 == 0); - try 
expect(0x1010_1010 == 0x10101010); - try expect(0x0000_1010_1010 == 0x10101010); - try expect(0x1_0_1_0_1_0_1_0 == 0x10101010); - - try expect(123_456.789_000e1_0 == 123456.789000e10); - try expect(0_1_2_3_4_5_6.7_8_9_0_0_0e0_0_1_0 == 123456.789000e10); - - try expect(0x1234_5678.9ABC_DEF0p-1_0 == 0x12345678.9ABCDEF0p-10); - try expect(0x1_2_3_4_5_6_7_8.9_A_B_C_D_E_F_0p-0_0_0_1_0 == 0x12345678.9ABCDEF0p-10); -} - -test "hex float literal within range" { - const a = 0x1.0p16383; - const b = 0x0.1p16387; - const c = 0x1.0p-16382; - if (false) { - a; - b; - c; - } -} - -test "truncating shift left" { - try testShlTrunc(maxInt(u16)); - comptime try testShlTrunc(maxInt(u16)); -} -fn testShlTrunc(x: u16) !void { - const shifted = x << 1; - try expect(shifted == 65534); -} - -test "truncating shift right" { - try testShrTrunc(maxInt(u16)); - comptime try testShrTrunc(maxInt(u16)); -} -fn testShrTrunc(x: u16) !void { - const shifted = x >> 1; - try expect(shifted == 32767); -} - -test "exact shift left" { - try testShlExact(0b00110101); - comptime try testShlExact(0b00110101); -} -fn testShlExact(x: u8) !void { - const shifted = @shlExact(x, 2); - try expect(shifted == 0b11010100); -} - -test "exact shift right" { - try testShrExact(0b10110100); - comptime try testShrExact(0b10110100); -} -fn testShrExact(x: u8) !void { - const shifted = @shrExact(x, 2); - try expect(shifted == 0b00101101); -} - -test "shift left/right on u0 operand" { - const S = struct { - fn doTheTest() !void { - var x: u0 = 0; - var y: u0 = 0; - try expectEqual(@as(u0, 0), x << 0); - try expectEqual(@as(u0, 0), x >> 0); - try expectEqual(@as(u0, 0), x << y); - try expectEqual(@as(u0, 0), x >> y); - try expectEqual(@as(u0, 0), @shlExact(x, 0)); - try expectEqual(@as(u0, 0), @shrExact(x, 0)); - try expectEqual(@as(u0, 0), @shlExact(x, y)); - try expectEqual(@as(u0, 0), @shrExact(x, y)); - } - }; - try S.doTheTest(); - comptime try S.doTheTest(); -} - -test "comptime_int addition" { - comptime { - try expect(35361831660712422535336160538497375248 + 101752735581729509668353361206450473702 == 137114567242441932203689521744947848950); - try expect(594491908217841670578297176641415611445982232488944558774612 + 390603545391089362063884922208143568023166603618446395589768 == 985095453608931032642182098849559179469148836107390954364380); - } -} - -test "comptime_int multiplication" { - comptime { - try expect( - 45960427431263824329884196484953148229 * 128339149605334697009938835852565949723 == 5898522172026096622534201617172456926982464453350084962781392314016180490567, - ); - try expect( - 594491908217841670578297176641415611445982232488944558774612 * 390603545391089362063884922208143568023166603618446395589768 == 232210647056203049913662402532976186578842425262306016094292237500303028346593132411865381225871291702600263463125370016, - ); - } -} - -test "comptime_int shifting" { - comptime { - try expect((@as(u128, 1) << 127) == 0x80000000000000000000000000000000); - } -} - -test "comptime_int multi-limb shift and mask" { - comptime { - var a = 0xefffffffa0000001eeeeeeefaaaaaaab; - - try expect(@as(u32, a & 0xffffffff) == 0xaaaaaaab); - a >>= 32; - try expect(@as(u32, a & 0xffffffff) == 0xeeeeeeef); - a >>= 32; - try expect(@as(u32, a & 0xffffffff) == 0xa0000001); - a >>= 32; - try expect(@as(u32, a & 0xffffffff) == 0xefffffff); - a >>= 32; - - try expect(a == 0); - } -} - -test "comptime_int multi-limb partial shift right" { - comptime { - var a = 0x1ffffffffeeeeeeee; - a >>= 16; - try expect(a == 0x1ffffffffeeee); - } -} - -test 
"xor" { - try test_xor(); - comptime try test_xor(); -} - -fn test_xor() !void { - try expect(0xFF ^ 0x00 == 0xFF); - try expect(0xF0 ^ 0x0F == 0xFF); - try expect(0xFF ^ 0xF0 == 0x0F); - try expect(0xFF ^ 0x0F == 0xF0); - try expect(0xFF ^ 0xFF == 0x00); -} - -test "comptime_int xor" { - comptime { - try expect(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF ^ 0x00000000000000000000000000000000 == 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF); - try expect(0xFFFFFFFFFFFFFFFF0000000000000000 ^ 0x0000000000000000FFFFFFFFFFFFFFFF == 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF); - try expect(0xFFFFFFFFFFFFFFFF0000000000000000 ^ 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF == 0x0000000000000000FFFFFFFFFFFFFFFF); - try expect(0x0000000000000000FFFFFFFFFFFFFFFF ^ 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF == 0xFFFFFFFFFFFFFFFF0000000000000000); - try expect(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF ^ 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF == 0x00000000000000000000000000000000); - try expect(0xFFFFFFFF00000000FFFFFFFF00000000 ^ 0x00000000FFFFFFFF00000000FFFFFFFF == 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF); - try expect(0xFFFFFFFF00000000FFFFFFFF00000000 ^ 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF == 0x00000000FFFFFFFF00000000FFFFFFFF); - try expect(0x00000000FFFFFFFF00000000FFFFFFFF ^ 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF == 0xFFFFFFFF00000000FFFFFFFF00000000); - } -} - -test "f128" { - try test_f128(); - comptime try test_f128(); -} - -fn make_f128(x: f128) f128 { - return x; -} - -fn test_f128() !void { - try expect(@sizeOf(f128) == 16); - try expect(make_f128(1.0) == 1.0); - try expect(make_f128(1.0) != 1.1); - try expect(make_f128(1.0) > 0.9); - try expect(make_f128(1.0) >= 0.9); - try expect(make_f128(1.0) >= 1.0); - try should_not_be_zero(1.0); -} - -fn should_not_be_zero(x: f128) !void { - try expect(x != 0.0); -} - -test "comptime float rem int" { - comptime { - var x = @as(f32, 1) % 2; - try expect(x == 1.0); - } -} - -test "remainder division" { - comptime try remdiv(f16); - comptime try remdiv(f32); - comptime try remdiv(f64); - comptime try remdiv(f128); - try remdiv(f16); - try remdiv(f64); - try remdiv(f128); -} - -fn remdiv(comptime T: type) !void { - try expect(@as(T, 1) == @as(T, 1) % @as(T, 2)); - try expect(@as(T, 1) == @as(T, 7) % @as(T, 3)); -} - -test "@sqrt" { - try testSqrt(f64, 12.0); - comptime try testSqrt(f64, 12.0); - try testSqrt(f32, 13.0); - comptime try testSqrt(f32, 13.0); - try testSqrt(f16, 13.0); - comptime try testSqrt(f16, 13.0); - - const x = 14.0; - const y = x * x; - const z = @sqrt(y); - comptime try expect(z == x); -} - -fn testSqrt(comptime T: type, x: T) !void { - try expect(@sqrt(x * x) == x); -} - -test "@fabs" { - try testFabs(f128, 12.0); - comptime try testFabs(f128, 12.0); - try testFabs(f64, 12.0); - comptime try testFabs(f64, 12.0); - try testFabs(f32, 12.0); - comptime try testFabs(f32, 12.0); - try testFabs(f16, 12.0); - comptime try testFabs(f16, 12.0); - - const x = 14.0; - const y = -x; - const z = @fabs(y); - comptime try expectEqual(x, z); -} - -fn testFabs(comptime T: type, x: T) !void { - const y = -x; - const z = @fabs(y); - try expectEqual(x, z); -} - -test "@floor" { - // FIXME: Generates a floorl function call - // testFloor(f128, 12.0); - comptime try testFloor(f128, 12.0); - try testFloor(f64, 12.0); - comptime try testFloor(f64, 12.0); - try testFloor(f32, 12.0); - comptime try testFloor(f32, 12.0); - try testFloor(f16, 12.0); - comptime try testFloor(f16, 12.0); - - const x = 14.0; - const y = x + 0.7; - const z = @floor(y); - comptime try expectEqual(x, z); -} - -fn testFloor(comptime T: type, x: T) 
!void { - const y = x + 0.6; - const z = @floor(y); - try expectEqual(x, z); -} - -test "@ceil" { - // FIXME: Generates a ceill function call - //testCeil(f128, 12.0); - comptime try testCeil(f128, 12.0); - try testCeil(f64, 12.0); - comptime try testCeil(f64, 12.0); - try testCeil(f32, 12.0); - comptime try testCeil(f32, 12.0); - try testCeil(f16, 12.0); - comptime try testCeil(f16, 12.0); - - const x = 14.0; - const y = x - 0.7; - const z = @ceil(y); - comptime try expectEqual(x, z); -} - -fn testCeil(comptime T: type, x: T) !void { - const y = x - 0.8; - const z = @ceil(y); - try expectEqual(x, z); -} - -test "@trunc" { - // FIXME: Generates a truncl function call - //testTrunc(f128, 12.0); - comptime try testTrunc(f128, 12.0); - try testTrunc(f64, 12.0); - comptime try testTrunc(f64, 12.0); - try testTrunc(f32, 12.0); - comptime try testTrunc(f32, 12.0); - try testTrunc(f16, 12.0); - comptime try testTrunc(f16, 12.0); - - const x = 14.0; - const y = x + 0.7; - const z = @trunc(y); - comptime try expectEqual(x, z); -} - -fn testTrunc(comptime T: type, x: T) !void { - { - const y = x + 0.8; - const z = @trunc(y); - try expectEqual(x, z); - } - - { - const y = -x - 0.8; - const z = @trunc(y); - try expectEqual(-x, z); - } -} - -test "@round" { - // FIXME: Generates a roundl function call - //testRound(f128, 12.0); - comptime try testRound(f128, 12.0); - try testRound(f64, 12.0); - comptime try testRound(f64, 12.0); - try testRound(f32, 12.0); - comptime try testRound(f32, 12.0); - try testRound(f16, 12.0); - comptime try testRound(f16, 12.0); - - const x = 14.0; - const y = x + 0.4; - const z = @round(y); - comptime try expectEqual(x, z); -} - -fn testRound(comptime T: type, x: T) !void { - const y = x - 0.5; - const z = @round(y); - try expectEqual(x, z); -} - -test "comptime_int param and return" { - const a = comptimeAdd(35361831660712422535336160538497375248, 101752735581729509668353361206450473702); - try expect(a == 137114567242441932203689521744947848950); - - const b = comptimeAdd(594491908217841670578297176641415611445982232488944558774612, 390603545391089362063884922208143568023166603618446395589768); - try expect(b == 985095453608931032642182098849559179469148836107390954364380); -} - -fn comptimeAdd(comptime a: comptime_int, comptime b: comptime_int) comptime_int { - return a + b; -} - -test "vector integer addition" { - const S = struct { - fn doTheTest() !void { - var a: std.meta.Vector(4, i32) = [_]i32{ 1, 2, 3, 4 }; - var b: std.meta.Vector(4, i32) = [_]i32{ 5, 6, 7, 8 }; - var result = a + b; - var result_array: [4]i32 = result; - const expected = [_]i32{ 6, 8, 10, 12 }; - try expectEqualSlices(i32, &expected, &result_array); - } - }; - try S.doTheTest(); - comptime try S.doTheTest(); -} - -test "NaN comparison" { - try testNanEqNan(f16); - try testNanEqNan(f32); - try testNanEqNan(f64); - try testNanEqNan(f128); - comptime try testNanEqNan(f16); - comptime try testNanEqNan(f32); - comptime try testNanEqNan(f64); - comptime try testNanEqNan(f128); -} - -fn testNanEqNan(comptime F: type) !void { - var nan1 = std.math.nan(F); - var nan2 = std.math.nan(F); - try expect(nan1 != nan2); - try expect(!(nan1 == nan2)); - try expect(!(nan1 > nan2)); - try expect(!(nan1 >= nan2)); - try expect(!(nan1 < nan2)); - try expect(!(nan1 <= nan2)); -} - -test "128-bit multiplication" { - var a: i128 = 3; - var b: i128 = 2; - var c = a * b; - try expect(c == 6); -} - -test "vector comparison" { - const S = struct { - fn doTheTest() !void { - var a: std.meta.Vector(6, i32) = [_]i32{ 1, 3, 
-1, 5, 7, 9 }; - var b: std.meta.Vector(6, i32) = [_]i32{ -1, 3, 0, 6, 10, -10 }; - try expect(mem.eql(bool, &@as([6]bool, a < b), &[_]bool{ false, false, true, true, true, false })); - try expect(mem.eql(bool, &@as([6]bool, a <= b), &[_]bool{ false, true, true, true, true, false })); - try expect(mem.eql(bool, &@as([6]bool, a == b), &[_]bool{ false, true, false, false, false, false })); - try expect(mem.eql(bool, &@as([6]bool, a != b), &[_]bool{ true, false, true, true, true, true })); - try expect(mem.eql(bool, &@as([6]bool, a > b), &[_]bool{ true, false, false, false, false, true })); - try expect(mem.eql(bool, &@as([6]bool, a >= b), &[_]bool{ true, true, false, false, false, true })); - } - }; - try S.doTheTest(); - comptime try S.doTheTest(); -} - -test "compare undefined literal with comptime_int" { - var x = undefined == 1; - // x is now undefined with type bool - x = true; - try expect(x); -} - -test "signed zeros are represented properly" { - const S = struct { - fn doTheTest() !void { - inline for ([_]type{ f16, f32, f64, f128 }) |T| { - const ST = std.meta.Int(.unsigned, @typeInfo(T).Float.bits); - var as_fp_val = -@as(T, 0.0); - var as_uint_val = @bitCast(ST, as_fp_val); - // Ensure the sign bit is set. - try expect(as_uint_val >> (@typeInfo(T).Float.bits - 1) == 1); - } - } - }; - - try S.doTheTest(); - comptime try S.doTheTest(); -} diff --git a/test/behavior/math_stage1.zig b/test/behavior/math_stage1.zig new file mode 100644 index 0000000000..9f412930b5 --- /dev/null +++ b/test/behavior/math_stage1.zig @@ -0,0 +1,855 @@ +const std = @import("std"); +const expect = std.testing.expect; +const expectEqual = std.testing.expectEqual; +const expectEqualSlices = std.testing.expectEqualSlices; +const maxInt = std.math.maxInt; +const minInt = std.math.minInt; +const mem = std.mem; + +test "division" { + try testDivision(); + comptime try testDivision(); +} +fn testDivision() !void { + try expect(div(u32, 13, 3) == 4); + try expect(div(f16, 1.0, 2.0) == 0.5); + try expect(div(f32, 1.0, 2.0) == 0.5); + + try expect(divExact(u32, 55, 11) == 5); + try expect(divExact(i32, -55, 11) == -5); + try expect(divExact(f16, 55.0, 11.0) == 5.0); + try expect(divExact(f16, -55.0, 11.0) == -5.0); + try expect(divExact(f32, 55.0, 11.0) == 5.0); + try expect(divExact(f32, -55.0, 11.0) == -5.0); + + try expect(divFloor(i32, 5, 3) == 1); + try expect(divFloor(i32, -5, 3) == -2); + try expect(divFloor(f16, 5.0, 3.0) == 1.0); + try expect(divFloor(f16, -5.0, 3.0) == -2.0); + try expect(divFloor(f32, 5.0, 3.0) == 1.0); + try expect(divFloor(f32, -5.0, 3.0) == -2.0); + try expect(divFloor(i32, -0x80000000, -2) == 0x40000000); + try expect(divFloor(i32, 0, -0x80000000) == 0); + try expect(divFloor(i32, -0x40000001, 0x40000000) == -2); + try expect(divFloor(i32, -0x80000000, 1) == -0x80000000); + try expect(divFloor(i32, 10, 12) == 0); + try expect(divFloor(i32, -14, 12) == -2); + try expect(divFloor(i32, -2, 12) == -1); + + try expect(divTrunc(i32, 5, 3) == 1); + try expect(divTrunc(i32, -5, 3) == -1); + try expect(divTrunc(f16, 5.0, 3.0) == 1.0); + try expect(divTrunc(f16, -5.0, 3.0) == -1.0); + try expect(divTrunc(f32, 5.0, 3.0) == 1.0); + try expect(divTrunc(f32, -5.0, 3.0) == -1.0); + try expect(divTrunc(f64, 5.0, 3.0) == 1.0); + try expect(divTrunc(f64, -5.0, 3.0) == -1.0); + try expect(divTrunc(i32, 10, 12) == 0); + try expect(divTrunc(i32, -14, 12) == -1); + try expect(divTrunc(i32, -2, 12) == 0); + + try expect(mod(i32, 10, 12) == 10); + try expect(mod(i32, -14, 12) == 10); + try expect(mod(i32, -2, 
12) == 10); + + comptime { + try expect( + 1194735857077236777412821811143690633098347576 % 508740759824825164163191790951174292733114988 == 177254337427586449086438229241342047632117600, + ); + try expect( + @rem(-1194735857077236777412821811143690633098347576, 508740759824825164163191790951174292733114988) == -177254337427586449086438229241342047632117600, + ); + try expect( + 1194735857077236777412821811143690633098347576 / 508740759824825164163191790951174292733114988 == 2, + ); + try expect( + @divTrunc(-1194735857077236777412821811143690633098347576, 508740759824825164163191790951174292733114988) == -2, + ); + try expect( + @divTrunc(1194735857077236777412821811143690633098347576, -508740759824825164163191790951174292733114988) == -2, + ); + try expect( + @divTrunc(-1194735857077236777412821811143690633098347576, -508740759824825164163191790951174292733114988) == 2, + ); + try expect( + 4126227191251978491697987544882340798050766755606969681711 % 10 == 1, + ); + } +} +fn div(comptime T: type, a: T, b: T) T { + return a / b; +} +fn divExact(comptime T: type, a: T, b: T) T { + return @divExact(a, b); +} +fn divFloor(comptime T: type, a: T, b: T) T { + return @divFloor(a, b); +} +fn divTrunc(comptime T: type, a: T, b: T) T { + return @divTrunc(a, b); +} +fn mod(comptime T: type, a: T, b: T) T { + return @mod(a, b); +} + +test "@addWithOverflow" { + var result: u8 = undefined; + try expect(@addWithOverflow(u8, 250, 100, &result)); + try expect(!@addWithOverflow(u8, 100, 150, &result)); + try expect(result == 250); +} + +// TODO test mulWithOverflow +// TODO test subWithOverflow + +test "@shlWithOverflow" { + var result: u16 = undefined; + try expect(@shlWithOverflow(u16, 0b0010111111111111, 3, &result)); + try expect(!@shlWithOverflow(u16, 0b0010111111111111, 2, &result)); + try expect(result == 0b1011111111111100); +} + +test "@*WithOverflow with u0 values" { + var result: u0 = undefined; + try expect(!@addWithOverflow(u0, 0, 0, &result)); + try expect(!@subWithOverflow(u0, 0, 0, &result)); + try expect(!@mulWithOverflow(u0, 0, 0, &result)); + try expect(!@shlWithOverflow(u0, 0, 0, &result)); +} + +test "@clz" { + try testClz(); + comptime try testClz(); +} + +fn testClz() !void { + try expect(@clz(u8, 0b10001010) == 0); + try expect(@clz(u8, 0b00001010) == 4); + try expect(@clz(u8, 0b00011010) == 3); + try expect(@clz(u8, 0b00000000) == 8); + try expect(@clz(u128, 0xffffffffffffffff) == 64); + try expect(@clz(u128, 0x10000000000000000) == 63); +} + +test "@clz vectors" { + try testClzVectors(); + comptime try testClzVectors(); +} + +fn testClzVectors() !void { + @setEvalBranchQuota(10_000); + try expectEqual(@clz(u8, @splat(64, @as(u8, 0b10001010))), @splat(64, @as(u4, 0))); + try expectEqual(@clz(u8, @splat(64, @as(u8, 0b00001010))), @splat(64, @as(u4, 4))); + try expectEqual(@clz(u8, @splat(64, @as(u8, 0b00011010))), @splat(64, @as(u4, 3))); + try expectEqual(@clz(u8, @splat(64, @as(u8, 0b00000000))), @splat(64, @as(u4, 8))); + try expectEqual(@clz(u128, @splat(64, @as(u128, 0xffffffffffffffff))), @splat(64, @as(u8, 64))); + try expectEqual(@clz(u128, @splat(64, @as(u128, 0x10000000000000000))), @splat(64, @as(u8, 63))); +} + +test "@ctz" { + try testCtz(); + comptime try testCtz(); +} + +fn testCtz() !void { + try expect(@ctz(u8, 0b10100000) == 5); + try expect(@ctz(u8, 0b10001010) == 1); + try expect(@ctz(u8, 0b00000000) == 8); + try expect(@ctz(u16, 0b00000000) == 16); +} + +test "@ctz vectors" { + try testClzVectors(); + comptime try testClzVectors(); +} + +fn testCtzVectors() 
!void { + @setEvalBranchQuota(10_000); + try expectEqual(@ctz(u8, @splat(64, @as(u8, 0b10100000))), @splat(64, @as(u4, 5))); + try expectEqual(@ctz(u8, @splat(64, @as(u8, 0b10001010))), @splat(64, @as(u4, 1))); + try expectEqual(@ctz(u8, @splat(64, @as(u8, 0b00000000))), @splat(64, @as(u4, 8))); + try expectEqual(@ctz(u16, @splat(64, @as(u16, 0b00000000))), @splat(64, @as(u5, 16))); +} + +test "const number literal" { + const one = 1; + const eleven = ten + one; + + try expect(eleven == 11); +} +const ten = 10; + +test "unsigned wrapping" { + try testUnsignedWrappingEval(maxInt(u32)); + comptime try testUnsignedWrappingEval(maxInt(u32)); +} +fn testUnsignedWrappingEval(x: u32) !void { + const zero = x +% 1; + try expect(zero == 0); + const orig = zero -% 1; + try expect(orig == maxInt(u32)); +} + +test "signed wrapping" { + try testSignedWrappingEval(maxInt(i32)); + comptime try testSignedWrappingEval(maxInt(i32)); +} +fn testSignedWrappingEval(x: i32) !void { + const min_val = x +% 1; + try expect(min_val == minInt(i32)); + const max_val = min_val -% 1; + try expect(max_val == maxInt(i32)); +} + +test "signed negation wrapping" { + try testSignedNegationWrappingEval(minInt(i16)); + comptime try testSignedNegationWrappingEval(minInt(i16)); +} +fn testSignedNegationWrappingEval(x: i16) !void { + try expect(x == -32768); + const neg = -%x; + try expect(neg == -32768); +} + +test "unsigned negation wrapping" { + try testUnsignedNegationWrappingEval(1); + comptime try testUnsignedNegationWrappingEval(1); +} +fn testUnsignedNegationWrappingEval(x: u16) !void { + try expect(x == 1); + const neg = -%x; + try expect(neg == maxInt(u16)); +} + +test "unsigned 64-bit division" { + try test_u64_div(); + comptime try test_u64_div(); +} +fn test_u64_div() !void { + const result = divWithResult(1152921504606846976, 34359738365); + try expect(result.quotient == 33554432); + try expect(result.remainder == 100663296); +} +fn divWithResult(a: u64, b: u64) DivResult { + return DivResult{ + .quotient = a / b, + .remainder = a % b, + }; +} +const DivResult = struct { + quotient: u64, + remainder: u64, +}; + +test "binary not" { + try expect(comptime x: { + break :x ~@as(u16, 0b1010101010101010) == 0b0101010101010101; + }); + try expect(comptime x: { + break :x ~@as(u64, 2147483647) == 18446744071562067968; + }); + try testBinaryNot(0b1010101010101010); +} + +fn testBinaryNot(x: u16) !void { + try expect(~x == 0b0101010101010101); +} + +test "small int addition" { + var x: u2 = 0; + try expect(x == 0); + + x += 1; + try expect(x == 1); + + x += 1; + try expect(x == 2); + + x += 1; + try expect(x == 3); + + var result: @TypeOf(x) = 3; + try expect(@addWithOverflow(@TypeOf(x), x, 1, &result)); + + try expect(result == 0); +} + +test "float equality" { + const x: f64 = 0.012; + const y: f64 = x + 1.0; + + try testFloatEqualityImpl(x, y); + comptime try testFloatEqualityImpl(x, y); +} + +fn testFloatEqualityImpl(x: f64, y: f64) !void { + const y2 = x + 1.0; + try expect(y == y2); +} + +test "allow signed integer division/remainder when values are comptime known and positive or exact" { + try expect(5 / 3 == 1); + try expect(-5 / -3 == 1); + try expect(-6 / 3 == -2); + + try expect(5 % 3 == 2); + try expect(-6 % 3 == 0); +} + +test "hex float literal parsing" { + comptime try expect(0x1.0 == 1.0); +} + +test "quad hex float literal parsing in range" { + const a = 0x1.af23456789bbaaab347645365cdep+5; + const b = 0x1.dedafcff354b6ae9758763545432p-9; + const c = 0x1.2f34dd5f437e849b4baab754cdefp+4534; + const d = 
0x1.edcbff8ad76ab5bf46463233214fp-435; + if (false) { + a; + b; + c; + d; + } +} + +test "quad hex float literal parsing accurate" { + const a: f128 = 0x1.1111222233334444555566667777p+0; + + // implied 1 is dropped, with an exponent of 0 (0x3fff) after biasing. + const expected: u128 = 0x3fff1111222233334444555566667777; + try expect(@bitCast(u128, a) == expected); + + // non-normalized + const b: f128 = 0x11.111222233334444555566667777p-4; + try expect(@bitCast(u128, b) == expected); + + const S = struct { + fn doTheTest() !void { + { + var f: f128 = 0x1.2eab345678439abcdefea56782346p+5; + try expect(@bitCast(u128, f) == 0x40042eab345678439abcdefea5678234); + } + { + var f: f128 = 0x1.edcb34a235253948765432134674fp-1; + try expect(@bitCast(u128, f) == 0x3ffeedcb34a235253948765432134674); + } + { + var f: f128 = 0x1.353e45674d89abacc3a2ebf3ff4ffp-50; + try expect(@bitCast(u128, f) == 0x3fcd353e45674d89abacc3a2ebf3ff50); + } + { + var f: f128 = 0x1.ed8764648369535adf4be3214567fp-9; + try expect(@bitCast(u128, f) == 0x3ff6ed8764648369535adf4be3214568); + } + const exp2ft = [_]f64{ + 0x1.6a09e667f3bcdp-1, + 0x1.7a11473eb0187p-1, + 0x1.8ace5422aa0dbp-1, + 0x1.9c49182a3f090p-1, + 0x1.ae89f995ad3adp-1, + 0x1.c199bdd85529cp-1, + 0x1.d5818dcfba487p-1, + 0x1.ea4afa2a490dap-1, + 0x1.0000000000000p+0, + 0x1.0b5586cf9890fp+0, + 0x1.172b83c7d517bp+0, + 0x1.2387a6e756238p+0, + 0x1.306fe0a31b715p+0, + 0x1.3dea64c123422p+0, + 0x1.4bfdad5362a27p+0, + 0x1.5ab07dd485429p+0, + 0x1.8p23, + 0x1.62e430p-1, + 0x1.ebfbe0p-3, + 0x1.c6b348p-5, + 0x1.3b2c9cp-7, + 0x1.0p127, + -0x1.0p-149, + }; + + const answers = [_]u64{ + 0x3fe6a09e667f3bcd, + 0x3fe7a11473eb0187, + 0x3fe8ace5422aa0db, + 0x3fe9c49182a3f090, + 0x3feae89f995ad3ad, + 0x3fec199bdd85529c, + 0x3fed5818dcfba487, + 0x3feea4afa2a490da, + 0x3ff0000000000000, + 0x3ff0b5586cf9890f, + 0x3ff172b83c7d517b, + 0x3ff2387a6e756238, + 0x3ff306fe0a31b715, + 0x3ff3dea64c123422, + 0x3ff4bfdad5362a27, + 0x3ff5ab07dd485429, + 0x4168000000000000, + 0x3fe62e4300000000, + 0x3fcebfbe00000000, + 0x3fac6b3480000000, + 0x3f83b2c9c0000000, + 0x47e0000000000000, + 0xb6a0000000000000, + }; + + for (exp2ft) |x, i| { + try expect(@bitCast(u64, x) == answers[i]); + } + } + }; + try S.doTheTest(); + comptime try S.doTheTest(); +} + +test "underscore separator parsing" { + try expect(0_0_0_0 == 0); + try expect(1_234_567 == 1234567); + try expect(001_234_567 == 1234567); + try expect(0_0_1_2_3_4_5_6_7 == 1234567); + + try expect(0b0_0_0_0 == 0); + try expect(0b1010_1010 == 0b10101010); + try expect(0b0000_1010_1010 == 0b10101010); + try expect(0b1_0_1_0_1_0_1_0 == 0b10101010); + + try expect(0o0_0_0_0 == 0); + try expect(0o1010_1010 == 0o10101010); + try expect(0o0000_1010_1010 == 0o10101010); + try expect(0o1_0_1_0_1_0_1_0 == 0o10101010); + + try expect(0x0_0_0_0 == 0); + try expect(0x1010_1010 == 0x10101010); + try expect(0x0000_1010_1010 == 0x10101010); + try expect(0x1_0_1_0_1_0_1_0 == 0x10101010); + + try expect(123_456.789_000e1_0 == 123456.789000e10); + try expect(0_1_2_3_4_5_6.7_8_9_0_0_0e0_0_1_0 == 123456.789000e10); + + try expect(0x1234_5678.9ABC_DEF0p-1_0 == 0x12345678.9ABCDEF0p-10); + try expect(0x1_2_3_4_5_6_7_8.9_A_B_C_D_E_F_0p-0_0_0_1_0 == 0x12345678.9ABCDEF0p-10); +} + +test "hex float literal within range" { + const a = 0x1.0p16383; + const b = 0x0.1p16387; + const c = 0x1.0p-16382; + if (false) { + a; + b; + c; + } +} + +test "truncating shift left" { + try testShlTrunc(maxInt(u16)); + comptime try testShlTrunc(maxInt(u16)); +} +fn testShlTrunc(x: u16) !void { + const 
shifted = x << 1; + try expect(shifted == 65534); +} + +test "truncating shift right" { + try testShrTrunc(maxInt(u16)); + comptime try testShrTrunc(maxInt(u16)); +} +fn testShrTrunc(x: u16) !void { + const shifted = x >> 1; + try expect(shifted == 32767); +} + +test "exact shift left" { + try testShlExact(0b00110101); + comptime try testShlExact(0b00110101); +} +fn testShlExact(x: u8) !void { + const shifted = @shlExact(x, 2); + try expect(shifted == 0b11010100); +} + +test "exact shift right" { + try testShrExact(0b10110100); + comptime try testShrExact(0b10110100); +} +fn testShrExact(x: u8) !void { + const shifted = @shrExact(x, 2); + try expect(shifted == 0b00101101); +} + +test "shift left/right on u0 operand" { + const S = struct { + fn doTheTest() !void { + var x: u0 = 0; + var y: u0 = 0; + try expectEqual(@as(u0, 0), x << 0); + try expectEqual(@as(u0, 0), x >> 0); + try expectEqual(@as(u0, 0), x << y); + try expectEqual(@as(u0, 0), x >> y); + try expectEqual(@as(u0, 0), @shlExact(x, 0)); + try expectEqual(@as(u0, 0), @shrExact(x, 0)); + try expectEqual(@as(u0, 0), @shlExact(x, y)); + try expectEqual(@as(u0, 0), @shrExact(x, y)); + } + }; + try S.doTheTest(); + comptime try S.doTheTest(); +} + +test "comptime_int addition" { + comptime { + try expect(35361831660712422535336160538497375248 + 101752735581729509668353361206450473702 == 137114567242441932203689521744947848950); + try expect(594491908217841670578297176641415611445982232488944558774612 + 390603545391089362063884922208143568023166603618446395589768 == 985095453608931032642182098849559179469148836107390954364380); + } +} + +test "comptime_int multiplication" { + comptime { + try expect( + 45960427431263824329884196484953148229 * 128339149605334697009938835852565949723 == 5898522172026096622534201617172456926982464453350084962781392314016180490567, + ); + try expect( + 594491908217841670578297176641415611445982232488944558774612 * 390603545391089362063884922208143568023166603618446395589768 == 232210647056203049913662402532976186578842425262306016094292237500303028346593132411865381225871291702600263463125370016, + ); + } +} + +test "comptime_int shifting" { + comptime { + try expect((@as(u128, 1) << 127) == 0x80000000000000000000000000000000); + } +} + +test "comptime_int multi-limb shift and mask" { + comptime { + var a = 0xefffffffa0000001eeeeeeefaaaaaaab; + + try expect(@as(u32, a & 0xffffffff) == 0xaaaaaaab); + a >>= 32; + try expect(@as(u32, a & 0xffffffff) == 0xeeeeeeef); + a >>= 32; + try expect(@as(u32, a & 0xffffffff) == 0xa0000001); + a >>= 32; + try expect(@as(u32, a & 0xffffffff) == 0xefffffff); + a >>= 32; + + try expect(a == 0); + } +} + +test "comptime_int multi-limb partial shift right" { + comptime { + var a = 0x1ffffffffeeeeeeee; + a >>= 16; + try expect(a == 0x1ffffffffeeee); + } +} + +test "xor" { + try test_xor(); + comptime try test_xor(); +} + +fn test_xor() !void { + try expect(0xFF ^ 0x00 == 0xFF); + try expect(0xF0 ^ 0x0F == 0xFF); + try expect(0xFF ^ 0xF0 == 0x0F); + try expect(0xFF ^ 0x0F == 0xF0); + try expect(0xFF ^ 0xFF == 0x00); +} + +test "comptime_int xor" { + comptime { + try expect(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF ^ 0x00000000000000000000000000000000 == 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF); + try expect(0xFFFFFFFFFFFFFFFF0000000000000000 ^ 0x0000000000000000FFFFFFFFFFFFFFFF == 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF); + try expect(0xFFFFFFFFFFFFFFFF0000000000000000 ^ 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF == 0x0000000000000000FFFFFFFFFFFFFFFF); + try expect(0x0000000000000000FFFFFFFFFFFFFFFF ^ 
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF == 0xFFFFFFFFFFFFFFFF0000000000000000); + try expect(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF ^ 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF == 0x00000000000000000000000000000000); + try expect(0xFFFFFFFF00000000FFFFFFFF00000000 ^ 0x00000000FFFFFFFF00000000FFFFFFFF == 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF); + try expect(0xFFFFFFFF00000000FFFFFFFF00000000 ^ 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF == 0x00000000FFFFFFFF00000000FFFFFFFF); + try expect(0x00000000FFFFFFFF00000000FFFFFFFF ^ 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF == 0xFFFFFFFF00000000FFFFFFFF00000000); + } +} + +test "f128" { + try test_f128(); + comptime try test_f128(); +} + +fn make_f128(x: f128) f128 { + return x; +} + +fn test_f128() !void { + try expect(@sizeOf(f128) == 16); + try expect(make_f128(1.0) == 1.0); + try expect(make_f128(1.0) != 1.1); + try expect(make_f128(1.0) > 0.9); + try expect(make_f128(1.0) >= 0.9); + try expect(make_f128(1.0) >= 1.0); + try should_not_be_zero(1.0); +} + +fn should_not_be_zero(x: f128) !void { + try expect(x != 0.0); +} + +test "comptime float rem int" { + comptime { + var x = @as(f32, 1) % 2; + try expect(x == 1.0); + } +} + +test "remainder division" { + comptime try remdiv(f16); + comptime try remdiv(f32); + comptime try remdiv(f64); + comptime try remdiv(f128); + try remdiv(f16); + try remdiv(f64); + try remdiv(f128); +} + +fn remdiv(comptime T: type) !void { + try expect(@as(T, 1) == @as(T, 1) % @as(T, 2)); + try expect(@as(T, 1) == @as(T, 7) % @as(T, 3)); +} + +test "@sqrt" { + try testSqrt(f64, 12.0); + comptime try testSqrt(f64, 12.0); + try testSqrt(f32, 13.0); + comptime try testSqrt(f32, 13.0); + try testSqrt(f16, 13.0); + comptime try testSqrt(f16, 13.0); + + const x = 14.0; + const y = x * x; + const z = @sqrt(y); + comptime try expect(z == x); +} + +fn testSqrt(comptime T: type, x: T) !void { + try expect(@sqrt(x * x) == x); +} + +test "@fabs" { + try testFabs(f128, 12.0); + comptime try testFabs(f128, 12.0); + try testFabs(f64, 12.0); + comptime try testFabs(f64, 12.0); + try testFabs(f32, 12.0); + comptime try testFabs(f32, 12.0); + try testFabs(f16, 12.0); + comptime try testFabs(f16, 12.0); + + const x = 14.0; + const y = -x; + const z = @fabs(y); + comptime try expectEqual(x, z); +} + +fn testFabs(comptime T: type, x: T) !void { + const y = -x; + const z = @fabs(y); + try expectEqual(x, z); +} + +test "@floor" { + // FIXME: Generates a floorl function call + // testFloor(f128, 12.0); + comptime try testFloor(f128, 12.0); + try testFloor(f64, 12.0); + comptime try testFloor(f64, 12.0); + try testFloor(f32, 12.0); + comptime try testFloor(f32, 12.0); + try testFloor(f16, 12.0); + comptime try testFloor(f16, 12.0); + + const x = 14.0; + const y = x + 0.7; + const z = @floor(y); + comptime try expectEqual(x, z); +} + +fn testFloor(comptime T: type, x: T) !void { + const y = x + 0.6; + const z = @floor(y); + try expectEqual(x, z); +} + +test "@ceil" { + // FIXME: Generates a ceill function call + //testCeil(f128, 12.0); + comptime try testCeil(f128, 12.0); + try testCeil(f64, 12.0); + comptime try testCeil(f64, 12.0); + try testCeil(f32, 12.0); + comptime try testCeil(f32, 12.0); + try testCeil(f16, 12.0); + comptime try testCeil(f16, 12.0); + + const x = 14.0; + const y = x - 0.7; + const z = @ceil(y); + comptime try expectEqual(x, z); +} + +fn testCeil(comptime T: type, x: T) !void { + const y = x - 0.8; + const z = @ceil(y); + try expectEqual(x, z); +} + +test "@trunc" { + // FIXME: Generates a truncl function call + //testTrunc(f128, 12.0); + comptime try 
testTrunc(f128, 12.0); + try testTrunc(f64, 12.0); + comptime try testTrunc(f64, 12.0); + try testTrunc(f32, 12.0); + comptime try testTrunc(f32, 12.0); + try testTrunc(f16, 12.0); + comptime try testTrunc(f16, 12.0); + + const x = 14.0; + const y = x + 0.7; + const z = @trunc(y); + comptime try expectEqual(x, z); +} + +fn testTrunc(comptime T: type, x: T) !void { + { + const y = x + 0.8; + const z = @trunc(y); + try expectEqual(x, z); + } + + { + const y = -x - 0.8; + const z = @trunc(y); + try expectEqual(-x, z); + } +} + +test "@round" { + // FIXME: Generates a roundl function call + //testRound(f128, 12.0); + comptime try testRound(f128, 12.0); + try testRound(f64, 12.0); + comptime try testRound(f64, 12.0); + try testRound(f32, 12.0); + comptime try testRound(f32, 12.0); + try testRound(f16, 12.0); + comptime try testRound(f16, 12.0); + + const x = 14.0; + const y = x + 0.4; + const z = @round(y); + comptime try expectEqual(x, z); +} + +fn testRound(comptime T: type, x: T) !void { + const y = x - 0.5; + const z = @round(y); + try expectEqual(x, z); +} + +test "comptime_int param and return" { + const a = comptimeAdd(35361831660712422535336160538497375248, 101752735581729509668353361206450473702); + try expect(a == 137114567242441932203689521744947848950); + + const b = comptimeAdd(594491908217841670578297176641415611445982232488944558774612, 390603545391089362063884922208143568023166603618446395589768); + try expect(b == 985095453608931032642182098849559179469148836107390954364380); +} + +fn comptimeAdd(comptime a: comptime_int, comptime b: comptime_int) comptime_int { + return a + b; +} + +test "vector integer addition" { + const S = struct { + fn doTheTest() !void { + var a: std.meta.Vector(4, i32) = [_]i32{ 1, 2, 3, 4 }; + var b: std.meta.Vector(4, i32) = [_]i32{ 5, 6, 7, 8 }; + var result = a + b; + var result_array: [4]i32 = result; + const expected = [_]i32{ 6, 8, 10, 12 }; + try expectEqualSlices(i32, &expected, &result_array); + } + }; + try S.doTheTest(); + comptime try S.doTheTest(); +} + +test "NaN comparison" { + try testNanEqNan(f16); + try testNanEqNan(f32); + try testNanEqNan(f64); + try testNanEqNan(f128); + comptime try testNanEqNan(f16); + comptime try testNanEqNan(f32); + comptime try testNanEqNan(f64); + comptime try testNanEqNan(f128); +} + +fn testNanEqNan(comptime F: type) !void { + var nan1 = std.math.nan(F); + var nan2 = std.math.nan(F); + try expect(nan1 != nan2); + try expect(!(nan1 == nan2)); + try expect(!(nan1 > nan2)); + try expect(!(nan1 >= nan2)); + try expect(!(nan1 < nan2)); + try expect(!(nan1 <= nan2)); +} + +test "128-bit multiplication" { + var a: i128 = 3; + var b: i128 = 2; + var c = a * b; + try expect(c == 6); +} + +test "vector comparison" { + const S = struct { + fn doTheTest() !void { + var a: std.meta.Vector(6, i32) = [_]i32{ 1, 3, -1, 5, 7, 9 }; + var b: std.meta.Vector(6, i32) = [_]i32{ -1, 3, 0, 6, 10, -10 }; + try expect(mem.eql(bool, &@as([6]bool, a < b), &[_]bool{ false, false, true, true, true, false })); + try expect(mem.eql(bool, &@as([6]bool, a <= b), &[_]bool{ false, true, true, true, true, false })); + try expect(mem.eql(bool, &@as([6]bool, a == b), &[_]bool{ false, true, false, false, false, false })); + try expect(mem.eql(bool, &@as([6]bool, a != b), &[_]bool{ true, false, true, true, true, true })); + try expect(mem.eql(bool, &@as([6]bool, a > b), &[_]bool{ true, false, false, false, false, true })); + try expect(mem.eql(bool, &@as([6]bool, a >= b), &[_]bool{ true, true, false, false, false, true })); + } + }; + try 
S.doTheTest(); + comptime try S.doTheTest(); +} + +test "compare undefined literal with comptime_int" { + var x = undefined == 1; + // x is now undefined with type bool + x = true; + try expect(x); +} + +test "signed zeros are represented properly" { + const S = struct { + fn doTheTest() !void { + inline for ([_]type{ f16, f32, f64, f128 }) |T| { + const ST = std.meta.Int(.unsigned, @typeInfo(T).Float.bits); + var as_fp_val = -@as(T, 0.0); + var as_uint_val = @bitCast(ST, as_fp_val); + // Ensure the sign bit is set. + try expect(as_uint_val >> (@typeInfo(T).Float.bits - 1) == 1); + } + } + }; + + try S.doTheTest(); + comptime try S.doTheTest(); +} -- cgit v1.2.3 From 29f41896ed9d99e82a88f4b63efa182ca0d2f93c Mon Sep 17 00:00:00 2001 From: Travis Staloch Date: Thu, 2 Sep 2021 13:50:24 -0700 Subject: sat-arithmetic: add operator support - adds initial support for the operators +|, -|, *|, <<|, +|=, -|=, *|=, <<|= - uses operators in addition to builtins in behavior test - adds binOpExt() and assignBinOpExt() to AstGen.zig. these need to be audited --- lib/std/zig/Ast.zig | 32 +++++++++ lib/std/zig/parse.zig | 8 +++ lib/std/zig/render.zig | 8 +++ lib/std/zig/tokenizer.zig | 79 ++++++++++++++++++++ src/Air.zig | 22 ++++++ src/AstGen.zig | 124 +++++++++++++++++++++++++++++++- src/Liveness.zig | 4 ++ src/codegen.zig | 12 ++++ src/codegen/c.zig | 3 + src/codegen/llvm.zig | 66 ++++++++++++----- src/codegen/llvm/bindings.zig | 24 +++++++ src/print_air.zig | 4 ++ src/stage1/all_types.hpp | 16 +++-- src/stage1/astgen.cpp | 24 +++++-- src/stage1/codegen.cpp | 8 +-- src/stage1/ir.cpp | 24 +++---- src/stage1/ir_print.cpp | 8 +-- src/stage1/parser.cpp | 16 +++++ src/stage1/tokenizer.cpp | 85 ++++++++++++++++++++++ src/stage1/tokenizer.hpp | 8 +++ test/behavior/saturating_arithmetic.zig | 35 +++++++-- 21 files changed, 556 insertions(+), 54 deletions(-) (limited to 'src/codegen/c.zig') diff --git a/lib/std/zig/Ast.zig b/lib/std/zig/Ast.zig index 5838dcd37a..3632551d17 100644 --- a/lib/std/zig/Ast.zig +++ b/lib/std/zig/Ast.zig @@ -396,6 +396,7 @@ pub fn firstToken(tree: Tree, node: Node.Index) TokenIndex { .assign_add, .assign_sub, .assign_bit_shift_left, + .assign_bit_shift_left_sat, .assign_bit_shift_right, .assign_bit_and, .assign_bit_xor, @@ -403,6 +404,9 @@ pub fn firstToken(tree: Tree, node: Node.Index) TokenIndex { .assign_mul_wrap, .assign_add_wrap, .assign_sub_wrap, + .assign_mul_sat, + .assign_add_sat, + .assign_sub_sat, .assign, .merge_error_sets, .mul, @@ -410,12 +414,16 @@ pub fn firstToken(tree: Tree, node: Node.Index) TokenIndex { .mod, .array_mult, .mul_wrap, + .mul_sat, .add, .sub, .array_cat, .add_wrap, .sub_wrap, + .add_sat, + .sub_sat, .bit_shift_left, + .bit_shift_left_sat, .bit_shift_right, .bit_and, .bit_xor, @@ -652,6 +660,7 @@ pub fn lastToken(tree: Tree, node: Node.Index) TokenIndex { .assign_add, .assign_sub, .assign_bit_shift_left, + .assign_bit_shift_left_sat, .assign_bit_shift_right, .assign_bit_and, .assign_bit_xor, @@ -659,6 +668,9 @@ pub fn lastToken(tree: Tree, node: Node.Index) TokenIndex { .assign_mul_wrap, .assign_add_wrap, .assign_sub_wrap, + .assign_mul_sat, + .assign_add_sat, + .assign_sub_sat, .assign, .merge_error_sets, .mul, @@ -666,12 +678,16 @@ pub fn lastToken(tree: Tree, node: Node.Index) TokenIndex { .mod, .array_mult, .mul_wrap, + .mul_sat, .add, .sub, .array_cat, .add_wrap, .sub_wrap, + .add_sat, + .sub_sat, .bit_shift_left, + .bit_shift_left_sat, .bit_shift_right, .bit_and, .bit_xor, @@ -2525,6 +2541,8 @@ pub const Node = struct { assign_sub, /// `lhs <<= 
rhs`. main_token is op. assign_bit_shift_left, + /// `lhs <<|= rhs`. main_token is op. + assign_bit_shift_left_sat, /// `lhs >>= rhs`. main_token is op. assign_bit_shift_right, /// `lhs &= rhs`. main_token is op. @@ -2539,6 +2557,12 @@ pub const Node = struct { assign_add_wrap, /// `lhs -%= rhs`. main_token is op. assign_sub_wrap, + /// `lhs *|= rhs`. main_token is op. + assign_mul_sat, + /// `lhs +|= rhs`. main_token is op. + assign_add_sat, + /// `lhs -|= rhs`. main_token is op. + assign_sub_sat, /// `lhs = rhs`. main_token is op. assign, /// `lhs || rhs`. main_token is the `||`. @@ -2553,6 +2577,8 @@ pub const Node = struct { array_mult, /// `lhs *% rhs`. main_token is the `*%`. mul_wrap, + /// `lhs *| rhs`. main_token is the `*%`. + mul_sat, /// `lhs + rhs`. main_token is the `+`. add, /// `lhs - rhs`. main_token is the `-`. @@ -2563,8 +2589,14 @@ pub const Node = struct { add_wrap, /// `lhs -% rhs`. main_token is the `-%`. sub_wrap, + /// `lhs +| rhs`. main_token is the `+|`. + add_sat, + /// `lhs -| rhs`. main_token is the `-|`. + sub_sat, /// `lhs << rhs`. main_token is the `<<`. bit_shift_left, + /// `lhs <<| rhs`. main_token is the `<<|`. + bit_shift_left_sat, /// `lhs >> rhs`. main_token is the `>>`. bit_shift_right, /// `lhs & rhs`. main_token is the `&`. diff --git a/lib/std/zig/parse.zig b/lib/std/zig/parse.zig index f7697027a3..a2780b5225 100644 --- a/lib/std/zig/parse.zig +++ b/lib/std/zig/parse.zig @@ -1269,6 +1269,7 @@ const Parser = struct { .plus_equal => .assign_add, .minus_equal => .assign_sub, .angle_bracket_angle_bracket_left_equal => .assign_bit_shift_left, + .angle_bracket_angle_bracket_left_pipe_equal => .assign_bit_shift_left_sat, .angle_bracket_angle_bracket_right_equal => .assign_bit_shift_right, .ampersand_equal => .assign_bit_and, .caret_equal => .assign_bit_xor, @@ -1276,6 +1277,9 @@ const Parser = struct { .asterisk_percent_equal => .assign_mul_wrap, .plus_percent_equal => .assign_add_wrap, .minus_percent_equal => .assign_sub_wrap, + .asterisk_pipe_equal => .assign_mul_sat, + .plus_pipe_equal => .assign_add_sat, + .minus_pipe_equal => .assign_sub_sat, .equal => .assign, else => return expr, }; @@ -1343,6 +1347,7 @@ const Parser = struct { .keyword_catch = .{ .prec = 40, .tag = .@"catch" }, .angle_bracket_angle_bracket_left = .{ .prec = 50, .tag = .bit_shift_left }, + .angle_bracket_angle_bracket_left_pipe = .{ .prec = 50, .tag = .bit_shift_left_sat }, .angle_bracket_angle_bracket_right = .{ .prec = 50, .tag = .bit_shift_right }, .plus = .{ .prec = 60, .tag = .add }, @@ -1350,6 +1355,8 @@ const Parser = struct { .plus_plus = .{ .prec = 60, .tag = .array_cat }, .plus_percent = .{ .prec = 60, .tag = .add_wrap }, .minus_percent = .{ .prec = 60, .tag = .sub_wrap }, + .plus_pipe = .{ .prec = 60, .tag = .add_sat }, + .minus_pipe = .{ .prec = 60, .tag = .sub_sat }, .pipe_pipe = .{ .prec = 70, .tag = .merge_error_sets }, .asterisk = .{ .prec = 70, .tag = .mul }, @@ -1357,6 +1364,7 @@ const Parser = struct { .percent = .{ .prec = 70, .tag = .mod }, .asterisk_asterisk = .{ .prec = 70, .tag = .array_mult }, .asterisk_percent = .{ .prec = 70, .tag = .mul_wrap }, + .asterisk_pipe = .{ .prec = 70, .tag = .mul_sat }, }); fn parseExprPrecedence(p: *Parser, min_prec: i32) Error!Node.Index { diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig index 3029d38cb9..47f019d1cf 100644 --- a/lib/std/zig/render.zig +++ b/lib/std/zig/render.zig @@ -333,26 +333,32 @@ fn renderExpression(gpa: *Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, .add, .add_wrap, + .add_sat, 
.array_cat, .array_mult, .assign, .assign_bit_and, .assign_bit_or, .assign_bit_shift_left, + .assign_bit_shift_left_sat, .assign_bit_shift_right, .assign_bit_xor, .assign_div, .assign_sub, .assign_sub_wrap, + .assign_sub_sat, .assign_mod, .assign_add, .assign_add_wrap, + .assign_add_sat, .assign_mul, .assign_mul_wrap, + .assign_mul_sat, .bang_equal, .bit_and, .bit_or, .bit_shift_left, + .bit_shift_left_sat, .bit_shift_right, .bit_xor, .bool_and, @@ -367,8 +373,10 @@ fn renderExpression(gpa: *Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, .mod, .mul, .mul_wrap, + .mul_sat, .sub, .sub_wrap, + .sub_sat, .@"orelse", => { const infix = datas[node]; diff --git a/lib/std/zig/tokenizer.zig b/lib/std/zig/tokenizer.zig index 3ef6c9a6ba..6afe7750d3 100644 --- a/lib/std/zig/tokenizer.zig +++ b/lib/std/zig/tokenizer.zig @@ -103,15 +103,21 @@ pub const Token = struct { plus_equal, plus_percent, plus_percent_equal, + plus_pipe, + plus_pipe_equal, minus, minus_equal, minus_percent, minus_percent_equal, + minus_pipe, + minus_pipe_equal, asterisk, asterisk_equal, asterisk_asterisk, asterisk_percent, asterisk_percent_equal, + asterisk_pipe, + asterisk_pipe_equal, arrow, colon, slash, @@ -124,6 +130,8 @@ pub const Token = struct { angle_bracket_left_equal, angle_bracket_angle_bracket_left, angle_bracket_angle_bracket_left_equal, + angle_bracket_angle_bracket_left_pipe, + angle_bracket_angle_bracket_left_pipe_equal, angle_bracket_right, angle_bracket_right_equal, angle_bracket_angle_bracket_right, @@ -227,15 +235,21 @@ pub const Token = struct { .plus_equal => "+=", .plus_percent => "+%", .plus_percent_equal => "+%=", + .plus_pipe => "+|", + .plus_pipe_equal => "+|=", .minus => "-", .minus_equal => "-=", .minus_percent => "-%", .minus_percent_equal => "-%=", + .minus_pipe => "-|", + .minus_pipe_equal => "-|=", .asterisk => "*", .asterisk_equal => "*=", .asterisk_asterisk => "**", .asterisk_percent => "*%", .asterisk_percent_equal => "*%=", + .asterisk_pipe => "*|", + .asterisk_pipe_equal => "*|=", .arrow => "->", .colon => ":", .slash => "/", @@ -248,6 +262,8 @@ pub const Token = struct { .angle_bracket_left_equal => "<=", .angle_bracket_angle_bracket_left => "<<", .angle_bracket_angle_bracket_left_equal => "<<=", + .angle_bracket_angle_bracket_left_pipe => "<<|", + .angle_bracket_angle_bracket_left_pipe_equal => "<<|=", .angle_bracket_right => ">", .angle_bracket_right_equal => ">=", .angle_bracket_angle_bracket_right => ">>", @@ -352,8 +368,10 @@ pub const Tokenizer = struct { pipe, minus, minus_percent, + minus_pipe, asterisk, asterisk_percent, + asterisk_pipe, slash, line_comment_start, line_comment, @@ -382,8 +400,10 @@ pub const Tokenizer = struct { percent, plus, plus_percent, + plus_pipe, angle_bracket_left, angle_bracket_angle_bracket_left, + angle_bracket_angle_bracket_left_pipe, angle_bracket_right, angle_bracket_angle_bracket_right, period, @@ -584,6 +604,9 @@ pub const Tokenizer = struct { '%' => { state = .asterisk_percent; }, + '|' => { + state = .asterisk_pipe; + }, else => { result.tag = .asterisk; break; @@ -602,6 +625,18 @@ pub const Tokenizer = struct { }, }, + .asterisk_pipe => switch (c) { + '=' => { + result.tag = .asterisk_pipe_equal; + self.index += 1; + break; + }, + else => { + result.tag = .asterisk_pipe; + break; + }, + }, + .percent => switch (c) { '=' => { result.tag = .percent_equal; @@ -628,6 +663,9 @@ pub const Tokenizer = struct { '%' => { state = .plus_percent; }, + '|' => { + state = .plus_pipe; + }, else => { result.tag = .plus; break; @@ -646,6 +684,18 @@ pub 
const Tokenizer = struct { }, }, + .plus_pipe => switch (c) { + '=' => { + result.tag = .plus_pipe_equal; + self.index += 1; + break; + }, + else => { + result.tag = .plus_pipe; + break; + }, + }, + .caret => switch (c) { '=' => { result.tag = .caret_equal; @@ -903,6 +953,9 @@ pub const Tokenizer = struct { '%' => { state = .minus_percent; }, + '|' => { + state = .minus_pipe; + }, else => { result.tag = .minus; break; @@ -920,6 +973,17 @@ pub const Tokenizer = struct { break; }, }, + .minus_pipe => switch (c) { + '=' => { + result.tag = .minus_pipe_equal; + self.index += 1; + break; + }, + else => { + result.tag = .minus_pipe; + break; + }, + }, .angle_bracket_left => switch (c) { '<' => { @@ -942,12 +1006,27 @@ pub const Tokenizer = struct { self.index += 1; break; }, + '|' => { + result.tag = .angle_bracket_angle_bracket_left_pipe; + }, else => { result.tag = .angle_bracket_angle_bracket_left; break; }, }, + .angle_bracket_angle_bracket_left_pipe => switch (c) { + '=' => { + result.tag = .angle_bracket_angle_bracket_left_pipe_equal; + self.index += 1; + break; + }, + else => { + result.tag = .angle_bracket_angle_bracket_left_pipe; + break; + }, + }, + .angle_bracket_right => switch (c) { '>' => { state = .angle_bracket_angle_bracket_right; diff --git a/src/Air.zig b/src/Air.zig index b5d19127a0..b7d3938352 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -44,6 +44,11 @@ pub const Inst = struct { /// is the same as both operands. /// Uses the `bin_op` field. addwrap, + /// Saturating integer addition. + /// Both operands are guaranteed to be the same type, and the result type + /// is the same as both operands. + /// Uses the `bin_op` field. + addsat, /// Float or integer subtraction. For integers, wrapping is undefined behavior. /// Both operands are guaranteed to be the same type, and the result type /// is the same as both operands. @@ -54,6 +59,11 @@ pub const Inst = struct { /// is the same as both operands. /// Uses the `bin_op` field. subwrap, + /// Saturating integer subtraction. + /// Both operands are guaranteed to be the same type, and the result type + /// is the same as both operands. + /// Uses the `bin_op` field. + subsat, /// Float or integer multiplication. For integers, wrapping is undefined behavior. /// Both operands are guaranteed to be the same type, and the result type /// is the same as both operands. @@ -64,6 +74,11 @@ pub const Inst = struct { /// is the same as both operands. /// Uses the `bin_op` field. mulwrap, + /// Saturating integer multiplication. + /// Both operands are guaranteed to be the same type, and the result type + /// is the same as both operands. + /// Uses the `bin_op` field. + mulsat, /// Integer or float division. For integers, wrapping is undefined behavior. /// Both operands are guaranteed to be the same type, and the result type /// is the same as both operands. @@ -110,6 +125,9 @@ pub const Inst = struct { /// Shift left. `<<` /// Uses the `bin_op` field. shl, + /// Shift left saturating. `<<|` + /// Uses the `bin_op` field. + shl_sat, /// Bitwise XOR. `^` /// Uses the `bin_op` field. 
xor, @@ -568,10 +586,13 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .add, .addwrap, + .addsat, .sub, .subwrap, + .subsat, .mul, .mulwrap, + .mulsat, .div, .rem, .mod, @@ -582,6 +603,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .ptr_sub, .shr, .shl, + .shl_sat, => return air.typeOf(datas[inst].bin_op.lhs), .cmp_lt, diff --git a/src/AstGen.zig b/src/AstGen.zig index 15594ac27c..b3af3eb86b 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -318,27 +318,35 @@ fn lvalExpr(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Ins .assign_bit_and, .assign_bit_or, .assign_bit_shift_left, + .assign_bit_shift_left_sat, .assign_bit_shift_right, .assign_bit_xor, .assign_div, .assign_sub, .assign_sub_wrap, + .assign_sub_sat, .assign_mod, .assign_add, .assign_add_wrap, + .assign_add_sat, .assign_mul, .assign_mul_wrap, + .assign_mul_sat, .add, .add_wrap, + .add_sat, .sub, .sub_wrap, + .sub_sat, .mul, .mul_wrap, + .mul_sat, .div, .mod, .bit_and, .bit_or, .bit_shift_left, + .bit_shift_left_sat, .bit_shift_right, .bit_xor, .bang_equal, @@ -526,6 +534,10 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerEr try assignShift(gz, scope, node, .shl); return rvalue(gz, rl, .void_value, node); }, + .assign_bit_shift_left_sat => { + try assignBinOpExt(gz, scope, node, .shl_with_saturation); + return rvalue(gz, rl, .void_value, node); + }, .assign_bit_shift_right => { try assignShift(gz, scope, node, .shr); return rvalue(gz, rl, .void_value, node); @@ -555,6 +567,10 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerEr try assignOp(gz, scope, node, .subwrap); return rvalue(gz, rl, .void_value, node); }, + .assign_sub_sat => { + try assignBinOpExt(gz, scope, node, .sub_with_saturation); + return rvalue(gz, rl, .void_value, node); + }, .assign_mod => { try assignOp(gz, scope, node, .mod_rem); return rvalue(gz, rl, .void_value, node); @@ -567,6 +583,10 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerEr try assignOp(gz, scope, node, .addwrap); return rvalue(gz, rl, .void_value, node); }, + .assign_add_sat => { + try assignBinOpExt(gz, scope, node, .add_with_saturation); + return rvalue(gz, rl, .void_value, node); + }, .assign_mul => { try assignOp(gz, scope, node, .mul); return rvalue(gz, rl, .void_value, node); @@ -575,17 +595,25 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerEr try assignOp(gz, scope, node, .mulwrap); return rvalue(gz, rl, .void_value, node); }, + .assign_mul_sat => { + try assignBinOpExt(gz, scope, node, .mul_with_saturation); + return rvalue(gz, rl, .void_value, node); + }, // zig fmt: off - .bit_shift_left => return shiftOp(gz, scope, rl, node, node_datas[node].lhs, node_datas[node].rhs, .shl), - .bit_shift_right => return shiftOp(gz, scope, rl, node, node_datas[node].lhs, node_datas[node].rhs, .shr), + .bit_shift_left => return shiftOp(gz, scope, rl, node, node_datas[node].lhs, node_datas[node].rhs, .shl), + .bit_shift_left_sat => return binOpExt(gz, scope, rl, node, node_datas[node].lhs, node_datas[node].rhs, .shl_with_saturation), + .bit_shift_right => return shiftOp(gz, scope, rl, node, node_datas[node].lhs, node_datas[node].rhs, .shr), .add => return simpleBinOp(gz, scope, rl, node, .add), .add_wrap => return simpleBinOp(gz, scope, rl, node, .addwrap), + .add_sat => return binOpExt(gz, scope, rl, node, node_datas[node].lhs, node_datas[node].rhs, .add_with_saturation), .sub => return simpleBinOp(gz, scope, rl, node, .sub), 
.sub_wrap => return simpleBinOp(gz, scope, rl, node, .subwrap), + .sub_sat => return binOpExt(gz, scope, rl, node, node_datas[node].lhs, node_datas[node].rhs, .sub_with_saturation), .mul => return simpleBinOp(gz, scope, rl, node, .mul), .mul_wrap => return simpleBinOp(gz, scope, rl, node, .mulwrap), + .mul_sat => return binOpExt(gz, scope, rl, node, node_datas[node].lhs, node_datas[node].rhs, .mul_with_saturation), .div => return simpleBinOp(gz, scope, rl, node, .div), .mod => return simpleBinOp(gz, scope, rl, node, .mod_rem), .bit_and => { @@ -2685,6 +2713,31 @@ fn assignOp( _ = try gz.addBin(.store, lhs_ptr, result); } +// TODO: is there an existing method to accomplish this? +// TODO: likely rename this to indicate rhs type coercion or add more params to make it more general +fn assignBinOpExt( + gz: *GenZir, + scope: *Scope, + infix_node: Ast.Node.Index, + op_inst_tag: Zir.Inst.Extended, +) InnerError!void { + try emitDbgNode(gz, infix_node); + const astgen = gz.astgen; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + + const lhs_ptr = try lvalExpr(gz, scope, node_datas[infix_node].lhs); + const lhs = try gz.addUnNode(.load, lhs_ptr, infix_node); + const lhs_type = try gz.addUnNode(.typeof, lhs, infix_node); + const rhs = try expr(gz, scope, .{ .coerced_ty = lhs_type }, node_datas[infix_node].rhs); + const result = try gz.addExtendedPayload(op_inst_tag, Zir.Inst.BinNode{ + .node = gz.nodeIndexToRelative(infix_node), + .lhs = lhs, + .rhs = rhs, + }); + _ = try gz.addBin(.store, lhs_ptr, result); +} + fn assignShift( gz: *GenZir, scope: *Scope, @@ -2708,6 +2761,29 @@ fn assignShift( _ = try gz.addBin(.store, lhs_ptr, result); } +fn assignShiftSat( + gz: *GenZir, + scope: *Scope, + infix_node: ast.Node.Index, + op_inst_tag: Zir.Inst.Tag, +) InnerError!void { + try emitDbgNode(gz, infix_node); + const astgen = gz.astgen; + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + + const lhs_ptr = try lvalExpr(gz, scope, node_datas[infix_node].lhs); + const lhs = try gz.addUnNode(.load, lhs_ptr, infix_node); + const rhs_type = try gz.addUnNode(.typeof, lhs, infix_node); + const rhs = try expr(gz, scope, .{ .ty = rhs_type }, node_datas[infix_node].rhs); + + const result = try gz.addPlNode(op_inst_tag, infix_node, Zir.Inst.Bin{ + .lhs = lhs, + .rhs = rhs, + }); + _ = try gz.addBin(.store, lhs_ptr, result); +} + fn boolNot(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerError!Zir.Inst.Ref { const astgen = gz.astgen; const tree = astgen.tree; @@ -7827,6 +7903,26 @@ fn shiftOp( return rvalue(gz, rl, result, node); } +// TODO: is there an existing way to do this? 
+// TODO: likely rename this to reflect result_loc == .none or add more params to make it more general +fn binOpExt( + gz: *GenZir, + scope: *Scope, + rl: ResultLoc, + node: Ast.Node.Index, + lhs_node: Ast.Node.Index, + rhs_node: Ast.Node.Index, + tag: Zir.Inst.Extended, +) InnerError!Zir.Inst.Ref { + const lhs = try expr(gz, scope, .none, lhs_node); + const rhs = try expr(gz, scope, .none, rhs_node); + const result = try gz.addExtendedPayload(tag, Zir.Inst.Bin{ + .lhs = lhs, + .rhs = rhs, + }); + return rvalue(gz, rl, result, node); +} + fn cImport( gz: *GenZir, scope: *Scope, @@ -8119,26 +8215,32 @@ fn nodeMayNeedMemoryLocation(tree: *const Ast, start_node: Ast.Node.Index) bool .asm_simple, .add, .add_wrap, + .add_sat, .array_cat, .array_mult, .assign, .assign_bit_and, .assign_bit_or, .assign_bit_shift_left, + .assign_bit_shift_left_sat, .assign_bit_shift_right, .assign_bit_xor, .assign_div, .assign_sub, .assign_sub_wrap, + .assign_sub_sat, .assign_mod, .assign_add, .assign_add_wrap, + .assign_add_sat, .assign_mul, .assign_mul_wrap, + .assign_mul_sat, .bang_equal, .bit_and, .bit_or, .bit_shift_left, + .bit_shift_left_sat, .bit_shift_right, .bit_xor, .bool_and, @@ -8154,10 +8256,12 @@ fn nodeMayNeedMemoryLocation(tree: *const Ast, start_node: Ast.Node.Index) bool .mod, .mul, .mul_wrap, + .mul_sat, .switch_range, .field_access, .sub, .sub_wrap, + .sub_sat, .slice, .slice_open, .slice_sentinel, @@ -8352,26 +8456,32 @@ fn nodeMayEvalToError(tree: *const Ast, start_node: Ast.Node.Index) enum { never .tagged_union_enum_tag_trailing, .add, .add_wrap, + .add_sat, .array_cat, .array_mult, .assign, .assign_bit_and, .assign_bit_or, .assign_bit_shift_left, + .assign_bit_shift_left_sat, .assign_bit_shift_right, .assign_bit_xor, .assign_div, .assign_sub, .assign_sub_wrap, + .assign_sub_sat, .assign_mod, .assign_add, .assign_add_wrap, + .assign_add_sat, .assign_mul, .assign_mul_wrap, + .assign_mul_sat, .bang_equal, .bit_and, .bit_or, .bit_shift_left, + .bit_shift_left_sat, .bit_shift_right, .bit_xor, .bool_and, @@ -8387,9 +8497,11 @@ fn nodeMayEvalToError(tree: *const Ast, start_node: Ast.Node.Index) enum { never .mod, .mul, .mul_wrap, + .mul_sat, .switch_range, .sub, .sub_wrap, + .sub_sat, .slice, .slice_open, .slice_sentinel, @@ -8524,26 +8636,32 @@ fn nodeImpliesRuntimeBits(tree: *const Ast, start_node: Ast.Node.Index) bool { .asm_simple, .add, .add_wrap, + .add_sat, .array_cat, .array_mult, .assign, .assign_bit_and, .assign_bit_or, .assign_bit_shift_left, + .assign_bit_shift_left_sat, .assign_bit_shift_right, .assign_bit_xor, .assign_div, .assign_sub, .assign_sub_wrap, + .assign_sub_sat, .assign_mod, .assign_add, .assign_add_wrap, + .assign_add_sat, .assign_mul, .assign_mul_wrap, + .assign_mul_sat, .bang_equal, .bit_and, .bit_or, .bit_shift_left, + .bit_shift_left_sat, .bit_shift_right, .bit_xor, .bool_and, @@ -8559,10 +8677,12 @@ fn nodeImpliesRuntimeBits(tree: *const Ast, start_node: Ast.Node.Index) bool { .mod, .mul, .mul_wrap, + .mul_sat, .switch_range, .field_access, .sub, .sub_wrap, + .sub_sat, .slice, .slice_open, .slice_sentinel, diff --git a/src/Liveness.zig b/src/Liveness.zig index 25dd29b0f6..c34153b76f 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -226,10 +226,13 @@ fn analyzeInst( switch (inst_tags[inst]) { .add, .addwrap, + .addsat, .sub, .subwrap, + .subsat, .mul, .mulwrap, + .mulsat, .div, .rem, .mod, @@ -252,6 +255,7 @@ fn analyzeInst( .ptr_elem_val, .ptr_ptr_elem_val, .shl, + .shl_sat, .shr, .atomic_store_unordered, .atomic_store_monotonic, diff --git a/src/codegen.zig 
b/src/codegen.zig index 7c359e90c0..a1f812388f 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -826,10 +826,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // zig fmt: off .add, .ptr_add => try self.airAdd(inst), .addwrap => try self.airAddWrap(inst), + .addsat => try self.airArithmeticOpSat(inst, "addsat"), .sub, .ptr_sub => try self.airSub(inst), .subwrap => try self.airSubWrap(inst), + .subsat => try self.airArithmeticOpSat(inst, "subsat"), .mul => try self.airMul(inst), .mulwrap => try self.airMulWrap(inst), + .mulsat => try self.airArithmeticOpSat(inst, "mulsat"), .div => try self.airDiv(inst), .rem => try self.airRem(inst), .mod => try self.airMod(inst), @@ -848,6 +851,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .xor => try self.airXor(inst), .shr => try self.airShr(inst), .shl => try self.airShl(inst), + .shl_sat => try self.airArithmeticOpSat(inst, "shl_sat"), .alloc => try self.airAlloc(inst), .arg => try self.airArg(inst), @@ -1320,6 +1324,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } + fn airArithmeticOpSat(self: *Self, inst: Air.Inst.Index, comptime name: []const u8) !void { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { + else => return self.fail("TODO implement " ++ name ++ " for {}", .{self.target.cpu.arch}), + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); + } + fn airMul(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 4964f17cd3..dce0c10b4c 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -885,14 +885,17 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO // that wrapping is UB. .add, .ptr_add => try airBinOp( f, inst, " + "), .addwrap => try airWrapOp(f, inst, " + ", "addw_"), + .addsat => return o.dg.fail("TODO: C backend: implement codegen for addsat", .{}), // TODO use a different strategy for sub that communicates to the optimizer // that wrapping is UB. .sub, .ptr_sub => try airBinOp( f, inst, " - "), .subwrap => try airWrapOp(f, inst, " - ", "subw_"), + .subsat => return o.dg.fail("TODO: C backend: implement codegen for subsat", .{}), // TODO use a different strategy for mul that communicates to the optimizer // that wrapping is UB. .mul => try airBinOp( f, inst, " * "), .mulwrap => try airWrapOp(f, inst, " * ", "mulw_"), + .mulsat => return o.dg.fail("TODO: C backend: implement codegen for mulsat", .{}), // TODO use a different strategy for div that communicates to the optimizer // that wrapping is UB. 
.div => try airBinOp( f, inst, " / "), diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index f65e0f6d72..c21985d1c5 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1236,12 +1236,15 @@ pub const FuncGen = struct { for (body) |inst| { const opt_value: ?*const llvm.Value = switch (air_tags[inst]) { // zig fmt: off - .add => try self.airAdd(inst, false), - .addwrap => try self.airAdd(inst, true), - .sub => try self.airSub(inst, false), - .subwrap => try self.airSub(inst, true), - .mul => try self.airMul(inst, false), - .mulwrap => try self.airMul(inst, true), + .add => try self.airAdd(inst, .standard), + .addwrap => try self.airAdd(inst, .wrapping), + .addsat => try self.airAdd(inst, .saturated), + .sub => try self.airSub(inst, .standard), + .subwrap => try self.airSub(inst, .wrapping), + .subsat => try self.airSub(inst, .saturated), + .mul => try self.airMul(inst, .standard), + .mulwrap => try self.airMul(inst, .wrapping), + .mulsat => try self.airMul(inst, .saturated), .div => try self.airDiv(inst), .rem => try self.airRem(inst), .mod => try self.airMod(inst), @@ -1252,7 +1255,8 @@ pub const FuncGen = struct { .bit_or, .bool_or => try self.airOr(inst), .xor => try self.airXor(inst), - .shl => try self.airShl(inst), + .shl => try self.airShl(inst, false), + .shl_sat => try self.airShl(inst, true), .shr => try self.airShr(inst), .cmp_eq => try self.airCmp(inst, .eq), @@ -2024,7 +2028,8 @@ pub const FuncGen = struct { return self.todo("implement llvm codegen for 'airWrapErrUnionErr'", .{}); } - fn airAdd(self: *FuncGen, inst: Air.Inst.Index, wrap: bool) !?*const llvm.Value { + const ArithmeticType = enum { standard, wrapping, saturated }; + fn airAdd(self: *FuncGen, inst: Air.Inst.Index, ty: ArithmeticType) !?*const llvm.Value { if (self.liveness.isUnused(inst)) return null; @@ -2033,13 +2038,20 @@ pub const FuncGen = struct { const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); - if (inst_ty.isRuntimeFloat()) return self.builder.buildFAdd(lhs, rhs, ""); - if (wrap) return self.builder.buildAdd(lhs, rhs, ""); + if (inst_ty.isFloat()) return self.builder.buildFAdd(lhs, rhs, ""); + if (ty == .wrapping) + return self.builder.buildAdd(lhs, rhs, "") + else if (ty == .saturated) { + if (inst_ty.isSignedInt()) + return self.builder.buildSAddSat(lhs, rhs, "") + else + return self.builder.buildUAddSat(lhs, rhs, ""); + } if (inst_ty.isSignedInt()) return self.builder.buildNSWAdd(lhs, rhs, ""); return self.builder.buildNUWAdd(lhs, rhs, ""); } - fn airSub(self: *FuncGen, inst: Air.Inst.Index, wrap: bool) !?*const llvm.Value { + fn airSub(self: *FuncGen, inst: Air.Inst.Index, ty: ArithmeticType) !?*const llvm.Value { if (self.liveness.isUnused(inst)) return null; @@ -2048,13 +2060,20 @@ pub const FuncGen = struct { const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); - if (inst_ty.isRuntimeFloat()) return self.builder.buildFSub(lhs, rhs, ""); - if (wrap) return self.builder.buildSub(lhs, rhs, ""); + if (inst_ty.isFloat()) return self.builder.buildFSub(lhs, rhs, ""); + if (ty == .wrapping) + return self.builder.buildSub(lhs, rhs, "") + else if (ty == .saturated) { + if (inst_ty.isSignedInt()) + return self.builder.buildSSubSat(lhs, rhs, "") + else + return self.builder.buildUSubSat(lhs, rhs, ""); + } if (inst_ty.isSignedInt()) return self.builder.buildNSWSub(lhs, rhs, ""); return self.builder.buildNUWSub(lhs, rhs, ""); } - fn airMul(self: *FuncGen, inst: Air.Inst.Index, wrap: bool) !?*const llvm.Value { + fn 
airMul(self: *FuncGen, inst: Air.Inst.Index, ty: ArithmeticType) !?*const llvm.Value { if (self.liveness.isUnused(inst)) return null; @@ -2063,8 +2082,15 @@ pub const FuncGen = struct { const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); - if (inst_ty.isRuntimeFloat()) return self.builder.buildFMul(lhs, rhs, ""); - if (wrap) return self.builder.buildMul(lhs, rhs, ""); + if (inst_ty.isFloat()) return self.builder.buildFMul(lhs, rhs, ""); + if (ty == .wrapping) + return self.builder.buildMul(lhs, rhs, "") + else if (ty == .saturated) { + if (inst_ty.isSignedInt()) + return self.builder.buildSMulFixSat(lhs, rhs, "") + else + return self.builder.buildUMulFixSat(lhs, rhs, ""); + } if (inst_ty.isSignedInt()) return self.builder.buildNSWMul(lhs, rhs, ""); return self.builder.buildNUWMul(lhs, rhs, ""); } @@ -2174,7 +2200,7 @@ pub const FuncGen = struct { return self.builder.buildXor(lhs, rhs, ""); } - fn airShl(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + fn airShl(self: *FuncGen, inst: Air.Inst.Index, sat: bool) !?*const llvm.Value { if (self.liveness.isUnused(inst)) return null; const bin_op = self.air.instructions.items(.data)[inst].bin_op; @@ -2186,6 +2212,12 @@ pub const FuncGen = struct { self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_type), "") else rhs; + if (sat) { + return if (lhs_type.isSignedInt()) + self.builder.buildSShlSat(lhs, casted_rhs, "") + else + self.builder.buildUShlSat(lhs, casted_rhs, ""); + } return self.builder.buildShl(lhs, casted_rhs, ""); } diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig index 9d32682260..178c381235 100644 --- a/src/codegen/llvm/bindings.zig +++ b/src/codegen/llvm/bindings.zig @@ -397,6 +397,12 @@ pub const Builder = opaque { pub const buildNUWAdd = LLVMBuildNUWAdd; extern fn LLVMBuildNUWAdd(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value; + pub const buildSAddSat = ZigLLVMBuildSAddSat; + extern fn ZigLLVMBuildSAddSat(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value; + + pub const buildUAddSat = ZigLLVMBuildUAddSat; + extern fn ZigLLVMBuildUAddSat(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value; + pub const buildFSub = LLVMBuildFSub; extern fn LLVMBuildFSub(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value; @@ -409,6 +415,12 @@ pub const Builder = opaque { pub const buildNUWSub = LLVMBuildNUWSub; extern fn LLVMBuildNUWSub(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value; + pub const buildSSubSat = ZigLLVMBuildSSubSat; + extern fn ZigLLVMBuildSSubSat(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value; + + pub const buildUSubSat = ZigLLVMBuildUSubSat; + extern fn ZigLLVMBuildUSubSat(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value; + pub const buildFMul = LLVMBuildFMul; extern fn LLVMBuildFMul(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value; @@ -421,6 +433,12 @@ pub const Builder = opaque { pub const buildNUWMul = LLVMBuildNUWMul; extern fn LLVMBuildNUWMul(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value; + pub const buildSMulFixSat = ZigLLVMBuildSMulFixSat; + extern fn ZigLLVMBuildSMulFixSat(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value; + + pub const buildUMulFixSat = 
ZigLLVMBuildUMulFixSat; + extern fn ZigLLVMBuildUMulFixSat(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value; + pub const buildUDiv = LLVMBuildUDiv; extern fn LLVMBuildUDiv(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value; @@ -451,6 +469,12 @@ pub const Builder = opaque { pub const buildShl = LLVMBuildShl; extern fn LLVMBuildShl(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value; + pub const buildSShlSat = ZigLLVMBuildSShlSat; + extern fn ZigLLVMBuildSShlSat(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value; + + pub const buildUShlSat = ZigLLVMBuildUShlSat; + extern fn ZigLLVMBuildUShlSat(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value; + pub const buildOr = LLVMBuildOr; extern fn LLVMBuildOr(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value; diff --git a/src/print_air.zig b/src/print_air.zig index 90df06760b..7d178b52f3 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -104,10 +104,13 @@ const Writer = struct { .add, .addwrap, + .addsat, .sub, .subwrap, + .subsat, .mul, .mulwrap, + .mulsat, .div, .rem, .mod, @@ -130,6 +133,7 @@ const Writer = struct { .ptr_elem_val, .ptr_ptr_elem_val, .shl, + .shl_sat, .shr, .set_union_tag, => try w.writeBinOp(s, inst), diff --git a/src/stage1/all_types.hpp b/src/stage1/all_types.hpp index 13c37fc839..e31a7015b0 100644 --- a/src/stage1/all_types.hpp +++ b/src/stage1/all_types.hpp @@ -812,14 +812,18 @@ enum BinOpType { BinOpTypeInvalid, BinOpTypeAssign, BinOpTypeAssignTimes, + BinOpTypeAssignTimesSat, BinOpTypeAssignTimesWrap, BinOpTypeAssignDiv, BinOpTypeAssignMod, BinOpTypeAssignPlus, + BinOpTypeAssignPlusSat, BinOpTypeAssignPlusWrap, BinOpTypeAssignMinus, + BinOpTypeAssignMinusSat, BinOpTypeAssignMinusWrap, BinOpTypeAssignBitShiftLeft, + BinOpTypeAssignBitShiftLeftSat, BinOpTypeAssignBitShiftRight, BinOpTypeAssignBitAnd, BinOpTypeAssignBitXor, @@ -836,12 +840,16 @@ enum BinOpType { BinOpTypeBinXor, BinOpTypeBinAnd, BinOpTypeBitShiftLeft, + BinOpTypeBitShiftLeftSat, BinOpTypeBitShiftRight, BinOpTypeAdd, + BinOpTypeAddSat, BinOpTypeAddWrap, BinOpTypeSub, + BinOpTypeSubSat, BinOpTypeSubWrap, BinOpTypeMult, + BinOpTypeMultSat, BinOpTypeMultWrap, BinOpTypeDiv, BinOpTypeMod, @@ -2958,10 +2966,10 @@ enum IrBinOp { IrBinOpArrayMult, IrBinOpMaximum, IrBinOpMinimum, - IrBinOpSatAdd, - IrBinOpSatSub, - IrBinOpSatMul, - IrBinOpSatShl, + IrBinOpAddSat, + IrBinOpSubSat, + IrBinOpMultSat, + IrBinOpShlSat, }; struct Stage1ZirInstBinOp { diff --git a/src/stage1/astgen.cpp b/src/stage1/astgen.cpp index 9e5d9da9ee..14808dd0a2 100644 --- a/src/stage1/astgen.cpp +++ b/src/stage1/astgen.cpp @@ -3672,6 +3672,8 @@ static Stage1ZirInst *astgen_bin_op(Stage1AstGen *ag, Scope *scope, AstNode *nod return ir_lval_wrap(ag, scope, astgen_assign_op(ag, scope, node, IrBinOpMult), lval, result_loc); case BinOpTypeAssignTimesWrap: return ir_lval_wrap(ag, scope, astgen_assign_op(ag, scope, node, IrBinOpMultWrap), lval, result_loc); + case BinOpTypeAssignTimesSat: + return ir_lval_wrap(ag, scope, astgen_assign_op(ag, scope, node, IrBinOpMultSat), lval, result_loc); case BinOpTypeAssignDiv: return ir_lval_wrap(ag, scope, astgen_assign_op(ag, scope, node, IrBinOpDivUnspecified), lval, result_loc); case BinOpTypeAssignMod: @@ -3680,12 +3682,18 @@ static Stage1ZirInst *astgen_bin_op(Stage1AstGen *ag, Scope *scope, AstNode *nod return ir_lval_wrap(ag, scope, 
astgen_assign_op(ag, scope, node, IrBinOpAdd), lval, result_loc); case BinOpTypeAssignPlusWrap: return ir_lval_wrap(ag, scope, astgen_assign_op(ag, scope, node, IrBinOpAddWrap), lval, result_loc); + case BinOpTypeAssignPlusSat: + return ir_lval_wrap(ag, scope, astgen_assign_op(ag, scope, node, IrBinOpAddSat), lval, result_loc); case BinOpTypeAssignMinus: return ir_lval_wrap(ag, scope, astgen_assign_op(ag, scope, node, IrBinOpSub), lval, result_loc); case BinOpTypeAssignMinusWrap: return ir_lval_wrap(ag, scope, astgen_assign_op(ag, scope, node, IrBinOpSubWrap), lval, result_loc); + case BinOpTypeAssignMinusSat: + return ir_lval_wrap(ag, scope, astgen_assign_op(ag, scope, node, IrBinOpSubSat), lval, result_loc); case BinOpTypeAssignBitShiftLeft: return ir_lval_wrap(ag, scope, astgen_assign_op(ag, scope, node, IrBinOpBitShiftLeftLossy), lval, result_loc); + case BinOpTypeAssignBitShiftLeftSat: + return ir_lval_wrap(ag, scope, astgen_assign_op(ag, scope, node, IrBinOpShlSat), lval, result_loc); case BinOpTypeAssignBitShiftRight: return ir_lval_wrap(ag, scope, astgen_assign_op(ag, scope, node, IrBinOpBitShiftRightLossy), lval, result_loc); case BinOpTypeAssignBitAnd: @@ -3718,20 +3726,28 @@ static Stage1ZirInst *astgen_bin_op(Stage1AstGen *ag, Scope *scope, AstNode *nod return ir_lval_wrap(ag, scope, astgen_bin_op_id(ag, scope, node, IrBinOpBinAnd), lval, result_loc); case BinOpTypeBitShiftLeft: return ir_lval_wrap(ag, scope, astgen_bin_op_id(ag, scope, node, IrBinOpBitShiftLeftLossy), lval, result_loc); + case BinOpTypeBitShiftLeftSat: + return ir_lval_wrap(ag, scope, astgen_bin_op_id(ag, scope, node, IrBinOpShlSat), lval, result_loc); case BinOpTypeBitShiftRight: return ir_lval_wrap(ag, scope, astgen_bin_op_id(ag, scope, node, IrBinOpBitShiftRightLossy), lval, result_loc); case BinOpTypeAdd: return ir_lval_wrap(ag, scope, astgen_bin_op_id(ag, scope, node, IrBinOpAdd), lval, result_loc); case BinOpTypeAddWrap: return ir_lval_wrap(ag, scope, astgen_bin_op_id(ag, scope, node, IrBinOpAddWrap), lval, result_loc); + case BinOpTypeAddSat: + return ir_lval_wrap(ag, scope, astgen_bin_op_id(ag, scope, node, IrBinOpAddSat), lval, result_loc); case BinOpTypeSub: return ir_lval_wrap(ag, scope, astgen_bin_op_id(ag, scope, node, IrBinOpSub), lval, result_loc); case BinOpTypeSubWrap: return ir_lval_wrap(ag, scope, astgen_bin_op_id(ag, scope, node, IrBinOpSubWrap), lval, result_loc); + case BinOpTypeSubSat: + return ir_lval_wrap(ag, scope, astgen_bin_op_id(ag, scope, node, IrBinOpSubSat), lval, result_loc); case BinOpTypeMult: return ir_lval_wrap(ag, scope, astgen_bin_op_id(ag, scope, node, IrBinOpMult), lval, result_loc); case BinOpTypeMultWrap: return ir_lval_wrap(ag, scope, astgen_bin_op_id(ag, scope, node, IrBinOpMultWrap), lval, result_loc); + case BinOpTypeMultSat: + return ir_lval_wrap(ag, scope, astgen_bin_op_id(ag, scope, node, IrBinOpMultSat), lval, result_loc); case BinOpTypeDiv: return ir_lval_wrap(ag, scope, astgen_bin_op_id(ag, scope, node, IrBinOpDivUnspecified), lval, result_loc); case BinOpTypeMod: @@ -4716,7 +4732,7 @@ static Stage1ZirInst *astgen_builtin_fn_call(Stage1AstGen *ag, Scope *scope, Ast if (arg1_value == ag->codegen->invalid_inst_src) return arg1_value; - Stage1ZirInst *bin_op = ir_build_bin_op(ag, scope, node, IrBinOpSatAdd, arg0_value, arg1_value, true); + Stage1ZirInst *bin_op = ir_build_bin_op(ag, scope, node, IrBinOpAddSat, arg0_value, arg1_value, true); return ir_lval_wrap(ag, scope, bin_op, lval, result_loc); } case BuiltinFnIdSatSub: @@ -4731,7 +4747,7 @@ static 
Stage1ZirInst *astgen_builtin_fn_call(Stage1AstGen *ag, Scope *scope, Ast if (arg1_value == ag->codegen->invalid_inst_src) return arg1_value; - Stage1ZirInst *bin_op = ir_build_bin_op(ag, scope, node, IrBinOpSatSub, arg0_value, arg1_value, true); + Stage1ZirInst *bin_op = ir_build_bin_op(ag, scope, node, IrBinOpSubSat, arg0_value, arg1_value, true); return ir_lval_wrap(ag, scope, bin_op, lval, result_loc); } case BuiltinFnIdSatMul: @@ -4746,7 +4762,7 @@ static Stage1ZirInst *astgen_builtin_fn_call(Stage1AstGen *ag, Scope *scope, Ast if (arg1_value == ag->codegen->invalid_inst_src) return arg1_value; - Stage1ZirInst *bin_op = ir_build_bin_op(ag, scope, node, IrBinOpSatMul, arg0_value, arg1_value, true); + Stage1ZirInst *bin_op = ir_build_bin_op(ag, scope, node, IrBinOpMultSat, arg0_value, arg1_value, true); return ir_lval_wrap(ag, scope, bin_op, lval, result_loc); } case BuiltinFnIdSatShl: @@ -4761,7 +4777,7 @@ static Stage1ZirInst *astgen_builtin_fn_call(Stage1AstGen *ag, Scope *scope, Ast if (arg1_value == ag->codegen->invalid_inst_src) return arg1_value; - Stage1ZirInst *bin_op = ir_build_bin_op(ag, scope, node, IrBinOpSatShl, arg0_value, arg1_value, true); + Stage1ZirInst *bin_op = ir_build_bin_op(ag, scope, node, IrBinOpShlSat, arg0_value, arg1_value, true); return ir_lval_wrap(ag, scope, bin_op, lval, result_loc); } case BuiltinFnIdMemcpy: diff --git a/src/stage1/codegen.cpp b/src/stage1/codegen.cpp index f84847a9fe..eade843354 100644 --- a/src/stage1/codegen.cpp +++ b/src/stage1/codegen.cpp @@ -3333,7 +3333,7 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, Stage1Air *executable, } else { zig_unreachable(); } - case IrBinOpSatAdd: + case IrBinOpAddSat: if (scalar_type->id == ZigTypeIdInt) { if (scalar_type->data.integral.is_signed) { return ZigLLVMBuildSAddSat(g->builder, op1_value, op2_value, ""); @@ -3343,7 +3343,7 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, Stage1Air *executable, } else { zig_unreachable(); } - case IrBinOpSatSub: + case IrBinOpSubSat: if (scalar_type->id == ZigTypeIdInt) { if (scalar_type->data.integral.is_signed) { return ZigLLVMBuildSSubSat(g->builder, op1_value, op2_value, ""); @@ -3353,7 +3353,7 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, Stage1Air *executable, } else { zig_unreachable(); } - case IrBinOpSatMul: + case IrBinOpMultSat: if (scalar_type->id == ZigTypeIdInt) { if (scalar_type->data.integral.is_signed) { return ZigLLVMBuildSMulFixSat(g->builder, op1_value, op2_value, ""); @@ -3363,7 +3363,7 @@ static LLVMValueRef ir_render_bin_op(CodeGen *g, Stage1Air *executable, } else { zig_unreachable(); } - case IrBinOpSatShl: + case IrBinOpShlSat: if (scalar_type->id == ZigTypeIdInt) { if (scalar_type->data.integral.is_signed) { return ZigLLVMBuildSShlSat(g->builder, op1_value, op2_value, ""); diff --git a/src/stage1/ir.cpp b/src/stage1/ir.cpp index b853961beb..2f2cfe08f3 100644 --- a/src/stage1/ir.cpp +++ b/src/stage1/ir.cpp @@ -9820,28 +9820,28 @@ static ErrorMsg *ir_eval_math_op_scalar(IrAnalyze *ira, Scope *scope, AstNode *s float_min(out_val, op1_val, op2_val); } break; - case IrBinOpSatAdd: + case IrBinOpAddSat: if (is_int) { bigint_add_sat(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint, type_entry->data.integral.bit_count, type_entry->data.integral.is_signed); } else { zig_unreachable(); } break; - case IrBinOpSatSub: + case IrBinOpSubSat: if (is_int) { bigint_sub_sat(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint, type_entry->data.integral.bit_count, 
type_entry->data.integral.is_signed); } else { zig_unreachable(); } break; - case IrBinOpSatMul: + case IrBinOpMultSat: if (is_int) { bigint_mul_sat(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint, type_entry->data.integral.bit_count, type_entry->data.integral.is_signed); } else { zig_unreachable(); } break; - case IrBinOpSatShl: + case IrBinOpShlSat: if (is_int) { bigint_shl_sat(&out_val->data.x_bigint, &op1_val->data.x_bigint, &op2_val->data.x_bigint, type_entry->data.integral.bit_count, type_entry->data.integral.is_signed); } else { @@ -10069,10 +10069,10 @@ static bool ok_float_op(IrBinOp op) { case IrBinOpBitShiftRightExact: case IrBinOpAddWrap: case IrBinOpSubWrap: - case IrBinOpSatAdd: - case IrBinOpSatSub: - case IrBinOpSatMul: - case IrBinOpSatShl: + case IrBinOpAddSat: + case IrBinOpSubSat: + case IrBinOpMultSat: + case IrBinOpShlSat: case IrBinOpMultWrap: case IrBinOpArrayCat: case IrBinOpArrayMult: @@ -11046,10 +11046,10 @@ static Stage1AirInst *ir_analyze_instruction_bin_op(IrAnalyze *ira, Stage1ZirIns case IrBinOpRemMod: case IrBinOpMaximum: case IrBinOpMinimum: - case IrBinOpSatAdd: - case IrBinOpSatSub: - case IrBinOpSatMul: - case IrBinOpSatShl: + case IrBinOpAddSat: + case IrBinOpSubSat: + case IrBinOpMultSat: + case IrBinOpShlSat: return ir_analyze_bin_op_math(ira, bin_op_instruction); case IrBinOpArrayCat: return ir_analyze_array_cat(ira, bin_op_instruction); diff --git a/src/stage1/ir_print.cpp b/src/stage1/ir_print.cpp index a76d3e4d5a..f92f146d84 100644 --- a/src/stage1/ir_print.cpp +++ b/src/stage1/ir_print.cpp @@ -737,13 +737,13 @@ static const char *ir_bin_op_id_str(IrBinOp op_id) { return "@maximum"; case IrBinOpMinimum: return "@minimum"; - case IrBinOpSatAdd: + case IrBinOpAddSat: return "@addWithSaturation"; - case IrBinOpSatSub: + case IrBinOpSubSat: return "@subWithSaturation"; - case IrBinOpSatMul: + case IrBinOpMultSat: return "@mulWithSaturation"; - case IrBinOpSatShl: + case IrBinOpShlSat: return "@shlWithSaturation"; } zig_unreachable(); diff --git a/src/stage1/parser.cpp b/src/stage1/parser.cpp index f7061bb232..fdc0777aff 100644 --- a/src/stage1/parser.cpp +++ b/src/stage1/parser.cpp @@ -2381,6 +2381,7 @@ static AstNode *ast_parse_switch_item(ParseContext *pc) { // / PLUSEQUAL // / MINUSEQUAL // / LARROW2EQUAL +// / LARROW2PIPEEQUAL // / RARROW2EQUAL // / AMPERSANDEQUAL // / CARETEQUAL @@ -2388,6 +2389,9 @@ static AstNode *ast_parse_switch_item(ParseContext *pc) { // / ASTERISKPERCENTEQUAL // / PLUSPERCENTEQUAL // / MINUSPERCENTEQUAL +// / ASTERISKPIPEEQUAL +// / PLUSPIPEEQUAL +// / MINUSPIPEEQUAL // / EQUAL static AstNode *ast_parse_assign_op(ParseContext *pc) { // In C, we have `T arr[N] = {[i] = T{}};` but it doesn't @@ -2396,17 +2400,21 @@ static AstNode *ast_parse_assign_op(ParseContext *pc) { table[TokenIdBitAndEq] = BinOpTypeAssignBitAnd; table[TokenIdBitOrEq] = BinOpTypeAssignBitOr; table[TokenIdBitShiftLeftEq] = BinOpTypeAssignBitShiftLeft; + table[TokenIdBitShiftLeftPipeEq] = BinOpTypeAssignBitShiftLeftSat; table[TokenIdBitShiftRightEq] = BinOpTypeAssignBitShiftRight; table[TokenIdBitXorEq] = BinOpTypeAssignBitXor; table[TokenIdDivEq] = BinOpTypeAssignDiv; table[TokenIdEq] = BinOpTypeAssign; table[TokenIdMinusEq] = BinOpTypeAssignMinus; table[TokenIdMinusPercentEq] = BinOpTypeAssignMinusWrap; + table[TokenIdMinusPipeEq] = BinOpTypeAssignMinusSat; table[TokenIdModEq] = BinOpTypeAssignMod; table[TokenIdPlusEq] = BinOpTypeAssignPlus; table[TokenIdPlusPercentEq] = BinOpTypeAssignPlusWrap; + table[TokenIdPlusPipeEq] = 
BinOpTypeAssignPlusSat; table[TokenIdTimesEq] = BinOpTypeAssignTimes; table[TokenIdTimesPercentEq] = BinOpTypeAssignTimesWrap; + table[TokenIdTimesPipeEq] = BinOpTypeAssignTimesSat; BinOpType op = table[pc->token_ids[pc->current_token]]; if (op != BinOpTypeInvalid) { @@ -2483,10 +2491,12 @@ static AstNode *ast_parse_bitwise_op(ParseContext *pc) { // BitShiftOp // <- LARROW2 +// / LARROW2PIPE // / RARROW2 static AstNode *ast_parse_bit_shift_op(ParseContext *pc) { BinOpType table[TokenIdCount] = {}; table[TokenIdBitShiftLeft] = BinOpTypeBitShiftLeft; + table[TokenIdBitShiftLeftPipe] = BinOpTypeBitShiftLeftSat; table[TokenIdBitShiftRight] = BinOpTypeBitShiftRight; BinOpType op = table[pc->token_ids[pc->current_token]]; @@ -2506,6 +2516,8 @@ static AstNode *ast_parse_bit_shift_op(ParseContext *pc) { // / PLUS2 // / PLUSPERCENT // / MINUSPERCENT +// / PLUSPIPE +// / MINUSPIPE static AstNode *ast_parse_addition_op(ParseContext *pc) { BinOpType table[TokenIdCount] = {}; table[TokenIdPlus] = BinOpTypeAdd; @@ -2513,6 +2525,8 @@ static AstNode *ast_parse_addition_op(ParseContext *pc) { table[TokenIdPlusPlus] = BinOpTypeArrayCat; table[TokenIdPlusPercent] = BinOpTypeAddWrap; table[TokenIdMinusPercent] = BinOpTypeSubWrap; + table[TokenIdPlusPipe] = BinOpTypeAddSat; + table[TokenIdMinusPipe] = BinOpTypeSubSat; BinOpType op = table[pc->token_ids[pc->current_token]]; if (op != BinOpTypeInvalid) { @@ -2532,6 +2546,7 @@ static AstNode *ast_parse_addition_op(ParseContext *pc) { // / PERCENT // / ASTERISK2 // / ASTERISKPERCENT +// / ASTERISKPIPE static AstNode *ast_parse_multiply_op(ParseContext *pc) { BinOpType table[TokenIdCount] = {}; table[TokenIdBarBar] = BinOpTypeMergeErrorSets; @@ -2540,6 +2555,7 @@ static AstNode *ast_parse_multiply_op(ParseContext *pc) { table[TokenIdPercent] = BinOpTypeMod; table[TokenIdStarStar] = BinOpTypeArrayMult; table[TokenIdTimesPercent] = BinOpTypeMultWrap; + table[TokenIdTimesPipe] = BinOpTypeMultSat; BinOpType op = table[pc->token_ids[pc->current_token]]; if (op != BinOpTypeInvalid) { diff --git a/src/stage1/tokenizer.cpp b/src/stage1/tokenizer.cpp index f10579c966..3560193927 100644 --- a/src/stage1/tokenizer.cpp +++ b/src/stage1/tokenizer.cpp @@ -226,8 +226,10 @@ enum TokenizeState { TokenizeState_pipe, TokenizeState_minus, TokenizeState_minus_percent, + TokenizeState_minus_pipe, TokenizeState_asterisk, TokenizeState_asterisk_percent, + TokenizeState_asterisk_pipe, TokenizeState_slash, TokenizeState_line_comment_start, TokenizeState_line_comment, @@ -257,8 +259,10 @@ enum TokenizeState { TokenizeState_percent, TokenizeState_plus, TokenizeState_plus_percent, + TokenizeState_plus_pipe, TokenizeState_angle_bracket_left, TokenizeState_angle_bracket_angle_bracket_left, + TokenizeState_angle_bracket_angle_bracket_left_pipe, TokenizeState_angle_bracket_right, TokenizeState_angle_bracket_angle_bracket_right, TokenizeState_period, @@ -548,6 +552,9 @@ void tokenize(const char *source, Tokenization *out) { case '%': t.state = TokenizeState_asterisk_percent; break; + case '|': + t.state = TokenizeState_asterisk_pipe; + break; default: t.state = TokenizeState_start; continue; @@ -568,6 +575,21 @@ void tokenize(const char *source, Tokenization *out) { continue; } break; + case TokenizeState_asterisk_pipe: + switch (c) { + case 0: + t.out->ids.last() = TokenIdTimesPipe; + goto eof; + case '=': + t.out->ids.last() = TokenIdTimesPipeEq; + t.state = TokenizeState_start; + break; + default: + t.out->ids.last() = TokenIdTimesPipe; + t.state = TokenizeState_start; + continue; + } + break; 
case TokenizeState_percent: switch (c) { case 0: @@ -596,6 +618,9 @@ void tokenize(const char *source, Tokenization *out) { case '%': t.state = TokenizeState_plus_percent; break; + case '|': + t.state = TokenizeState_plus_pipe; + break; default: t.state = TokenizeState_start; continue; @@ -616,6 +641,21 @@ void tokenize(const char *source, Tokenization *out) { continue; } break; + case TokenizeState_plus_pipe: + switch (c) { + case 0: + t.out->ids.last() = TokenIdPlusPipe; + goto eof; + case '=': + t.out->ids.last() = TokenIdPlusPipeEq; + t.state = TokenizeState_start; + break; + default: + t.out->ids.last() = TokenIdPlusPipe; + t.state = TokenizeState_start; + continue; + } + break; case TokenizeState_caret: switch (c) { case 0: @@ -891,6 +931,9 @@ void tokenize(const char *source, Tokenization *out) { case '%': t.state = TokenizeState_minus_percent; break; + case '|': + t.state = TokenizeState_minus_pipe; + break; default: t.state = TokenizeState_start; continue; @@ -911,6 +954,21 @@ void tokenize(const char *source, Tokenization *out) { continue; } break; + case TokenizeState_minus_pipe: + switch (c) { + case 0: + t.out->ids.last() = TokenIdMinusPipe; + goto eof; + case '=': + t.out->ids.last() = TokenIdMinusPipeEq; + t.state = TokenizeState_start; + break; + default: + t.out->ids.last() = TokenIdMinusPipe; + t.state = TokenizeState_start; + continue; + } + break; case TokenizeState_angle_bracket_left: switch (c) { case 0: @@ -936,12 +994,31 @@ void tokenize(const char *source, Tokenization *out) { t.out->ids.last() = TokenIdBitShiftLeftEq; t.state = TokenizeState_start; break; + case '|': + // t.out->ids.last() = TokenIdBitShiftLeftPipe; + t.state = TokenizeState_angle_bracket_angle_bracket_left_pipe; + break; default: t.out->ids.last() = TokenIdBitShiftLeft; t.state = TokenizeState_start; continue; } break; + case TokenizeState_angle_bracket_angle_bracket_left_pipe: + switch (c) { + case 0: + t.out->ids.last() = TokenIdBitShiftLeftPipe; + goto eof; + case '=': + t.out->ids.last() = TokenIdBitShiftLeftPipeEq; + t.state = TokenizeState_start; + break; + default: + t.out->ids.last() = TokenIdBitShiftLeftPipe; + t.state = TokenizeState_start; + continue; + } + break; case TokenizeState_angle_bracket_right: switch (c) { case 0: @@ -1437,6 +1514,8 @@ const char * token_name(TokenId id) { case TokenIdBitOrEq: return "|="; case TokenIdBitShiftLeft: return "<<"; case TokenIdBitShiftLeftEq: return "<<="; + case TokenIdBitShiftLeftPipe: return "<<|"; + case TokenIdBitShiftLeftPipeEq: return "<<|="; case TokenIdBitShiftRight: return ">>"; case TokenIdBitShiftRightEq: return ">>="; case TokenIdBitXorEq: return "^="; @@ -1521,12 +1600,16 @@ const char * token_name(TokenId id) { case TokenIdMinusEq: return "-="; case TokenIdMinusPercent: return "-%"; case TokenIdMinusPercentEq: return "-%="; + case TokenIdMinusPipe: return "-|"; + case TokenIdMinusPipeEq: return "-|="; case TokenIdModEq: return "%="; case TokenIdPercent: return "%"; case TokenIdPlus: return "+"; case TokenIdPlusEq: return "+="; case TokenIdPlusPercent: return "+%"; case TokenIdPlusPercentEq: return "+%="; + case TokenIdPlusPipe: return "+|"; + case TokenIdPlusPipeEq: return "+|="; case TokenIdPlusPlus: return "++"; case TokenIdRBrace: return "}"; case TokenIdRBracket: return "]"; @@ -1542,6 +1625,8 @@ const char * token_name(TokenId id) { case TokenIdTimesEq: return "*="; case TokenIdTimesPercent: return "*%"; case TokenIdTimesPercentEq: return "*%="; + case TokenIdTimesPipe: return "*|"; + case TokenIdTimesPipeEq: return "*|="; 
case TokenIdBuiltin: return "Builtin"; case TokenIdCount: zig_unreachable(); diff --git a/src/stage1/tokenizer.hpp b/src/stage1/tokenizer.hpp index 0e196597eb..56605c1764 100644 --- a/src/stage1/tokenizer.hpp +++ b/src/stage1/tokenizer.hpp @@ -23,6 +23,8 @@ enum TokenId : uint8_t { TokenIdBitOrEq, TokenIdBitShiftLeft, TokenIdBitShiftLeftEq, + TokenIdBitShiftLeftPipe, + TokenIdBitShiftLeftPipeEq, TokenIdBitShiftRight, TokenIdBitShiftRightEq, TokenIdBitXorEq, @@ -108,12 +110,16 @@ enum TokenId : uint8_t { TokenIdMinusEq, TokenIdMinusPercent, TokenIdMinusPercentEq, + TokenIdMinusPipe, + TokenIdMinusPipeEq, TokenIdModEq, TokenIdPercent, TokenIdPlus, TokenIdPlusEq, TokenIdPlusPercent, TokenIdPlusPercentEq, + TokenIdPlusPipe, + TokenIdPlusPipeEq, TokenIdPlusPlus, TokenIdRBrace, TokenIdRBracket, @@ -129,6 +135,8 @@ enum TokenId : uint8_t { TokenIdTimesEq, TokenIdTimesPercent, TokenIdTimesPercentEq, + TokenIdTimesPipe, + TokenIdTimesPipeEq, TokenIdCount, }; diff --git a/test/behavior/saturating_arithmetic.zig b/test/behavior/saturating_arithmetic.zig index 553e9ff21a..7a28ed182d 100644 --- a/test/behavior/saturating_arithmetic.zig +++ b/test/behavior/saturating_arithmetic.zig @@ -11,13 +11,34 @@ fn testSaturatingOp(comptime op: Op, comptime T: type, test_data: [3]T) !void { const a = test_data[0]; const b = test_data[1]; const expected = test_data[2]; - const actual = switch (op) { - .add => @addWithSaturation(a, b), - .sub => @subWithSaturation(a, b), - .mul => @mulWithSaturation(a, b), - .shl => @shlWithSaturation(a, b), - }; - try expectEqual(expected, actual); + { + const actual = switch (op) { + .add => @addWithSaturation(a, b), + .sub => @subWithSaturation(a, b), + .mul => @mulWithSaturation(a, b), + .shl => @shlWithSaturation(a, b), + }; + try expectEqual(expected, actual); + } + { + const actual = switch (op) { + .add => a +| b, + .sub => a -| b, + .mul => a *| b, + .shl => a <<| b, + }; + try expectEqual(expected, actual); + } + { + var actual = a; + switch (op) { + .add => actual +|= b, + .sub => actual -|= b, + .mul => actual *|= b, + .shl => actual <<|= b, + } + try expectEqual(expected, actual); + } } test "@addWithSaturation" { -- cgit v1.2.3 From b9a95f2dd94e6175322d3388c3936eb600ec90ea Mon Sep 17 00:00:00 2001 From: Travis Staloch Date: Wed, 8 Sep 2021 15:19:03 -0700 Subject: sat-arithmetic: add c backend support - modify AstGen binOpExt()/assignBinOpExt() to accept generic extended payload T - rework Sema zirSatArithmetic() to use existing sema.analyzeArithmetic() by adding an `opt_extended` parameter. 
- add airSatOp() to codegen/c.zig - add saturating functions to src/link/C/zig.h --- src/AstGen.zig | 62 +++++++++++++------------- src/Sema.zig | 25 +++++++---- src/codegen/c.zig | 120 +++++++++++++++++++++++++++++++++++++++++++++++++-- src/codegen/llvm.zig | 6 +-- src/link/C/zig.h | 93 +++++++++++++++++++++++++++++++++++++++ 5 files changed, 262 insertions(+), 44 deletions(-) (limited to 'src/codegen/c.zig') diff --git a/src/AstGen.zig b/src/AstGen.zig index b3af3eb86b..25452cb386 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -535,7 +535,7 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerEr return rvalue(gz, rl, .void_value, node); }, .assign_bit_shift_left_sat => { - try assignBinOpExt(gz, scope, node, .shl_with_saturation); + try assignBinOpExt(gz, scope, node, .shl_with_saturation, Zir.Inst.SaturatingArithmetic); return rvalue(gz, rl, .void_value, node); }, .assign_bit_shift_right => { @@ -568,7 +568,7 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerEr return rvalue(gz, rl, .void_value, node); }, .assign_sub_sat => { - try assignBinOpExt(gz, scope, node, .sub_with_saturation); + try assignBinOpExt(gz, scope, node, .sub_with_saturation, Zir.Inst.SaturatingArithmetic); return rvalue(gz, rl, .void_value, node); }, .assign_mod => { @@ -584,7 +584,7 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerEr return rvalue(gz, rl, .void_value, node); }, .assign_add_sat => { - try assignBinOpExt(gz, scope, node, .add_with_saturation); + try assignBinOpExt(gz, scope, node, .add_with_saturation, Zir.Inst.SaturatingArithmetic); return rvalue(gz, rl, .void_value, node); }, .assign_mul => { @@ -596,24 +596,24 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerEr return rvalue(gz, rl, .void_value, node); }, .assign_mul_sat => { - try assignBinOpExt(gz, scope, node, .mul_with_saturation); + try assignBinOpExt(gz, scope, node, .mul_with_saturation, Zir.Inst.SaturatingArithmetic); return rvalue(gz, rl, .void_value, node); }, // zig fmt: off .bit_shift_left => return shiftOp(gz, scope, rl, node, node_datas[node].lhs, node_datas[node].rhs, .shl), - .bit_shift_left_sat => return binOpExt(gz, scope, rl, node, node_datas[node].lhs, node_datas[node].rhs, .shl_with_saturation), + .bit_shift_left_sat => return binOpExt(gz, scope, rl, node, node_datas[node].lhs, node_datas[node].rhs, .shl_with_saturation, Zir.Inst.SaturatingArithmetic), .bit_shift_right => return shiftOp(gz, scope, rl, node, node_datas[node].lhs, node_datas[node].rhs, .shr), .add => return simpleBinOp(gz, scope, rl, node, .add), .add_wrap => return simpleBinOp(gz, scope, rl, node, .addwrap), - .add_sat => return binOpExt(gz, scope, rl, node, node_datas[node].lhs, node_datas[node].rhs, .add_with_saturation), + .add_sat => return binOpExt(gz, scope, rl, node, node_datas[node].lhs, node_datas[node].rhs, .add_with_saturation, Zir.Inst.SaturatingArithmetic), .sub => return simpleBinOp(gz, scope, rl, node, .sub), .sub_wrap => return simpleBinOp(gz, scope, rl, node, .subwrap), - .sub_sat => return binOpExt(gz, scope, rl, node, node_datas[node].lhs, node_datas[node].rhs, .sub_with_saturation), + .sub_sat => return binOpExt(gz, scope, rl, node, node_datas[node].lhs, node_datas[node].rhs, .sub_with_saturation, Zir.Inst.SaturatingArithmetic), .mul => return simpleBinOp(gz, scope, rl, node, .mul), .mul_wrap => return simpleBinOp(gz, scope, rl, node, .mulwrap), - .mul_sat => return binOpExt(gz, scope, rl, node, node_datas[node].lhs, 
node_datas[node].rhs, .mul_with_saturation), + .mul_sat => return binOpExt(gz, scope, rl, node, node_datas[node].lhs, node_datas[node].rhs, .mul_with_saturation, Zir.Inst.SaturatingArithmetic), .div => return simpleBinOp(gz, scope, rl, node, .div), .mod => return simpleBinOp(gz, scope, rl, node, .mod_rem), .bit_and => { @@ -2713,6 +2713,28 @@ fn assignOp( _ = try gz.addBin(.store, lhs_ptr, result); } +// TODO: is there an existing way to do this? +// TODO: likely rename this to reflect result_loc == .none or add more params to make it more general +fn binOpExt( + gz: *GenZir, + scope: *Scope, + rl: ResultLoc, + infix_node: Ast.Node.Index, + lhs_node: Ast.Node.Index, + rhs_node: Ast.Node.Index, + tag: Zir.Inst.Extended, + comptime T: type, +) InnerError!Zir.Inst.Ref { + const lhs = try expr(gz, scope, .none, lhs_node); + const rhs = try expr(gz, scope, .none, rhs_node); + const result = try gz.addExtendedPayload(tag, T{ + .node = gz.nodeIndexToRelative(infix_node), + .lhs = lhs, + .rhs = rhs, + }); + return rvalue(gz, rl, result, infix_node); +} + // TODO: is there an existing method to accomplish this? // TODO: likely rename this to indicate rhs type coercion or add more params to make it more general fn assignBinOpExt( @@ -2720,8 +2742,8 @@ fn assignBinOpExt( scope: *Scope, infix_node: Ast.Node.Index, op_inst_tag: Zir.Inst.Extended, + comptime T: type, ) InnerError!void { - try emitDbgNode(gz, infix_node); const astgen = gz.astgen; const tree = astgen.tree; const node_datas = tree.nodes.items(.data); @@ -2730,7 +2752,7 @@ fn assignBinOpExt( const lhs = try gz.addUnNode(.load, lhs_ptr, infix_node); const lhs_type = try gz.addUnNode(.typeof, lhs, infix_node); const rhs = try expr(gz, scope, .{ .coerced_ty = lhs_type }, node_datas[infix_node].rhs); - const result = try gz.addExtendedPayload(op_inst_tag, Zir.Inst.BinNode{ + const result = try gz.addExtendedPayload(op_inst_tag, T{ .node = gz.nodeIndexToRelative(infix_node), .lhs = lhs, .rhs = rhs, @@ -7903,26 +7925,6 @@ fn shiftOp( return rvalue(gz, rl, result, node); } -// TODO: is there an existing way to do this? 
-// TODO: likely rename this to reflect result_loc == .none or add more params to make it more general -fn binOpExt( - gz: *GenZir, - scope: *Scope, - rl: ResultLoc, - node: Ast.Node.Index, - lhs_node: Ast.Node.Index, - rhs_node: Ast.Node.Index, - tag: Zir.Inst.Extended, -) InnerError!Zir.Inst.Ref { - const lhs = try expr(gz, scope, .none, lhs_node); - const rhs = try expr(gz, scope, .none, rhs_node); - const result = try gz.addExtendedPayload(tag, Zir.Inst.Bin{ - .lhs = lhs, - .rhs = rhs, - }); - return rvalue(gz, rl, result, node); -} - fn cImport( gz: *GenZir, scope: *Scope, diff --git a/src/Sema.zig b/src/Sema.zig index de94a8c6b8..a41d330285 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -694,10 +694,11 @@ fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr .c_define => return sema.zirCDefine( block, extended), .wasm_memory_size => return sema.zirWasmMemorySize( block, extended), .wasm_memory_grow => return sema.zirWasmMemoryGrow( block, extended), - .add_with_saturation=> return sema.zirSatArithmetic( block, extended), - .sub_with_saturation=> return sema.zirSatArithmetic( block, extended), - .mul_with_saturation=> return sema.zirSatArithmetic( block, extended), - .shl_with_saturation=> return sema.zirSatArithmetic( block, extended), + .add_with_saturation, + .sub_with_saturation, + .mul_with_saturation, + .shl_with_saturation, + => return sema.zirSatArithmetic( block, extended), // zig fmt: on } } @@ -6163,7 +6164,7 @@ fn zirNegate( const lhs = sema.resolveInst(.zero); const rhs = sema.resolveInst(inst_data.operand); - return sema.analyzeArithmetic(block, tag_override, lhs, rhs, src, lhs_src, rhs_src); + return sema.analyzeArithmetic(block, tag_override, lhs, rhs, src, lhs_src, rhs_src, null); } fn zirArithmetic( @@ -6183,7 +6184,7 @@ fn zirArithmetic( const lhs = sema.resolveInst(extra.lhs); const rhs = sema.resolveInst(extra.rhs); - return sema.analyzeArithmetic(block, zir_tag, lhs, rhs, sema.src, lhs_src, rhs_src); + return sema.analyzeArithmetic(block, zir_tag, lhs, rhs, sema.src, lhs_src, rhs_src, null); } fn zirOverflowArithmetic( @@ -6209,10 +6210,17 @@ fn zirSatArithmetic( defer tracy.end(); const extra = sema.code.extraData(Zir.Inst.SaturatingArithmetic, extended.operand).data; - const src: LazySrcLoc = .{ .node_offset = extra.node }; - return sema.mod.fail(&block.base, src, "TODO implement Sema.zirSatArithmetic", .{}); + sema.src = .{ .node_offset_bin_op = extra.node }; + const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = extra.node }; + const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = extra.node }; + const lhs = sema.resolveInst(extra.lhs); + const rhs = sema.resolveInst(extra.rhs); + + return sema.analyzeArithmetic(block, .extended, lhs, rhs, sema.src, lhs_src, rhs_src, extended); } +// TODO: audit - not sure if its a good idea to reuse this, adding `opt_extended` param +// FIXME: somehow, rhs of <<| is required to be Log2T. this should accept T fn analyzeArithmetic( sema: *Sema, block: *Scope.Block, @@ -6223,6 +6231,7 @@ fn analyzeArithmetic( src: LazySrcLoc, lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, + opt_extended: ?Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { const lhs_ty = sema.typeOf(lhs); const rhs_ty = sema.typeOf(rhs); diff --git a/src/codegen/c.zig b/src/codegen/c.zig index dce0c10b4c..9ded6fe0e8 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -885,17 +885,17 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO // that wrapping is UB. 
.add, .ptr_add => try airBinOp( f, inst, " + "), .addwrap => try airWrapOp(f, inst, " + ", "addw_"), - .addsat => return o.dg.fail("TODO: C backend: implement codegen for addsat", .{}), + .addsat => return f.fail("TODO: C backend: implement codegen for addsat", .{}), // TODO use a different strategy for sub that communicates to the optimizer // that wrapping is UB. .sub, .ptr_sub => try airBinOp( f, inst, " - "), .subwrap => try airWrapOp(f, inst, " - ", "subw_"), - .subsat => return o.dg.fail("TODO: C backend: implement codegen for subsat", .{}), + .subsat => return f.fail("TODO: C backend: implement codegen for subsat", .{}), // TODO use a different strategy for mul that communicates to the optimizer // that wrapping is UB. .mul => try airBinOp( f, inst, " * "), .mulwrap => try airWrapOp(f, inst, " * ", "mulw_"), - .mulsat => return o.dg.fail("TODO: C backend: implement codegen for mulsat", .{}), + .mulsat => return f.fail("TODO: C backend: implement codegen for mulsat", .{}), // TODO use a different strategy for div that communicates to the optimizer // that wrapping is UB. .div => try airBinOp( f, inst, " / "), @@ -919,6 +919,8 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO .shr => try airBinOp(f, inst, " >> "), .shl => try airBinOp(f, inst, " << "), + .shl_sat => return f.fail("TODO: C backend: implement codegen for mulsat", .{}), + .not => try airNot( f, inst), @@ -1312,6 +1314,118 @@ fn airWrapOp( return ret; } +fn airSatOp( + o: *Object, + inst: Air.Inst.Index, + str_op: [*:0]const u8, + fn_op: [*:0]const u8, +) !CValue { + if (o.liveness.isUnused(inst)) + return CValue.none; + + const bin_op = o.air.instructions.items(.data)[inst].bin_op; + const inst_ty = o.air.typeOfIndex(inst); + const int_info = inst_ty.intInfo(o.dg.module.getTarget()); + const bits = int_info.bits; + + // if it's an unsigned int with non-arbitrary bit size then we can just add + const ok_bits = switch (bits) { + 8, 16, 32, 64, 128 => true, + else => false, + }; + + if (bits > 64) { + return f.fail("TODO: C backend: airSatOp for large integers", .{}); + } + + var min_buf: [80]u8 = undefined; + const min = switch (int_info.signedness) { + .unsigned => "0", + else => switch (inst_ty.tag()) { + .c_short => "SHRT_MIN", + .c_int => "INT_MIN", + .c_long => "LONG_MIN", + .c_longlong => "LLONG_MIN", + .isize => "INTPTR_MIN", + else => blk: { + const val = -1 * std.math.pow(i65, 2, @intCast(i65, bits - 1)); + break :blk std.fmt.bufPrint(&min_buf, "{d}", .{val}) catch |err| switch (err) { + error.NoSpaceLeft => unreachable, + else => |e| return e, + }; + }, + }, + }; + + var max_buf: [80]u8 = undefined; + const max = switch (inst_ty.tag()) { + .c_short => "SHRT_MAX", + .c_ushort => "USHRT_MAX", + .c_int => "INT_MAX", + .c_uint => "UINT_MAX", + .c_long => "LONG_MAX", + .c_ulong => "ULONG_MAX", + .c_longlong => "LLONG_MAX", + .c_ulonglong => "ULLONG_MAX", + .isize => "INTPTR_MAX", + .usize => "UINTPTR_MAX", + else => blk: { + const pow_bits = switch (int_info.signedness) { + .signed => bits - 1, + .unsigned => bits, + }; + const val = std.math.pow(u65, 2, pow_bits) - 1; + break :blk std.fmt.bufPrint(&max_buf, "{}", .{val}) catch |err| switch (err) { + error.NoSpaceLeft => unreachable, + else => |e| return e, + }; + }, + }; + + const lhs = try o.resolveInst(bin_op.lhs); + const rhs = try o.resolveInst(bin_op.rhs); + const w = o.writer(); + + const ret = try o.allocLocal(inst_ty, .Mut); + try w.print(" = zig_{s}", .{fn_op}); + + switch (inst_ty.tag()) { + .isize => try 
w.writeAll("isize"), + .c_short => try w.writeAll("short"), + .c_int => try w.writeAll("int"), + .c_long => try w.writeAll("long"), + .c_longlong => try w.writeAll("longlong"), + else => { + const prefix_byte: u8 = switch (int_info.signedness) { + .signed => 'i', + .unsigned => 'u', + }; + for ([_]u8{ 8, 16, 32, 64 }) |nbits| { + if (bits <= nbits) { + try w.print("{c}{d}", .{ prefix_byte, nbits }); + break; + } + } else { + unreachable; + } + }, + } + + try w.writeByte('('); + try o.writeCValue(w, lhs); + try w.writeAll(", "); + try o.writeCValue(w, rhs); + + if (int_info.signedness == .signed) { + try w.print(", {s}", .{min}); + } + + try w.print(", {s});", .{max}); + try o.indent_writer.insertNewline(); + + return ret; +} + fn airNot(f: *Function, inst: Air.Inst.Index) !CValue { if (f.liveness.isUnused(inst)) return CValue.none; diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index c21985d1c5..733984a9d6 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -2038,7 +2038,7 @@ pub const FuncGen = struct { const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); - if (inst_ty.isFloat()) return self.builder.buildFAdd(lhs, rhs, ""); + if (inst_ty.isAnyFloat()) return self.builder.buildFAdd(lhs, rhs, ""); if (ty == .wrapping) return self.builder.buildAdd(lhs, rhs, "") else if (ty == .saturated) { @@ -2060,7 +2060,7 @@ pub const FuncGen = struct { const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); - if (inst_ty.isFloat()) return self.builder.buildFSub(lhs, rhs, ""); + if (inst_ty.isAnyFloat()) return self.builder.buildFSub(lhs, rhs, ""); if (ty == .wrapping) return self.builder.buildSub(lhs, rhs, "") else if (ty == .saturated) { @@ -2082,7 +2082,7 @@ pub const FuncGen = struct { const rhs = try self.resolveInst(bin_op.rhs); const inst_ty = self.air.typeOfIndex(inst); - if (inst_ty.isFloat()) return self.builder.buildFMul(lhs, rhs, ""); + if (inst_ty.isAnyFloat()) return self.builder.buildFMul(lhs, rhs, ""); if (ty == .wrapping) return self.builder.buildMul(lhs, rhs, "") else if (ty == .saturated) { diff --git a/src/link/C/zig.h b/src/link/C/zig.h index b34068d1f2..84b1c3dac6 100644 --- a/src/link/C/zig.h +++ b/src/link/C/zig.h @@ -356,3 +356,96 @@ static inline long long zig_subw_longlong(long long lhs, long long rhs, long lon return (long long)(((unsigned long long)lhs) - ((unsigned long long)rhs)); } +/* + * Saturating aritmetic operations: add, sub, mul, shl + */ +#define zig_add_sat_u(ZT, T) static inline T zig_adds_##ZT(T x, T y, T max) { \ + return (x > max - y) ? max : x + y; \ +} + +#define zig_add_sat_s(ZT, T, T2) static inline T zig_adds_##ZT(T2 x, T2 y, T2 min, T2 max) { \ + T2 res = x + y; \ + return (res < min) ? min : (res > max) ? max : res; \ +} + +zig_add_sat_u( u8, uint8_t) +zig_add_sat_s( i8, int8_t, int16_t) +zig_add_sat_u(u16, uint16_t) +zig_add_sat_s(i16, int16_t, int32_t) +zig_add_sat_u(u32, uint32_t) +zig_add_sat_s(i32, int32_t, int64_t) +zig_add_sat_u(u64, uint64_t) +zig_add_sat_s(i64, int64_t, int128_t) +zig_add_sat_s(isize, intptr_t, int128_t) +zig_add_sat_s(short, short, int) +zig_add_sat_s(int, int, long) +zig_add_sat_s(long, long, long long) + +#define zig_sub_sat_u(ZT, T) static inline T zig_subs_##ZT(T x, T y, T max) { \ + return (x > max + y) ? max : x - y; \ +} + +#define zig_sub_sat_s(ZT, T, T2) static inline T zig_subs_##ZT(T2 x, T2 y, T2 min, T2 max) { \ + T2 res = x - y; \ + return (res < min) ? min : (res > max) ? 
max : res; \ +} + +zig_sub_sat_u( u8, uint8_t) +zig_sub_sat_s( i8, int8_t, int16_t) +zig_sub_sat_u(u16, uint16_t) +zig_sub_sat_s(i16, int16_t, int32_t) +zig_sub_sat_u(u32, uint32_t) +zig_sub_sat_s(i32, int32_t, int64_t) +zig_sub_sat_u(u64, uint64_t) +zig_sub_sat_s(i64, int64_t, int128_t) +zig_sub_sat_s(isize, intptr_t, int128_t) +zig_sub_sat_s(short, short, int) +zig_sub_sat_s(int, int, long) +zig_sub_sat_s(long, long, long long) + + +#define zig_mul_sat_u(ZT, T, T2) static inline T zig_muls_##ZT(T2 x, T2 y, T2 max) { \ + T2 res = x * y; \ + return (res > max) ? max : res; \ +} + +#define zig_mul_sat_s(ZT, T, T2) static inline T zig_muls_##ZT(T2 x, T2 y, T2 min, T2 max) { \ + T2 res = x * y; \ + return (res < min) ? min : (res > max) ? max : res; \ +} + +zig_mul_sat_u(u8, uint8_t, uint16_t) +zig_mul_sat_s(i8, int8_t, int16_t) +zig_mul_sat_u(u16, uint16_t, uint32_t) +zig_mul_sat_s(i16, int16_t, int32_t) +zig_mul_sat_u(u32, uint32_t, uint64_t) +zig_mul_sat_s(i32, int32_t, int64_t) +zig_mul_sat_u(u64, uint64_t, uint128_t) +zig_mul_sat_s(i64, int64_t, int128_t) +zig_mul_sat_s(isize, intptr_t, int128_t) +zig_mul_sat_s(short, short, int) +zig_mul_sat_s(int, int, long) +zig_mul_sat_s(long, long, long long) + +#define zig_shl_sat_u(ZT, T, bits) static inline T zig_shls_##ZT(T x, T y, T max) { \ + T leading_zeros = __builtin_clz(x); \ + return (leading_zeros + y > bits) ? max : x << y; \ +} + +#define zig_shl_sat_s(ZT, T, bits) static inline T zig_shls_##ZT(T x, T y, T min, T max) { \ + T leading_zeros = __builtin_clz(x & ~max); \ + return (leading_zeros + y > bits) ? max : x << y; \ +} + +zig_shl_sat_u(u8, uint8_t, 8) +zig_shl_sat_s(i8, int8_t, 7) +zig_shl_sat_u(u16, uint16_t, 16) +zig_shl_sat_s(i16, int16_t, 15) +zig_shl_sat_u(u32, uint32_t, 32) +zig_shl_sat_s(i32, int32_t, 31) +zig_shl_sat_u(u64, uint64_t, 64) +zig_shl_sat_s(i64, int64_t, 63) +zig_shl_sat_s(isize, intptr_t, 63) +zig_shl_sat_s(short, short, 15) +zig_shl_sat_s(int, int, 31) +zig_shl_sat_s(long, long, 63) \ No newline at end of file -- cgit v1.2.3 From bdb90a07bbf0fdedca71f5deace7087bc562b437 Mon Sep 17 00:00:00 2001 From: Travis Staloch Date: Wed, 8 Sep 2021 16:30:11 -0700 Subject: sat-arithmetic: fixups zig fmt / astcheck --- src/AstGen.zig | 2 +- src/codegen/c.zig | 11 +++++------ 2 files changed, 6 insertions(+), 7 deletions(-) (limited to 'src/codegen/c.zig') diff --git a/src/AstGen.zig b/src/AstGen.zig index 25452cb386..d3235ace53 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -2786,7 +2786,7 @@ fn assignShift( fn assignShiftSat( gz: *GenZir, scope: *Scope, - infix_node: ast.Node.Index, + infix_node: Ast.Node.Index, op_inst_tag: Zir.Inst.Tag, ) InnerError!void { try emitDbgNode(gz, infix_node); diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 9ded6fe0e8..37e19d9e1a 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1317,7 +1317,6 @@ fn airWrapOp( fn airSatOp( o: *Object, inst: Air.Inst.Index, - str_op: [*:0]const u8, fn_op: [*:0]const u8, ) !CValue { if (o.liveness.isUnused(inst)) @@ -1328,12 +1327,12 @@ fn airSatOp( const int_info = inst_ty.intInfo(o.dg.module.getTarget()); const bits = int_info.bits; - // if it's an unsigned int with non-arbitrary bit size then we can just add - const ok_bits = switch (bits) { - 8, 16, 32, 64, 128 => true, - else => false, - }; + switch (bits) { + 8, 16, 32, 64, 128 => {}, + else => return o.dg.fail("TODO: C backend: airSatOp for non power of 2 integers", .{}), + } + // if it's an unsigned int with non-arbitrary bit size then we can just add if (bits > 64) { return 
f.fail("TODO: C backend: airSatOp for large integers", .{}); } -- cgit v1.2.3 From 487059535242e2b94303502806feaa99d560c63b Mon Sep 17 00:00:00 2001 From: Travis Staloch Date: Thu, 9 Sep 2021 14:17:59 -0700 Subject: sat-arithmetic: add additional tokenizer tests --- lib/std/zig/tokenizer.zig | 12 ++++++++++++ src/codegen/c.zig | 1 + 2 files changed, 13 insertions(+) (limited to 'src/codegen/c.zig') diff --git a/lib/std/zig/tokenizer.zig b/lib/std/zig/tokenizer.zig index a7442b8b25..02fa3dd381 100644 --- a/lib/std/zig/tokenizer.zig +++ b/lib/std/zig/tokenizer.zig @@ -2019,6 +2019,18 @@ test "tokenizer - saturating" { try testTokenize("<<", &.{.angle_bracket_angle_bracket_left}); try testTokenize("<<|", &.{.angle_bracket_angle_bracket_left_pipe}); try testTokenize("<<|=", &.{.angle_bracket_angle_bracket_left_pipe_equal}); + + try testTokenize("*", &.{.asterisk}); + try testTokenize("*|", &.{.asterisk_pipe}); + try testTokenize("*|=", &.{.asterisk_pipe_equal}); + + try testTokenize("+", &.{.plus}); + try testTokenize("+|", &.{.plus_pipe}); + try testTokenize("+|=", &.{.plus_pipe_equal}); + + try testTokenize("-", &.{.minus}); + try testTokenize("-|", &.{.minus_pipe}); + try testTokenize("-|=", &.{.minus_pipe_equal}); } fn testTokenize(source: [:0]const u8, expected_tokens: []const Token.Tag) !void { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 37e19d9e1a..6101740eea 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1347,6 +1347,7 @@ fn airSatOp( .c_longlong => "LLONG_MIN", .isize => "INTPTR_MIN", else => blk: { + // compute the type minimum based on the bitcount (bits) const val = -1 * std.math.pow(i65, 2, @intCast(i65, bits - 1)); break :blk std.fmt.bufPrint(&min_buf, "{d}", .{val}) catch |err| switch (err) { error.NoSpaceLeft => unreachable, -- cgit v1.2.3 From 51673bcb315d837ee0fe80dc50571c2c07d80a2c Mon Sep 17 00:00:00 2001 From: Travis Staloch Date: Wed, 22 Sep 2021 00:14:08 -0700 Subject: get build passing again after rebase --- src/codegen/c.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/codegen/c.zig') diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 6101740eea..1afa81b70f 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1334,7 +1334,7 @@ fn airSatOp( // if it's an unsigned int with non-arbitrary bit size then we can just add if (bits > 64) { - return f.fail("TODO: C backend: airSatOp for large integers", .{}); + return o.dg.fail("TODO: C backend: airSatOp for large integers", .{}); } var min_buf: [80]u8 = undefined; -- cgit v1.2.3 From 54675824449d16029fdf6a1873e78cb8f2147f60 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 28 Sep 2021 18:55:43 -0700 Subject: saturating arithmetic modifications * Remove the builtins `@addWithSaturation`, `@subWithSaturation`, `@mulWithSaturation`, and `@shlWithSaturation` now that we have first-class syntax for saturating arithmetic. * langref: Clarify the behavior of `@shlExact`. * Ast: rename `bit_shift_left` to `shl` and `bit_shift_right` to `shr` for consistency. * Air: rename to include underscore separator with consistency with the rest of the ops. * Air: add shl_exact instruction * Use non-extended tags for saturating arithmetic, to keep it simple so that all the arithmetic operations can be done the same way. - Sema: unify analyzeArithmetic with analyzeSatArithmetic - implement comptime `+|`, `-|`, and `*|` - allow float operands to saturating arithmetic * `<<|` allows any integer type for the RHS. 
* C backend: fix rebase conflicts * LLVM backend: reduce the amount of branching for arithmetic ops * zig.h: fix magic number not matching actual size of C integer types --- doc/langref.html.in | 63 +-------- lib/std/zig/Ast.zig | 36 ++--- lib/std/zig/parse.zig | 12 +- lib/std/zig/render.zig | 20 +-- src/Air.zig | 18 ++- src/AstGen.zig | 153 ++++++++------------- src/BuiltinFn.zig | 32 ----- src/Liveness.zig | 7 +- src/Sema.zig | 234 +++++++++++++++----------------- src/Zir.zig | 143 ++++++++++--------- src/codegen.zig | 56 +++++--- src/codegen/c.zig | 76 +++++------ src/codegen/llvm.zig | 188 +++++++++++++++++-------- src/codegen/llvm/bindings.zig | 6 + src/link/C/zig.h | 11 +- src/print_air.zig | 7 +- src/print_zir.zig | 22 +-- src/stage1/all_types.hpp | 4 - src/stage1/astgen.cpp | 60 -------- src/stage1/codegen.cpp | 4 - src/translate_c/ast.zig | 8 +- src/value.zig | 87 ++++++++++++ test/behavior/saturating_arithmetic.zig | 33 ++--- 23 files changed, 616 insertions(+), 664 deletions(-) (limited to 'src/codegen/c.zig') diff --git a/doc/langref.html.in b/doc/langref.html.in index e750797997..2e69e37097 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -1407,7 +1407,6 @@ a +|= b{#endsyntax#} Saturating Addition.
          <li>Invokes {#link|Peer Type Resolution#} for the operands.</li>
-         <li>See also {#link|@addWithSaturation#}.</li>
@@ -1464,7 +1463,6 @@ a -|= b{#endsyntax#}
       Saturating Subtraction.
          <li>Invokes {#link|Peer Type Resolution#} for the operands.</li>
-         <li>See also {#link|@subWithSaturation#}.</li>
@@ -1556,7 +1554,6 @@ a *|= b{#endsyntax#}
       Saturating Multiplication.
          <li>Invokes {#link|Peer Type Resolution#} for the operands.</li>
-         <li>See also {#link|@mulWithSaturation#}.</li>
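For reference, a minimal Zig sketch of the clamping behavior these operator table entries document; it is illustrative only (not taken from the patch), and the test name and values are invented:

const std = @import("std");
const expect = std.testing.expect;

test "saturating operators clamp at the type bounds" {
    // +| clamps instead of wrapping: 250 + 10 exceeds u8's maximum of 255.
    var a: u8 = 250;
    a +|= 10; // spelled @addWithSaturation(a, 10) before this change
    try expect(a == 255);

    // -| clamps at the signed minimum rather than overflowing.
    const b: i8 = -120;
    try expect(b -| 20 == -128);

    // *| clamps multiplication the same way.
    const c: u8 = 200;
    try expect(c *| 2 == 255);
}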
@@ -7235,15 +7232,6 @@ fn readFile(allocator: *Allocator, filename: []const u8) ![]u8 {
     If no overflow or underflow occurs, returns {#syntax#}false{#endsyntax#}.
     </p>
     {#header_close#}
-
-    {#header_open|@addWithSaturation#}
-    <pre>{#syntax#}@addWithSaturation(a: T, b: T) T{#endsyntax#}</pre>
-    <p>
-    Returns {#syntax#}a + b{#endsyntax#}. The result will be clamped between the type maximum and minimum.
-    </p>
-    <p>
-    The syntax {#syntax#}a +| b{#endsyntax#} is equivalent to calling {#syntax#}@addWithSaturation(a, b){#endsyntax#}.
-    </p>
-    {#header_close#}

     {#header_open|@alignCast#}
     <pre>{#syntax#}@alignCast(comptime alignment: u29, ptr: anytype) anytype{#endsyntax#}</pre>

@@ -8365,21 +8353,6 @@ test "@wasmMemoryGrow" {
     {#header_close#}
-
-    {#header_open|@mulWithSaturation#}
-    <pre>{#syntax#}@mulWithSaturation(a: T, b: T) T{#endsyntax#}</pre>
-    <p>
-    Returns {#syntax#}a * b{#endsyntax#}. The result will be clamped between the type maximum and minimum.
-    </p>
-    <p>
-    The syntax {#syntax#}a *| b{#endsyntax#} is equivalent to calling {#syntax#}@mulWithSaturation(a, b){#endsyntax#}.
-    </p>
-    <p>
-    NOTE: Currently there is a bug in the llvm.smul.fix.sat intrinsic which affects {#syntax#}@mulWithSaturation{#endsyntax#} of signed integers.
-    This may result in an incorrect sign bit when there is overflow. This will be fixed in zig's 0.9.0 release.
-    Check this issue for more information.
-    </p>
-    {#header_close#}

     {#header_open|@panic#}
     <pre>{#syntax#}@panic(message: []const u8) noreturn{#endsyntax#}</pre>
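To make the removed phrase "clamped between the type maximum and minimum" concrete for signed operands, here is a small illustrative Zig sketch (not part of the patch; the values are arbitrary):

const std = @import("std");
const expect = std.testing.expect;

test "signed saturating multiplication clamps at both bounds" {
    const lo: i8 = -100;
    const hi: i8 = 100;
    // -200 is below i8's minimum of -128, so the result saturates to -128.
    try expect(lo *| 2 == -128);
    // 200 is above i8's maximum of 127, so the result saturates to 127.
    try expect(hi *| 2 == 127);
}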

@@ -8597,14 +8570,16 @@ test "@setRuntimeSafety" {
     {#header_open|@shlExact#}
     <pre>{#syntax#}@shlExact(value: T, shift_amt: Log2T) T{#endsyntax#}</pre>
     <p>
-    Performs the left shift operation ({#syntax#}<<{#endsyntax#}). Caller guarantees
-    that the shift will not shift any 1 bits out.
+    Performs the left shift operation ({#syntax#}<<{#endsyntax#}).
+    For unsigned integers, the result is {#link|undefined#} if any 1 bits
+    are shifted out. For signed integers, the result is {#link|undefined#} if
+    any bits that disagree with the resultant sign bit are shifted out.
     </p>
     <p>
     The type of {#syntax#}shift_amt{#endsyntax#} is an unsigned integer with {#syntax#}log2(T.bit_count){#endsyntax#} bits. This is because {#syntax#}shift_amt >= T.bit_count{#endsyntax#} is undefined behavior.
     </p>
-    {#see_also|@shrExact|@shlWithOverflow|@shlWithSaturation#}
+    {#see_also|@shrExact|@shlWithOverflow#}
     {#header_close#}

     {#header_open|@shlWithOverflow#}
@@ -8618,23 +8593,9 @@ test "@setRuntimeSafety" {
     The type of {#syntax#}shift_amt{#endsyntax#} is an unsigned integer with {#syntax#}log2(T.bit_count){#endsyntax#} bits. This is because {#syntax#}shift_amt >= T.bit_count{#endsyntax#} is undefined behavior.
     </p>
-    {#see_also|@shlExact|@shrExact|@shlWithSaturation#}
+    {#see_also|@shlExact|@shrExact#}
     {#header_close#}
-
-    {#header_open|@shlWithSaturation#}
-    <pre>{#syntax#}@shlWithSaturation(a: T, shift_amt: T) T{#endsyntax#}</pre>
-    <p>
-    Returns {#syntax#}a << b{#endsyntax#}. The result will be clamped between type minimum and maximum.
-    </p>
-    <p>
-    The syntax {#syntax#}a <<| b{#endsyntax#} is equivalent to calling {#syntax#}@shlWithSaturation(a, b){#endsyntax#}.
-    </p>
-    <p>
-    Unlike other @shl builtins, shift_amt doesn't need to be a Log2T as saturated overshifting is well defined.
-    </p>
-    {#see_also|@shlExact|@shrExact|@shlWithOverflow#}
-    {#header_close#}

     {#header_open|@shrExact#}
     <pre>{#syntax#}@shrExact(value: T, shift_amt: Log2T) T{#endsyntax#}</pre>
@@ -8645,7 +8606,7 @@ test "@setRuntimeSafety" {
     The type of {#syntax#}shift_amt{#endsyntax#} is an unsigned integer with {#syntax#}log2(T.bit_count){#endsyntax#} bits. This is because {#syntax#}shift_amt >= T.bit_count{#endsyntax#} is undefined behavior.
     </p>
-    {#see_also|@shlExact|@shlWithOverflow|@shlWithSaturation#}
+    {#see_also|@shlExact|@shlWithOverflow#}
     {#header_close#}

     {#header_open|@shuffle#}
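As a quick illustration of the clarified @shlExact rule and of the saturating shift documented above, here is a minimal Zig sketch (illustrative only, not taken from the patch; the test name and values are invented):

const std = @import("std");
const expect = std.testing.expect;

test "saturating and exact left shifts" {
    // <<| saturates: shifting set bits out clamps to the type maximum.
    const x: u8 = 0xC0;
    try expect(x <<| 2 == 255);

    // @shlExact is only defined when no 1 bits are shifted out.
    const y: u8 = 0x03;
    try expect(@shlExact(y, 2) == 0x0C);
}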

@@ -8945,16 +8906,6 @@ fn doTheTest() !void {
     {#header_close#}
-
-    {#header_open|@subWithSaturation#}
-    <pre>{#syntax#}@subWithSaturation(a: T, b: T) T{#endsyntax#}</pre>
-    <p>
-    Returns {#syntax#}a - b{#endsyntax#}. The result will be clamped between the type maximum and minimum.
-    </p>
-    <p>
-    The syntax {#syntax#}a -| b{#endsyntax#} is equivalent to calling {#syntax#}@subWithSaturation(a, b){#endsyntax#}.
-    </p>
-    {#header_close#}

     {#header_open|@tagName#}
     <pre>{#syntax#}@tagName(value: anytype) [:0]const u8{#endsyntax#}</pre>

    diff --git a/lib/std/zig/Ast.zig b/lib/std/zig/Ast.zig index b69da459d3..4ee3a45221 100644 --- a/lib/std/zig/Ast.zig +++ b/lib/std/zig/Ast.zig @@ -395,9 +395,9 @@ pub fn firstToken(tree: Tree, node: Node.Index) TokenIndex { .assign_mod, .assign_add, .assign_sub, - .assign_bit_shift_left, - .assign_bit_shift_left_sat, - .assign_bit_shift_right, + .assign_shl, + .assign_shl_sat, + .assign_shr, .assign_bit_and, .assign_bit_xor, .assign_bit_or, @@ -422,9 +422,9 @@ pub fn firstToken(tree: Tree, node: Node.Index) TokenIndex { .sub_wrap, .add_sat, .sub_sat, - .bit_shift_left, - .bit_shift_left_sat, - .bit_shift_right, + .shl, + .shl_sat, + .shr, .bit_and, .bit_xor, .bit_or, @@ -659,9 +659,9 @@ pub fn lastToken(tree: Tree, node: Node.Index) TokenIndex { .assign_mod, .assign_add, .assign_sub, - .assign_bit_shift_left, - .assign_bit_shift_left_sat, - .assign_bit_shift_right, + .assign_shl, + .assign_shl_sat, + .assign_shr, .assign_bit_and, .assign_bit_xor, .assign_bit_or, @@ -686,9 +686,9 @@ pub fn lastToken(tree: Tree, node: Node.Index) TokenIndex { .sub_wrap, .add_sat, .sub_sat, - .bit_shift_left, - .bit_shift_left_sat, - .bit_shift_right, + .shl, + .shl_sat, + .shr, .bit_and, .bit_xor, .bit_or, @@ -2540,11 +2540,11 @@ pub const Node = struct { /// `lhs -= rhs`. main_token is op. assign_sub, /// `lhs <<= rhs`. main_token is op. - assign_bit_shift_left, + assign_shl, /// `lhs <<|= rhs`. main_token is op. - assign_bit_shift_left_sat, + assign_shl_sat, /// `lhs >>= rhs`. main_token is op. - assign_bit_shift_right, + assign_shr, /// `lhs &= rhs`. main_token is op. assign_bit_and, /// `lhs ^= rhs`. main_token is op. @@ -2594,11 +2594,11 @@ pub const Node = struct { /// `lhs -| rhs`. main_token is the `-|`. sub_sat, /// `lhs << rhs`. main_token is the `<<`. - bit_shift_left, + shl, /// `lhs <<| rhs`. main_token is the `<<|`. - bit_shift_left_sat, + shl_sat, /// `lhs >> rhs`. main_token is the `>>`. - bit_shift_right, + shr, /// `lhs & rhs`. main_token is the `&`. bit_and, /// `lhs ^ rhs`. main_token is the `^`. 
diff --git a/lib/std/zig/parse.zig b/lib/std/zig/parse.zig index a2780b5225..021b028455 100644 --- a/lib/std/zig/parse.zig +++ b/lib/std/zig/parse.zig @@ -1268,9 +1268,9 @@ const Parser = struct { .percent_equal => .assign_mod, .plus_equal => .assign_add, .minus_equal => .assign_sub, - .angle_bracket_angle_bracket_left_equal => .assign_bit_shift_left, - .angle_bracket_angle_bracket_left_pipe_equal => .assign_bit_shift_left_sat, - .angle_bracket_angle_bracket_right_equal => .assign_bit_shift_right, + .angle_bracket_angle_bracket_left_equal => .assign_shl, + .angle_bracket_angle_bracket_left_pipe_equal => .assign_shl_sat, + .angle_bracket_angle_bracket_right_equal => .assign_shr, .ampersand_equal => .assign_bit_and, .caret_equal => .assign_bit_xor, .pipe_equal => .assign_bit_or, @@ -1346,9 +1346,9 @@ const Parser = struct { .keyword_orelse = .{ .prec = 40, .tag = .@"orelse" }, .keyword_catch = .{ .prec = 40, .tag = .@"catch" }, - .angle_bracket_angle_bracket_left = .{ .prec = 50, .tag = .bit_shift_left }, - .angle_bracket_angle_bracket_left_pipe = .{ .prec = 50, .tag = .bit_shift_left_sat }, - .angle_bracket_angle_bracket_right = .{ .prec = 50, .tag = .bit_shift_right }, + .angle_bracket_angle_bracket_left = .{ .prec = 50, .tag = .shl }, + .angle_bracket_angle_bracket_left_pipe = .{ .prec = 50, .tag = .shl_sat }, + .angle_bracket_angle_bracket_right = .{ .prec = 50, .tag = .shr }, .plus = .{ .prec = 60, .tag = .add }, .minus = .{ .prec = 60, .tag = .sub }, diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig index 47f019d1cf..4357960251 100644 --- a/lib/std/zig/render.zig +++ b/lib/std/zig/render.zig @@ -339,9 +339,9 @@ fn renderExpression(gpa: *Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, .assign, .assign_bit_and, .assign_bit_or, - .assign_bit_shift_left, - .assign_bit_shift_left_sat, - .assign_bit_shift_right, + .assign_shl, + .assign_shl_sat, + .assign_shr, .assign_bit_xor, .assign_div, .assign_sub, @@ -357,9 +357,9 @@ fn renderExpression(gpa: *Allocator, ais: *Ais, tree: Ast, node: Ast.Node.Index, .bang_equal, .bit_and, .bit_or, - .bit_shift_left, - .bit_shift_left_sat, - .bit_shift_right, + .shl, + .shl_sat, + .shr, .bit_xor, .bool_and, .bool_or, @@ -2528,8 +2528,8 @@ fn nodeCausesSliceOpSpace(tag: Ast.Node.Tag) bool { .assign, .assign_bit_and, .assign_bit_or, - .assign_bit_shift_left, - .assign_bit_shift_right, + .assign_shl, + .assign_shr, .assign_bit_xor, .assign_div, .assign_sub, @@ -2542,8 +2542,8 @@ fn nodeCausesSliceOpSpace(tag: Ast.Node.Tag) bool { .bang_equal, .bit_and, .bit_or, - .bit_shift_left, - .bit_shift_right, + .shl, + .shr, .bit_xor, .bool_and, .bool_or, diff --git a/src/Air.zig b/src/Air.zig index 00f223ad21..f05c18e87a 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -48,7 +48,7 @@ pub const Inst = struct { /// Both operands are guaranteed to be the same type, and the result type /// is the same as both operands. /// Uses the `bin_op` field. - addsat, + add_sat, /// Float or integer subtraction. For integers, wrapping is undefined behavior. /// Both operands are guaranteed to be the same type, and the result type /// is the same as both operands. @@ -63,7 +63,7 @@ pub const Inst = struct { /// Both operands are guaranteed to be the same type, and the result type /// is the same as both operands. /// Uses the `bin_op` field. - subsat, + sub_sat, /// Float or integer multiplication. For integers, wrapping is undefined behavior. /// Both operands are guaranteed to be the same type, and the result type /// is the same as both operands. 
@@ -78,7 +78,7 @@ pub const Inst = struct { /// Both operands are guaranteed to be the same type, and the result type /// is the same as both operands. /// Uses the `bin_op` field. - mulsat, + mul_sat, /// Integer or float division. For integers, wrapping is undefined behavior. /// Both operands are guaranteed to be the same type, and the result type /// is the same as both operands. @@ -125,6 +125,11 @@ pub const Inst = struct { /// Shift left. `<<` /// Uses the `bin_op` field. shl, + /// Shift left; For unsigned integers, the shift produces a poison value if it shifts + /// out any non-zero bits. For signed integers, the shift produces a poison value if + /// it shifts out any bits that disagree with the resultant sign bit. + /// Uses the `bin_op` field. + shl_exact, /// Shift left saturating. `<<|` /// Uses the `bin_op` field. shl_sat, @@ -586,13 +591,13 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .add, .addwrap, - .addsat, + .add_sat, .sub, .subwrap, - .subsat, + .sub_sat, .mul, .mulwrap, - .mulsat, + .mul_sat, .div, .rem, .mod, @@ -603,6 +608,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .ptr_sub, .shr, .shl, + .shl_exact, .shl_sat, => return air.typeOf(datas[inst].bin_op.lhs), diff --git a/src/AstGen.zig b/src/AstGen.zig index 92087a7719..847860630a 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -317,9 +317,9 @@ fn lvalExpr(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Ins .assign, .assign_bit_and, .assign_bit_or, - .assign_bit_shift_left, - .assign_bit_shift_left_sat, - .assign_bit_shift_right, + .assign_shl, + .assign_shl_sat, + .assign_shr, .assign_bit_xor, .assign_div, .assign_sub, @@ -345,9 +345,9 @@ fn lvalExpr(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Ins .mod, .bit_and, .bit_or, - .bit_shift_left, - .bit_shift_left_sat, - .bit_shift_right, + .shl, + .shl_sat, + .shr, .bit_xor, .bang_equal, .equal_equal, @@ -530,15 +530,15 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerEr return rvalue(gz, rl, .void_value, node); }, - .assign_bit_shift_left => { + .assign_shl => { try assignShift(gz, scope, node, .shl); return rvalue(gz, rl, .void_value, node); }, - .assign_bit_shift_left_sat => { - try assignOpExt(gz, scope, node, .shl_with_saturation, Zir.Inst.SaturatingArithmetic); + .assign_shl_sat => { + try assignShiftSat(gz, scope, node); return rvalue(gz, rl, .void_value, node); }, - .assign_bit_shift_right => { + .assign_shr => { try assignShift(gz, scope, node, .shr); return rvalue(gz, rl, .void_value, node); }, @@ -568,7 +568,7 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerEr return rvalue(gz, rl, .void_value, node); }, .assign_sub_sat => { - try assignOpExt(gz, scope, node, .sub_with_saturation, Zir.Inst.SaturatingArithmetic); + try assignOp(gz, scope, node, .sub_sat); return rvalue(gz, rl, .void_value, node); }, .assign_mod => { @@ -584,7 +584,7 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerEr return rvalue(gz, rl, .void_value, node); }, .assign_add_sat => { - try assignOpExt(gz, scope, node, .add_with_saturation, Zir.Inst.SaturatingArithmetic); + try assignOp(gz, scope, node, .add_sat); return rvalue(gz, rl, .void_value, node); }, .assign_mul => { @@ -596,28 +596,27 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerEr return rvalue(gz, rl, .void_value, node); }, .assign_mul_sat => { - try assignOpExt(gz, scope, node, .mul_with_saturation, 
Zir.Inst.SaturatingArithmetic); + try assignOp(gz, scope, node, .mul_sat); return rvalue(gz, rl, .void_value, node); }, // zig fmt: off - .bit_shift_left => return shiftOp(gz, scope, rl, node, node_datas[node].lhs, node_datas[node].rhs, .shl), - .bit_shift_right => return shiftOp(gz, scope, rl, node, node_datas[node].lhs, node_datas[node].rhs, .shr), + .shl => return shiftOp(gz, scope, rl, node, node_datas[node].lhs, node_datas[node].rhs, .shl), + .shr => return shiftOp(gz, scope, rl, node, node_datas[node].lhs, node_datas[node].rhs, .shr), .add => return simpleBinOp(gz, scope, rl, node, .add), .add_wrap => return simpleBinOp(gz, scope, rl, node, .addwrap), + .add_sat => return simpleBinOp(gz, scope, rl, node, .add_sat), .sub => return simpleBinOp(gz, scope, rl, node, .sub), .sub_wrap => return simpleBinOp(gz, scope, rl, node, .subwrap), + .sub_sat => return simpleBinOp(gz, scope, rl, node, .sub_sat), .mul => return simpleBinOp(gz, scope, rl, node, .mul), .mul_wrap => return simpleBinOp(gz, scope, rl, node, .mulwrap), + .mul_sat => return simpleBinOp(gz, scope, rl, node, .mul_sat), .div => return simpleBinOp(gz, scope, rl, node, .div), .mod => return simpleBinOp(gz, scope, rl, node, .mod_rem), + .shl_sat => return simpleBinOp(gz, scope, rl, node, .shl_sat), - .add_sat => return simpleBinOpExt(gz, scope, rl, node, node_datas[node].lhs, node_datas[node].rhs, .add_with_saturation, Zir.Inst.SaturatingArithmetic), - .sub_sat => return simpleBinOpExt(gz, scope, rl, node, node_datas[node].lhs, node_datas[node].rhs, .sub_with_saturation, Zir.Inst.SaturatingArithmetic), - .mul_sat => return simpleBinOpExt(gz, scope, rl, node, node_datas[node].lhs, node_datas[node].rhs, .mul_with_saturation, Zir.Inst.SaturatingArithmetic), - .bit_shift_left_sat => return simpleBinOpExt(gz, scope, rl, node, node_datas[node].lhs, node_datas[node].rhs, .shl_with_saturation, Zir.Inst.SaturatingArithmetic), - .bit_and => { const current_ampersand_token = main_tokens[node]; if (token_tags[current_ampersand_token + 1] == .ampersand) { @@ -1928,8 +1927,8 @@ fn blockExprStmts(gz: *GenZir, parent_scope: *Scope, statements: []const Ast.Nod .assign => try assign(gz, scope, statement), - .assign_bit_shift_left => try assignShift(gz, scope, statement, .shl), - .assign_bit_shift_right => try assignShift(gz, scope, statement, .shr), + .assign_shl => try assignShift(gz, scope, statement, .shl), + .assign_shr => try assignShift(gz, scope, statement, .shr), .assign_bit_and => try assignOp(gz, scope, statement, .bit_and), .assign_bit_or => try assignOp(gz, scope, statement, .bit_or), @@ -1979,6 +1978,7 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) Inner // ZIR instructions that might be a type other than `noreturn` or `void`. 
.add, .addwrap, + .add_sat, .param, .param_comptime, .param_anytype, @@ -2045,12 +2045,15 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) Inner .mod_rem, .mul, .mulwrap, + .mul_sat, .ref, .shl, + .shl_sat, .shr, .str, .sub, .subwrap, + .sub_sat, .negate, .negate_wrap, .typeof, @@ -2715,55 +2718,30 @@ fn assignOp( _ = try gz.addBin(.store, lhs_ptr, result); } -fn simpleBinOpExt( - gz: *GenZir, - scope: *Scope, - rl: ResultLoc, - infix_node: Ast.Node.Index, - lhs_node: Ast.Node.Index, - rhs_node: Ast.Node.Index, - tag: Zir.Inst.Extended, - comptime T: type, -) InnerError!Zir.Inst.Ref { - const lhs = try expr(gz, scope, .none, lhs_node); - const rhs = try expr(gz, scope, .none, rhs_node); - const result = try gz.addExtendedPayload(tag, T{ - .node = gz.nodeIndexToRelative(infix_node), - .lhs = lhs, - .rhs = rhs, - }); - return rvalue(gz, rl, result, infix_node); -} - -fn assignOpExt( +fn assignShift( gz: *GenZir, scope: *Scope, infix_node: Ast.Node.Index, - op_inst_tag: Zir.Inst.Extended, - comptime T: type, + op_inst_tag: Zir.Inst.Tag, ) InnerError!void { + try emitDbgNode(gz, infix_node); const astgen = gz.astgen; const tree = astgen.tree; const node_datas = tree.nodes.items(.data); const lhs_ptr = try lvalExpr(gz, scope, node_datas[infix_node].lhs); const lhs = try gz.addUnNode(.load, lhs_ptr, infix_node); - const lhs_type = try gz.addUnNode(.typeof, lhs, infix_node); - const rhs = try expr(gz, scope, .{ .coerced_ty = lhs_type }, node_datas[infix_node].rhs); - const result = try gz.addExtendedPayload(op_inst_tag, T{ - .node = gz.nodeIndexToRelative(infix_node), + const rhs_type = try gz.addUnNode(.typeof_log2_int_type, lhs, infix_node); + const rhs = try expr(gz, scope, .{ .ty = rhs_type }, node_datas[infix_node].rhs); + + const result = try gz.addPlNode(op_inst_tag, infix_node, Zir.Inst.Bin{ .lhs = lhs, .rhs = rhs, }); _ = try gz.addBin(.store, lhs_ptr, result); } -fn assignShift( - gz: *GenZir, - scope: *Scope, - infix_node: Ast.Node.Index, - op_inst_tag: Zir.Inst.Tag, -) InnerError!void { +fn assignShiftSat(gz: *GenZir, scope: *Scope, infix_node: Ast.Node.Index) InnerError!void { try emitDbgNode(gz, infix_node); const astgen = gz.astgen; const tree = astgen.tree; @@ -2771,10 +2749,10 @@ fn assignShift( const lhs_ptr = try lvalExpr(gz, scope, node_datas[infix_node].lhs); const lhs = try gz.addUnNode(.load, lhs_ptr, infix_node); - const rhs_type = try gz.addUnNode(.typeof_log2_int_type, lhs, infix_node); - const rhs = try expr(gz, scope, .{ .ty = rhs_type }, node_datas[infix_node].rhs); + // Saturating shift-left allows any integer type for both the LHS and RHS. 
+ const rhs = try expr(gz, scope, .none, node_datas[infix_node].rhs); - const result = try gz.addPlNode(op_inst_tag, infix_node, Zir.Inst.Bin{ + const result = try gz.addPlNode(.shl_sat, infix_node, Zir.Inst.Bin{ .lhs = lhs, .rhs = rhs, }); @@ -7556,11 +7534,6 @@ fn builtinCall( return rvalue(gz, rl, result, node); }, - .add_with_saturation => return saturatingArithmetic(gz, scope, rl, node, params, .add_with_saturation), - .sub_with_saturation => return saturatingArithmetic(gz, scope, rl, node, params, .sub_with_saturation), - .mul_with_saturation => return saturatingArithmetic(gz, scope, rl, node, params, .mul_with_saturation), - .shl_with_saturation => return saturatingArithmetic(gz, scope, rl, node, params, .shl_with_saturation), - .atomic_load => { const int_type = try typeExpr(gz, scope, params[0]); // TODO allow this pointer type to be volatile @@ -7955,24 +7928,6 @@ fn overflowArithmetic( return rvalue(gz, rl, result, node); } -fn saturatingArithmetic( - gz: *GenZir, - scope: *Scope, - rl: ResultLoc, - node: Ast.Node.Index, - params: []const Ast.Node.Index, - tag: Zir.Inst.Extended, -) InnerError!Zir.Inst.Ref { - const lhs = try expr(gz, scope, .none, params[0]); - const rhs = try expr(gz, scope, .none, params[1]); - const result = try gz.addExtendedPayload(tag, Zir.Inst.SaturatingArithmetic{ - .node = gz.nodeIndexToRelative(node), - .lhs = lhs, - .rhs = rhs, - }); - return rvalue(gz, rl, result, node); -} - fn callExpr( gz: *GenZir, scope: *Scope, @@ -8198,9 +8153,9 @@ fn nodeMayNeedMemoryLocation(tree: *const Ast, start_node: Ast.Node.Index) bool .assign, .assign_bit_and, .assign_bit_or, - .assign_bit_shift_left, - .assign_bit_shift_left_sat, - .assign_bit_shift_right, + .assign_shl, + .assign_shl_sat, + .assign_shr, .assign_bit_xor, .assign_div, .assign_sub, @@ -8216,9 +8171,9 @@ fn nodeMayNeedMemoryLocation(tree: *const Ast, start_node: Ast.Node.Index) bool .bang_equal, .bit_and, .bit_or, - .bit_shift_left, - .bit_shift_left_sat, - .bit_shift_right, + .shl, + .shl_sat, + .shr, .bit_xor, .bool_and, .bool_or, @@ -8439,9 +8394,9 @@ fn nodeMayEvalToError(tree: *const Ast, start_node: Ast.Node.Index) enum { never .assign, .assign_bit_and, .assign_bit_or, - .assign_bit_shift_left, - .assign_bit_shift_left_sat, - .assign_bit_shift_right, + .assign_shl, + .assign_shl_sat, + .assign_shr, .assign_bit_xor, .assign_div, .assign_sub, @@ -8457,9 +8412,9 @@ fn nodeMayEvalToError(tree: *const Ast, start_node: Ast.Node.Index) enum { never .bang_equal, .bit_and, .bit_or, - .bit_shift_left, - .bit_shift_left_sat, - .bit_shift_right, + .shl, + .shl_sat, + .shr, .bit_xor, .bool_and, .bool_or, @@ -8619,9 +8574,9 @@ fn nodeImpliesRuntimeBits(tree: *const Ast, start_node: Ast.Node.Index) bool { .assign, .assign_bit_and, .assign_bit_or, - .assign_bit_shift_left, - .assign_bit_shift_left_sat, - .assign_bit_shift_right, + .assign_shl, + .assign_shl_sat, + .assign_shr, .assign_bit_xor, .assign_div, .assign_sub, @@ -8637,9 +8592,9 @@ fn nodeImpliesRuntimeBits(tree: *const Ast, start_node: Ast.Node.Index) bool { .bang_equal, .bit_and, .bit_or, - .bit_shift_left, - .bit_shift_left_sat, - .bit_shift_right, + .shl, + .shl_sat, + .shr, .bit_xor, .bool_and, .bool_or, diff --git a/src/BuiltinFn.zig b/src/BuiltinFn.zig index e415d27a3a..8f23ec86d7 100644 --- a/src/BuiltinFn.zig +++ b/src/BuiltinFn.zig @@ -2,7 +2,6 @@ const std = @import("std"); pub const Tag = enum { add_with_overflow, - add_with_saturation, align_cast, align_of, as, @@ -66,7 +65,6 @@ pub const Tag = enum { wasm_memory_grow, mod, 
mul_with_overflow, - mul_with_saturation, panic, pop_count, ptr_cast, @@ -81,12 +79,10 @@ pub const Tag = enum { set_runtime_safety, shl_exact, shl_with_overflow, - shl_with_saturation, shr_exact, shuffle, size_of, splat, - sub_with_saturation, reduce, src, sqrt, @@ -531,34 +527,6 @@ pub const list = list: { .param_count = 2, }, }, - .{ - "@addWithSaturation", - .{ - .tag = .add_with_saturation, - .param_count = 2, - }, - }, - .{ - "@subWithSaturation", - .{ - .tag = .sub_with_saturation, - .param_count = 2, - }, - }, - .{ - "@mulWithSaturation", - .{ - .tag = .mul_with_saturation, - .param_count = 2, - }, - }, - .{ - "@shlWithSaturation", - .{ - .tag = .shl_with_saturation, - .param_count = 2, - }, - }, .{ "@memcpy", .{ diff --git a/src/Liveness.zig b/src/Liveness.zig index c34153b76f..93f28ad7b2 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -226,13 +226,13 @@ fn analyzeInst( switch (inst_tags[inst]) { .add, .addwrap, - .addsat, + .add_sat, .sub, .subwrap, - .subsat, + .sub_sat, .mul, .mulwrap, - .mulsat, + .mul_sat, .div, .rem, .mod, @@ -255,6 +255,7 @@ fn analyzeInst( .ptr_elem_val, .ptr_ptr_elem_val, .shl, + .shl_exact, .shl_sat, .shr, .atomic_store_unordered, diff --git a/src/Sema.zig b/src/Sema.zig index be10b6d663..f106d7ea9e 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -246,7 +246,6 @@ pub fn analyzeBody( .ptr_type_simple => try sema.zirPtrTypeSimple(block, inst), .ref => try sema.zirRef(block, inst), .ret_err_value_code => try sema.zirRetErrValueCode(block, inst), - .shl => try sema.zirShl(block, inst), .shr => try sema.zirShr(block, inst), .slice_end => try sema.zirSliceEnd(block, inst), .slice_sentinel => try sema.zirSliceSentinel(block, inst), @@ -319,7 +318,6 @@ pub fn analyzeBody( .div_exact => try sema.zirDivExact(block, inst), .div_floor => try sema.zirDivFloor(block, inst), .div_trunc => try sema.zirDivTrunc(block, inst), - .shl_exact => try sema.zirShlExact(block, inst), .shr_exact => try sema.zirShrExact(block, inst), .bit_offset_of => try sema.zirBitOffsetOf(block, inst), .offset_of => try sema.zirOffsetOf(block, inst), @@ -363,14 +361,21 @@ pub fn analyzeBody( .add => try sema.zirArithmetic(block, inst, .add), .addwrap => try sema.zirArithmetic(block, inst, .addwrap), + .add_sat => try sema.zirArithmetic(block, inst, .add_sat), .div => try sema.zirArithmetic(block, inst, .div), .mod_rem => try sema.zirArithmetic(block, inst, .mod_rem), .mod => try sema.zirArithmetic(block, inst, .mod), .rem => try sema.zirArithmetic(block, inst, .rem), .mul => try sema.zirArithmetic(block, inst, .mul), .mulwrap => try sema.zirArithmetic(block, inst, .mulwrap), + .mul_sat => try sema.zirArithmetic(block, inst, .mul_sat), .sub => try sema.zirArithmetic(block, inst, .sub), .subwrap => try sema.zirArithmetic(block, inst, .subwrap), + .sub_sat => try sema.zirArithmetic(block, inst, .sub_sat), + + .shl => try sema.zirShl(block, inst, .shl), + .shl_exact => try sema.zirShl(block, inst, .shl_exact), + .shl_sat => try sema.zirShl(block, inst, .shl_sat), // Instructions that we know to *always* be noreturn based solely on their tag. 
// These functions match the return type of analyzeBody so that we can @@ -694,11 +699,6 @@ fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr .c_define => return sema.zirCDefine( block, extended), .wasm_memory_size => return sema.zirWasmMemorySize( block, extended), .wasm_memory_grow => return sema.zirWasmMemoryGrow( block, extended), - .add_with_saturation, - .sub_with_saturation, - .mul_with_saturation, - .shl_with_saturation, - => return sema.zirSatArithmetic( block, extended), // zig fmt: on } } @@ -5875,7 +5875,12 @@ fn zirRetErrValueCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Co return sema.mod.fail(&block.base, sema.src, "TODO implement zirRetErrValueCode", .{}); } -fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { +fn zirShl( + sema: *Sema, + block: *Scope.Block, + inst: Zir.Inst.Index, + air_tag: Air.Inst.Tag, +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5886,6 +5891,8 @@ fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!A const lhs = sema.resolveInst(extra.lhs); const rhs = sema.resolveInst(extra.rhs); + // TODO coerce rhs if air_tag is not shl_sat + const maybe_lhs_val = try sema.resolveMaybeUndefVal(block, lhs_src, lhs); const maybe_rhs_val = try sema.resolveMaybeUndefVal(block, rhs_src, rhs); @@ -5901,6 +5908,12 @@ fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!A return sema.addConstant(lhs_ty, lhs_val); } const val = try lhs_val.shl(rhs_val, sema.arena); + switch (air_tag) { + .shl_exact => return sema.mod.fail(&block.base, lhs_src, "TODO implement Sema for comptime shl_exact", .{}), + .shl_sat => return sema.mod.fail(&block.base, lhs_src, "TODO implement Sema for comptime shl_sat", .{}), + .shl => {}, + else => unreachable, + } return sema.addConstant(lhs_ty, val); } else rs: { if (maybe_rhs_val) |rhs_val| { @@ -5909,8 +5922,10 @@ fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!A break :rs lhs_src; }; + // TODO: insert runtime safety check for shl_exact + try sema.requireRuntimeBlock(block, runtime_src); - return block.addBinOp(.shl, lhs, rhs); + return block.addBinOp(air_tag, lhs, rhs); } fn zirShr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -6201,105 +6216,6 @@ fn zirOverflowArithmetic( return sema.mod.fail(&block.base, src, "TODO implement Sema.zirOverflowArithmetic", .{}); } -fn zirSatArithmetic( - sema: *Sema, - block: *Scope.Block, - extended: Zir.Inst.Extended.InstData, -) CompileError!Air.Inst.Ref { - const tracy = trace(@src()); - defer tracy.end(); - - const extra = sema.code.extraData(Zir.Inst.SaturatingArithmetic, extended.operand).data; - sema.src = .{ .node_offset_bin_op = extra.node }; - const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = extra.node }; - const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = extra.node }; - const lhs = sema.resolveInst(extra.lhs); - const rhs = sema.resolveInst(extra.rhs); - - return sema.analyzeSatArithmetic(block, lhs, rhs, sema.src, lhs_src, rhs_src, extended); -} - -fn analyzeSatArithmetic( - sema: *Sema, - block: *Scope.Block, - lhs: Air.Inst.Ref, - rhs: Air.Inst.Ref, - src: LazySrcLoc, - lhs_src: LazySrcLoc, - rhs_src: LazySrcLoc, - extended: Zir.Inst.Extended.InstData, -) CompileError!Air.Inst.Ref { - const lhs_ty = sema.typeOf(lhs); - const rhs_ty = sema.typeOf(rhs); - const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison(); - const rhs_zig_ty_tag = try 
rhs_ty.zigTypeTagOrPoison(); - if (lhs_zig_ty_tag == .Vector and rhs_zig_ty_tag == .Vector) { - if (lhs_ty.arrayLen() != rhs_ty.arrayLen()) { - return sema.mod.fail(&block.base, src, "vector length mismatch: {d} and {d}", .{ - lhs_ty.arrayLen(), rhs_ty.arrayLen(), - }); - } - return sema.mod.fail(&block.base, src, "TODO implement support for vectors in zirBinOp", .{}); - } else if (lhs_zig_ty_tag == .Vector or rhs_zig_ty_tag == .Vector) { - return sema.mod.fail(&block.base, src, "mixed scalar and vector operands to binary expression: '{}' and '{}'", .{ - lhs_ty, rhs_ty, - }); - } - - if (lhs_zig_ty_tag == .Pointer or rhs_zig_ty_tag == .Pointer) - return sema.mod.fail(&block.base, src, "TODO implement support for pointers in zirSatArithmetic", .{}); - - const instructions = &[_]Air.Inst.Ref{ lhs, rhs }; - const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{ .override = &[_]LazySrcLoc{ lhs_src, rhs_src } }); - const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); - const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); - - const scalar_type = if (resolved_type.zigTypeTag() == .Vector) - resolved_type.elemType() - else - resolved_type; - - const scalar_tag = scalar_type.zigTypeTag(); - - const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; - - if (!is_int) - return sema.mod.fail(&block.base, src, "invalid operands to binary expression: '{s}' and '{s}'", .{ - @tagName(lhs_zig_ty_tag), @tagName(rhs_zig_ty_tag), - }); - - if (try sema.resolveMaybeUndefVal(block, lhs_src, casted_lhs)) |lhs_val| { - if (try sema.resolveMaybeUndefVal(block, rhs_src, casted_rhs)) |rhs_val| { - if (lhs_val.isUndef() or rhs_val.isUndef()) { - return sema.addConstUndef(resolved_type); - } - // incase rhs is 0, simply return lhs without doing any calculations - if (rhs_val.compareWithZero(.eq)) { - switch (extended.opcode) { - .add_with_saturation, .sub_with_saturation => return sema.addConstant(scalar_type, lhs_val), - else => {}, - } - } - - return sema.mod.fail(&block.base, src, "TODO implement comptime saturating arithmetic for operand '{s}'", .{@tagName(extended.opcode)}); - } else { - try sema.requireRuntimeBlock(block, rhs_src); - } - } else { - try sema.requireRuntimeBlock(block, lhs_src); - } - - const air_tag: Air.Inst.Tag = switch (extended.opcode) { - .add_with_saturation => .addsat, - .sub_with_saturation => .subsat, - .mul_with_saturation => .mulsat, - .shl_with_saturation => .shl_sat, - else => return sema.mod.fail(&block.base, src, "TODO implement arithmetic for extended opcode '{s}'", .{@tagName(extended.opcode)}), - }; - - return block.addBinOp(air_tag, casted_lhs, casted_rhs); -} - fn analyzeArithmetic( sema: *Sema, block: *Scope.Block, @@ -6441,8 +6357,7 @@ fn analyzeArithmetic( }, .addwrap => { // Integers only; floats are checked above. - // If either of the operands are zero, then the other operand is - // returned, even if it is undefined. + // If either of the operands are zero, the other operand is returned. // If either of the operands are undefined, the result is undefined. if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef() and lhs_val.compareWithZero(.eq)) { @@ -6464,6 +6379,30 @@ fn analyzeArithmetic( } else break :rs .{ .src = lhs_src, .air_tag = .addwrap }; } else break :rs .{ .src = rhs_src, .air_tag = .addwrap }; }, + .add_sat => { + // For both integers and floats: + // If either of the operands are zero, then the other operand is returned. + // If either of the operands are undefined, the result is undefined. 
+ if (maybe_lhs_val) |lhs_val| { + if (!lhs_val.isUndef() and lhs_val.compareWithZero(.eq)) { + return casted_rhs; + } + } + if (maybe_rhs_val) |rhs_val| { + if (rhs_val.isUndef()) { + return sema.addConstUndef(scalar_type); + } + if (rhs_val.compareWithZero(.eq)) { + return casted_lhs; + } + if (maybe_lhs_val) |lhs_val| { + return sema.addConstant( + scalar_type, + try lhs_val.numberAddSat(rhs_val, scalar_type, sema.arena, target), + ); + } else break :rs .{ .src = lhs_src, .air_tag = .add_sat }; + } else break :rs .{ .src = rhs_src, .air_tag = .add_sat }; + }, .sub => { // For integers: // If the rhs is zero, then the other operand is @@ -6531,6 +6470,30 @@ fn analyzeArithmetic( } else break :rs .{ .src = rhs_src, .air_tag = .subwrap }; } else break :rs .{ .src = lhs_src, .air_tag = .subwrap }; }, + .sub_sat => { + // For both integers and floats: + // If the RHS is zero, result is LHS. + // If either of the operands are undefined, result is undefined. + if (maybe_rhs_val) |rhs_val| { + if (rhs_val.isUndef()) { + return sema.addConstUndef(scalar_type); + } + if (rhs_val.compareWithZero(.eq)) { + return casted_lhs; + } + } + if (maybe_lhs_val) |lhs_val| { + if (lhs_val.isUndef()) { + return sema.addConstUndef(scalar_type); + } + if (maybe_rhs_val) |rhs_val| { + return sema.addConstant( + scalar_type, + try lhs_val.numberSubSat(rhs_val, scalar_type, sema.arena, target), + ); + } else break :rs .{ .src = rhs_src, .air_tag = .sub_sat }; + } else break :rs .{ .src = lhs_src, .air_tag = .sub_sat }; + }, .div => { // For integers: // If the lhs is zero, then zero is returned regardless of rhs. @@ -6649,10 +6612,9 @@ fn analyzeArithmetic( }, .mulwrap => { // Integers only; floats are handled above. - // If either of the operands are zero, the result is zero. - // If either of the operands are one, the result is the other - // operand, even if it is undefined. - // If either of the operands are undefined, the result is undefined. + // If either of the operands are zero, result is zero. + // If either of the operands are one, result is the other operand. + // If either of the operands are undefined, result is undefined. if (maybe_lhs_val) |lhs_val| { if (!lhs_val.isUndef()) { if (lhs_val.compareWithZero(.eq)) { @@ -6684,6 +6646,42 @@ fn analyzeArithmetic( } else break :rs .{ .src = lhs_src, .air_tag = .mulwrap }; } else break :rs .{ .src = rhs_src, .air_tag = .mulwrap }; }, + .mul_sat => { + // For both integers and floats: + // If either of the operands are zero, result is zero. + // If either of the operands are one, result is the other operand. + // If either of the operands are undefined, result is undefined. 
+ if (maybe_lhs_val) |lhs_val| { + if (!lhs_val.isUndef()) { + if (lhs_val.compareWithZero(.eq)) { + return sema.addConstant(scalar_type, Value.zero); + } + if (lhs_val.compare(.eq, Value.one, scalar_type)) { + return casted_rhs; + } + } + } + if (maybe_rhs_val) |rhs_val| { + if (rhs_val.isUndef()) { + return sema.addConstUndef(scalar_type); + } + if (rhs_val.compareWithZero(.eq)) { + return sema.addConstant(scalar_type, Value.zero); + } + if (rhs_val.compare(.eq, Value.one, scalar_type)) { + return casted_lhs; + } + if (maybe_lhs_val) |lhs_val| { + if (lhs_val.isUndef()) { + return sema.addConstUndef(scalar_type); + } + return sema.addConstant( + scalar_type, + try lhs_val.numberMulSat(rhs_val, scalar_type, sema.arena, target), + ); + } else break :rs .{ .src = lhs_src, .air_tag = .mul_sat }; + } else break :rs .{ .src = rhs_src, .air_tag = .mul_sat }; + }, .mod_rem => { // For integers: // Either operand being undef is a compile error because there exists @@ -7933,7 +7931,7 @@ fn analyzeRet( fn floatOpAllowed(tag: Zir.Inst.Tag) bool { // extend this swich as additional operators are implemented return switch (tag) { - .add, .sub, .mul, .div, .mod, .rem, .mod_rem => true, + .add, .add_sat, .sub, .sub_sat, .mul, .mul_sat, .div, .mod, .rem, .mod_rem => true, else => false, }; } @@ -8600,12 +8598,6 @@ fn zirDivTrunc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivTrunc", .{}); } -fn zirShlExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { - const inst_data = sema.code.instructions.items(.data)[inst].pl_node; - const src = inst_data.src(); - return sema.mod.fail(&block.base, src, "TODO: Sema.zirShlExact", .{}); -} - fn zirShrExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); diff --git a/src/Zir.zig b/src/Zir.zig index 7c171e736d..1da53a526e 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -126,6 +126,64 @@ pub const Inst = struct { /// Twos complement wrapping integer addition. /// Uses the `pl_node` union field. Payload is `Bin`. addwrap, + /// Saturating addition. + /// Uses the `pl_node` union field. Payload is `Bin`. + add_sat, + /// Arithmetic subtraction. Asserts no integer overflow. + /// Uses the `pl_node` union field. Payload is `Bin`. + sub, + /// Twos complement wrapping integer subtraction. + /// Uses the `pl_node` union field. Payload is `Bin`. + subwrap, + /// Saturating subtraction. + /// Uses the `pl_node` union field. Payload is `Bin`. + sub_sat, + /// Arithmetic multiplication. Asserts no integer overflow. + /// Uses the `pl_node` union field. Payload is `Bin`. + mul, + /// Twos complement wrapping integer multiplication. + /// Uses the `pl_node` union field. Payload is `Bin`. + mulwrap, + /// Saturating multiplication. + /// Uses the `pl_node` union field. Payload is `Bin`. + mul_sat, + /// Implements the `@divExact` builtin. + /// Uses the `pl_node` union field with payload `Bin`. + div_exact, + /// Implements the `@divFloor` builtin. + /// Uses the `pl_node` union field with payload `Bin`. + div_floor, + /// Implements the `@divTrunc` builtin. + /// Uses the `pl_node` union field with payload `Bin`. + div_trunc, + /// Implements the `@mod` builtin. + /// Uses the `pl_node` union field with payload `Bin`. + mod, + /// Implements the `@rem` builtin. + /// Uses the `pl_node` union field with payload `Bin`. 
+ rem, + /// Ambiguously remainder division or modulus. If the computation would possibly have + /// a different value depending on whether the operation is remainder division or modulus, + /// a compile error is emitted. Otherwise the computation is performed. + /// Uses the `pl_node` union field. Payload is `Bin`. + mod_rem, + /// Integer shift-left. Zeroes are shifted in from the right hand side. + /// Uses the `pl_node` union field. Payload is `Bin`. + shl, + /// Implements the `@shlExact` builtin. + /// Uses the `pl_node` union field with payload `Bin`. + shl_exact, + /// Saturating shift-left. + /// Uses the `pl_node` union field. Payload is `Bin`. + shl_sat, + /// Integer shift-right. Arithmetic or logical depending on the signedness of + /// the integer type. + /// Uses the `pl_node` union field. Payload is `Bin`. + shr, + /// Implements the `@shrExact` builtin. + /// Uses the `pl_node` union field with payload `Bin`. + shr_exact, + /// Declares a parameter of the current function. Used for: /// * debug info /// * checking shadowing against declarations in the current namespace @@ -471,12 +529,6 @@ pub const Inst = struct { /// String Literal. Makes an anonymous Decl and then takes a pointer to it. /// Uses the `str` union field. str, - /// Arithmetic subtraction. Asserts no integer overflow. - /// Uses the `pl_node` union field. Payload is `Bin`. - sub, - /// Twos complement wrapping integer subtraction. - /// Uses the `pl_node` union field. Payload is `Bin`. - subwrap, /// Arithmetic negation. Asserts no integer overflow. /// Same as sub with a lhs of 0, split into a separate instruction to save memory. /// Uses `un_node`. @@ -802,46 +854,6 @@ pub const Inst = struct { /// Implements the `@bitReverse` builtin. Uses the `un_node` union field. bit_reverse, - /// Implements the `@divExact` builtin. - /// Uses the `pl_node` union field with payload `Bin`. - div_exact, - /// Implements the `@divFloor` builtin. - /// Uses the `pl_node` union field with payload `Bin`. - div_floor, - /// Implements the `@divTrunc` builtin. - /// Uses the `pl_node` union field with payload `Bin`. - div_trunc, - /// Implements the `@mod` builtin. - /// Uses the `pl_node` union field with payload `Bin`. - mod, - /// Implements the `@rem` builtin. - /// Uses the `pl_node` union field with payload `Bin`. - rem, - /// Ambiguously remainder division or modulus. If the computation would possibly have - /// a different value depending on whether the operation is remainder division or modulus, - /// a compile error is emitted. Otherwise the computation is performed. - /// Uses the `pl_node` union field. Payload is `Bin`. - mod_rem, - /// Arithmetic multiplication. Asserts no integer overflow. - /// Uses the `pl_node` union field. Payload is `Bin`. - mul, - /// Twos complement wrapping integer multiplication. - /// Uses the `pl_node` union field. Payload is `Bin`. - mulwrap, - - /// Integer shift-left. Zeroes are shifted in from the right hand side. - /// Uses the `pl_node` union field. Payload is `Bin`. - shl, - /// Implements the `@shlExact` builtin. - /// Uses the `pl_node` union field with payload `Bin`. - shl_exact, - /// Integer shift-right. Arithmetic or logical depending on the signedness of the integer type. - /// Uses the `pl_node` union field. Payload is `Bin`. - shr, - /// Implements the `@shrExact` builtin. - /// Uses the `pl_node` union field with payload `Bin`. - shr_exact, - /// Implements the `@bitOffsetOf` builtin. /// Uses the `pl_node` union field with payload `Bin`. 
bit_offset_of, @@ -961,6 +973,7 @@ pub const Inst = struct { .param_anytype_comptime, .add, .addwrap, + .add_sat, .alloc, .alloc_mut, .alloc_comptime, @@ -1035,8 +1048,10 @@ pub const Inst = struct { .mod_rem, .mul, .mulwrap, + .mul_sat, .ref, .shl, + .shl_sat, .shr, .store, .store_node, @@ -1045,6 +1060,7 @@ pub const Inst = struct { .str, .sub, .subwrap, + .sub_sat, .negate, .negate_wrap, .typeof, @@ -1218,6 +1234,14 @@ pub const Inst = struct { break :list std.enums.directEnumArray(Tag, Data.FieldEnum, 0, .{ .add = .pl_node, .addwrap = .pl_node, + .add_sat = .pl_node, + .sub = .pl_node, + .subwrap = .pl_node, + .sub_sat = .pl_node, + .mul = .pl_node, + .mulwrap = .pl_node, + .mul_sat = .pl_node, + .param = .pl_tok, .param_comptime = .pl_tok, .param_anytype = .str_tok, @@ -1297,8 +1321,6 @@ pub const Inst = struct { .repeat_inline = .node, .merge_error_sets = .pl_node, .mod_rem = .pl_node, - .mul = .pl_node, - .mulwrap = .pl_node, .ref = .un_tok, .ret_node = .un_node, .ret_load = .un_node, @@ -1315,8 +1337,6 @@ pub const Inst = struct { .store_to_block_ptr = .bin, .store_to_inferred_ptr = .bin, .str = .str, - .sub = .pl_node, - .subwrap = .pl_node, .negate = .un_node, .negate_wrap = .un_node, .typeof = .un_node, @@ -1437,6 +1457,7 @@ pub const Inst = struct { .shl = .pl_node, .shl_exact = .pl_node, + .shl_sat = .pl_node, .shr = .pl_node, .shr_exact = .pl_node, @@ -1593,22 +1614,6 @@ pub const Inst = struct { wasm_memory_size, /// `operand` is payload index to `BinNode`. wasm_memory_grow, - /// Implements the `@addWithSaturation` builtin. - /// `operand` is payload index to `SaturatingArithmetic`. - /// `small` is unused. - add_with_saturation, - /// Implements the `@subWithSaturation` builtin. - /// `operand` is payload index to `SaturatingArithmetic`. - /// `small` is unused. - sub_with_saturation, - /// Implements the `@mulWithSaturation` builtin. - /// `operand` is payload index to `SaturatingArithmetic`. - /// `small` is unused. - mul_with_saturation, - /// Implements the `@shlWithSaturation` builtin. - /// `operand` is payload index to `SaturatingArithmetic`. - /// `small` is unused. 
- shl_with_saturation, pub const InstData = struct { opcode: Extended, @@ -2788,12 +2793,6 @@ pub const Inst = struct { ptr: Ref, }; - pub const SaturatingArithmetic = struct { - node: i32, - lhs: Ref, - rhs: Ref, - }; - pub const Cmpxchg = struct { ptr: Ref, expected_value: Ref, diff --git a/src/codegen.zig b/src/codegen.zig index a1f812388f..79105dc4a7 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -824,18 +824,20 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { switch (air_tags[inst]) { // zig fmt: off - .add, .ptr_add => try self.airAdd(inst), - .addwrap => try self.airAddWrap(inst), - .addsat => try self.airArithmeticOpSat(inst, "addsat"), - .sub, .ptr_sub => try self.airSub(inst), - .subwrap => try self.airSubWrap(inst), - .subsat => try self.airArithmeticOpSat(inst, "subsat"), - .mul => try self.airMul(inst), - .mulwrap => try self.airMulWrap(inst), - .mulsat => try self.airArithmeticOpSat(inst, "mulsat"), - .div => try self.airDiv(inst), - .rem => try self.airRem(inst), - .mod => try self.airMod(inst), + .add, .ptr_add => try self.airAdd(inst), + .addwrap => try self.airAddWrap(inst), + .add_sat => try self.airAddSat(inst), + .sub, .ptr_sub => try self.airSub(inst), + .subwrap => try self.airSubWrap(inst), + .sub_sat => try self.airSubSat(inst), + .mul => try self.airMul(inst), + .mulwrap => try self.airMulWrap(inst), + .mul_sat => try self.airMulSat(inst), + .div => try self.airDiv(inst), + .rem => try self.airRem(inst), + .mod => try self.airMod(inst), + .shl, .shl_exact => try self.airShl(inst), + .shl_sat => try self.airShlSat(inst), .cmp_lt => try self.airCmp(inst, .lt), .cmp_lte => try self.airCmp(inst, .lte), @@ -850,8 +852,6 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .bit_or => try self.airBitOr(inst), .xor => try self.airXor(inst), .shr => try self.airShr(inst), - .shl => try self.airShl(inst), - .shl_sat => try self.airArithmeticOpSat(inst, "shl_sat"), .alloc => try self.airAlloc(inst), .arg => try self.airArg(inst), @@ -1306,6 +1306,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } + fn airAddSat(self: *Self, inst: Air.Inst.Index) !void { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { + else => return self.fail("TODO implement add_sat for {}", .{self.target.cpu.arch}), + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); + } + fn airSub(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { @@ -1324,10 +1332,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn airArithmeticOpSat(self: *Self, inst: Air.Inst.Index, comptime name: []const u8) !void { + fn airSubSat(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { - else => return self.fail("TODO implement " ++ name ++ " for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement sub_sat for {}", .{self.target.cpu.arch}), }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -1350,6 +1358,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return 
self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } + fn airMulSat(self: *Self, inst: Air.Inst.Index) !void { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { + else => return self.fail("TODO implement mul_sat for {}", .{self.target.cpu.arch}), + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); + } + fn airDiv(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { @@ -1412,6 +1428,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } + fn airShlSat(self: *Self, inst: Air.Inst.Index) !void { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { + else => return self.fail("TODO implement shl_sat for {}", .{self.target.cpu.arch}), + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); + } + fn airShr(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 1afa81b70f..95ce95f2e5 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -883,25 +883,27 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO // TODO use a different strategy for add that communicates to the optimizer // that wrapping is UB. - .add, .ptr_add => try airBinOp( f, inst, " + "), - .addwrap => try airWrapOp(f, inst, " + ", "addw_"), - .addsat => return f.fail("TODO: C backend: implement codegen for addsat", .{}), + .add, .ptr_add => try airBinOp (f, inst, " + "), // TODO use a different strategy for sub that communicates to the optimizer // that wrapping is UB. - .sub, .ptr_sub => try airBinOp( f, inst, " - "), - .subwrap => try airWrapOp(f, inst, " - ", "subw_"), - .subsat => return f.fail("TODO: C backend: implement codegen for subsat", .{}), + .sub, .ptr_sub => try airBinOp (f, inst, " - "), // TODO use a different strategy for mul that communicates to the optimizer // that wrapping is UB. - .mul => try airBinOp( f, inst, " * "), - .mulwrap => try airWrapOp(f, inst, " * ", "mulw_"), - .mulsat => return f.fail("TODO: C backend: implement codegen for mulsat", .{}), + .mul => try airBinOp (f, inst, " * "), // TODO use a different strategy for div that communicates to the optimizer // that wrapping is UB. 
.div => try airBinOp( f, inst, " / "), .rem => try airBinOp( f, inst, " % "), - // TODO implement modulus division - .mod => try airBinOp( f, inst, " mod "), + .mod => try airBinOp( f, inst, " mod "), // TODO implement modulus division + + .addwrap => try airWrapOp(f, inst, " + ", "addw_"), + .subwrap => try airWrapOp(f, inst, " - ", "subw_"), + .mulwrap => try airWrapOp(f, inst, " * ", "mulw_"), + + .add_sat => try airSatOp(f, inst, "adds_"), + .sub_sat => try airSatOp(f, inst, "subs_"), + .mul_sat => try airSatOp(f, inst, "muls_"), + .shl_sat => try airSatOp(f, inst, "shls_"), .cmp_eq => try airBinOp(f, inst, " == "), .cmp_gt => try airBinOp(f, inst, " > "), @@ -911,18 +913,14 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO .cmp_neq => try airBinOp(f, inst, " != "), // bool_and and bool_or are non-short-circuit operations - .bool_and => try airBinOp(f, inst, " & "), - .bool_or => try airBinOp(f, inst, " | "), - .bit_and => try airBinOp(f, inst, " & "), - .bit_or => try airBinOp(f, inst, " | "), - .xor => try airBinOp(f, inst, " ^ "), - - .shr => try airBinOp(f, inst, " >> "), - .shl => try airBinOp(f, inst, " << "), - .shl_sat => return f.fail("TODO: C backend: implement codegen for mulsat", .{}), - - - .not => try airNot( f, inst), + .bool_and => try airBinOp(f, inst, " & "), + .bool_or => try airBinOp(f, inst, " | "), + .bit_and => try airBinOp(f, inst, " & "), + .bit_or => try airBinOp(f, inst, " | "), + .xor => try airBinOp(f, inst, " ^ "), + .shr => try airBinOp(f, inst, " >> "), + .shl, .shl_exact => try airBinOp(f, inst, " << "), + .not => try airNot (f, inst), .optional_payload => try airOptionalPayload(f, inst), .optional_payload_ptr => try airOptionalPayload(f, inst), @@ -1314,27 +1312,23 @@ fn airWrapOp( return ret; } -fn airSatOp( - o: *Object, - inst: Air.Inst.Index, - fn_op: [*:0]const u8, -) !CValue { - if (o.liveness.isUnused(inst)) +fn airSatOp(f: *Function, inst: Air.Inst.Index, fn_op: [*:0]const u8) !CValue { + if (f.liveness.isUnused(inst)) return CValue.none; - const bin_op = o.air.instructions.items(.data)[inst].bin_op; - const inst_ty = o.air.typeOfIndex(inst); - const int_info = inst_ty.intInfo(o.dg.module.getTarget()); + const bin_op = f.air.instructions.items(.data)[inst].bin_op; + const inst_ty = f.air.typeOfIndex(inst); + const int_info = inst_ty.intInfo(f.object.dg.module.getTarget()); const bits = int_info.bits; switch (bits) { 8, 16, 32, 64, 128 => {}, - else => return o.dg.fail("TODO: C backend: airSatOp for non power of 2 integers", .{}), + else => return f.object.dg.fail("TODO: C backend: airSatOp for non power of 2 integers", .{}), } // if it's an unsigned int with non-arbitrary bit size then we can just add if (bits > 64) { - return o.dg.fail("TODO: C backend: airSatOp for large integers", .{}); + return f.object.dg.fail("TODO: C backend: airSatOp for large integers", .{}); } var min_buf: [80]u8 = undefined; @@ -1382,11 +1376,11 @@ fn airSatOp( }, }; - const lhs = try o.resolveInst(bin_op.lhs); - const rhs = try o.resolveInst(bin_op.rhs); - const w = o.writer(); + const lhs = try f.resolveInst(bin_op.lhs); + const rhs = try f.resolveInst(bin_op.rhs); + const w = f.object.writer(); - const ret = try o.allocLocal(inst_ty, .Mut); + const ret = try f.allocLocal(inst_ty, .Mut); try w.print(" = zig_{s}", .{fn_op}); switch (inst_ty.tag()) { @@ -1412,16 +1406,16 @@ fn airSatOp( } try w.writeByte('('); - try o.writeCValue(w, lhs); + try f.writeCValue(w, lhs); try w.writeAll(", "); - try o.writeCValue(w, rhs); + try 
f.writeCValue(w, rhs); if (int_info.signedness == .signed) { try w.print(", {s}", .{min}); } try w.print(", {s});", .{max}); - try o.indent_writer.insertNewline(); + try f.object.indent_writer.insertNewline(); return ret; } diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index cdd19146b5..b27afa9b54 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1236,27 +1236,27 @@ pub const FuncGen = struct { for (body) |inst| { const opt_value: ?*const llvm.Value = switch (air_tags[inst]) { // zig fmt: off - .add => try self.airAdd(inst, .standard), - .addwrap => try self.airAdd(inst, .wrapping), - .addsat => try self.airAdd(inst, .saturated), - .sub => try self.airSub(inst, .standard), - .subwrap => try self.airSub(inst, .wrapping), - .subsat => try self.airSub(inst, .saturated), - .mul => try self.airMul(inst, .standard), - .mulwrap => try self.airMul(inst, .wrapping), - .mulsat => try self.airMul(inst, .saturated), - .div => try self.airDiv(inst), - .rem => try self.airRem(inst), - .mod => try self.airMod(inst), - .ptr_add => try self.airPtrAdd(inst), - .ptr_sub => try self.airPtrSub(inst), + .add => try self.airAdd(inst), + .addwrap => try self.airAddWrap(inst), + .add_sat => try self.airAddSat(inst), + .sub => try self.airSub(inst), + .subwrap => try self.airSubWrap(inst), + .sub_sat => try self.airSubSat(inst), + .mul => try self.airMul(inst), + .mulwrap => try self.airMulWrap(inst), + .mul_sat => try self.airMulSat(inst), + .div => try self.airDiv(inst), + .rem => try self.airRem(inst), + .mod => try self.airMod(inst), + .ptr_add => try self.airPtrAdd(inst), + .ptr_sub => try self.airPtrSub(inst), + .shl => try self.airShl(inst), + .shl_sat => try self.airShlSat(inst), + .shl_exact => try self.airShlExact(inst), .bit_and, .bool_and => try self.airAnd(inst), .bit_or, .bool_or => try self.airOr(inst), .xor => try self.airXor(inst), - - .shl => try self.airShl(inst, false), - .shl_sat => try self.airShl(inst, true), .shr => try self.airShr(inst), .cmp_eq => try self.airCmp(inst, .eq), @@ -2028,10 +2028,8 @@ pub const FuncGen = struct { return self.todo("implement llvm codegen for 'airWrapErrUnionErr'", .{}); } - const ArithmeticType = enum { standard, wrapping, saturated }; - fn airAdd(self: *FuncGen, inst: Air.Inst.Index, ty: ArithmeticType) !?*const llvm.Value { - if (self.liveness.isUnused(inst)) - return null; + fn airAdd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) return null; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -2039,21 +2037,36 @@ pub const FuncGen = struct { const inst_ty = self.air.typeOfIndex(inst); if (inst_ty.isAnyFloat()) return self.builder.buildFAdd(lhs, rhs, ""); - if (ty == .wrapping) - return self.builder.buildAdd(lhs, rhs, "") - else if (ty == .saturated) { - if (inst_ty.isSignedInt()) - return self.builder.buildSAddSat(lhs, rhs, "") - else - return self.builder.buildUAddSat(lhs, rhs, ""); - } if (inst_ty.isSignedInt()) return self.builder.buildNSWAdd(lhs, rhs, ""); return self.builder.buildNUWAdd(lhs, rhs, ""); } - fn airSub(self: *FuncGen, inst: Air.Inst.Index, ty: ArithmeticType) !?*const llvm.Value { - if (self.liveness.isUnused(inst)) - return null; + fn airAddWrap(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) return null; + + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try 
self.resolveInst(bin_op.rhs); + + return self.builder.buildAdd(lhs, rhs, ""); + } + + fn airAddSat(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) return null; + + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try self.resolveInst(bin_op.rhs); + const inst_ty = self.air.typeOfIndex(inst); + + if (inst_ty.isAnyFloat()) return self.todo("saturating float add", .{}); + if (inst_ty.isSignedInt()) return self.builder.buildSAddSat(lhs, rhs, ""); + + return self.builder.buildUAddSat(lhs, rhs, ""); + } + + fn airSub(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) return null; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -2061,21 +2074,35 @@ pub const FuncGen = struct { const inst_ty = self.air.typeOfIndex(inst); if (inst_ty.isAnyFloat()) return self.builder.buildFSub(lhs, rhs, ""); - if (ty == .wrapping) - return self.builder.buildSub(lhs, rhs, "") - else if (ty == .saturated) { - if (inst_ty.isSignedInt()) - return self.builder.buildSSubSat(lhs, rhs, "") - else - return self.builder.buildUSubSat(lhs, rhs, ""); - } if (inst_ty.isSignedInt()) return self.builder.buildNSWSub(lhs, rhs, ""); return self.builder.buildNUWSub(lhs, rhs, ""); } - fn airMul(self: *FuncGen, inst: Air.Inst.Index, ty: ArithmeticType) !?*const llvm.Value { - if (self.liveness.isUnused(inst)) - return null; + fn airSubWrap(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) return null; + + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try self.resolveInst(bin_op.rhs); + + return self.builder.buildSub(lhs, rhs, ""); + } + + fn airSubSat(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) return null; + + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try self.resolveInst(bin_op.rhs); + const inst_ty = self.air.typeOfIndex(inst); + + if (inst_ty.isAnyFloat()) return self.todo("saturating float sub", .{}); + if (inst_ty.isSignedInt()) return self.builder.buildSSubSat(lhs, rhs, ""); + return self.builder.buildUSubSat(lhs, rhs, ""); + } + + fn airMul(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) return null; const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); @@ -2083,18 +2110,33 @@ pub const FuncGen = struct { const inst_ty = self.air.typeOfIndex(inst); if (inst_ty.isAnyFloat()) return self.builder.buildFMul(lhs, rhs, ""); - if (ty == .wrapping) - return self.builder.buildMul(lhs, rhs, "") - else if (ty == .saturated) { - if (inst_ty.isSignedInt()) - return self.builder.buildSMulFixSat(lhs, rhs, "") - else - return self.builder.buildUMulFixSat(lhs, rhs, ""); - } if (inst_ty.isSignedInt()) return self.builder.buildNSWMul(lhs, rhs, ""); return self.builder.buildNUWMul(lhs, rhs, ""); } + fn airMulWrap(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) return null; + + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try self.resolveInst(bin_op.rhs); + + return self.builder.buildMul(lhs, rhs, ""); + } + + fn airMulSat(self: 
*FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) return null; + + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try self.resolveInst(bin_op.rhs); + const inst_ty = self.air.typeOfIndex(inst); + + if (inst_ty.isAnyFloat()) return self.todo("saturating float mul", .{}); + if (inst_ty.isSignedInt()) return self.builder.buildSMulFixSat(lhs, rhs, ""); + return self.builder.buildUMulFixSat(lhs, rhs, ""); + } + fn airDiv(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { if (self.liveness.isUnused(inst)) return null; @@ -2200,9 +2242,25 @@ pub const FuncGen = struct { return self.builder.buildXor(lhs, rhs, ""); } - fn airShl(self: *FuncGen, inst: Air.Inst.Index, sat: bool) !?*const llvm.Value { - if (self.liveness.isUnused(inst)) - return null; + fn airShlExact(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) return null; + + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try self.resolveInst(bin_op.rhs); + const lhs_type = self.air.typeOf(bin_op.lhs); + const tg = self.dg.module.getTarget(); + const casted_rhs = if (self.air.typeOf(bin_op.rhs).bitSize(tg) < lhs_type.bitSize(tg)) + self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_type), "") + else + rhs; + if (lhs_type.isSignedInt()) return self.builder.buildNSWShl(lhs, casted_rhs, ""); + return self.builder.buildNUWShl(lhs, casted_rhs, ""); + } + + fn airShl(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) return null; + const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -2212,15 +2270,25 @@ pub const FuncGen = struct { self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_type), "") else rhs; - if (sat) { - return if (lhs_type.isSignedInt()) - self.builder.buildSShlSat(lhs, casted_rhs, "") - else - self.builder.buildUShlSat(lhs, casted_rhs, ""); - } return self.builder.buildShl(lhs, casted_rhs, ""); } + fn airShlSat(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) return null; + + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try self.resolveInst(bin_op.rhs); + const lhs_type = self.air.typeOf(bin_op.lhs); + const tg = self.dg.module.getTarget(); + const casted_rhs = if (self.air.typeOf(bin_op.rhs).bitSize(tg) < lhs_type.bitSize(tg)) + self.builder.buildZExt(rhs, try self.dg.llvmType(lhs_type), "") + else + rhs; + if (lhs_type.isSignedInt()) return self.builder.buildSShlSat(lhs, casted_rhs, ""); + return self.builder.buildUShlSat(lhs, casted_rhs, ""); + } + fn airShr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { if (self.liveness.isUnused(inst)) return null; diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig index 178c381235..4fac6656c8 100644 --- a/src/codegen/llvm/bindings.zig +++ b/src/codegen/llvm/bindings.zig @@ -469,6 +469,12 @@ pub const Builder = opaque { pub const buildShl = LLVMBuildShl; extern fn LLVMBuildShl(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value; + pub const buildNUWShl = ZigLLVMBuildNUWShl; + extern fn ZigLLVMBuildNUWShl(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value; + + 
pub const buildNSWShl = ZigLLVMBuildNSWShl; + extern fn ZigLLVMBuildNSWShl(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value; + pub const buildSShlSat = ZigLLVMBuildSShlSat; extern fn ZigLLVMBuildSShlSat(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value; diff --git a/src/link/C/zig.h b/src/link/C/zig.h index 5c9d750729..72868e4400 100644 --- a/src/link/C/zig.h +++ b/src/link/C/zig.h @@ -356,9 +356,6 @@ static inline long long zig_subw_longlong(long long lhs, long long rhs, long lon return (long long)(((unsigned long long)lhs) - ((unsigned long long)rhs)); } -/* - * Saturating aritmetic operations: add, sub, mul, shl - */ #define zig_add_sat_u(ZT, T) static inline T zig_adds_##ZT(T x, T y, T max) { \ return (x > max - y) ? max : x + y; \ } @@ -449,7 +446,7 @@ zig_shl_sat_u(u32, uint32_t, 32) zig_shl_sat_s(i32, int32_t, 31) zig_shl_sat_u(u64, uint64_t, 64) zig_shl_sat_s(i64, int64_t, 63) -zig_shl_sat_s(isize, intptr_t, 63) -zig_shl_sat_s(short, short, 15) -zig_shl_sat_s(int, int, 31) -zig_shl_sat_s(long, long, 63) +zig_shl_sat_s(isize, intptr_t, ((sizeof(intptr_t)) * CHAR_BIT - 1)) +zig_shl_sat_s(short, short, ((sizeof(short )) * CHAR_BIT - 1)) +zig_shl_sat_s(int, int, ((sizeof(int )) * CHAR_BIT - 1)) +zig_shl_sat_s(long, long, ((sizeof(long )) * CHAR_BIT - 1)) diff --git a/src/print_air.zig b/src/print_air.zig index 7d178b52f3..885c1b62bd 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -104,13 +104,13 @@ const Writer = struct { .add, .addwrap, - .addsat, + .add_sat, .sub, .subwrap, - .subsat, + .sub_sat, .mul, .mulwrap, - .mulsat, + .mul_sat, .div, .rem, .mod, @@ -133,6 +133,7 @@ const Writer = struct { .ptr_elem_val, .ptr_ptr_elem_val, .shl, + .shl_exact, .shl_sat, .shr, .set_union_tag, diff --git a/src/print_zir.zig b/src/print_zir.zig index 3834a694e9..5ffd6619af 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -229,12 +229,15 @@ const Writer = struct { .add, .addwrap, + .add_sat, .array_cat, .array_mul, .mul, .mulwrap, + .mul_sat, .sub, .subwrap, + .sub_sat, .cmp_lt, .cmp_lte, .cmp_eq, @@ -247,6 +250,7 @@ const Writer = struct { .mod_rem, .shl, .shl_exact, + .shl_sat, .shr, .shr_exact, .xor, @@ -400,12 +404,6 @@ const Writer = struct { .shl_with_overflow, => try self.writeOverflowArithmetic(stream, extended), - .add_with_saturation, - .sub_with_saturation, - .mul_with_saturation, - .shl_with_saturation, - => try self.writeSaturatingArithmetic(stream, extended), - .struct_decl => try self.writeStructDecl(stream, extended), .union_decl => try self.writeUnionDecl(stream, extended), .enum_decl => try self.writeEnumDecl(stream, extended), @@ -854,18 +852,6 @@ const Writer = struct { try self.writeSrc(stream, src); } - fn writeSaturatingArithmetic(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void { - const extra = self.code.extraData(Zir.Inst.SaturatingArithmetic, extended.operand).data; - const src: LazySrcLoc = .{ .node_offset = extra.node }; - - try self.writeInstRef(stream, extra.lhs); - try stream.writeAll(", "); - try self.writeInstRef(stream, extra.rhs); - try stream.writeAll(", "); - try stream.writeAll(") "); - try self.writeSrc(stream, src); - } - fn writePlNodeCall(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void { const inst_data = self.code.instructions.items(.data)[inst].pl_node; const extra = self.code.extraData(Zir.Inst.Call, inst_data.payload_index); diff --git a/src/stage1/all_types.hpp b/src/stage1/all_types.hpp index e31a7015b0..5b58766df9 
100644 --- a/src/stage1/all_types.hpp +++ b/src/stage1/all_types.hpp @@ -1818,10 +1818,6 @@ enum BuiltinFnId { BuiltinFnIdReduce, BuiltinFnIdMaximum, BuiltinFnIdMinimum, - BuiltinFnIdSatAdd, - BuiltinFnIdSatSub, - BuiltinFnIdSatMul, - BuiltinFnIdSatShl, }; struct BuiltinFnEntry { diff --git a/src/stage1/astgen.cpp b/src/stage1/astgen.cpp index 14808dd0a2..8fbd02c688 100644 --- a/src/stage1/astgen.cpp +++ b/src/stage1/astgen.cpp @@ -4720,66 +4720,6 @@ static Stage1ZirInst *astgen_builtin_fn_call(Stage1AstGen *ag, Scope *scope, Ast Stage1ZirInst *bin_op = ir_build_bin_op(ag, scope, node, IrBinOpMaximum, arg0_value, arg1_value, true); return ir_lval_wrap(ag, scope, bin_op, lval, result_loc); } - case BuiltinFnIdSatAdd: - { - AstNode *arg0_node = node->data.fn_call_expr.params.at(0); - Stage1ZirInst *arg0_value = astgen_node(ag, arg0_node, scope); - if (arg0_value == ag->codegen->invalid_inst_src) - return arg0_value; - - AstNode *arg1_node = node->data.fn_call_expr.params.at(1); - Stage1ZirInst *arg1_value = astgen_node(ag, arg1_node, scope); - if (arg1_value == ag->codegen->invalid_inst_src) - return arg1_value; - - Stage1ZirInst *bin_op = ir_build_bin_op(ag, scope, node, IrBinOpAddSat, arg0_value, arg1_value, true); - return ir_lval_wrap(ag, scope, bin_op, lval, result_loc); - } - case BuiltinFnIdSatSub: - { - AstNode *arg0_node = node->data.fn_call_expr.params.at(0); - Stage1ZirInst *arg0_value = astgen_node(ag, arg0_node, scope); - if (arg0_value == ag->codegen->invalid_inst_src) - return arg0_value; - - AstNode *arg1_node = node->data.fn_call_expr.params.at(1); - Stage1ZirInst *arg1_value = astgen_node(ag, arg1_node, scope); - if (arg1_value == ag->codegen->invalid_inst_src) - return arg1_value; - - Stage1ZirInst *bin_op = ir_build_bin_op(ag, scope, node, IrBinOpSubSat, arg0_value, arg1_value, true); - return ir_lval_wrap(ag, scope, bin_op, lval, result_loc); - } - case BuiltinFnIdSatMul: - { - AstNode *arg0_node = node->data.fn_call_expr.params.at(0); - Stage1ZirInst *arg0_value = astgen_node(ag, arg0_node, scope); - if (arg0_value == ag->codegen->invalid_inst_src) - return arg0_value; - - AstNode *arg1_node = node->data.fn_call_expr.params.at(1); - Stage1ZirInst *arg1_value = astgen_node(ag, arg1_node, scope); - if (arg1_value == ag->codegen->invalid_inst_src) - return arg1_value; - - Stage1ZirInst *bin_op = ir_build_bin_op(ag, scope, node, IrBinOpMultSat, arg0_value, arg1_value, true); - return ir_lval_wrap(ag, scope, bin_op, lval, result_loc); - } - case BuiltinFnIdSatShl: - { - AstNode *arg0_node = node->data.fn_call_expr.params.at(0); - Stage1ZirInst *arg0_value = astgen_node(ag, arg0_node, scope); - if (arg0_value == ag->codegen->invalid_inst_src) - return arg0_value; - - AstNode *arg1_node = node->data.fn_call_expr.params.at(1); - Stage1ZirInst *arg1_value = astgen_node(ag, arg1_node, scope); - if (arg1_value == ag->codegen->invalid_inst_src) - return arg1_value; - - Stage1ZirInst *bin_op = ir_build_bin_op(ag, scope, node, IrBinOpShlSat, arg0_value, arg1_value, true); - return ir_lval_wrap(ag, scope, bin_op, lval, result_loc); - } case BuiltinFnIdMemcpy: { AstNode *arg0_node = node->data.fn_call_expr.params.at(0); diff --git a/src/stage1/codegen.cpp b/src/stage1/codegen.cpp index eade843354..a0f130b79e 100644 --- a/src/stage1/codegen.cpp +++ b/src/stage1/codegen.cpp @@ -9134,10 +9134,6 @@ static void define_builtin_fns(CodeGen *g) { create_builtin_fn(g, BuiltinFnIdReduce, "reduce", 2); create_builtin_fn(g, BuiltinFnIdMaximum, "maximum", 2); create_builtin_fn(g, 
BuiltinFnIdMinimum, "minimum", 2); - create_builtin_fn(g, BuiltinFnIdSatAdd, "addWithSaturation", 2); - create_builtin_fn(g, BuiltinFnIdSatSub, "subWithSaturation", 2); - create_builtin_fn(g, BuiltinFnIdSatMul, "mulWithSaturation", 2); - create_builtin_fn(g, BuiltinFnIdSatShl, "shlWithSaturation", 2); } static const char *bool_to_str(bool b) { diff --git a/src/translate_c/ast.zig b/src/translate_c/ast.zig index d0fe6d1b31..dbd9367d1a 100644 --- a/src/translate_c/ast.zig +++ b/src/translate_c/ast.zig @@ -1462,10 +1462,10 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex { .mul_wrap_assign => return renderBinOp(c, node, .assign_mul_wrap, .asterisk_percent_equal, "*%="), .div => return renderBinOpGrouped(c, node, .div, .slash, "/"), .div_assign => return renderBinOp(c, node, .assign_div, .slash_equal, "/="), - .shl => return renderBinOpGrouped(c, node, .bit_shift_left, .angle_bracket_angle_bracket_left, "<<"), - .shl_assign => return renderBinOp(c, node, .assign_bit_shift_left, .angle_bracket_angle_bracket_left_equal, "<<="), - .shr => return renderBinOpGrouped(c, node, .bit_shift_right, .angle_bracket_angle_bracket_right, ">>"), - .shr_assign => return renderBinOp(c, node, .assign_bit_shift_right, .angle_bracket_angle_bracket_right_equal, ">>="), + .shl => return renderBinOpGrouped(c, node, .shl, .angle_bracket_angle_bracket_left, "<<"), + .shl_assign => return renderBinOp(c, node, .assign_shl, .angle_bracket_angle_bracket_left_equal, "<<="), + .shr => return renderBinOpGrouped(c, node, .shr, .angle_bracket_angle_bracket_right, ">>"), + .shr_assign => return renderBinOp(c, node, .assign_shr, .angle_bracket_angle_bracket_right_equal, ">>="), .mod => return renderBinOpGrouped(c, node, .mod, .percent, "%"), .mod_assign => return renderBinOp(c, node, .assign_mod, .percent_equal, "%="), .@"and" => return renderBinOpGrouped(c, node, .bool_and, .keyword_and, "and"), diff --git a/src/value.zig b/src/value.zig index 29d8fa8db9..73a2b3a49f 100644 --- a/src/value.zig +++ b/src/value.zig @@ -1588,6 +1588,35 @@ pub const Value = extern union { return result; } + /// Supports both floats and ints; handles undefined. + pub fn numberAddSat( + lhs: Value, + rhs: Value, + ty: Type, + arena: *Allocator, + target: Target, + ) !Value { + if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); + + if (ty.isAnyFloat()) { + // TODO: handle outside float range + return floatAdd(lhs, rhs, ty, arena); + } + const result = try intAdd(lhs, rhs, arena); + + const max = try ty.maxInt(arena, target); + if (compare(result, .gt, max, ty)) { + return max; + } + + const min = try ty.minInt(arena, target); + if (compare(result, .lt, min, ty)) { + return min; + } + + return result; + } + /// Supports both floats and ints; handles undefined. pub fn numberSubWrap( lhs: Value, @@ -1616,6 +1645,35 @@ pub const Value = extern union { return result; } + /// Supports both floats and ints; handles undefined. 
+ pub fn numberSubSat( + lhs: Value, + rhs: Value, + ty: Type, + arena: *Allocator, + target: Target, + ) !Value { + if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); + + if (ty.isAnyFloat()) { + // TODO: handle outside float range + return floatSub(lhs, rhs, ty, arena); + } + const result = try intSub(lhs, rhs, arena); + + const max = try ty.maxInt(arena, target); + if (compare(result, .gt, max, ty)) { + return max; + } + + const min = try ty.minInt(arena, target); + if (compare(result, .lt, min, ty)) { + return min; + } + + return result; + } + /// Supports both floats and ints; handles undefined. pub fn numberMulWrap( lhs: Value, @@ -1644,6 +1702,35 @@ pub const Value = extern union { return result; } + /// Supports both floats and ints; handles undefined. + pub fn numberMulSat( + lhs: Value, + rhs: Value, + ty: Type, + arena: *Allocator, + target: Target, + ) !Value { + if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); + + if (ty.isAnyFloat()) { + // TODO: handle outside float range + return floatMul(lhs, rhs, ty, arena); + } + const result = try intMul(lhs, rhs, arena); + + const max = try ty.maxInt(arena, target); + if (compare(result, .gt, max, ty)) { + return max; + } + + const min = try ty.minInt(arena, target); + if (compare(result, .lt, min, ty)) { + return min; + } + + return result; + } + /// Supports both floats and ints; handles undefined. pub fn numberMax(lhs: Value, rhs: Value, arena: *Allocator) !Value { if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef); diff --git a/test/behavior/saturating_arithmetic.zig b/test/behavior/saturating_arithmetic.zig index 5d7a229c3c..91f9c17fb9 100644 --- a/test/behavior/saturating_arithmetic.zig +++ b/test/behavior/saturating_arithmetic.zig @@ -32,7 +32,7 @@ fn testSaturatingOp(comptime op: Op, comptime T: type, test_data: [3]T) !void { } } -test "@addWithSaturation" { +test "saturating add" { const S = struct { fn doTheTest() !void { // .{a, b, expected a+b} @@ -50,22 +50,16 @@ test "@addWithSaturation" { try testSaturatingOp(.add, u128, .{ maxInt(u128), 1, maxInt(u128) }); const u8x3 = std.meta.Vector(3, u8); - try expectEqual(u8x3{ 255, 255, 255 }, @addWithSaturation( - u8x3{ 255, 254, 1 }, - u8x3{ 1, 2, 255 }, - )); + try expectEqual(u8x3{ 255, 255, 255 }, (u8x3{ 255, 254, 1 } +| u8x3{ 1, 2, 255 })); const i8x3 = std.meta.Vector(3, i8); - try expectEqual(i8x3{ 127, 127, 127 }, @addWithSaturation( - i8x3{ 127, 126, 1 }, - i8x3{ 1, 2, 127 }, - )); + try expectEqual(i8x3{ 127, 127, 127 }, (i8x3{ 127, 126, 1 } +| i8x3{ 1, 2, 127 })); } }; try S.doTheTest(); comptime try S.doTheTest(); } -test "@subWithSaturation" { +test "saturating subtraction" { const S = struct { fn doTheTest() !void { // .{a, b, expected a-b} @@ -81,17 +75,14 @@ test "@subWithSaturation" { try testSaturatingOp(.sub, u128, .{ 0, maxInt(u128), 0 }); const u8x3 = std.meta.Vector(3, u8); - try expectEqual(u8x3{ 0, 0, 0 }, @subWithSaturation( - u8x3{ 0, 0, 0 }, - u8x3{ 255, 255, 255 }, - )); + try expectEqual(u8x3{ 0, 0, 0 }, (u8x3{ 0, 0, 0 } -| u8x3{ 255, 255, 255 })); } }; try S.doTheTest(); comptime try S.doTheTest(); } -test "@mulWithSaturation" { +test "saturating multiplication" { // TODO: once #9660 has been solved, remove this line if (std.builtin.target.cpu.arch == .wasm32) return error.SkipZigTest; @@ -112,10 +103,7 @@ test "@mulWithSaturation" { try testSaturatingOp(.mul, u128, .{ maxInt(u128), maxInt(u128), maxInt(u128) }); const u8x3 = std.meta.Vector(3, u8); - try expectEqual(u8x3{ 255, 255, 255 }, 
@mulWithSaturation( - u8x3{ 2, 2, 2 }, - u8x3{ 255, 255, 255 }, - )); + try expectEqual(u8x3{ 255, 255, 255 }, (u8x3{ 2, 2, 2 } *| u8x3{ 255, 255, 255 })); } }; @@ -123,7 +111,7 @@ test "@mulWithSaturation" { comptime try S.doTheTest(); } -test "@shlWithSaturation" { +test "saturating shift-left" { const S = struct { fn doTheTest() !void { // .{a, b, expected a< Date: Tue, 28 Sep 2021 22:38:51 -0700 Subject: stage2: implement `@clz` and `@ctz` Also improve the LLVM backend to support lowering bigints to LLVM values. Moves over a bunch of math.zig test cases to the "passing for stage2" section. --- src/Air.zig | 10 +++ src/Liveness.zig | 2 + src/Sema.zig | 83 ++++++++++++------- src/codegen.zig | 18 +++++ src/codegen/c.zig | 19 +++++ src/codegen/llvm.zig | 49 ++++++++++-- src/codegen/llvm/bindings.zig | 7 +- src/print_air.zig | 2 + src/type.zig | 6 +- src/value.zig | 39 +++++++++ test/behavior/math.zig | 182 ++++++++++++++++++++++++++++++++++++++++++ test/behavior/math_stage1.zig | 178 ----------------------------------------- 12 files changed, 379 insertions(+), 216 deletions(-) (limited to 'src/codegen/c.zig') diff --git a/src/Air.zig b/src/Air.zig index cdc5ff2287..f7eccfd5a5 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -160,6 +160,14 @@ pub const Inst = struct { /// Result type is the return type of the function being called. /// Uses the `pl_op` field with the `Call` payload. operand is the callee. call, + /// Count leading zeroes of an integer according to its representation in twos complement. + /// Result type will always be an unsigned integer big enough to fit the answer. + /// Uses the `ty_op` field. + clz, + /// Count trailing zeroes of an integer according to its representation in twos complement. + /// Result type will always be an unsigned integer big enough to fit the answer. + /// Uses the `ty_op` field. + ctz, /// `<`. Result type is always bool. /// Uses the `bin_op` field. 
@@ -669,6 +677,8 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { .float_to_int, .int_to_float, .get_union_tag, + .clz, + .ctz, => return air.getRefType(datas[inst].ty_op.ty), .loop, diff --git a/src/Liveness.zig b/src/Liveness.zig index 93f28ad7b2..71a0414383 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -304,6 +304,8 @@ fn analyzeInst( .float_to_int, .int_to_float, .get_union_tag, + .clz, + .ctz, => { const o = inst_datas[inst].ty_op; return trackOperands(a, new_set, inst, main_tomb, .{ o.operand, .none, .none }); diff --git a/src/Sema.zig b/src/Sema.zig index 0fb93f3fbe..51ebb496f3 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -4611,8 +4611,8 @@ fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErr const dest_type = try sema.resolveType(block, dest_ty_src, extra.lhs); const operand = sema.resolveInst(extra.rhs); - const dest_is_comptime_int = try sema.requireIntegerType(block, dest_ty_src, dest_type); - _ = try sema.requireIntegerType(block, operand_src, sema.typeOf(operand)); + const dest_is_comptime_int = try sema.checkIntType(block, dest_ty_src, dest_type); + _ = try sema.checkIntType(block, operand_src, sema.typeOf(operand)); if (try sema.isComptimeKnown(block, operand_src, operand)) { return sema.coerce(block, dest_type, operand, operand_src); @@ -8384,7 +8384,7 @@ fn zirIntToFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Compile const operand = sema.resolveInst(extra.rhs); const operand_ty = sema.typeOf(operand); - try sema.checkIntType(block, ty_src, dest_ty); + _ = try sema.checkIntType(block, ty_src, dest_ty); try sema.checkFloatType(block, operand_src, operand_ty); if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| { @@ -8493,8 +8493,8 @@ fn zirTruncate(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr const operand = sema.resolveInst(extra.rhs); const operand_ty = sema.typeOf(operand); const mod = sema.mod; - const dest_is_comptime_int = try sema.requireIntegerType(block, dest_ty_src, dest_ty); - const src_is_comptime_int = try sema.requireIntegerType(block, operand_src, operand_ty); + const dest_is_comptime_int = try sema.checkIntType(block, dest_ty_src, dest_ty); + const src_is_comptime_int = try sema.checkIntType(block, operand_src, operand_ty); if (dest_is_comptime_int) { return sema.coerce(block, dest_ty, operand, operand_src); @@ -8552,14 +8552,56 @@ fn zirAlignCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileE fn zirClz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const src = inst_data.src(); - return sema.mod.fail(&block.base, src, "TODO: Sema.zirClz", .{}); + const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; + const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; + const operand = sema.resolveInst(inst_data.operand); + const operand_ty = sema.typeOf(operand); + // TODO implement support for vectors + if (operand_ty.zigTypeTag() != .Int) { + return sema.mod.fail(&block.base, ty_src, "expected integer type, found '{}'", .{ + operand_ty, + }); + } + const target = sema.mod.getTarget(); + const bits = operand_ty.intInfo(target).bits; + if (bits == 0) return Air.Inst.Ref.zero; + + const result_ty = try Type.smallestUnsignedInt(sema.arena, bits); + + const runtime_src = if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| { + if (val.isUndef()) return 
sema.addConstUndef(result_ty); + return sema.addIntUnsigned(result_ty, val.clz(operand_ty, target)); + } else operand_src; + + try sema.requireRuntimeBlock(block, runtime_src); + return block.addTyOp(.clz, result_ty, operand); } fn zirCtz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const src = inst_data.src(); - return sema.mod.fail(&block.base, src, "TODO: Sema.zirCtz", .{}); + const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; + const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; + const operand = sema.resolveInst(inst_data.operand); + const operand_ty = sema.typeOf(operand); + // TODO implement support for vectors + if (operand_ty.zigTypeTag() != .Int) { + return sema.mod.fail(&block.base, ty_src, "expected integer type, found '{}'", .{ + operand_ty, + }); + } + const target = sema.mod.getTarget(); + const bits = operand_ty.intInfo(target).bits; + if (bits == 0) return Air.Inst.Ref.zero; + + const result_ty = try Type.smallestUnsignedInt(sema.arena, bits); + + const runtime_src = if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| { + if (val.isUndef()) return sema.addConstUndef(result_ty); + return sema.mod.fail(&block.base, operand_src, "TODO: implement comptime @ctz", .{}); + } else operand_src; + + try sema.requireRuntimeBlock(block, runtime_src); + return block.addTyOp(.ctz, result_ty, operand); } fn zirPopCount(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -8616,17 +8658,12 @@ fn zirOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr return sema.mod.fail(&block.base, src, "TODO: Sema.zirOffsetOf", .{}); } -fn checkIntType( - sema: *Sema, - block: *Scope.Block, - ty_src: LazySrcLoc, - ty: Type, -) CompileError!void { +/// Returns `true` if the type was a comptime_int. +fn checkIntType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, ty: Type) CompileError!bool { switch (ty.zigTypeTag()) { - .ComptimeInt, .Int => {}, - else => return sema.mod.fail(&block.base, ty_src, "expected integer type, found '{}'", .{ - ty, - }), + .ComptimeInt => return true, + .Int => return false, + else => return sema.mod.fail(&block.base, src, "expected integer type, found '{}'", .{ty}), } } @@ -9416,14 +9453,6 @@ fn requireRuntimeBlock(sema: *Sema, block: *Scope.Block, src: LazySrcLoc) !void try sema.requireFunctionBlock(block, src); } -fn requireIntegerType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, ty: Type) !bool { - switch (ty.zigTypeTag()) { - .ComptimeInt => return true, - .Int => return false, - else => return sema.mod.fail(&block.base, src, "expected integer type, found '{}'", .{ty}), - } -} - /// Emit a compile error if type cannot be used for a runtime variable. 
fn validateVarType( sema: *Sema, diff --git a/src/codegen.zig b/src/codegen.zig index 79105dc4a7..dfaedf041a 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -896,6 +896,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .memset => try self.airMemset(inst), .set_union_tag => try self.airSetUnionTag(inst), .get_union_tag => try self.airGetUnionTag(inst), + .clz => try self.airClz(inst), + .ctz => try self.airCtz(inst), .atomic_store_unordered => try self.airAtomicStore(inst, .Unordered), .atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic), @@ -1606,6 +1608,22 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } + fn airClz(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { + else => return self.fail("TODO implement airClz for {}", .{self.target.cpu.arch}), + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + } + + fn airCtz(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { + else => return self.fail("TODO implement airCtz for {}", .{self.target.cpu.arch}), + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + } + fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_index: Liveness.OperandInt, mcv: MCValue) bool { if (!self.liveness.operandDies(inst, op_index)) return false; diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 95ce95f2e5..d2ce9cc6de 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -962,6 +962,8 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO .memcpy => try airMemcpy(f, inst), .set_union_tag => try airSetUnionTag(f, inst), .get_union_tag => try airGetUnionTag(f, inst), + .clz => try airBuiltinCall(f, inst, "clz"), + .ctz => try airBuiltinCall(f, inst, "ctz"), .int_to_float, .float_to_int, @@ -2075,6 +2077,23 @@ fn airSimpleCast(f: *Function, inst: Air.Inst.Index) !CValue { return local; } +fn airBuiltinCall(f: *Function, inst: Air.Inst.Index, fn_name: [*:0]const u8) !CValue { + if (f.liveness.isUnused(inst)) return CValue.none; + + const inst_ty = f.air.typeOfIndex(inst); + const local = try f.allocLocal(inst_ty, .Const); + const ty_op = f.air.instructions.items(.data)[inst].ty_op; + const writer = f.object.writer(); + const operand = try f.resolveInst(ty_op.operand); + + // TODO implement the function in zig.h and call it here + + try writer.print(" = {s}(", .{fn_name}); + try f.writeCValue(writer, operand); + try writer.writeAll(");\n"); + return local; +} + fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue { const ty_pl = f.air.instructions.items(.data)[inst].ty_pl; const extra = f.air.extraData(Air.Cmpxchg, ty_pl.payload).data; diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index b27afa9b54..2c278221ae 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -889,14 +889,14 @@ pub const DeclGen = struct { const llvm_type = try self.llvmType(tv.ty); if (bigint.eqZero()) return llvm_type.constNull(); - if (bigint.limbs.len != 1) { - return self.todo("implement bigger bigint", .{}); - } - const llvm_int = llvm_type.constInt(bigint.limbs[0], .False); + const unsigned_val = if (bigint.limbs.len == 1) + 
llvm_type.constInt(bigint.limbs[0], .False) + else + llvm_type.constIntOfArbitraryPrecision(@intCast(c_uint, bigint.limbs.len), bigint.limbs.ptr); if (!bigint.positive) { - return llvm.constNeg(llvm_int); + return llvm.constNeg(unsigned_val); } - return llvm_int; + return unsigned_val; }, .Enum => { const llvm_type = try self.llvmType(tv.ty); @@ -1310,6 +1310,8 @@ pub const FuncGen = struct { .memcpy => try self.airMemcpy(inst), .set_union_tag => try self.airSetUnionTag(inst), .get_union_tag => try self.airGetUnionTag(inst), + .clz => try self.airClzCtz(inst, "ctlz"), + .ctz => try self.airClzCtz(inst, "cttz"), .atomic_store_unordered => try self.airAtomicStore(inst, .Unordered), .atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic), @@ -2699,6 +2701,41 @@ pub const FuncGen = struct { return self.builder.buildExtractValue(un, 1, ""); } + fn airClzCtz(self: *FuncGen, inst: Air.Inst.Index, prefix: [*:0]const u8) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) return null; + + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const operand_ty = self.air.typeOf(ty_op.operand); + const operand = try self.resolveInst(ty_op.operand); + const target = self.dg.module.getTarget(); + const bits = operand_ty.intInfo(target).bits; + + var fn_name_buf: [100]u8 = undefined; + const llvm_fn_name = std.fmt.bufPrintZ(&fn_name_buf, "llvm.{s}.i{d}", .{ + prefix, bits, + }) catch unreachable; + const llvm_i1 = self.context.intType(1); + const fn_val = self.dg.object.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: { + const operand_llvm_ty = try self.dg.llvmType(operand_ty); + const param_types = [_]*const llvm.Type{ operand_llvm_ty, llvm_i1 }; + const fn_type = llvm.functionType(operand_llvm_ty, &param_types, param_types.len, .False); + break :blk self.dg.object.llvm_module.addFunction(llvm_fn_name, fn_type); + }; + + const params = [_]*const llvm.Value{ operand, llvm_i1.constNull() }; + const wrong_size_result = self.builder.buildCall(fn_val, &params, params.len, ""); + const result_ty = self.air.typeOfIndex(inst); + const result_llvm_ty = try self.dg.llvmType(result_ty); + const result_bits = result_ty.intInfo(target).bits; + if (bits > result_bits) { + return self.builder.buildTrunc(wrong_size_result, result_llvm_ty, ""); + } else if (bits < result_bits) { + return self.builder.buildZExt(wrong_size_result, result_llvm_ty, ""); + } else { + return wrong_size_result; + } + } + fn fieldPtr( self: *FuncGen, inst: Air.Inst.Index, diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig index 4fac6656c8..68d91f6c68 100644 --- a/src/codegen/llvm/bindings.zig +++ b/src/codegen/llvm/bindings.zig @@ -172,6 +172,9 @@ pub const Type = opaque { pub const constInt = LLVMConstInt; extern fn LLVMConstInt(IntTy: *const Type, N: c_ulonglong, SignExtend: Bool) *const Value; + pub const constIntOfArbitraryPrecision = LLVMConstIntOfArbitraryPrecision; + extern fn LLVMConstIntOfArbitraryPrecision(IntTy: *const Type, NumWords: c_uint, Words: [*]const u64) *const Value; + pub const constReal = LLVMConstReal; extern fn LLVMConstReal(RealTy: *const Type, N: f64) *const Value; @@ -300,7 +303,7 @@ extern fn LLVMGetInlineAsm( pub const functionType = LLVMFunctionType; extern fn LLVMFunctionType( ReturnType: *const Type, - ParamTypes: [*]*const Type, + ParamTypes: [*]const *const Type, ParamCount: c_uint, IsVarArg: Bool, ) *const Type; @@ -346,7 +349,7 @@ pub const Builder = opaque { extern fn LLVMBuildCall( *const Builder, Fn: *const Value, - Args: [*]*const Value, + Args: 
[*]const *const Value, NumArgs: c_uint, Name: [*:0]const u8, ) *const Value; diff --git a/src/print_air.zig b/src/print_air.zig index 885c1b62bd..dda3b4458b 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -186,6 +186,8 @@ const Writer = struct { .int_to_float, .float_to_int, .get_union_tag, + .clz, + .ctz, => try w.writeTyOp(s, inst), .block, diff --git a/src/type.zig b/src/type.zig index e13ede852a..cb2cc6d58a 100644 --- a/src/type.zig +++ b/src/type.zig @@ -3902,16 +3902,16 @@ pub const Type = extern union { const bits = bits: { if (max == 0) break :bits 0; const base = std.math.log2(max); - const upper = (@as(u64, 1) << base) - 1; + const upper = (@as(u64, 1) << @intCast(u6, base)) - 1; break :bits base + @boolToInt(upper < max); }; - return switch (bits) { + return switch (@intCast(u16, bits)) { 1 => initTag(.u1), 8 => initTag(.u8), 16 => initTag(.u16), 32 => initTag(.u32), 64 => initTag(.u64), - else => return Tag.int_unsigned.create(arena, bits), + else => |b| return Tag.int_unsigned.create(arena, b), }; } }; diff --git a/src/value.zig b/src/value.zig index 0ead2ff1d9..ac52654041 100644 --- a/src/value.zig +++ b/src/value.zig @@ -962,6 +962,45 @@ pub const Value = extern union { }; } + pub fn clz(val: Value, ty: Type, target: Target) u64 { + const ty_bits = ty.intInfo(target).bits; + switch (val.tag()) { + .zero, .bool_false => return ty_bits, + .one, .bool_true => return ty_bits - 1, + + .int_u64 => { + const big = @clz(u64, val.castTag(.int_u64).?.data); + return big + ty_bits - 64; + }, + .int_i64 => { + @panic("TODO implement i64 Value clz"); + }, + .int_big_positive => { + // TODO: move this code into std lib big ints + const bigint = val.castTag(.int_big_positive).?.asBigInt(); + // Limbs are stored in little-endian order but we need + // to iterate big-endian. + var total_limb_lz: u64 = 0; + var i: usize = bigint.limbs.len; + const bits_per_limb = @sizeOf(std.math.big.Limb) * 8; + while (i != 0) { + i -= 1; + const limb = bigint.limbs[i]; + const this_limb_lz = @clz(std.math.big.Limb, limb); + total_limb_lz += this_limb_lz; + if (this_limb_lz != bits_per_limb) break; + } + const total_limb_bits = bigint.limbs.len * bits_per_limb; + return total_limb_lz + ty_bits - total_limb_bits; + }, + .int_big_negative => { + @panic("TODO implement int_big_negative Value clz"); + }, + + else => unreachable, + } + } + /// Asserts the value is an integer and not undefined. /// Returns the number of bits the value requires to represent stored in twos complement form. 
pub fn intBitCountTwosComp(self: Value) usize { diff --git a/test/behavior/math.zig b/test/behavior/math.zig index 510cc3d438..56fbdc124d 100644 --- a/test/behavior/math.zig +++ b/test/behavior/math.zig @@ -53,3 +53,185 @@ fn testThreeExprInARow(f: bool, t: bool) !void { fn assertFalse(b: bool) !void { try expect(!b); } + +test "@clz" { + try testClz(); + comptime try testClz(); +} + +fn testClz() !void { + try expect(testOneClz(u8, 0b10001010) == 0); + try expect(testOneClz(u8, 0b00001010) == 4); + try expect(testOneClz(u8, 0b00011010) == 3); + try expect(testOneClz(u8, 0b00000000) == 8); + try expect(testOneClz(u128, 0xffffffffffffffff) == 64); + try expect(testOneClz(u128, 0x10000000000000000) == 63); +} + +fn testOneClz(comptime T: type, x: T) u32 { + return @clz(T, x); +} + +test "const number literal" { + const one = 1; + const eleven = ten + one; + + try expect(eleven == 11); +} +const ten = 10; + +test "float equality" { + const x: f64 = 0.012; + const y: f64 = x + 1.0; + + try testFloatEqualityImpl(x, y); + comptime try testFloatEqualityImpl(x, y); +} + +fn testFloatEqualityImpl(x: f64, y: f64) !void { + const y2 = x + 1.0; + try expect(y == y2); +} + +test "hex float literal parsing" { + comptime try expect(0x1.0 == 1.0); +} + +test "quad hex float literal parsing in range" { + const a = 0x1.af23456789bbaaab347645365cdep+5; + const b = 0x1.dedafcff354b6ae9758763545432p-9; + const c = 0x1.2f34dd5f437e849b4baab754cdefp+4534; + const d = 0x1.edcbff8ad76ab5bf46463233214fp-435; + _ = a; + _ = b; + _ = c; + _ = d; +} + +test "underscore separator parsing" { + try expect(0_0_0_0 == 0); + try expect(1_234_567 == 1234567); + try expect(001_234_567 == 1234567); + try expect(0_0_1_2_3_4_5_6_7 == 1234567); + + try expect(0b0_0_0_0 == 0); + try expect(0b1010_1010 == 0b10101010); + try expect(0b0000_1010_1010 == 0b10101010); + try expect(0b1_0_1_0_1_0_1_0 == 0b10101010); + + try expect(0o0_0_0_0 == 0); + try expect(0o1010_1010 == 0o10101010); + try expect(0o0000_1010_1010 == 0o10101010); + try expect(0o1_0_1_0_1_0_1_0 == 0o10101010); + + try expect(0x0_0_0_0 == 0); + try expect(0x1010_1010 == 0x10101010); + try expect(0x0000_1010_1010 == 0x10101010); + try expect(0x1_0_1_0_1_0_1_0 == 0x10101010); + + try expect(123_456.789_000e1_0 == 123456.789000e10); + try expect(0_1_2_3_4_5_6.7_8_9_0_0_0e0_0_1_0 == 123456.789000e10); + + try expect(0x1234_5678.9ABC_DEF0p-1_0 == 0x12345678.9ABCDEF0p-10); + try expect(0x1_2_3_4_5_6_7_8.9_A_B_C_D_E_F_0p-0_0_0_1_0 == 0x12345678.9ABCDEF0p-10); +} + +test "hex float literal within range" { + const a = 0x1.0p16383; + const b = 0x0.1p16387; + const c = 0x1.0p-16382; + _ = a; + _ = b; + _ = c; +} + +test "comptime_int addition" { + comptime { + try expect(35361831660712422535336160538497375248 + 101752735581729509668353361206450473702 == 137114567242441932203689521744947848950); + try expect(594491908217841670578297176641415611445982232488944558774612 + 390603545391089362063884922208143568023166603618446395589768 == 985095453608931032642182098849559179469148836107390954364380); + } +} + +test "comptime_int multiplication" { + comptime { + try expect( + 45960427431263824329884196484953148229 * 128339149605334697009938835852565949723 == 5898522172026096622534201617172456926982464453350084962781392314016180490567, + ); + try expect( + 594491908217841670578297176641415611445982232488944558774612 * 390603545391089362063884922208143568023166603618446395589768 == 
232210647056203049913662402532976186578842425262306016094292237500303028346593132411865381225871291702600263463125370016, + ); + } +} + +test "comptime_int shifting" { + comptime { + try expect((@as(u128, 1) << 127) == 0x80000000000000000000000000000000); + } +} + +test "comptime_int multi-limb shift and mask" { + comptime { + var a = 0xefffffffa0000001eeeeeeefaaaaaaab; + + try expect(@as(u32, a & 0xffffffff) == 0xaaaaaaab); + a >>= 32; + try expect(@as(u32, a & 0xffffffff) == 0xeeeeeeef); + a >>= 32; + try expect(@as(u32, a & 0xffffffff) == 0xa0000001); + a >>= 32; + try expect(@as(u32, a & 0xffffffff) == 0xefffffff); + a >>= 32; + + try expect(a == 0); + } +} + +test "comptime_int multi-limb partial shift right" { + comptime { + var a = 0x1ffffffffeeeeeeee; + a >>= 16; + try expect(a == 0x1ffffffffeeee); + } +} + +test "xor" { + try test_xor(); + comptime try test_xor(); +} + +fn test_xor() !void { + try testOneXor(0xFF, 0x00, 0xFF); + try testOneXor(0xF0, 0x0F, 0xFF); + try testOneXor(0xFF, 0xF0, 0x0F); + try testOneXor(0xFF, 0x0F, 0xF0); + try testOneXor(0xFF, 0xFF, 0x00); +} + +fn testOneXor(a: u8, b: u8, c: u8) !void { + try expect(a ^ b == c); +} + +test "comptime_int xor" { + comptime { + try expect(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF ^ 0x00000000000000000000000000000000 == 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF); + try expect(0xFFFFFFFFFFFFFFFF0000000000000000 ^ 0x0000000000000000FFFFFFFFFFFFFFFF == 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF); + try expect(0xFFFFFFFFFFFFFFFF0000000000000000 ^ 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF == 0x0000000000000000FFFFFFFFFFFFFFFF); + try expect(0x0000000000000000FFFFFFFFFFFFFFFF ^ 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF == 0xFFFFFFFFFFFFFFFF0000000000000000); + try expect(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF ^ 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF == 0x00000000000000000000000000000000); + try expect(0xFFFFFFFF00000000FFFFFFFF00000000 ^ 0x00000000FFFFFFFF00000000FFFFFFFF == 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF); + try expect(0xFFFFFFFF00000000FFFFFFFF00000000 ^ 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF == 0x00000000FFFFFFFF00000000FFFFFFFF); + try expect(0x00000000FFFFFFFF00000000FFFFFFFF ^ 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF == 0xFFFFFFFF00000000FFFFFFFF00000000); + } +} + +test "comptime_int param and return" { + const a = comptimeAdd(35361831660712422535336160538497375248, 101752735581729509668353361206450473702); + try expect(a == 137114567242441932203689521744947848950); + + const b = comptimeAdd(594491908217841670578297176641415611445982232488944558774612, 390603545391089362063884922208143568023166603618446395589768); + try expect(b == 985095453608931032642182098849559179469148836107390954364380); +} + +fn comptimeAdd(comptime a: comptime_int, comptime b: comptime_int) comptime_int { + return a + b; +} diff --git a/test/behavior/math_stage1.zig b/test/behavior/math_stage1.zig index 9f412930b5..f0c160ebc4 100644 --- a/test/behavior/math_stage1.zig +++ b/test/behavior/math_stage1.zig @@ -117,20 +117,6 @@ test "@*WithOverflow with u0 values" { try expect(!@shlWithOverflow(u0, 0, 0, &result)); } -test "@clz" { - try testClz(); - comptime try testClz(); -} - -fn testClz() !void { - try expect(@clz(u8, 0b10001010) == 0); - try expect(@clz(u8, 0b00001010) == 4); - try expect(@clz(u8, 0b00011010) == 3); - try expect(@clz(u8, 0b00000000) == 8); - try expect(@clz(u128, 0xffffffffffffffff) == 64); - try expect(@clz(u128, 0x10000000000000000) == 63); -} - test "@clz vectors" { try testClzVectors(); comptime try testClzVectors(); @@ -171,14 +157,6 @@ fn testCtzVectors() !void { try 
expectEqual(@ctz(u16, @splat(64, @as(u16, 0b00000000))), @splat(64, @as(u5, 16))); } -test "const number literal" { - const one = 1; - const eleven = ten + one; - - try expect(eleven == 11); -} -const ten = 10; - test "unsigned wrapping" { try testUnsignedWrappingEval(maxInt(u32)); comptime try testUnsignedWrappingEval(maxInt(u32)); @@ -274,19 +252,6 @@ test "small int addition" { try expect(result == 0); } -test "float equality" { - const x: f64 = 0.012; - const y: f64 = x + 1.0; - - try testFloatEqualityImpl(x, y); - comptime try testFloatEqualityImpl(x, y); -} - -fn testFloatEqualityImpl(x: f64, y: f64) !void { - const y2 = x + 1.0; - try expect(y == y2); -} - test "allow signed integer division/remainder when values are comptime known and positive or exact" { try expect(5 / 3 == 1); try expect(-5 / -3 == 1); @@ -296,23 +261,6 @@ test "allow signed integer division/remainder when values are comptime known and try expect(-6 % 3 == 0); } -test "hex float literal parsing" { - comptime try expect(0x1.0 == 1.0); -} - -test "quad hex float literal parsing in range" { - const a = 0x1.af23456789bbaaab347645365cdep+5; - const b = 0x1.dedafcff354b6ae9758763545432p-9; - const c = 0x1.2f34dd5f437e849b4baab754cdefp+4534; - const d = 0x1.edcbff8ad76ab5bf46463233214fp-435; - if (false) { - a; - b; - c; - d; - } -} - test "quad hex float literal parsing accurate" { const a: f128 = 0x1.1111222233334444555566667777p+0; @@ -403,45 +351,6 @@ test "quad hex float literal parsing accurate" { comptime try S.doTheTest(); } -test "underscore separator parsing" { - try expect(0_0_0_0 == 0); - try expect(1_234_567 == 1234567); - try expect(001_234_567 == 1234567); - try expect(0_0_1_2_3_4_5_6_7 == 1234567); - - try expect(0b0_0_0_0 == 0); - try expect(0b1010_1010 == 0b10101010); - try expect(0b0000_1010_1010 == 0b10101010); - try expect(0b1_0_1_0_1_0_1_0 == 0b10101010); - - try expect(0o0_0_0_0 == 0); - try expect(0o1010_1010 == 0o10101010); - try expect(0o0000_1010_1010 == 0o10101010); - try expect(0o1_0_1_0_1_0_1_0 == 0o10101010); - - try expect(0x0_0_0_0 == 0); - try expect(0x1010_1010 == 0x10101010); - try expect(0x0000_1010_1010 == 0x10101010); - try expect(0x1_0_1_0_1_0_1_0 == 0x10101010); - - try expect(123_456.789_000e1_0 == 123456.789000e10); - try expect(0_1_2_3_4_5_6.7_8_9_0_0_0e0_0_1_0 == 123456.789000e10); - - try expect(0x1234_5678.9ABC_DEF0p-1_0 == 0x12345678.9ABCDEF0p-10); - try expect(0x1_2_3_4_5_6_7_8.9_A_B_C_D_E_F_0p-0_0_0_1_0 == 0x12345678.9ABCDEF0p-10); -} - -test "hex float literal within range" { - const a = 0x1.0p16383; - const b = 0x0.1p16387; - const c = 0x1.0p-16382; - if (false) { - a; - b; - c; - } -} - test "truncating shift left" { try testShlTrunc(maxInt(u16)); comptime try testShlTrunc(maxInt(u16)); @@ -497,81 +406,6 @@ test "shift left/right on u0 operand" { comptime try S.doTheTest(); } -test "comptime_int addition" { - comptime { - try expect(35361831660712422535336160538497375248 + 101752735581729509668353361206450473702 == 137114567242441932203689521744947848950); - try expect(594491908217841670578297176641415611445982232488944558774612 + 390603545391089362063884922208143568023166603618446395589768 == 985095453608931032642182098849559179469148836107390954364380); - } -} - -test "comptime_int multiplication" { - comptime { - try expect( - 45960427431263824329884196484953148229 * 128339149605334697009938835852565949723 == 5898522172026096622534201617172456926982464453350084962781392314016180490567, - ); - try expect( - 
594491908217841670578297176641415611445982232488944558774612 * 390603545391089362063884922208143568023166603618446395589768 == 232210647056203049913662402532976186578842425262306016094292237500303028346593132411865381225871291702600263463125370016, - ); - } -} - -test "comptime_int shifting" { - comptime { - try expect((@as(u128, 1) << 127) == 0x80000000000000000000000000000000); - } -} - -test "comptime_int multi-limb shift and mask" { - comptime { - var a = 0xefffffffa0000001eeeeeeefaaaaaaab; - - try expect(@as(u32, a & 0xffffffff) == 0xaaaaaaab); - a >>= 32; - try expect(@as(u32, a & 0xffffffff) == 0xeeeeeeef); - a >>= 32; - try expect(@as(u32, a & 0xffffffff) == 0xa0000001); - a >>= 32; - try expect(@as(u32, a & 0xffffffff) == 0xefffffff); - a >>= 32; - - try expect(a == 0); - } -} - -test "comptime_int multi-limb partial shift right" { - comptime { - var a = 0x1ffffffffeeeeeeee; - a >>= 16; - try expect(a == 0x1ffffffffeeee); - } -} - -test "xor" { - try test_xor(); - comptime try test_xor(); -} - -fn test_xor() !void { - try expect(0xFF ^ 0x00 == 0xFF); - try expect(0xF0 ^ 0x0F == 0xFF); - try expect(0xFF ^ 0xF0 == 0x0F); - try expect(0xFF ^ 0x0F == 0xF0); - try expect(0xFF ^ 0xFF == 0x00); -} - -test "comptime_int xor" { - comptime { - try expect(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF ^ 0x00000000000000000000000000000000 == 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF); - try expect(0xFFFFFFFFFFFFFFFF0000000000000000 ^ 0x0000000000000000FFFFFFFFFFFFFFFF == 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF); - try expect(0xFFFFFFFFFFFFFFFF0000000000000000 ^ 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF == 0x0000000000000000FFFFFFFFFFFFFFFF); - try expect(0x0000000000000000FFFFFFFFFFFFFFFF ^ 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF == 0xFFFFFFFFFFFFFFFF0000000000000000); - try expect(0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF ^ 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF == 0x00000000000000000000000000000000); - try expect(0xFFFFFFFF00000000FFFFFFFF00000000 ^ 0x00000000FFFFFFFF00000000FFFFFFFF == 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF); - try expect(0xFFFFFFFF00000000FFFFFFFF00000000 ^ 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF == 0x00000000FFFFFFFF00000000FFFFFFFF); - try expect(0x00000000FFFFFFFF00000000FFFFFFFF ^ 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF == 0xFFFFFFFF00000000FFFFFFFF00000000); - } -} - test "f128" { try test_f128(); comptime try test_f128(); @@ -757,18 +591,6 @@ fn testRound(comptime T: type, x: T) !void { try expectEqual(x, z); } -test "comptime_int param and return" { - const a = comptimeAdd(35361831660712422535336160538497375248, 101752735581729509668353361206450473702); - try expect(a == 137114567242441932203689521744947848950); - - const b = comptimeAdd(594491908217841670578297176641415611445982232488944558774612, 390603545391089362063884922208143568023166603618446395589768); - try expect(b == 985095453608931032642182098849559179469148836107390954364380); -} - -fn comptimeAdd(comptime a: comptime_int, comptime b: comptime_int) comptime_int { - return a + b; -} - test "vector integer addition" { const S = struct { fn doTheTest() !void { -- cgit v1.2.3
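Taken together, these commits remove the `@addWithSaturation`/`@subWithSaturation`/`@mulWithSaturation`/`@shlWithSaturation` builtins in favor of the `+|`, `-|`, `*|`, and `<<|` operators, and add stage2 support for `@clz`/`@ctz`. A minimal usage sketch mirroring the behavior tests above, assuming a compiler from this development cycle (where `@clz`/`@ctz` still take the integer type as their first argument):

const std = @import("std");
const expect = std.testing.expect;

test "saturating operators and bit counts (illustrative sketch)" {
    // Saturating arithmetic is spelled with operators; the old builtins are gone.
    try expect(@as(u8, 255) +| 1 == 255); // clamps at maxInt(u8)
    try expect(@as(u8, 0) -| 1 == 0); // clamps at minInt(u8)
    try expect(@as(i8, 127) *| 2 == 127); // clamps at maxInt(i8)
    try expect(@as(u8, 200) <<| 1 == 255); // shifted-out result saturates

    // @clz/@ctz count leading/trailing zero bits of the twos-complement representation.
    var x: u8 = 0b00001010;
    try expect(@clz(u8, x) == 4);
    try expect(@ctz(u8, x) == 1);
}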