| author    | Jakub Konka <kubkon@jakubkonka.com> | 2022-01-18 01:25:48 +0100 |
| committer | GitHub <noreply@github.com>         | 2022-01-18 01:25:48 +0100 |
| commit    | e69cb9105a716dfd4a8cc2684417545b2438f606 (patch) | |
| tree      | d80540e1ff8f325f753ee3537daac3d8cbf0f975 | |
| parent    | f4e051e35d8019c9a8d99ccae8f2e9d8f032629a (diff) | |
| parent    | 3145fa97c21704d8822db928e5f988f22497b1b8 (diff) | |
Merge pull request #10616 from ziglang/stage2-x86_64-array-to-slice
stage2: implement airArrayToSlice for x86_64
| -rw-r--r-- | src/arch/x86_64/CodeGen.zig | 33  |
| -rw-r--r-- | test/behavior.zig           | 4   |
| -rw-r--r-- | test/behavior/align.zig     | 104 |
| -rw-r--r-- | test/behavior/array.zig     | 42  |
4 files changed, 137 insertions, 46 deletions
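
For context before the diff: `airArrayToSlice` handles the AIR instruction produced when a pointer to an array is coerced to a slice (the pattern the new `array to slice` behavior test exercises). The backend has to materialize the slice as a pointer/length pair, where the length is a compile-time constant taken from the array type. A minimal, illustrative Zig sketch of that source-level pattern — not code from this commit:

```zig
const std = @import("std");
const expect = std.testing.expect;

test "array to slice coercion (illustrative)" {
    var array = [_]u32{ 1, 2, 3 };
    // Coercing `*[3]u32` to `[]u32` yields a slice: a (pointer, length) pair
    // whose length is known at compile time from the array type.
    const slice: []u32 = &array;
    try expect(slice.len == 3);
    try expect(slice[0] + slice[1] + slice[2] == 6);
}
```

That pair is what the new `airArrayToSlice` implementation below builds on the stack: one `genSetStack` call stores the operand pointer and a second stores the array length as an immediate.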
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 8f7f4746ec..2cf585fe4e 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -1659,6 +1659,18 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
                         },
                     }
                 },
+                .register => |src_reg| {
+                    const abi_size = value_ty.abiSize(self.target.*);
+                    _ = try self.addInst(.{
+                        .tag = .mov,
+                        .ops = (Mir.Ops{
+                            .reg1 = reg.to64(),
+                            .reg2 = registerAlias(src_reg, @intCast(u32, abi_size)),
+                            .flags = 0b10,
+                        }).encode(),
+                        .data = .{ .imm = 0 },
+                    });
+                },
                 else => |other| {
                     return self.fail("TODO implement set pointee with {}", .{other});
                 },
@@ -1822,7 +1834,7 @@ fn genBinMathOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs:
     const dst_ty = self.air.typeOfIndex(inst);
     const air_tags = self.air.instructions.items(.tag);
     switch (air_tags[inst]) {
-        .add, .addwrap => try self.genBinMathOpMir(.add, dst_ty, .unsigned, dst_mcv, src_mcv),
+        .add, .addwrap, .ptr_add => try self.genBinMathOpMir(.add, dst_ty, .unsigned, dst_mcv, src_mcv),
         .bool_or, .bit_or => try self.genBinMathOpMir(.@"or", dst_ty, .unsigned, dst_mcv, src_mcv),
         .bool_and, .bit_and => try self.genBinMathOpMir(.@"and", dst_ty, .unsigned, dst_mcv, src_mcv),
         .sub, .subwrap => try self.genBinMathOpMir(.sub, dst_ty, .unsigned, dst_mcv, src_mcv),
@@ -3125,7 +3137,6 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void {
 fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
     switch (mcv) {
         .dead => unreachable,
-        .ptr_stack_offset => unreachable,
         .ptr_embedded_in_code => unreachable,
         .unreach, .none => return, // Nothing to do.
         .undef => {
@@ -3241,6 +3252,10 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
             }
             return self.fail("TODO implement memcpy for setting stack from {}", .{mcv});
         },
+        .ptr_stack_offset => {
+            const reg = try self.copyToTmpRegister(ty, mcv);
+            return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
+        },
        .stack_offset => |off| {
            if (stack_offset == off) {
                // Copy stack variable to itself; nothing to do.
@@ -3618,10 +3633,16 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {

 fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
     const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    const result: MCValue = if (self.liveness.isUnused(inst))
-        .dead
-    else
-        return self.fail("TODO implement airArrayToSlice for {}", .{self.target.cpu.arch});
+    const ptr_ty = self.air.typeOf(ty_op.operand);
+    const ptr = try self.resolveInst(ty_op.operand);
+    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: {
+        const stack_offset = try self.allocMem(inst, 16, 16);
+        const array_ty = ptr_ty.childType();
+        const array_len = array_ty.arrayLenIncludingSentinel();
+        try self.genSetStack(Type.initTag(.usize), stack_offset + 8, ptr);
+        try self.genSetStack(Type.initTag(.u64), stack_offset + 16, .{ .immediate = array_len });
+        break :blk .{ .stack_offset = stack_offset };
+    };
     return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
 }
diff --git a/test/behavior.zig b/test/behavior.zig
index 59377a753a..a89a36b879 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -16,11 +16,11 @@ test {
     _ = @import("behavior/type.zig");
     _ = @import("behavior/bugs/655.zig");
     _ = @import("behavior/bool.zig");
+    _ = @import("behavior/align.zig");
+    _ = @import("behavior/array.zig");

     if (builtin.zig_backend != .stage2_arm and builtin.zig_backend != .stage2_x86_64) {
         // Tests that pass for stage1, llvm backend, C backend, wasm backend.
-        _ = @import("behavior/align.zig");
-        _ = @import("behavior/array.zig");
         _ = @import("behavior/basic.zig");
         _ = @import("behavior/bitcast.zig");
         _ = @import("behavior/bugs/624.zig");
diff --git a/test/behavior/align.zig b/test/behavior/align.zig
index 77d2eea6a8..fe57c79353 100644
--- a/test/behavior/align.zig
+++ b/test/behavior/align.zig
@@ -6,6 +6,8 @@ const native_arch = builtin.target.cpu.arch;
 var foo: u8 align(4) = 100;

 test "global variable alignment" {
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
     comptime try expect(@typeInfo(@TypeOf(&foo)).Pointer.alignment == 4);
     comptime try expect(@TypeOf(&foo) == *align(4) u8);
     {
@@ -20,10 +22,14 @@ test "global variable alignment" {
 }

 test "default alignment allows unspecified in type syntax" {
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
     try expect(*u32 == *align(@alignOf(u32)) u32);
 }

 test "implicitly decreasing pointer alignment" {
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
     const a: u32 align(4) = 3;
     const b: u32 align(8) = 4;
     try expect(addUnaligned(&a, &b) == 7);
@@ -33,16 +39,9 @@ fn addUnaligned(a: *align(1) const u32, b: *align(1) const u32) u32 {
     return a.* + b.*;
 }

-test "implicitly decreasing slice alignment" {
-    const a: u32 align(4) = 3;
-    const b: u32 align(8) = 4;
-    try expect(addUnalignedSlice(@as(*const [1]u32, &a)[0..], @as(*const [1]u32, &b)[0..]) == 7);
-}
-fn addUnalignedSlice(a: []align(1) const u32, b: []align(1) const u32) u32 {
-    return a[0] + b[0];
-}
-
 test "@alignCast pointers" {
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
     var x: u32 align(4) = 1;
     expectsOnly1(&x);
     try expect(x == 2);
@@ -54,48 +53,25 @@ fn expects4(x: *align(4) u32) void {
     x.* += 1;
 }

-test "specifying alignment allows pointer cast" {
-    try testBytesAlign(0x33);
-}
-fn testBytesAlign(b: u8) !void {
-    var bytes align(4) = [_]u8{ b, b, b, b };
-    const ptr = @ptrCast(*u32, &bytes[0]);
-    try expect(ptr.* == 0x33333333);
-}
-
-test "@alignCast slices" {
-    var array align(4) = [_]u32{ 1, 1 };
-    const slice = array[0..];
-    sliceExpectsOnly1(slice);
-    try expect(slice[0] == 2);
-}
-fn sliceExpectsOnly1(slice: []align(1) u32) void {
-    sliceExpects4(@alignCast(4, slice));
-}
-fn sliceExpects4(slice: []align(4) u32) void {
-    slice[0] += 1;
-}
-
 test "alignment of structs" {
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
     try expect(@alignOf(struct {
         a: i32,
         b: *i32,
     }) == @alignOf(usize));
 }

-test "return error union with 128-bit integer" {
-    try expect(3 == try give());
-}
-fn give() anyerror!u128 {
-    return 3;
-}
-
 test "alignment of >= 128-bit integer type" {
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
     try expect(@alignOf(u128) == 16);
     try expect(@alignOf(u129) == 16);
 }

 test "alignment of struct with 128-bit field" {
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
     try expect(@alignOf(struct {
         x: u128,
     }) == 16);
@@ -108,6 +84,8 @@ test "alignment of struct with 128-bit field" {
 }

 test "size of extern struct with 128-bit field" {
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
     try expect(@sizeOf(extern struct {
         x: u128,
         y: u8,
@@ -122,12 +100,16 @@ test "size of extern struct with 128-bit field" {
 }

 test "@ptrCast preserves alignment of bigger source" {
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
     var x: u32 align(16) = 1234;
     const ptr = @ptrCast(*u8, &x);
     try expect(@TypeOf(ptr) == *align(16) u8);
 }

 test "alignstack" {
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
     try expect(fnWithAlignedStack() == 1234);
 }

@@ -135,3 +117,49 @@ fn fnWithAlignedStack() i32 {
     @setAlignStack(256);
     return 1234;
 }
+
+test "implicitly decreasing slice alignment" {
+    if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
+    const a: u32 align(4) = 3;
+    const b: u32 align(8) = 4;
+    try expect(addUnalignedSlice(@as(*const [1]u32, &a)[0..], @as(*const [1]u32, &b)[0..]) == 7);
+}
+fn addUnalignedSlice(a: []align(1) const u32, b: []align(1) const u32) u32 {
+    return a[0] + b[0];
+}
+
+test "specifying alignment allows pointer cast" {
+    if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
+    try testBytesAlign(0x33);
+}
+fn testBytesAlign(b: u8) !void {
+    var bytes align(4) = [_]u8{ b, b, b, b };
+    const ptr = @ptrCast(*u32, &bytes[0]);
+    try expect(ptr.* == 0x33333333);
+}
+
+test "@alignCast slices" {
+    if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
+    var array align(4) = [_]u32{ 1, 1 };
+    const slice = array[0..];
+    sliceExpectsOnly1(slice);
+    try expect(slice[0] == 2);
+}
+fn sliceExpectsOnly1(slice: []align(1) u32) void {
+    sliceExpects4(@alignCast(4, slice));
+}
+fn sliceExpects4(slice: []align(4) u32) void {
+    slice[0] += 1;
+}
+
+test "return error union with 128-bit integer" {
+    if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
+    try expect(3 == try give());
+}
+fn give() anyerror!u128 {
+    return 3;
+}
diff --git a/test/behavior/array.zig b/test/behavior/array.zig
index cd2d029e22..017d15a64b 100644
--- a/test/behavior/array.zig
+++ b/test/behavior/array.zig
@@ -5,7 +5,23 @@ const mem = std.mem;
 const expect = testing.expect;
 const expectEqual = testing.expectEqual;

+test "array to slice" {
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
+    const a: u32 align(4) = 3;
+    const b: u32 align(8) = 4;
+    const a_slice: []align(1) const u32 = @as(*const [1]u32, &a)[0..];
+    const b_slice: []align(1) const u32 = @as(*const [1]u32, &b)[0..];
+    try expect(a_slice[0] + b_slice[0] == 7);
+
+    const d: []const u32 = &[2]u32{ 1, 2 };
+    const e: []const u32 = &[3]u32{ 3, 4, 5 };
+    try expect(d[0] + e[0] + d[1] + e[1] == 10);
+}
+
 test "arrays" {
+    if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
     var array: [5]u32 = undefined;

     var i: u32 = 0;
@@ -30,6 +46,8 @@ fn getArrayLen(a: []const u32) usize {
 }

 test "array init with mult" {
+    if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
     const a = 'a';
     var i: [8]u8 = [2]u8{ a, 'b' } ** 4;
     try expect(std.mem.eql(u8, &i, "abababab"));
@@ -39,6 +57,8 @@ test "array literal with explicit type" {
+    if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
     const hex_mult: [4]u16 = .{ 4096, 256, 16, 1 };

     try expect(hex_mult.len == 4);
@@ -46,6 +66,8 @@ test "array literal with inferred length" {
+    if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
     const hex_mult = [_]u16{ 4096, 256, 16, 1 };

     try expect(hex_mult.len == 4);
@@ -53,6 +75,8 @@ test "array dot len const expr" {
+    if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
     try expect(comptime x: {
         break :x some_array.len == 4;
     });
@@ -64,12 +88,16 @@ const ArrayDotLenConstExpr = struct {
 const some_array = [_]u8{ 0, 1, 2, 3 };

 test "array literal with specified size" {
+    if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
     var array = [2]u8{ 1, 2 };
     try expect(array[0] == 1);
     try expect(array[1] == 2);
 }

 test "array len field" {
+    if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
     var arr = [4]u8{ 0, 0, 0, 0 };
     var ptr = &arr;
     try expect(arr.len == 4);
@@ -79,6 +107,8 @@ test "array len field" {
 }

 test "array with sentinels" {
+    if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
     const S = struct {
         fn doTheTest(is_ct: bool) !void {
             if (is_ct or builtin.zig_is_stage2) {
@@ -106,6 +136,8 @@ test "array with sentinels" {
 }

 test "void arrays" {
+    if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
     var array: [4]void = undefined;
     array[0] = void{};
     array[1] = array[2];
@@ -114,6 +146,8 @@ test "void arrays" {
 }

 test "nested arrays" {
+    if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
     if (builtin.zig_backend == .stage2_wasm) {
         // TODO this is a recent stage2 test case regression due to an enhancement;
         // now arrays are properly detected as comptime. This exercised a new code
@@ -132,6 +166,8 @@ test "nested arrays" {
 }

 test "implicit comptime in array type size" {
+    if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
     var arr: [plusOne(10)]bool = undefined;
     try expect(arr.len == 11);
 }
@@ -141,6 +177,8 @@ fn plusOne(x: u32) u32 {
 }

 test "single-item pointer to array indexing and slicing" {
+    if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
     try testSingleItemPtrArrayIndexSlice();
     comptime try testSingleItemPtrArrayIndexSlice();
 }
@@ -164,6 +202,8 @@ fn doSomeMangling(array: *[4]u8) void {
 }

 test "implicit cast zero sized array ptr to slice" {
+    if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
     {
         var b = "".*;
         const c: []const u8 = &b;
@@ -177,6 +217,8 @@ test "implicit cast zero sized array ptr to slice" {
 }

 test "anonymous list literal syntax" {
+    if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+
     const S = struct {
         fn doTheTest() !void {
             var array: [4]u8 = .{ 1, 2, 3, 4 };
