author     Andrew Kelley <andrew@ziglang.org>  2023-04-29 00:19:55 -0700
committer  GitHub <noreply@github.com>         2023-04-29 00:19:55 -0700
commit     d65b42e07caa00dfe2f2fbf221c593ce57882784 (patch)
tree       7926cbea1499e0affe930bf6d7455dc24adf014e /src/arch
parent     fd6200eda6d4fe19c34a59430a88a9ce38d6d7a4 (diff)
parent     fa200ca0cad2705bad40eb723dedf4e3bf11f2ff (diff)
download   zig-d65b42e07caa00dfe2f2fbf221c593ce57882784.tar.gz
           zig-d65b42e07caa00dfe2f2fbf221c593ce57882784.zip
Merge pull request #15481 from ziglang/use-mem-intrinsics
actually use the new memory intrinsics
Diffstat (limited to 'src/arch')
-rw-r--r--  src/arch/aarch64/CodeGen.zig   8
-rw-r--r--  src/arch/arm/CodeGen.zig       8
-rw-r--r--  src/arch/riscv64/CodeGen.zig   6
-rw-r--r--  src/arch/sparc64/CodeGen.zig   6
-rw-r--r--  src/arch/x86_64/CodeGen.zig    4
-rw-r--r--  src/arch/x86_64/Encoding.zig   8
-rw-r--r--  src/arch/x86_64/abi.zig        2
-rw-r--r--  src/arch/x86_64/encoder.zig    4
8 files changed, 24 insertions(+), 22 deletions(-)
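
The change in every hunk below is mechanical: std.mem.set becomes @memset and std.mem.copy becomes @memcpy. Unlike the old std.mem functions, the builtins require the destination and source lengths to match exactly, which is why each call site re-slices the destination with [0..len]. An illustrative standalone sketch of the new pattern (not part of this commit):

const std = @import("std");

test "new memory intrinsics" {
    var buf = [1]u8{0} ** 8;
    const src = [_]u8{ 1, 2, 3 };

    // @memcpy requires dest.len == src.len, so the destination is re-sliced
    // to exactly the source length (the old std.mem.copy only required
    // dest.len >= src.len).
    @memcpy(buf[1..][0..src.len], &src);

    // @memset fills every element of the slice with one value; it replaces
    // std.mem.set(T, dest, value) and infers the element type from the slice.
    @memset(buf[1 + src.len ..], 0xaa);

    try std.testing.expectEqualSlices(
        u8,
        &[_]u8{ 0, 1, 2, 3, 0xaa, 0xaa, 0xaa, 0xaa },
        &buf,
    );
}
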
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 948dad73b9..649edd3b9c 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -1630,7 +1630,7 @@ fn allocRegs(
const read_locks = locks[0..read_args.len];
const write_locks = locks[read_args.len..];
- std.mem.set(?RegisterLock, locks, null);
+ @memset(locks, null);
defer for (locks) |lock| {
if (lock) |locked_reg| self.register_manager.unlockReg(locked_reg);
};
@@ -4395,7 +4395,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (args.len + 1 <= Liveness.bpi - 1) {
var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
buf[0] = callee;
- std.mem.copy(Air.Inst.Ref, buf[1..], args);
+ @memcpy(buf[1..][0..args.len], args);
return self.finishAir(inst, result, buf);
}
var bt = try self.iterateBigTomb(inst, 1 + args.len);
@@ -5348,7 +5348,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
buf_index += 1;
}
if (buf_index + inputs.len > buf.len) break :simple;
- std.mem.copy(Air.Inst.Ref, buf[buf_index..], inputs);
+ @memcpy(buf[buf_index..][0..inputs.len], inputs);
return self.finishAir(inst, result, buf);
}
var bt = try self.iterateBigTomb(inst, outputs.len + inputs.len);
@@ -6055,7 +6055,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
if (elements.len <= Liveness.bpi - 1) {
var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
- std.mem.copy(Air.Inst.Ref, &buf, elements);
+ @memcpy(buf[0..elements.len], elements);
return self.finishAir(inst, result, buf);
}
var bt = try self.iterateBigTomb(inst, elements.len);
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 3676b2a865..5353b78e4d 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -3114,7 +3114,7 @@ fn allocRegs(
const read_locks = locks[0..read_args.len];
const write_locks = locks[read_args.len..];
- std.mem.set(?RegisterLock, locks, null);
+ @memset(locks, null);
defer for (locks) |lock| {
if (lock) |locked_reg| self.register_manager.unlockReg(locked_reg);
};
@@ -4341,7 +4341,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (args.len <= Liveness.bpi - 2) {
var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
buf[0] = callee;
- std.mem.copy(Air.Inst.Ref, buf[1..], args);
+ @memcpy(buf[1..][0..args.len], args);
return self.finishAir(inst, result, buf);
}
var bt = try self.iterateBigTomb(inst, 1 + args.len);
@@ -5263,7 +5263,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
buf_index += 1;
}
if (buf_index + inputs.len > buf.len) break :simple;
- std.mem.copy(Air.Inst.Ref, buf[buf_index..], inputs);
+ @memcpy(buf[buf_index..][0..inputs.len], inputs);
return self.finishAir(inst, result, buf);
}
var bt = try self.iterateBigTomb(inst, outputs.len + inputs.len);
@@ -6000,7 +6000,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
if (elements.len <= Liveness.bpi - 1) {
var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
- std.mem.copy(Air.Inst.Ref, &buf, elements);
+ @memcpy(buf[0..elements.len], elements);
return self.finishAir(inst, result, buf);
}
var bt = try self.iterateBigTomb(inst, elements.len);
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index a0ebc1becc..d4c7eb0c70 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -1784,7 +1784,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (args.len <= Liveness.bpi - 2) {
var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
buf[0] = callee;
- std.mem.copy(Air.Inst.Ref, buf[1..], args);
+ @memcpy(buf[1..][0..args.len], args);
return self.finishAir(inst, result, buf);
}
var bt = try self.iterateBigTomb(inst, 1 + args.len);
@@ -2225,7 +2225,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
buf_index += 1;
}
if (buf_index + inputs.len > buf.len) break :simple;
- std.mem.copy(Air.Inst.Ref, buf[buf_index..], inputs);
+ @memcpy(buf[buf_index..][0..inputs.len], inputs);
return self.finishAir(inst, result, buf);
}
var bt = try self.iterateBigTomb(inst, outputs.len + inputs.len);
@@ -2500,7 +2500,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
if (elements.len <= Liveness.bpi - 1) {
var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
- std.mem.copy(Air.Inst.Ref, &buf, elements);
+ @memcpy(buf[0..elements.len], elements);
return self.finishAir(inst, result, buf);
}
var bt = try self.iterateBigTomb(inst, elements.len);
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index cc5c9e9832..2686852bab 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -843,7 +843,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
if (elements.len <= Liveness.bpi - 1) {
var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
- std.mem.copy(Air.Inst.Ref, &buf, elements);
+ @memcpy(buf[0..elements.len], elements);
return self.finishAir(inst, result, buf);
}
var bt = try self.iterateBigTomb(inst, elements.len);
@@ -987,7 +987,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
buf_index += 1;
}
if (buf_index + inputs.len > buf.len) break :simple;
- std.mem.copy(Air.Inst.Ref, buf[buf_index..], inputs);
+ @memcpy(buf[buf_index..][0..inputs.len], inputs);
return self.finishAir(inst, result, buf);
}
@@ -1314,7 +1314,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (args.len + 1 <= Liveness.bpi - 1) {
var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
buf[0] = callee;
- std.mem.copy(Air.Inst.Ref, buf[1..], args);
+ @memcpy(buf[1..][0..args.len], args);
return self.finishAir(inst, result, buf);
}
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index df0db882ba..be972d7aea 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -7117,7 +7117,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
buf_index += 1;
}
if (buf_index + inputs.len > buf.len) break :simple;
- std.mem.copy(Air.Inst.Ref, buf[buf_index..], inputs);
+ @memcpy(buf[buf_index..][0..inputs.len], inputs);
return self.finishAir(inst, result, buf);
}
var bt = self.liveness.iterateBigTomb(inst);
@@ -8505,7 +8505,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
if (elements.len <= Liveness.bpi - 1) {
var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
- std.mem.copy(Air.Inst.Ref, &buf, elements);
+ @memcpy(buf[0..elements.len], elements);
return self.finishAir(inst, result, buf);
}
var bt = self.liveness.iterateBigTomb(inst);
diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig
index 21899b912b..a977af7842 100644
--- a/src/arch/x86_64/Encoding.zig
+++ b/src/arch/x86_64/Encoding.zig
@@ -546,7 +546,7 @@ fn estimateInstructionLength(prefix: Prefix, encoding: Encoding, ops: []const Op
.encoding = encoding,
.ops = [1]Operand{.none} ** 4,
};
- std.mem.copy(Operand, &inst.ops, ops);
+ @memcpy(inst.ops[0..ops.len], ops);
var cwriter = std.io.countingWriter(std.io.null_writer);
inst.encode(cwriter.writer(), .{ .allow_frame_loc = true }) catch unreachable; // Not allowed to fail here unless OOM.
@@ -575,8 +575,10 @@ const mnemonic_to_encodings_map = init: {
.modrm_ext = entry[4],
.mode = entry[5],
};
- std.mem.copy(Op, &data.ops, entry[2]);
- std.mem.copy(u8, &data.opc, entry[3]);
+ // TODO: use `@memcpy` for these. When I did that, I got a false positive
+ // compile error for this copy happening at compile time.
+ std.mem.copyForwards(Op, &data.ops, entry[2]);
+ std.mem.copyForwards(u8, &data.opc, entry[3]);
while (mnemonic_int < @enumToInt(entry[0])) : (mnemonic_int += 1) {
mnemonic_map[mnemonic_int] = data_storage[mnemonic_start..data_index];
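
The TODO above keeps std.mem.copyForwards for the two comptime table copies, where @memcpy produced a false-positive compile error. copyForwards retains the old std.mem.copy contract: the destination only has to be at least as long as the source, and the extra slots are left untouched. A small illustrative sketch of that contract (not from the commit; names are made up for the example):

const std = @import("std");

test "copyForwards allows a longer destination" {
    // Fixed-size storage with fewer meaningful source elements than
    // destination slots, similar to data.ops / data.opc in Encoding.zig.
    var ops = [_]u8{0} ** 4;
    const entry = [_]u8{ 7, 8 };

    // Copies entry into the front of ops; the remaining slots keep their
    // previous values. With @memcpy this would need ops[0..entry.len].
    std.mem.copyForwards(u8, &ops, &entry);

    try std.testing.expectEqualSlices(u8, &[_]u8{ 7, 8, 0, 0 }, &ops);
}
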
diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig
index e9da09b999..ff1a0ee520 100644
--- a/src/arch/x86_64/abi.zig
+++ b/src/arch/x86_64/abi.zig
@@ -321,7 +321,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
byte_i = 0;
result_i += 1;
}
- std.mem.copy(Class, result[result_i..], field_class);
+ @memcpy(result[result_i..][0..field_class.len], field_class);
result_i += field_class.len;
// If there are any bytes leftover, we have to try to combine
// the next field with them.
diff --git a/src/arch/x86_64/encoder.zig b/src/arch/x86_64/encoder.zig
index 73b40ea3be..329dfca924 100644
--- a/src/arch/x86_64/encoder.zig
+++ b/src/arch/x86_64/encoder.zig
@@ -182,7 +182,7 @@ pub const Instruction = struct {
.encoding = encoding,
.ops = [1]Operand{.none} ** 4,
};
- std.mem.copy(Operand, &inst.ops, ops);
+ @memcpy(inst.ops[0..ops.len], ops);
return inst;
}
@@ -859,7 +859,7 @@ fn expectEqualHexStrings(expected: []const u8, given: []const u8, assembly: []co
const idx = std.mem.indexOfDiff(u8, expected_fmt, given_fmt).?;
var padding = try testing.allocator.alloc(u8, idx + 5);
defer testing.allocator.free(padding);
- std.mem.set(u8, padding, ' ');
+ @memset(padding, ' ');
std.debug.print("\nASM: {s}\nEXP: {s}\nGIV: {s}\n{s}^ -- first differing byte\n", .{
assembly,
expected_fmt,