Diffstat (limited to 'src')
-rw-r--r--  src/AstGen.zig                    |   2
-rw-r--r--  src/Autodoc.zig                   |   2
-rw-r--r--  src/Compilation.zig               |   6
-rw-r--r--  src/Liveness.zig                  |  14
-rw-r--r--  src/Sema.zig                      |  52
-rw-r--r--  src/arch/aarch64/CodeGen.zig      |   8
-rw-r--r--  src/arch/arm/CodeGen.zig          |   8
-rw-r--r--  src/arch/riscv64/CodeGen.zig      |   6
-rw-r--r--  src/arch/sparc64/CodeGen.zig      |   6
-rw-r--r--  src/arch/x86_64/CodeGen.zig       |   4
-rw-r--r--  src/arch/x86_64/Encoding.zig      |   8
-rw-r--r--  src/arch/x86_64/abi.zig           |   2
-rw-r--r--  src/arch/x86_64/encoder.zig       |   4
-rw-r--r--  src/codegen.zig                   |   2
-rw-r--r--  src/codegen/c.zig                 | 117
-rw-r--r--  src/codegen/llvm.zig              |  82
-rw-r--r--  src/link/Coff.zig                 |  42
-rw-r--r--  src/link/Dwarf.zig                |  27
-rw-r--r--  src/link/Elf.zig                  |   2
-rw-r--r--  src/link/MachO.zig                |  15
-rw-r--r--  src/link/MachO/CodeSignature.zig  |   2
-rw-r--r--  src/link/MachO/Object.zig         |  12
-rw-r--r--  src/link/MachO/Trie.zig           |   2
-rw-r--r--  src/link/MachO/UnwindInfo.zig     |   2
-rw-r--r--  src/link/MachO/zld.zig            |  20
-rw-r--r--  src/link/Plan9.zig                |   2
-rw-r--r--  src/link/Wasm.zig                 |  11
-rw-r--r--  src/objcopy.zig                   |  38
-rw-r--r--  src/print_air.zig                 |   2
-rw-r--r--  src/print_zir.zig                 |   2
-rw-r--r--  src/translate_c.zig               |   2
-rw-r--r--  src/type.zig                      |   2
-rw-r--r--  src/value.zig                     |  53
33 files changed, 358 insertions, 201 deletions
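
Note: the bulk of this change mechanically migrates std.mem.copy/std.mem.set call
sites to the @memcpy/@memset builtins. Unlike std.mem.copy, which only required
dest.len >= src.len, @memcpy requires the two lengths to match exactly (and checks
this in safe builds), which is why nearly every call site below gains an explicit
sub-slice on the destination. A minimal sketch of the repeated pattern:

    // before: copies src.len elements into a possibly-longer dest
    std.mem.copy(u8, dest, src);
    // after: lengths must match exactly, so the caller cuts dest to size
    @memcpy(dest[0..src.len], src);

@memset likewise infers the element type from the destination slice, dropping the
explicit type parameter that std.mem.set took.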
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 6c9972bc95..aece3eafec 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -3604,7 +3604,7 @@ const WipMembers = struct {
fn appendToDeclSlice(self: *Self, data: []const u32) void {
assert(self.decls_end + data.len <= self.field_bits_start);
- mem.copy(u32, self.payload.items[self.decls_end..], data);
+ @memcpy(self.payload.items[self.decls_end..][0..data.len], data);
self.decls_end += @intCast(u32, data.len);
}
diff --git a/src/Autodoc.zig b/src/Autodoc.zig
index 91e252a3f8..467ff55994 100644
--- a/src/Autodoc.zig
+++ b/src/Autodoc.zig
@@ -1146,7 +1146,7 @@ fn walkInstruction(
const limb_bytes = file.zir.string_bytes[str.start..][0..byte_count];
var limbs = try self.arena.alloc(std.math.big.Limb, str.len);
- std.mem.copy(u8, std.mem.sliceAsBytes(limbs), limb_bytes);
+ @memcpy(std.mem.sliceAsBytes(limbs)[0..limb_bytes.len], limb_bytes);
const big_int = std.math.big.int.Const{
.limbs = limbs,
diff --git a/src/Compilation.zig b/src/Compilation.zig
index f11f158e1b..a5b785cc67 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -2165,7 +2165,7 @@ fn wholeCacheModeSetBinFilePath(comp: *Compilation, digest: *const [Cache.hex_di
const digest_start = 2; // "o/[digest]/[basename]"
if (comp.whole_bin_sub_path) |sub_path| {
- mem.copy(u8, sub_path[digest_start..], digest);
+ @memcpy(sub_path[digest_start..][0..digest.len], digest);
comp.bin_file.options.emit = .{
.directory = comp.local_cache_directory,
@@ -2174,7 +2174,7 @@ fn wholeCacheModeSetBinFilePath(comp: *Compilation, digest: *const [Cache.hex_di
}
if (comp.whole_implib_sub_path) |sub_path| {
- mem.copy(u8, sub_path[digest_start..], digest);
+ @memcpy(sub_path[digest_start..][0..digest.len], digest);
comp.bin_file.options.implib_emit = .{
.directory = comp.local_cache_directory,
@@ -4432,7 +4432,7 @@ pub fn addCCArgs(
assert(prefix.len == prefix_len);
var march_buf: [prefix_len + letters.len + 1]u8 = undefined;
var march_index: usize = prefix_len;
- mem.copy(u8, &march_buf, prefix);
+ @memcpy(march_buf[0..prefix.len], prefix);
if (std.Target.riscv.featureSetHas(target.cpu.features, .e)) {
march_buf[march_index] = 'e';
diff --git a/src/Liveness.zig b/src/Liveness.zig
index 6990ade327..a1bfb73e2a 100644
--- a/src/Liveness.zig
+++ b/src/Liveness.zig
@@ -156,7 +156,7 @@ pub fn analyze(gpa: Allocator, air: Air) Allocator.Error!Liveness {
errdefer a.special.deinit(gpa);
defer a.extra.deinit(gpa);
- std.mem.set(usize, a.tomb_bits, 0);
+ @memset(a.tomb_bits, 0);
const main_body = air.getMainBody();
@@ -1150,7 +1150,7 @@ fn analyzeInst(
if (args.len + 1 <= bpi - 1) {
var buf = [1]Air.Inst.Ref{.none} ** (bpi - 1);
buf[0] = callee;
- std.mem.copy(Air.Inst.Ref, buf[1..], args);
+ @memcpy(buf[1..][0..args.len], args);
return analyzeOperands(a, pass, data, inst, buf);
}
@@ -1189,7 +1189,7 @@ fn analyzeInst(
if (elements.len <= bpi - 1) {
var buf = [1]Air.Inst.Ref{.none} ** (bpi - 1);
- std.mem.copy(Air.Inst.Ref, &buf, elements);
+ @memcpy(buf[0..elements.len], elements);
return analyzeOperands(a, pass, data, inst, buf);
}
@@ -1255,7 +1255,7 @@ fn analyzeInst(
if (buf_index + inputs.len > buf.len) {
break :simple buf_index + inputs.len;
}
- std.mem.copy(Air.Inst.Ref, buf[buf_index..], inputs);
+ @memcpy(buf[buf_index..][0..inputs.len], inputs);
return analyzeOperands(a, pass, data, inst, buf);
};
@@ -1841,7 +1841,7 @@ fn analyzeInstSwitchBr(
var case_infos = try gpa.alloc(ControlBranchInfo, ncases + 1); // +1 for else
defer gpa.free(case_infos);
- std.mem.set(ControlBranchInfo, case_infos, .{});
+ @memset(case_infos, .{});
defer for (case_infos) |*info| {
info.branch_deaths.deinit(gpa);
info.live_set.deinit(gpa);
@@ -1898,7 +1898,7 @@ fn analyzeInstSwitchBr(
const mirrored_deaths = try gpa.alloc(DeathList, ncases + 1);
defer gpa.free(mirrored_deaths);
- std.mem.set(DeathList, mirrored_deaths, .{});
+ @memset(mirrored_deaths, .{});
defer for (mirrored_deaths) |*md| md.deinit(gpa);
{
@@ -1993,7 +1993,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type {
};
errdefer a.gpa.free(extra_tombs);
- std.mem.set(u32, extra_tombs, 0);
+ @memset(extra_tombs, 0);
const will_die_immediately: bool = switch (pass) {
.loop_analysis => false, // track everything, since we don't have full liveness information yet
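
Note: because @memset infers the element type from the slice, the value operand may
be an anonymous literal that coerces to that type, which is what the
@memset(case_infos, .{}) and @memset(mirrored_deaths, .{}) calls above rely on. A
small sketch of that coercion, with Info as a hypothetical stand-in:

    const Info = struct { count: u32 = 0 };
    var infos: [4]Info = undefined;
    @memset(&infos, .{}); // .{} coerces to Info, applying its field defaults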
diff --git a/src/Sema.zig b/src/Sema.zig
index 8b47f1877b..327ff3800f 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -206,9 +206,9 @@ pub const InstMap = struct {
const start_diff = old_start - better_start;
const new_items = try allocator.alloc(Air.Inst.Ref, better_capacity);
- mem.set(Air.Inst.Ref, new_items[0..start_diff], .none);
- mem.copy(Air.Inst.Ref, new_items[start_diff..], map.items);
- mem.set(Air.Inst.Ref, new_items[start_diff + map.items.len ..], .none);
+ @memset(new_items[0..start_diff], .none);
+ @memcpy(new_items[start_diff..][0..map.items.len], map.items);
+ @memset(new_items[start_diff + map.items.len ..], .none);
allocator.free(map.items);
map.items = new_items;
@@ -4307,7 +4307,7 @@ fn validateStructInit(
// Maps field index to field_ptr index of where it was already initialized.
const found_fields = try gpa.alloc(Zir.Inst.Index, struct_ty.structFieldCount());
defer gpa.free(found_fields);
- mem.set(Zir.Inst.Index, found_fields, 0);
+ @memset(found_fields, 0);
var struct_ptr_zir_ref: Zir.Inst.Ref = undefined;
@@ -5113,7 +5113,7 @@ fn zirIntBig(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const byte_count = int.len * @sizeOf(std.math.big.Limb);
const limb_bytes = sema.code.string_bytes[int.start..][0..byte_count];
const limbs = try arena.alloc(std.math.big.Limb, int.len);
- mem.copy(u8, mem.sliceAsBytes(limbs), limb_bytes);
+ @memcpy(mem.sliceAsBytes(limbs), limb_bytes);
return sema.addConstant(
Type.initTag(.comptime_int),
@@ -5967,7 +5967,7 @@ fn addDbgVar(
const elements_used = name.len / 4 + 1;
try sema.air_extra.ensureUnusedCapacity(sema.gpa, elements_used);
const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice());
- mem.copy(u8, buffer, name);
+ @memcpy(buffer[0..name.len], name);
buffer[name.len] = 0;
sema.air_extra.items.len += elements_used;
@@ -10354,7 +10354,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
.Enum => {
seen_enum_fields = try gpa.alloc(?Module.SwitchProngSrc, operand_ty.enumFieldCount());
empty_enum = seen_enum_fields.len == 0 and !operand_ty.isNonexhaustiveEnum();
- mem.set(?Module.SwitchProngSrc, seen_enum_fields, null);
+ @memset(seen_enum_fields, null);
// `range_set` is used for non-exhaustive enum values that do not correspond to any tags.
var extra_index: usize = special.end;
@@ -12809,8 +12809,8 @@ fn analyzeTupleMul(
}
i = 0;
while (i < factor) : (i += 1) {
- mem.copy(Type, types[tuple_len * i ..], types[0..tuple_len]);
- mem.copy(Value, values[tuple_len * i ..], values[0..tuple_len]);
+ mem.copyForwards(Type, types[tuple_len * i ..], types[0..tuple_len]);
+ mem.copyForwards(Value, values[tuple_len * i ..], values[0..tuple_len]);
}
break :rs runtime_src;
};
@@ -12835,7 +12835,7 @@ fn analyzeTupleMul(
}
i = 1;
while (i < factor) : (i += 1) {
- mem.copy(Air.Inst.Ref, element_refs[tuple_len * i ..], element_refs[0..tuple_len]);
+ @memcpy(element_refs[tuple_len * i ..][0..tuple_len], element_refs[0..tuple_len]);
}
return block.addAggregateInit(tuple_ty, element_refs);
@@ -15057,29 +15057,29 @@ fn zirAsm(
sema.appendRefsAssumeCapacity(args);
for (outputs) |o| {
const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice());
- mem.copy(u8, buffer, o.c);
+ @memcpy(buffer[0..o.c.len], o.c);
buffer[o.c.len] = 0;
- mem.copy(u8, buffer[o.c.len + 1 ..], o.n);
+ @memcpy(buffer[o.c.len + 1 ..][0..o.n.len], o.n);
buffer[o.c.len + 1 + o.n.len] = 0;
sema.air_extra.items.len += (o.c.len + o.n.len + (2 + 3)) / 4;
}
for (inputs) |input| {
const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice());
- mem.copy(u8, buffer, input.c);
+ @memcpy(buffer[0..input.c.len], input.c);
buffer[input.c.len] = 0;
- mem.copy(u8, buffer[input.c.len + 1 ..], input.n);
+ @memcpy(buffer[input.c.len + 1 ..][0..input.n.len], input.n);
buffer[input.c.len + 1 + input.n.len] = 0;
sema.air_extra.items.len += (input.c.len + input.n.len + (2 + 3)) / 4;
}
for (clobbers) |clobber| {
const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice());
- mem.copy(u8, buffer, clobber);
+ @memcpy(buffer[0..clobber.len], clobber);
buffer[clobber.len] = 0;
sema.air_extra.items.len += clobber.len / 4 + 1;
}
{
const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice());
- mem.copy(u8, buffer, asm_source);
+ @memcpy(buffer[0..asm_source.len], asm_source);
sema.air_extra.items.len += (asm_source.len + 3) / 4;
}
return asm_air;
@@ -17582,7 +17582,7 @@ fn structInitEmpty(
// The init values to use for the struct instance.
const field_inits = try gpa.alloc(Air.Inst.Ref, struct_ty.structFieldCount());
defer gpa.free(field_inits);
- mem.set(Air.Inst.Ref, field_inits, .none);
+ @memset(field_inits, .none);
return sema.finishStructInit(block, init_src, dest_src, field_inits, struct_ty, false);
}
@@ -17675,7 +17675,7 @@ fn zirStructInit(
// The init values to use for the struct instance.
const field_inits = try gpa.alloc(Air.Inst.Ref, resolved_ty.structFieldCount());
defer gpa.free(field_inits);
- mem.set(Air.Inst.Ref, field_inits, .none);
+ @memset(field_inits, .none);
var field_i: u32 = 0;
var extra_index = extra.end;
@@ -22039,7 +22039,7 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
if (try sema.resolveMaybeUndefVal(uncoerced_elem)) |_| {
for (0..len) |i| {
const elem_index = try sema.addIntUnsigned(Type.usize, i);
- const elem_ptr = try sema.elemPtr(
+ const elem_ptr = try sema.elemPtrOneLayerOnly(
block,
src,
dest_ptr,
@@ -26953,9 +26953,13 @@ fn storePtrVal(
defer sema.gpa.free(buffer);
reinterpret.val_ptr.*.writeToMemory(mut_kit.ty, sema.mod, buffer) catch |err| switch (err) {
error.ReinterpretDeclRef => unreachable,
+ error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already
+ error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(sema.mod)}),
};
operand_val.writeToMemory(operand_ty, sema.mod, buffer[reinterpret.byte_offset..]) catch |err| switch (err) {
error.ReinterpretDeclRef => unreachable,
+ error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already
+ error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{mut_kit.ty.fmt(sema.mod)}),
};
const arena = mut_kit.beginArena(sema.mod);
@@ -27075,7 +27079,7 @@ fn beginComptimePtrMutation(
const array_len_including_sentinel =
try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel());
const elems = try arena.alloc(Value, array_len_including_sentinel);
- mem.set(Value, elems, Value.undef);
+ @memset(elems, Value.undef);
val_ptr.* = try Value.Tag.aggregate.create(arena, elems);
@@ -27273,7 +27277,7 @@ fn beginComptimePtrMutation(
switch (parent.ty.zigTypeTag()) {
.Struct => {
const fields = try arena.alloc(Value, parent.ty.structFieldCount());
- mem.set(Value, fields, Value.undef);
+ @memset(fields, Value.undef);
val_ptr.* = try Value.Tag.aggregate.create(arena, fields);
@@ -27905,6 +27909,8 @@ fn bitCastVal(
defer sema.gpa.free(buffer);
val.writeToMemory(old_ty, sema.mod, buffer) catch |err| switch (err) {
error.ReinterpretDeclRef => return null,
+ error.IllDefinedMemoryLayout => unreachable, // Sema was supposed to emit a compile error already
+ error.Unimplemented => return sema.fail(block, src, "TODO: implement writeToMemory for type '{}'", .{old_ty.fmt(sema.mod)}),
};
return try Value.readFromMemory(new_ty, sema.mod, buffer[buffer_offset..], sema.arena);
}
@@ -28419,7 +28425,7 @@ fn coerceTupleToStruct(
const fields = struct_ty.structFields();
const field_vals = try sema.arena.alloc(Value, fields.count());
const field_refs = try sema.arena.alloc(Air.Inst.Ref, field_vals.len);
- mem.set(Air.Inst.Ref, field_refs, .none);
+ @memset(field_refs, .none);
const inst_ty = sema.typeOf(inst);
var runtime_src: ?LazySrcLoc = null;
@@ -28508,7 +28514,7 @@ fn coerceTupleToTuple(
const dest_field_count = tuple_ty.structFieldCount();
const field_vals = try sema.arena.alloc(Value, dest_field_count);
const field_refs = try sema.arena.alloc(Air.Inst.Ref, field_vals.len);
- mem.set(Air.Inst.Ref, field_refs, .none);
+ @memset(field_refs, .none);
const inst_ty = sema.typeOf(inst);
const inst_field_count = inst_ty.structFieldCount();
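
Note: the analyzeTupleMul hunk above switches to mem.copyForwards rather than
@memcpy for the first loop because @memcpy asserts that source and destination do
not alias, and that loop starts at i = 0, where the destination is exactly the
source; the element_refs loop starts at i = 1, so its regions can never overlap and
the builtin is safe there. A sketch of the distinction:

    var buf = [_]u8{ 1, 2, 3, 4 };
    std.mem.copyForwards(u8, buf[0..2], buf[0..2]); // self-copy: permitted
    // @memcpy(buf[0..2], buf[0..2]); // aliasing: safety-checked illegal behavior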
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 948dad73b9..649edd3b9c 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -1630,7 +1630,7 @@ fn allocRegs(
const read_locks = locks[0..read_args.len];
const write_locks = locks[read_args.len..];
- std.mem.set(?RegisterLock, locks, null);
+ @memset(locks, null);
defer for (locks) |lock| {
if (lock) |locked_reg| self.register_manager.unlockReg(locked_reg);
};
@@ -4395,7 +4395,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (args.len + 1 <= Liveness.bpi - 1) {
var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
buf[0] = callee;
- std.mem.copy(Air.Inst.Ref, buf[1..], args);
+ @memcpy(buf[1..][0..args.len], args);
return self.finishAir(inst, result, buf);
}
var bt = try self.iterateBigTomb(inst, 1 + args.len);
@@ -5348,7 +5348,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
buf_index += 1;
}
if (buf_index + inputs.len > buf.len) break :simple;
- std.mem.copy(Air.Inst.Ref, buf[buf_index..], inputs);
+ @memcpy(buf[buf_index..][0..inputs.len], inputs);
return self.finishAir(inst, result, buf);
}
var bt = try self.iterateBigTomb(inst, outputs.len + inputs.len);
@@ -6055,7 +6055,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
if (elements.len <= Liveness.bpi - 1) {
var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
- std.mem.copy(Air.Inst.Ref, &buf, elements);
+ @memcpy(buf[0..elements.len], elements);
return self.finishAir(inst, result, buf);
}
var bt = try self.iterateBigTomb(inst, elements.len);
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 3676b2a865..5353b78e4d 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -3114,7 +3114,7 @@ fn allocRegs(
const read_locks = locks[0..read_args.len];
const write_locks = locks[read_args.len..];
- std.mem.set(?RegisterLock, locks, null);
+ @memset(locks, null);
defer for (locks) |lock| {
if (lock) |locked_reg| self.register_manager.unlockReg(locked_reg);
};
@@ -4341,7 +4341,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (args.len <= Liveness.bpi - 2) {
var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
buf[0] = callee;
- std.mem.copy(Air.Inst.Ref, buf[1..], args);
+ @memcpy(buf[1..][0..args.len], args);
return self.finishAir(inst, result, buf);
}
var bt = try self.iterateBigTomb(inst, 1 + args.len);
@@ -5263,7 +5263,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
buf_index += 1;
}
if (buf_index + inputs.len > buf.len) break :simple;
- std.mem.copy(Air.Inst.Ref, buf[buf_index..], inputs);
+ @memcpy(buf[buf_index..][0..inputs.len], inputs);
return self.finishAir(inst, result, buf);
}
var bt = try self.iterateBigTomb(inst, outputs.len + inputs.len);
@@ -6000,7 +6000,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
if (elements.len <= Liveness.bpi - 1) {
var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
- std.mem.copy(Air.Inst.Ref, &buf, elements);
+ @memcpy(buf[0..elements.len], elements);
return self.finishAir(inst, result, buf);
}
var bt = try self.iterateBigTomb(inst, elements.len);
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index a0ebc1becc..d4c7eb0c70 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -1784,7 +1784,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (args.len <= Liveness.bpi - 2) {
var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
buf[0] = callee;
- std.mem.copy(Air.Inst.Ref, buf[1..], args);
+ @memcpy(buf[1..][0..args.len], args);
return self.finishAir(inst, result, buf);
}
var bt = try self.iterateBigTomb(inst, 1 + args.len);
@@ -2225,7 +2225,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
buf_index += 1;
}
if (buf_index + inputs.len > buf.len) break :simple;
- std.mem.copy(Air.Inst.Ref, buf[buf_index..], inputs);
+ @memcpy(buf[buf_index..][0..inputs.len], inputs);
return self.finishAir(inst, result, buf);
}
var bt = try self.iterateBigTomb(inst, outputs.len + inputs.len);
@@ -2500,7 +2500,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
if (elements.len <= Liveness.bpi - 1) {
var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
- std.mem.copy(Air.Inst.Ref, &buf, elements);
+ @memcpy(buf[0..elements.len], elements);
return self.finishAir(inst, result, buf);
}
var bt = try self.iterateBigTomb(inst, elements.len);
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index cc5c9e9832..2686852bab 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -843,7 +843,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
if (elements.len <= Liveness.bpi - 1) {
var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
- std.mem.copy(Air.Inst.Ref, &buf, elements);
+ @memcpy(buf[0..elements.len], elements);
return self.finishAir(inst, result, buf);
}
var bt = try self.iterateBigTomb(inst, elements.len);
@@ -987,7 +987,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
buf_index += 1;
}
if (buf_index + inputs.len > buf.len) break :simple;
- std.mem.copy(Air.Inst.Ref, buf[buf_index..], inputs);
+ @memcpy(buf[buf_index..][0..inputs.len], inputs);
return self.finishAir(inst, result, buf);
}
@@ -1314,7 +1314,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (args.len + 1 <= Liveness.bpi - 1) {
var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
buf[0] = callee;
- std.mem.copy(Air.Inst.Ref, buf[1..], args);
+ @memcpy(buf[1..][0..args.len], args);
return self.finishAir(inst, result, buf);
}
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index df0db882ba..be972d7aea 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -7117,7 +7117,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
buf_index += 1;
}
if (buf_index + inputs.len > buf.len) break :simple;
- std.mem.copy(Air.Inst.Ref, buf[buf_index..], inputs);
+ @memcpy(buf[buf_index..][0..inputs.len], inputs);
return self.finishAir(inst, result, buf);
}
var bt = self.liveness.iterateBigTomb(inst);
@@ -8505,7 +8505,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
if (elements.len <= Liveness.bpi - 1) {
var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
- std.mem.copy(Air.Inst.Ref, &buf, elements);
+ @memcpy(buf[0..elements.len], elements);
return self.finishAir(inst, result, buf);
}
var bt = self.liveness.iterateBigTomb(inst);
diff --git a/src/arch/x86_64/Encoding.zig b/src/arch/x86_64/Encoding.zig
index 21899b912b..a977af7842 100644
--- a/src/arch/x86_64/Encoding.zig
+++ b/src/arch/x86_64/Encoding.zig
@@ -546,7 +546,7 @@ fn estimateInstructionLength(prefix: Prefix, encoding: Encoding, ops: []const Op
.encoding = encoding,
.ops = [1]Operand{.none} ** 4,
};
- std.mem.copy(Operand, &inst.ops, ops);
+ @memcpy(inst.ops[0..ops.len], ops);
var cwriter = std.io.countingWriter(std.io.null_writer);
inst.encode(cwriter.writer(), .{ .allow_frame_loc = true }) catch unreachable; // Not allowed to fail here unless OOM.
@@ -575,8 +575,10 @@ const mnemonic_to_encodings_map = init: {
.modrm_ext = entry[4],
.mode = entry[5],
};
- std.mem.copy(Op, &data.ops, entry[2]);
- std.mem.copy(u8, &data.opc, entry[3]);
+ // TODO: use `@memcpy` for these. When I did that, I got a false positive
+ // compile error for this copy happening at compile time.
+ std.mem.copyForwards(Op, &data.ops, entry[2]);
+ std.mem.copyForwards(u8, &data.opc, entry[3]);
while (mnemonic_int < @enumToInt(entry[0])) : (mnemonic_int += 1) {
mnemonic_map[mnemonic_int] = data_storage[mnemonic_start..data_index];
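
Note: the TODO above works around @memcpy being rejected for this comptime copy;
std.mem.copyForwards is an ordinary element-wise loop, so it runs in both runtime
and comptime contexts. Roughly, and as an assumption about the standard library of
this era:

    pub fn copyForwards(comptime T: type, dest: []T, source: []const T) void {
        for (dest[0..source.len], source) |*d, s| d.* = s;
    }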
diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig
index e9da09b999..ff1a0ee520 100644
--- a/src/arch/x86_64/abi.zig
+++ b/src/arch/x86_64/abi.zig
@@ -321,7 +321,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
byte_i = 0;
result_i += 1;
}
- std.mem.copy(Class, result[result_i..], field_class);
+ @memcpy(result[result_i..][0..field_class.len], field_class);
result_i += field_class.len;
// If there are any bytes leftover, we have to try to combine
// the next field with them.
diff --git a/src/arch/x86_64/encoder.zig b/src/arch/x86_64/encoder.zig
index 73b40ea3be..329dfca924 100644
--- a/src/arch/x86_64/encoder.zig
+++ b/src/arch/x86_64/encoder.zig
@@ -182,7 +182,7 @@ pub const Instruction = struct {
.encoding = encoding,
.ops = [1]Operand{.none} ** 4,
};
- std.mem.copy(Operand, &inst.ops, ops);
+ @memcpy(inst.ops[0..ops.len], ops);
return inst;
}
@@ -859,7 +859,7 @@ fn expectEqualHexStrings(expected: []const u8, given: []const u8, assembly: []co
const idx = std.mem.indexOfDiff(u8, expected_fmt, given_fmt).?;
var padding = try testing.allocator.alloc(u8, idx + 5);
defer testing.allocator.free(padding);
- std.mem.set(u8, padding, ' ');
+ @memset(padding, ' ');
std.debug.print("\nASM: {s}\nEXP: {s}\nGIV: {s}\n{s}^ -- first differing byte\n", .{
assembly,
expected_fmt,
diff --git a/src/codegen.zig b/src/codegen.zig
index dbcd76118a..bf80a90cc3 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -552,7 +552,7 @@ pub fn generateSymbol(
.ty = field_ty,
.val = field_val,
}, &tmp_list, debug_output, reloc_info)) {
- .ok => mem.copy(u8, code.items[current_pos..], tmp_list.items),
+ .ok => @memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items),
.fail => |em| return Result{ .fail = em },
}
} else {
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 385094e495..f69cec960e 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -2411,9 +2411,9 @@ pub fn genErrDecls(o: *Object) !void {
const name_buf = try o.dg.gpa.alloc(u8, name_prefix.len + max_name_len);
defer o.dg.gpa.free(name_buf);
- mem.copy(u8, name_buf, name_prefix);
+ @memcpy(name_buf[0..name_prefix.len], name_prefix);
for (o.dg.module.error_name_list.items) |name| {
- mem.copy(u8, name_buf[name_prefix.len..], name);
+ @memcpy(name_buf[name_prefix.len..][0..name.len], name);
const identifier = name_buf[0 .. name_prefix.len + name.len];
var name_ty_pl = Type.Payload.Len{ .base = .{ .tag = .array_u8_sentinel_0 }, .data = name.len };
@@ -3858,7 +3858,7 @@ fn airCmpOp(
try reap(f, inst, &.{ data.lhs, data.rhs });
const rhs_ty = f.air.typeOf(data.rhs);
- const need_cast = lhs_ty.isSinglePointer() != rhs_ty.isSinglePointer();
+ const need_cast = lhs_ty.isSinglePointer() or rhs_ty.isSinglePointer();
const writer = f.object.writer();
const local = try f.allocLocal(inst, inst_ty);
const v = try Vectorize.start(f, inst, writer, lhs_ty);
@@ -4419,51 +4419,94 @@ fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue {
const dest_ty = f.air.typeOfIndex(inst);
const operand = try f.resolveInst(ty_op.operand);
- try reap(f, inst, &.{ty_op.operand});
const operand_ty = f.air.typeOf(ty_op.operand);
- const target = f.object.dg.module.getTarget();
- const writer = f.object.writer();
- const local = try f.allocLocal(inst, dest_ty);
+ const bitcasted = try bitcast(f, dest_ty, operand, operand_ty);
+ try reap(f, inst, &.{ty_op.operand});
+ return bitcasted.move(f, inst, dest_ty);
+}
+
+const LocalResult = struct {
+ c_value: CValue,
+ need_free: bool,
+
+ fn move(lr: LocalResult, f: *Function, inst: Air.Inst.Index, dest_ty: Type) !CValue {
+ if (lr.need_free) {
+ // Move the freshly allocated local to be owned by this instruction,
+ // by returning it here instead of freeing it.
+ return lr.c_value;
+ }
+
+ const local = try f.allocLocal(inst, dest_ty);
+ try lr.free(f);
+ const writer = f.object.writer();
+ try f.writeCValue(writer, local, .Other);
+ if (dest_ty.isAbiInt()) {
+ try writer.writeAll(" = ");
+ } else {
+ try writer.writeAll(" = (");
+ try f.renderType(writer, dest_ty);
+ try writer.writeByte(')');
+ }
+ try f.writeCValue(writer, lr.c_value, .Initializer);
+ try writer.writeAll(";\n");
+ return local;
+ }
+
+ fn free(lr: LocalResult, f: *Function) !void {
+ if (lr.need_free) {
+ try freeLocal(f, 0, lr.c_value.new_local, 0);
+ }
+ }
+};
- // If the assignment looks like 'x = x', we don't need it
- const can_elide = operand == .local and operand.local == local.new_local;
+fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !LocalResult {
+ const target = f.object.dg.module.getTarget();
+ const writer = f.object.writer();
if (operand_ty.isAbiInt() and dest_ty.isAbiInt()) {
- if (can_elide) return local;
const src_info = dest_ty.intInfo(target);
const dest_info = operand_ty.intInfo(target);
if (src_info.signedness == dest_info.signedness and
src_info.bits == dest_info.bits)
{
- try f.writeCValue(writer, local, .Other);
- try writer.writeAll(" = ");
- try f.writeCValue(writer, operand, .Initializer);
- try writer.writeAll(";\n");
- return local;
+ return .{
+ .c_value = operand,
+ .need_free = false,
+ };
}
}
if (dest_ty.isPtrAtRuntime() and operand_ty.isPtrAtRuntime()) {
- if (can_elide) return local;
+ const local = try f.allocLocal(0, dest_ty);
try f.writeCValue(writer, local, .Other);
try writer.writeAll(" = (");
try f.renderType(writer, dest_ty);
try writer.writeByte(')');
try f.writeCValue(writer, operand, .Other);
try writer.writeAll(";\n");
- return local;
+ return .{
+ .c_value = local,
+ .need_free = true,
+ };
}
const operand_lval = if (operand == .constant) blk: {
- const operand_local = try f.allocLocal(inst, operand_ty);
+ const operand_local = try f.allocLocal(0, operand_ty);
try f.writeCValue(writer, operand_local, .Other);
- try writer.writeAll(" = ");
+ if (operand_ty.isAbiInt()) {
+ try writer.writeAll(" = ");
+ } else {
+ try writer.writeAll(" = (");
+ try f.renderType(writer, operand_ty);
+ try writer.writeByte(')');
+ }
try f.writeCValue(writer, operand, .Initializer);
try writer.writeAll(";\n");
break :blk operand_local;
} else operand;
+ const local = try f.allocLocal(0, dest_ty);
try writer.writeAll("memcpy(&");
try f.writeCValue(writer, local, .Other);
try writer.writeAll(", &");
@@ -4528,10 +4571,13 @@ fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue {
}
if (operand == .constant) {
- try freeLocal(f, inst, operand_lval.new_local, 0);
+ try freeLocal(f, 0, operand_lval.new_local, 0);
}
- return local;
+ return .{
+ .c_value = local,
+ .need_free = true,
+ };
}
fn airTrap(writer: anytype) !CValue {
@@ -4831,7 +4877,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
const literal = mem.sliceTo(asm_source[src_i..], '%');
src_i += literal.len;
- mem.copy(u8, fixed_asm_source[dst_i..], literal);
+ @memcpy(fixed_asm_source[dst_i..][0..literal.len], literal);
dst_i += literal.len;
if (src_i >= asm_source.len) break;
@@ -4856,9 +4902,9 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
const name = desc[0..colon];
const modifier = desc[colon + 1 ..];
- mem.copy(u8, fixed_asm_source[dst_i..], modifier);
+ @memcpy(fixed_asm_source[dst_i..][0..modifier.len], modifier);
dst_i += modifier.len;
- mem.copy(u8, fixed_asm_source[dst_i..], name);
+ @memcpy(fixed_asm_source[dst_i..][0..name.len], name);
dst_i += name.len;
src_i += desc.len;
@@ -6288,15 +6334,19 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
}
try writer.writeAll("; ++");
try f.writeCValue(writer, index, .Other);
- try writer.writeAll(") ((");
+ try writer.writeAll(") ");
+
+ const a = try Assignment.start(f, writer, elem_ty);
+ try writer.writeAll("((");
try f.renderType(writer, elem_ptr_ty);
try writer.writeByte(')');
try writeSliceOrPtr(f, writer, dest_slice, dest_ty);
try writer.writeAll(")[");
try f.writeCValue(writer, index, .Other);
- try writer.writeAll("] = ");
- try f.writeCValue(writer, value, .FunctionArgument);
- try writer.writeAll(";\n");
+ try writer.writeByte(']');
+ try a.assign(f, writer);
+ try f.writeCValue(writer, value, .Other);
+ try a.end(f, writer);
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
try freeLocal(f, inst, index.new_local, 0);
@@ -6304,12 +6354,14 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
return .none;
}
+ const bitcasted = try bitcast(f, Type.u8, value, elem_ty);
+
try writer.writeAll("memset(");
switch (dest_ty.ptrSize()) {
.Slice => {
try f.writeCValueMember(writer, dest_slice, .{ .identifier = "ptr" });
try writer.writeAll(", ");
- try f.writeCValue(writer, value, .FunctionArgument);
+ try f.writeCValue(writer, bitcasted.c_value, .FunctionArgument);
try writer.writeAll(", ");
try f.writeCValueMember(writer, dest_slice, .{ .identifier = "len" });
try writer.writeAll(");\n");
@@ -6320,11 +6372,12 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
try f.writeCValue(writer, dest_slice, .FunctionArgument);
try writer.writeAll(", ");
- try f.writeCValue(writer, value, .FunctionArgument);
+ try f.writeCValue(writer, bitcasted.c_value, .FunctionArgument);
try writer.print(", {d});\n", .{len});
},
.Many, .C => unreachable,
}
+ try bitcasted.free(f);
try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs });
return .none;
}
@@ -7394,7 +7447,7 @@ fn formatIntLiteral(
var int_buf: Value.BigIntSpace = undefined;
const int = if (data.val.isUndefDeep()) blk: {
undef_limbs = try allocator.alloc(BigIntLimb, BigInt.calcTwosCompLimbCount(data.int_info.bits));
- mem.set(BigIntLimb, undef_limbs, undefPattern(BigIntLimb));
+ @memset(undef_limbs, undefPattern(BigIntLimb));
var undef_int = BigInt.Mutable{
.limbs = undef_limbs,
@@ -7489,7 +7542,7 @@ fn formatIntLiteral(
} else {
try data.cty.renderLiteralPrefix(writer, data.kind);
wrap.convertToTwosComplement(int, data.int_info.signedness, c_bits);
- mem.set(BigIntLimb, wrap.limbs[wrap.len..], 0);
+ @memset(wrap.limbs[wrap.len..], 0);
wrap.len = wrap.limbs.len;
const limbs_per_c_limb = @divExact(wrap.len, c_limb_info.count);
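
Note: the LocalResult refactor above lets airMemset reuse bitcast. When the operand
can be forwarded unchanged (matching ABI integers), need_free is false and no
temporary is emitted; otherwise bitcast allocates a scratch local that the caller
must release. A sketch of the intended call pattern, mirroring airMemset:

    const bitcasted = try bitcast(f, Type.u8, value, elem_ty);
    // ... use bitcasted.c_value as the memset fill byte ...
    try bitcasted.free(f); // a no-op unless a temporary was allocated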
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 94f49e801d..d697a41988 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -7939,11 +7939,15 @@ pub const FuncGen = struct {
return self.builder.buildPtrToInt(operand_ptr, dest_llvm_ty, "");
}
- fn airBitCast(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
+ fn airBitCast(self: *FuncGen, inst: Air.Inst.Index) !*llvm.Value {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_ty = self.air.typeOf(ty_op.operand);
const inst_ty = self.air.typeOfIndex(inst);
const operand = try self.resolveInst(ty_op.operand);
+ return self.bitCast(operand, operand_ty, inst_ty);
+ }
+
+ fn bitCast(self: *FuncGen, operand: *llvm.Value, operand_ty: Type, inst_ty: Type) !*llvm.Value {
const operand_is_ref = isByRef(operand_ty);
const result_is_ref = isByRef(inst_ty);
const llvm_dest_ty = try self.dg.lowerType(inst_ty);
@@ -7954,6 +7958,12 @@ pub const FuncGen = struct {
return operand;
}
+ if (llvm_dest_ty.getTypeKind() == .Integer and
+ operand.typeOf().getTypeKind() == .Integer)
+ {
+ return self.builder.buildZExtOrBitCast(operand, llvm_dest_ty, "");
+ }
+
if (operand_ty.zigTypeTag() == .Int and inst_ty.isPtrAtRuntime()) {
return self.builder.buildIntToPtr(operand, llvm_dest_ty, "");
}
@@ -8414,27 +8424,45 @@ pub const FuncGen = struct {
const dest_slice = try self.resolveInst(bin_op.lhs);
const ptr_ty = self.air.typeOf(bin_op.lhs);
const elem_ty = self.air.typeOf(bin_op.rhs);
- const target = self.dg.module.getTarget();
- const val_is_undef = if (self.air.value(bin_op.rhs)) |val| val.isUndefDeep() else false;
+ const module = self.dg.module;
+ const target = module.getTarget();
const dest_ptr_align = ptr_ty.ptrAlignment(target);
const u8_llvm_ty = self.context.intType(8);
const dest_ptr = self.sliceOrArrayPtr(dest_slice, ptr_ty);
+ const is_volatile = ptr_ty.isVolatilePtr();
+
+ if (self.air.value(bin_op.rhs)) |elem_val| {
+ if (elem_val.isUndefDeep()) {
+ // Even if safety is disabled, we still emit a memset to undefined since it conveys
+ // extra information to LLVM. However, safety makes the difference between using
+ // 0xaa or actual undefined for the fill byte.
+ const fill_byte = if (safety)
+ u8_llvm_ty.constInt(0xaa, .False)
+ else
+ u8_llvm_ty.getUndef();
+ const len = self.sliceOrArrayLenInBytes(dest_slice, ptr_ty);
+ _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile);
- if (val_is_undef) {
- // Even if safety is disabled, we still emit a memset to undefined since it conveys
- // extra information to LLVM. However, safety makes the difference between using
- // 0xaa or actual undefined for the fill byte.
- const fill_byte = if (safety)
- u8_llvm_ty.constInt(0xaa, .False)
- else
- u8_llvm_ty.getUndef();
- const len = self.sliceOrArrayLenInBytes(dest_slice, ptr_ty);
- _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, ptr_ty.isVolatilePtr());
+ if (safety and module.comp.bin_file.options.valgrind) {
+ self.valgrindMarkUndef(dest_ptr, len);
+ }
+ return null;
+ }
- if (safety and self.dg.module.comp.bin_file.options.valgrind) {
- self.valgrindMarkUndef(dest_ptr, len);
+ // Test if the element value is compile-time known to be a
+ // repeating byte pattern, for example, `@as(u64, 0)` has a
+ // repeating byte pattern of 0 bytes. In such a case, the memset
+ // intrinsic can be used.

+ var value_buffer: Value.Payload.U64 = undefined;
+ if (try elem_val.hasRepeatedByteRepr(elem_ty, module, &value_buffer)) |byte_val| {
+ const fill_byte = try self.resolveValue(.{
+ .ty = Type.u8,
+ .val = byte_val,
+ });
+ const len = self.sliceOrArrayLenInBytes(dest_slice, ptr_ty);
+ _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile);
+ return null;
}
- return null;
}
const value = try self.resolveInst(bin_op.rhs);
@@ -8442,9 +8470,9 @@ pub const FuncGen = struct {
if (elem_abi_size == 1) {
// In this case we can take advantage of LLVM's intrinsic.
- const fill_byte = self.builder.buildBitCast(value, u8_llvm_ty, "");
+ const fill_byte = try self.bitCast(value, elem_ty, Type.u8);
const len = self.sliceOrArrayLenInBytes(dest_slice, ptr_ty);
- _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, ptr_ty.isVolatilePtr());
+ _ = self.builder.buildMemSet(dest_ptr, fill_byte, len, dest_ptr_align, is_volatile);
return null;
}
@@ -8486,8 +8514,22 @@ pub const FuncGen = struct {
_ = self.builder.buildCondBr(end, body_block, end_block);
self.builder.positionBuilderAtEnd(body_block);
- const store_inst = self.builder.buildStore(value, it_ptr);
- store_inst.setAlignment(@min(elem_ty.abiAlignment(target), dest_ptr_align));
+ const elem_abi_alignment = elem_ty.abiAlignment(target);
+ const it_ptr_alignment = @min(elem_abi_alignment, dest_ptr_align);
+ if (isByRef(elem_ty)) {
+ _ = self.builder.buildMemCpy(
+ it_ptr,
+ it_ptr_alignment,
+ value,
+ elem_abi_alignment,
+ llvm_usize_ty.constInt(elem_abi_size, .False),
+ is_volatile,
+ );
+ } else {
+ const store_inst = self.builder.buildStore(value, it_ptr);
+ store_inst.setAlignment(it_ptr_alignment);
+ store_inst.setVolatile(llvm.Bool.fromBool(is_volatile));
+ }
const one_gep = [_]*llvm.Value{llvm_usize_ty.constInt(1, .False)};
const next_ptr = self.builder.buildInBoundsGEP(elem_llvm_ty, it_ptr, &one_gep, one_gep.len, "");
_ = self.builder.buildBr(loop_block);
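
Note: hasRepeatedByteRepr asks whether a comptime-known value's memory
representation is one byte repeated, in which case the element-by-element store
loop collapses into a single memset intrinsic. A minimal sketch of the idea for a
u64, assuming the real check inspects the type's full ABI representation:

    fn repeatedByte(x: u64) ?u8 {
        const b = @truncate(u8, x);
        comptime var i = 1;
        inline while (i < 8) : (i += 1) {
            if (@truncate(u8, x >> (8 * i)) != b) return null;
        }
        return b; // e.g. @as(u64, 0) -> 0, so a memset of 0 bytes applies
    }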
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index ed3e213b0e..0af681bb5e 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -1916,7 +1916,7 @@ fn writeImportTables(self: *Coff) !void {
.name_rva = header.virtual_address + dll_names_offset,
.import_address_table_rva = header.virtual_address + iat_offset,
};
- mem.copy(u8, buffer.items[dir_table_offset..], mem.asBytes(&lookup_header));
+ @memcpy(buffer.items[dir_table_offset..][0..@sizeOf(coff.ImportDirectoryEntry)], mem.asBytes(&lookup_header));
dir_table_offset += dir_header_size;
for (itable.entries.items) |entry| {
@@ -1924,15 +1924,21 @@ fn writeImportTables(self: *Coff) !void {
// IAT and lookup table entry
const lookup = coff.ImportLookupEntry64.ByName{ .name_table_rva = @intCast(u31, header.virtual_address + names_table_offset) };
- mem.copy(u8, buffer.items[iat_offset..], mem.asBytes(&lookup));
+ @memcpy(
+ buffer.items[iat_offset..][0..@sizeOf(coff.ImportLookupEntry64.ByName)],
+ mem.asBytes(&lookup),
+ );
iat_offset += lookup_entry_size;
- mem.copy(u8, buffer.items[lookup_table_offset..], mem.asBytes(&lookup));
+ @memcpy(
+ buffer.items[lookup_table_offset..][0..@sizeOf(coff.ImportLookupEntry64.ByName)],
+ mem.asBytes(&lookup),
+ );
lookup_table_offset += lookup_entry_size;
// Names table entry
mem.writeIntLittle(u16, buffer.items[names_table_offset..][0..2], 0); // Hint set to 0 until we learn how to parse DLLs
names_table_offset += 2;
- mem.copy(u8, buffer.items[names_table_offset..], import_name);
+ @memcpy(buffer.items[names_table_offset..][0..import_name.len], import_name);
names_table_offset += @intCast(u32, import_name.len);
buffer.items[names_table_offset] = 0;
names_table_offset += 1;
@@ -1947,13 +1953,16 @@ fn writeImportTables(self: *Coff) !void {
iat_offset += 8;
// Lookup table sentinel
- mem.copy(u8, buffer.items[lookup_table_offset..], mem.asBytes(&coff.ImportLookupEntry64.ByName{ .name_table_rva = 0 }));
+ @memcpy(
+ buffer.items[lookup_table_offset..][0..@sizeOf(coff.ImportLookupEntry64.ByName)],
+ mem.asBytes(&coff.ImportLookupEntry64.ByName{ .name_table_rva = 0 }),
+ );
lookup_table_offset += lookup_entry_size;
// DLL name
- mem.copy(u8, buffer.items[dll_names_offset..], lib_name);
+ @memcpy(buffer.items[dll_names_offset..][0..lib_name.len], lib_name);
dll_names_offset += @intCast(u32, lib_name.len);
- mem.copy(u8, buffer.items[dll_names_offset..], ext);
+ @memcpy(buffer.items[dll_names_offset..][0..ext.len], ext);
dll_names_offset += @intCast(u32, ext.len);
buffer.items[dll_names_offset] = 0;
dll_names_offset += 1;
@@ -1967,7 +1976,10 @@ fn writeImportTables(self: *Coff) !void {
.name_rva = 0,
.import_address_table_rva = 0,
};
- mem.copy(u8, buffer.items[dir_table_offset..], mem.asBytes(&lookup_header));
+ @memcpy(
+ buffer.items[dir_table_offset..][0..@sizeOf(coff.ImportDirectoryEntry)],
+ mem.asBytes(&lookup_header),
+ );
dir_table_offset += dir_header_size;
assert(dll_names_offset == needed_size);
@@ -2366,13 +2378,13 @@ pub fn getAtomIndexForSymbol(self: *const Coff, sym_loc: SymbolWithLoc) ?Atom.In
fn setSectionName(self: *Coff, header: *coff.SectionHeader, name: []const u8) !void {
if (name.len <= 8) {
- mem.copy(u8, &header.name, name);
- mem.set(u8, header.name[name.len..], 0);
+ @memcpy(header.name[0..name.len], name);
+ @memset(header.name[name.len..], 0);
return;
}
const offset = try self.strtab.insert(self.base.allocator, name);
const name_offset = fmt.bufPrint(&header.name, "/{d}", .{offset}) catch unreachable;
- mem.set(u8, header.name[name_offset.len..], 0);
+ @memset(header.name[name_offset.len..], 0);
}
fn getSectionName(self: *const Coff, header: *const coff.SectionHeader) []const u8 {
@@ -2385,17 +2397,17 @@ fn getSectionName(self: *const Coff, header: *const coff.SectionHeader) []const
fn setSymbolName(self: *Coff, symbol: *coff.Symbol, name: []const u8) !void {
if (name.len <= 8) {
- mem.copy(u8, &symbol.name, name);
- mem.set(u8, symbol.name[name.len..], 0);
+ @memcpy(symbol.name[0..name.len], name);
+ @memset(symbol.name[name.len..], 0);
return;
}
const offset = try self.strtab.insert(self.base.allocator, name);
- mem.set(u8, symbol.name[0..4], 0);
+ @memset(symbol.name[0..4], 0);
mem.writeIntLittle(u32, symbol.name[4..8], offset);
}
fn logSymAttributes(sym: *const coff.Symbol, buf: *[4]u8) []const u8 {
- mem.set(u8, buf[0..4], '_');
+ @memset(buf[0..4], '_');
switch (sym.section_number) {
.UNDEFINED => {
buf[3] = 'u';
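
Note: mem.asBytes(&x) yields a pointer to a fixed-size byte array,
*[@sizeOf(@TypeOf(x))]u8, which is why each destination in the import-table writes
above is cut to exactly @sizeOf(...) bytes. A sketch, with Pair as a hypothetical
stand-in for the coff header structs:

    const Pair = extern struct { a: u32, b: u32 };
    var out: [64]u8 = undefined;
    const p = Pair{ .a = 1, .b = 2 };
    @memcpy(out[8..][0..@sizeOf(Pair)], std.mem.asBytes(&p));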
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index 0f2dfbda0e..7a008ca732 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -1189,7 +1189,7 @@ pub fn commitDeclState(
if (needed_size > segment_size) {
log.debug(" allocating {d} bytes for 'debug line' information", .{needed_size - segment_size});
try debug_line.resize(self.allocator, needed_size);
- mem.set(u8, debug_line.items[segment_size..], 0);
+ @memset(debug_line.items[segment_size..], 0);
}
debug_line.items.len = needed_size;
}
@@ -1458,7 +1458,7 @@ fn writeDeclDebugInfo(self: *Dwarf, atom_index: Atom.Index, dbg_info_buf: []cons
if (needed_size > segment_size) {
log.debug(" allocating {d} bytes for 'debug info' information", .{needed_size - segment_size});
try debug_info.resize(self.allocator, needed_size);
- mem.set(u8, debug_info.items[segment_size..], 0);
+ @memset(debug_info.items[segment_size..], 0);
}
debug_info.items.len = needed_size;
}
@@ -1515,7 +1515,7 @@ pub fn updateDeclLineNumber(self: *Dwarf, module: *Module, decl_index: Module.De
const wasm_file = self.bin_file.cast(File.Wasm).?;
const offset = atom.off + self.getRelocDbgLineOff();
const line_atom_index = wasm_file.debug_line_atom.?;
- mem.copy(u8, wasm_file.getAtomPtr(line_atom_index).code.items[offset..], &data);
+ wasm_file.getAtomPtr(line_atom_index).code.items[offset..][0..data.len].* = data;
},
else => unreachable,
}
@@ -1734,7 +1734,7 @@ pub fn writeDbgAbbrev(self: *Dwarf) !void {
const wasm_file = self.bin_file.cast(File.Wasm).?;
const debug_abbrev = &wasm_file.getAtomPtr(wasm_file.debug_abbrev_atom.?).code;
try debug_abbrev.resize(wasm_file.base.allocator, needed_size);
- mem.copy(u8, debug_abbrev.items, &abbrev_buf);
+ debug_abbrev.items[0..abbrev_buf.len].* = abbrev_buf;
},
else => unreachable,
}
@@ -1976,7 +1976,7 @@ fn writeDbgLineNopsBuffered(
}
}
- mem.copy(u8, buf[offset..], content);
+ @memcpy(buf[offset..][0..content.len], content);
{
var padding_left = next_padding_size;
@@ -2076,9 +2076,9 @@ fn writeDbgInfoNopsToArrayList(
buffer.items.len,
offset + content.len + next_padding_size + 1,
));
- mem.set(u8, buffer.items[offset - prev_padding_size .. offset], @enumToInt(AbbrevKind.pad1));
- mem.copy(u8, buffer.items[offset..], content);
- mem.set(u8, buffer.items[offset + content.len ..][0..next_padding_size], @enumToInt(AbbrevKind.pad1));
+ @memset(buffer.items[offset - prev_padding_size .. offset], @enumToInt(AbbrevKind.pad1));
+ @memcpy(buffer.items[offset..][0..content.len], content);
+ @memset(buffer.items[offset + content.len ..][0..next_padding_size], @enumToInt(AbbrevKind.pad1));
if (trailing_zero) {
buffer.items[offset + content.len + next_padding_size] = 0;
@@ -2168,7 +2168,7 @@ pub fn writeDbgAranges(self: *Dwarf, addr: u64, size: u64) !void {
const wasm_file = self.bin_file.cast(File.Wasm).?;
const debug_ranges = &wasm_file.getAtomPtr(wasm_file.debug_ranges_atom.?).code;
try debug_ranges.resize(wasm_file.base.allocator, needed_size);
- mem.copy(u8, debug_ranges.items, di_buf.items);
+ @memcpy(debug_ranges.items[0..di_buf.items.len], di_buf.items);
},
else => unreachable,
}
@@ -2341,9 +2341,12 @@ pub fn writeDbgLineHeader(self: *Dwarf) !void {
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
const debug_line = &wasm_file.getAtomPtr(wasm_file.debug_line_atom.?).code;
- mem.copy(u8, buffer, debug_line.items[first_fn.off..]);
+ {
+ const src = debug_line.items[first_fn.off..];
+ @memcpy(buffer[0..src.len], src);
+ }
try debug_line.resize(self.allocator, debug_line.items.len + delta);
- mem.copy(u8, debug_line.items[first_fn.off + delta ..], buffer);
+ @memcpy(debug_line.items[first_fn.off + delta ..][0..buffer.len], buffer);
},
else => unreachable,
}
@@ -2537,7 +2540,7 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void {
.wasm => {
const wasm_file = self.bin_file.cast(File.Wasm).?;
const debug_info = wasm_file.getAtomPtr(wasm_file.debug_info_atom.?).code;
- mem.copy(u8, debug_info.items[atom.off + reloc.offset ..], &buf);
+ debug_info.items[atom.off + reloc.offset ..][0..buf.len].* = buf;
},
else => unreachable,
}
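
Note: where the length is comptime-known, the diff goes one step further than
@memcpy: slicing with comptime bounds produces a pointer to a fixed-size array, and
assigning through it is a comptime-sized copy with no runtime length bookkeeping.
For example:

    var buf: [16]u8 = undefined;
    const data = [4]u8{ 1, 2, 3, 4 };
    buf[8..][0..data.len].* = data; // buf[8..][0..4] has type *[4]u8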
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 4a6bb99818..48d952b6cc 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -1997,7 +1997,7 @@ fn writeElfHeader(self: *Elf) !void {
// OS ABI, often set to 0 regardless of target platform
// ABI Version, possibly used by glibc but not by static executables
// padding
- mem.set(u8, hdr_buf[index..][0..9], 0);
+ @memset(hdr_buf[index..][0..9], 0);
index += 9;
assert(index == 16);
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index df9b8a768a..21633dea64 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -1454,7 +1454,7 @@ fn createThreadLocalDescriptorAtom(self: *MachO, sym_name: []const u8, target: S
});
var code: [size]u8 = undefined;
- mem.set(u8, &code, 0);
+ @memset(&code, 0);
try self.writeAtom(atom_index, &code);
return atom_index;
@@ -3234,7 +3234,7 @@ fn writeDyldInfoData(self: *MachO) !void {
var buffer = try gpa.alloc(u8, needed_size);
defer gpa.free(buffer);
- mem.set(u8, buffer, 0);
+ @memset(buffer, 0);
var stream = std.io.fixedBufferStream(buffer);
const writer = stream.writer();
@@ -3389,8 +3389,8 @@ fn writeStrtab(self: *MachO) !void {
const buffer = try gpa.alloc(u8, math.cast(usize, needed_size_aligned) orelse return error.Overflow);
defer gpa.free(buffer);
- mem.set(u8, buffer, 0);
- mem.copy(u8, buffer, self.strtab.buffer.items);
+ @memcpy(buffer[0..self.strtab.buffer.items.len], self.strtab.buffer.items);
+ @memset(buffer[self.strtab.buffer.items.len..], 0);
try self.base.file.?.pwriteAll(buffer, offset);
@@ -3668,8 +3668,7 @@ fn addUndefined(self: *MachO, name: []const u8, action: ResolveAction.Kind) !u32
pub fn makeStaticString(bytes: []const u8) [16]u8 {
var buf = [_]u8{0} ** 16;
- assert(bytes.len <= buf.len);
- mem.copy(u8, &buf, bytes);
+ @memcpy(buf[0..bytes.len], bytes);
return buf;
}
@@ -4096,8 +4095,8 @@ pub fn logSections(self: *MachO) void {
}
fn logSymAttributes(sym: macho.nlist_64, buf: *[4]u8) []const u8 {
- mem.set(u8, buf[0..4], '_');
- mem.set(u8, buf[4..], ' ');
+ @memset(buf[0..4], '_');
+ @memset(buf[4..], ' ');
if (sym.sect()) {
buf[0] = 's';
}
diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig
index 6d1cd7b536..59b3e50b07 100644
--- a/src/link/MachO/CodeSignature.zig
+++ b/src/link/MachO/CodeSignature.zig
@@ -100,7 +100,7 @@ const CodeDirectory = struct {
fn addSpecialHash(self: *CodeDirectory, index: u32, hash: [hash_size]u8) void {
assert(index > 0);
self.inner.nSpecialSlots = std.math.max(self.inner.nSpecialSlots, index);
- mem.copy(u8, &self.special_slots[index - 1], &hash);
+ self.special_slots[index - 1] = hash;
}
fn slotType(self: CodeDirectory) u32 {
diff --git a/src/link/MachO/Object.zig b/src/link/MachO/Object.zig
index e407457e03..7cc6f78c7d 100644
--- a/src/link/MachO/Object.zig
+++ b/src/link/MachO/Object.zig
@@ -156,7 +156,7 @@ pub fn parse(self: *Object, allocator: Allocator, cpu_arch: std.Target.Cpu.Arch)
// Prepopulate relocations per section lookup table.
try self.section_relocs_lookup.resize(allocator, nsects);
- mem.set(u32, self.section_relocs_lookup.items, 0);
+ @memset(self.section_relocs_lookup.items, 0);
// Parse symtab.
const symtab = while (it.next()) |cmd| switch (cmd.cmd()) {
@@ -189,10 +189,10 @@ pub fn parse(self: *Object, allocator: Allocator, cpu_arch: std.Target.Cpu.Arch)
};
}
- mem.set(i64, self.globals_lookup, -1);
- mem.set(AtomIndex, self.atom_by_index_table, 0);
- mem.set(Entry, self.source_section_index_lookup, .{});
- mem.set(Entry, self.relocs_lookup, .{});
+ @memset(self.globals_lookup, -1);
+ @memset(self.atom_by_index_table, 0);
+ @memset(self.source_section_index_lookup, .{});
+ @memset(self.relocs_lookup, .{});
// You would expect that the symbol table is at least pre-sorted based on symbol's type:
// local < extern defined < undefined. Unfortunately, this is not guaranteed! For instance,
@@ -252,7 +252,7 @@ pub fn parse(self: *Object, allocator: Allocator, cpu_arch: std.Target.Cpu.Arch)
self.unwind_info_sect_id = self.getSourceSectionIndexByName("__LD", "__compact_unwind");
if (self.hasUnwindRecords()) {
self.unwind_relocs_lookup = try allocator.alloc(Record, self.getUnwindRecords().len);
- mem.set(Record, self.unwind_relocs_lookup, .{ .dead = true, .reloc = .{} });
+ @memset(self.unwind_relocs_lookup, .{ .dead = true, .reloc = .{} });
}
}
diff --git a/src/link/MachO/Trie.zig b/src/link/MachO/Trie.zig
index a97e18a186..34200db7dc 100644
--- a/src/link/MachO/Trie.zig
+++ b/src/link/MachO/Trie.zig
@@ -499,7 +499,7 @@ fn expectEqualHexStrings(expected: []const u8, given: []const u8) !void {
const idx = mem.indexOfDiff(u8, expected_fmt, given_fmt).?;
var padding = try testing.allocator.alloc(u8, idx + 5);
defer testing.allocator.free(padding);
- mem.set(u8, padding, ' ');
+ @memset(padding, ' ');
std.debug.print("\nEXP: {s}\nGIV: {s}\n{s}^ -- first differing byte\n", .{ expected_fmt, given_fmt, padding });
return error.TestFailed;
}
diff --git a/src/link/MachO/UnwindInfo.zig b/src/link/MachO/UnwindInfo.zig
index e59f5fe250..0071657f8b 100644
--- a/src/link/MachO/UnwindInfo.zig
+++ b/src/link/MachO/UnwindInfo.zig
@@ -659,7 +659,7 @@ pub fn write(info: *UnwindInfo, zld: *Zld) !void {
const padding = buffer.items.len - cwriter.bytes_written;
if (padding > 0) {
const offset = math.cast(usize, cwriter.bytes_written) orelse return error.Overflow;
- mem.set(u8, buffer.items[offset..], 0);
+ @memset(buffer.items[offset..], 0);
}
try zld.file.pwriteAll(buffer.items, sect.offset);
diff --git a/src/link/MachO/zld.zig b/src/link/MachO/zld.zig
index bc658fc8d2..7e6870ecbc 100644
--- a/src/link/MachO/zld.zig
+++ b/src/link/MachO/zld.zig
@@ -2140,7 +2140,7 @@ pub const Zld = struct {
var buffer = try gpa.alloc(u8, needed_size);
defer gpa.free(buffer);
- mem.set(u8, buffer, 0);
+ @memset(buffer, 0);
var stream = std.io.fixedBufferStream(buffer);
const writer = stream.writer();
@@ -2352,8 +2352,11 @@ pub const Zld = struct {
const buffer = try self.gpa.alloc(u8, math.cast(usize, needed_size_aligned) orelse return error.Overflow);
defer self.gpa.free(buffer);
- mem.set(u8, buffer, 0);
- mem.copy(u8, buffer, mem.sliceAsBytes(out_dice.items));
+ {
+ const src = mem.sliceAsBytes(out_dice.items);
+ @memcpy(buffer[0..src.len], src);
+ @memset(buffer[src.len..], 0);
+ }
log.debug("writing data-in-code from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned });
@@ -2484,8 +2487,8 @@ pub const Zld = struct {
const buffer = try self.gpa.alloc(u8, math.cast(usize, needed_size_aligned) orelse return error.Overflow);
defer self.gpa.free(buffer);
- mem.set(u8, buffer, 0);
- mem.copy(u8, buffer, self.strtab.buffer.items);
+ @memcpy(buffer[0..self.strtab.buffer.items.len], self.strtab.buffer.items);
+ @memset(buffer[self.strtab.buffer.items.len..], 0);
try self.file.pwriteAll(buffer, offset);
@@ -2805,8 +2808,7 @@ pub const Zld = struct {
pub fn makeStaticString(bytes: []const u8) [16]u8 {
var buf = [_]u8{0} ** 16;
- assert(bytes.len <= buf.len);
- mem.copy(u8, &buf, bytes);
+ @memcpy(buf[0..bytes.len], bytes);
return buf;
}
@@ -3199,7 +3201,7 @@ pub const Zld = struct {
scoped_log.debug(" object({d}): {s}", .{ id, object.name });
if (object.in_symtab == null) continue;
for (object.symtab, 0..) |sym, sym_id| {
- mem.set(u8, &buf, '_');
+ @memset(&buf, '_');
scoped_log.debug(" %{d}: {s} @{x} in sect({d}), {s}", .{
sym_id,
object.getSymbolName(@intCast(u32, sym_id)),
@@ -4007,7 +4009,7 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
log.debug("zeroing out zerofill area of length {x} at {x}", .{ size, start });
var padding = try zld.gpa.alloc(u8, size);
defer zld.gpa.free(padding);
- mem.set(u8, padding, 0);
+ @memset(padding, 0);
try zld.file.pwriteAll(padding, start);
}
}
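
Note: makeStaticString can drop its assert because the destination sub-slice
enforces the same bound: buf[0..bytes.len] is safety-checked when bytes.len exceeds
16, so @memcpy inherits the check. In unsafe builds both the old assert and the
slice check compile away, so the behavior is unchanged either way:

    pub fn makeStaticString(bytes: []const u8) [16]u8 {
        var buf = [_]u8{0} ** 16;
        @memcpy(buf[0..bytes.len], bytes); // slice bounds-check replaces the assert
        return buf;
    }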
diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig
index e7b401e8af..bef06d1c87 100644
--- a/src/link/Plan9.zig
+++ b/src/link/Plan9.zig
@@ -681,7 +681,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
.pcsz = @intCast(u32, linecountinfo.items.len),
.entry = @intCast(u32, self.entry_val.?),
};
- std.mem.copy(u8, hdr_slice, self.hdr.toU8s()[0..hdr_size]);
+ @memcpy(hdr_slice, self.hdr.toU8s()[0..hdr_size]);
// write the fat header for 64 bit entry points
if (self.sixtyfour_bit) {
mem.writeIntSliceBig(u64, hdr_buf[32..40], self.entry_val.?);
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index 0fe9ec5e3b..b6f4a4cc59 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -1976,7 +1976,7 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void {
// We do not have to do this when exporting the memory (the default) because the runtime
// will do it for us, and we do not emit the bss segment at all.
if ((wasm.base.options.output_mode == .Obj or wasm.base.options.import_memory) and kind.data == .uninitialized) {
- std.mem.set(u8, atom.code.items, 0);
+ @memset(atom.code.items, 0);
}
const should_merge = wasm.base.options.output_mode != .Obj;
@@ -3852,7 +3852,10 @@ fn writeToFile(
// Only when all sections have been written successfully do we write the magic
// bytes. This allows us to easily detect what went wrong while generating
// the final binary.
- mem.copy(u8, binary_bytes.items, &(std.wasm.magic ++ std.wasm.version));
+ {
+ const src = std.wasm.magic ++ std.wasm.version;
+ binary_bytes.items[0..src.len].* = src;
+ }
// finally, write the entire binary into the file.
var iovec = [_]std.os.iovec_const{.{
@@ -4559,14 +4562,14 @@ fn writeVecSectionHeader(buffer: []u8, offset: u32, section: std.wasm.Section, s
buf[0] = @enumToInt(section);
leb.writeUnsignedFixed(5, buf[1..6], size);
leb.writeUnsignedFixed(5, buf[6..], items);
- mem.copy(u8, buffer[offset..], &buf);
+ buffer[offset..][0..buf.len].* = buf;
}
fn writeCustomSectionHeader(buffer: []u8, offset: u32, size: u32) !void {
var buf: [1 + 5]u8 = undefined;
buf[0] = 0; // 0 = 'custom' section
leb.writeUnsignedFixed(5, buf[1..6], size);
- mem.copy(u8, buffer[offset..], &buf);
+ buffer[offset..][0..buf.len].* = buf;
}
fn emitLinkSection(wasm: *Wasm, binary_bytes: *std.ArrayList(u8), symbol_table: *std.AutoArrayHashMap(SymbolLoc, u32)) !void {
diff --git a/src/objcopy.zig b/src/objcopy.zig
index 4a15af88e3..12129aba9c 100644
--- a/src/objcopy.zig
+++ b/src/objcopy.zig
@@ -860,7 +860,7 @@ fn ElfFile(comptime is_64: bool) type {
if (section.payload) |data| {
switch (section.section.sh_type) {
elf.DT_VERSYM => {
- std.debug.assert(section.section.sh_entsize == @sizeOf(Elf_Verdef));
+ assert(section.section.sh_entsize == @sizeOf(Elf_Verdef));
const defs = @ptrCast([*]const Elf_Verdef, data)[0 .. @intCast(usize, section.section.sh_size) / @sizeOf(Elf_Verdef)];
for (defs) |def| {
if (def.vd_ndx != elf.SHN_UNDEF)
@@ -868,7 +868,7 @@ fn ElfFile(comptime is_64: bool) type {
}
},
elf.SHT_SYMTAB, elf.SHT_DYNSYM => {
- std.debug.assert(section.section.sh_entsize == @sizeOf(Elf_Sym));
+ assert(section.section.sh_entsize == @sizeOf(Elf_Sym));
const syms = @ptrCast([*]const Elf_Sym, data)[0 .. @intCast(usize, section.section.sh_size) / @sizeOf(Elf_Sym)];
for (syms) |sym| {
@@ -952,11 +952,11 @@ fn ElfFile(comptime is_64: bool) type {
const name: []const u8 = ".gnu_debuglink";
const new_offset = @intCast(u32, strtab.payload.?.len);
const buf = try allocator.alignedAlloc(u8, section_memory_align, new_offset + name.len + 1);
- std.mem.copy(u8, buf[0..new_offset], strtab.payload.?);
- std.mem.copy(u8, buf[new_offset .. new_offset + name.len], name);
+ @memcpy(buf[0..new_offset], strtab.payload.?);
+ @memcpy(buf[new_offset..][0..name.len], name);
buf[new_offset + name.len] = 0;
- std.debug.assert(update.action == .keep);
+ assert(update.action == .keep);
update.payload = buf;
break :blk new_offset;
@@ -978,9 +978,9 @@ fn ElfFile(comptime is_64: bool) type {
// program header as-is.
// nb: for only-debug files, removing it appears to work, but is invalid per the ELF specification.
{
- std.debug.assert(updated_elf_header.e_phoff == @sizeOf(Elf_Ehdr));
+ assert(updated_elf_header.e_phoff == @sizeOf(Elf_Ehdr));
const data = std.mem.sliceAsBytes(self.program_segments);
- std.debug.assert(data.len == @as(usize, updated_elf_header.e_phentsize) * updated_elf_header.e_phnum);
+ assert(data.len == @as(usize, updated_elf_header.e_phentsize) * updated_elf_header.e_phnum);
cmdbuf.appendAssumeCapacity(.{ .write_data = .{ .data = data, .out_offset = updated_elf_header.e_phoff } });
eof_offset = updated_elf_header.e_phoff + @intCast(Elf_OffSize, data.len);
}
@@ -1006,7 +1006,7 @@ fn ElfFile(comptime is_64: bool) type {
var dest_section_idx: u32 = 1;
for (self.sections[1..], sections_update[1..]) |section, update| {
if (update.action == .strip) continue;
- std.debug.assert(update.remap_idx == dest_section_idx);
+ assert(update.remap_idx == dest_section_idx);
const src = if (update.section) |*s| s else &section.section;
const dest = &dest_sections[dest_section_idx];
@@ -1032,7 +1032,7 @@ fn ElfFile(comptime is_64: bool) type {
fatal("zig objcopy: cannot adjust program segments", .{});
}
}
- std.debug.assert(dest.sh_addr % addralign == dest.sh_offset % addralign);
+ assert(dest.sh_addr % addralign == dest.sh_offset % addralign);
if (update.action == .empty)
dest.sh_type = elf.SHT_NOBITS;
@@ -1043,7 +1043,7 @@ fn ElfFile(comptime is_64: bool) type {
const dest_data = switch (src.sh_type) {
elf.DT_VERSYM => dst_data: {
const data = try allocator.alignedAlloc(u8, section_memory_align, src_data.len);
- std.mem.copy(u8, data, src_data);
+ @memcpy(data, src_data);
const defs = @ptrCast([*]Elf_Verdef, data)[0 .. @intCast(usize, src.sh_size) / @sizeOf(Elf_Verdef)];
for (defs) |*def| {
@@ -1055,7 +1055,7 @@ fn ElfFile(comptime is_64: bool) type {
},
elf.SHT_SYMTAB, elf.SHT_DYNSYM => dst_data: {
const data = try allocator.alignedAlloc(u8, section_memory_align, src_data.len);
- std.mem.copy(u8, data, src_data);
+ @memcpy(data, src_data);
const syms = @ptrCast([*]Elf_Sym, data)[0 .. @intCast(usize, src.sh_size) / @sizeOf(Elf_Sym)];
for (syms) |*sym| {
@@ -1068,7 +1068,7 @@ fn ElfFile(comptime is_64: bool) type {
else => src_data,
};
- std.debug.assert(dest_data.len == dest.sh_size);
+ assert(dest_data.len == dest.sh_size);
cmdbuf.appendAssumeCapacity(.{ .write_data = .{ .data = dest_data, .out_offset = dest.sh_offset } });
eof_offset = dest.sh_offset + dest.sh_size;
} else {
@@ -1087,9 +1087,9 @@ fn ElfFile(comptime is_64: bool) type {
const payload = payload: {
const crc_offset = std.mem.alignForward(link.name.len + 1, 4);
const buf = try allocator.alignedAlloc(u8, 4, crc_offset + 4);
- std.mem.copy(u8, buf[0..link.name.len], link.name);
- std.mem.set(u8, buf[link.name.len..crc_offset], 0);
- std.mem.copy(u8, buf[crc_offset..], std.mem.asBytes(&link.crc32));
+ @memcpy(buf[0..link.name.len], link.name);
+ @memset(buf[link.name.len..crc_offset], 0);
+ @memcpy(buf[crc_offset..], std.mem.asBytes(&link.crc32));
break :payload buf;
};
@@ -1111,7 +1111,7 @@ fn ElfFile(comptime is_64: bool) type {
eof_offset += @intCast(Elf_OffSize, payload.len);
}
- std.debug.assert(dest_section_idx == new_shnum);
+ assert(dest_section_idx == new_shnum);
break :blk dest_sections;
};
@@ -1120,7 +1120,7 @@ fn ElfFile(comptime is_64: bool) type {
const offset = std.mem.alignForwardGeneric(Elf_OffSize, eof_offset, @alignOf(Elf_Shdr));
const data = std.mem.sliceAsBytes(updated_section_header);
- std.debug.assert(data.len == @as(usize, updated_elf_header.e_shentsize) * new_shnum);
+ assert(data.len == @as(usize, updated_elf_header.e_shentsize) * new_shnum);
updated_elf_header.e_shoff = offset;
updated_elf_header.e_shnum = new_shnum;
@@ -1215,7 +1215,7 @@ const ElfFileHelper = struct {
for (cmds) |cmd| {
switch (cmd) {
.write_data => |data| {
- std.debug.assert(data.out_offset >= offset);
+ assert(data.out_offset >= offset);
if (fused_cmd) |prev| {
consolidated.appendAssumeCapacity(prev);
fused_cmd = null;
@@ -1227,7 +1227,7 @@ const ElfFileHelper = struct {
offset = data.out_offset + data.data.len;
},
.copy_range => |range| {
- std.debug.assert(range.out_offset >= offset);
+ assert(range.out_offset >= offset);
if (fused_cmd) |prev| {
if (range.in_offset >= prev.copy_range.in_offset + prev.copy_range.len and (range.out_offset - prev.copy_range.out_offset == range.in_offset - prev.copy_range.in_offset)) {
fused_cmd = .{ .copy_range = .{
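The .gnu_debuglink payload assembled above is the linked file name, NUL-padded up to 4-byte alignment, followed by the CRC32 of the debug file in native byte order. A hedged sketch of the same layout as a free function (the helper name is illustrative, not the commit's API):

    const std = @import("std");

    fn debuglinkPayload(allocator: std.mem.Allocator, name: []const u8, crc32: u32) ![]u8 {
        // Name + NUL terminator, rounded up to 4-byte alignment, then 4 CRC bytes.
        const crc_offset = std.mem.alignForward(name.len + 1, 4);
        const buf = try allocator.alignedAlloc(u8, 4, crc_offset + 4);
        @memcpy(buf[0..name.len], name);
        @memset(buf[name.len..crc_offset], 0);
        @memcpy(buf[crc_offset..], std.mem.asBytes(&crc32)); // native-endian, as above
        return buf;
    }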
diff --git a/src/print_air.zig b/src/print_air.zig
index db3e47c0dd..2d7995842f 100644
--- a/src/print_air.zig
+++ b/src/print_air.zig
@@ -846,7 +846,7 @@ const Writer = struct {
else blk: {
const slice = w.gpa.alloc([]const Air.Inst.Index, switch_br.data.cases_len + 1) catch
@panic("out of memory");
- std.mem.set([]const Air.Inst.Index, slice, &.{});
+ @memset(slice, &.{});
break :blk Liveness.SwitchBrTable{ .deaths = slice };
};
defer w.gpa.free(liveness.deaths);
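Like std.mem.set before it, @memset is not limited to bytes; the element type is inferred from the destination slice, so `&.{}` above coerces to an empty `[]const Air.Inst.Index` for every element. A standalone illustration with a plain element type (assumed example, not from the commit):

    const std = @import("std");

    test "@memset with a non-byte element type" {
        var deaths: [4][]const u32 = undefined;
        @memset(&deaths, &.{}); // every element becomes an empty slice
        for (deaths) |d| try std.testing.expectEqual(@as(usize, 0), d.len);
    }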
diff --git a/src/print_zir.zig b/src/print_zir.zig
index 30bfdba347..922366dc85 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -682,7 +682,7 @@ const Writer = struct {
const limbs = try self.gpa.alloc(std.math.big.Limb, inst_data.len);
defer self.gpa.free(limbs);
- mem.copy(u8, mem.sliceAsBytes(limbs), limb_bytes);
+ @memcpy(mem.sliceAsBytes(limbs), limb_bytes);
const big_int: std.math.big.int.Const = .{
.limbs = limbs,
.positive = true,
diff --git a/src/translate_c.zig b/src/translate_c.zig
index 7ad58329df..bc7a1138da 100644
--- a/src/translate_c.zig
+++ b/src/translate_c.zig
@@ -113,7 +113,7 @@ const Scope = struct {
const alloc_len = self.statements.items.len + @boolToInt(self.base.parent.?.id == .do_loop);
var stmts = try c.arena.alloc(Node, alloc_len);
stmts.len = self.statements.items.len;
- mem.copy(Node, stmts, self.statements.items);
+ @memcpy(stmts[0..self.statements.items.len], self.statements.items);
return Tag.block.create(c.arena, .{
.label = self.label,
.stmts = stmts,
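Here stmts is allocated with room for one optional trailing node and then shrunk, so the destination slice passed to @memcpy matches the source length exactly. A small standalone sketch of the same over-allocate-then-copy pattern (illustrative only):

    const std = @import("std");

    test "copy into an over-allocated buffer" {
        const gpa = std.testing.allocator;
        const items = [_]u8{ 1, 2, 3 };

        // Reserve one extra slot, but give @memcpy matching lengths.
        const buf = try gpa.alloc(u8, items.len + 1);
        defer gpa.free(buf);
        @memcpy(buf[0..items.len], &items);
        buf[items.len] = 4; // the extra trailing element

        try std.testing.expectEqualSlices(u8, &.{ 1, 2, 3, 4 }, buf);
    }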
diff --git a/src/type.zig b/src/type.zig
index c7b2844970..816a1ee2e0 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -4767,7 +4767,7 @@ pub const Type = extern union {
.fn_ccc_void_no_args => return,
.function => {
const payload = self.castTag(.function).?.data;
- std.mem.copy(Type, types, payload.param_types);
+ @memcpy(types[0..payload.param_types.len], payload.param_types);
},
else => unreachable,
diff --git a/src/value.zig b/src/value.zig
index 05e9d24ee2..16ccc0c642 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -875,7 +875,7 @@ pub const Value = extern union {
.repeated => {
const byte = @intCast(u8, val.castTag(.repeated).?.data.toUnsignedInt(target));
const result = try allocator.alloc(u8, @intCast(usize, ty.arrayLen()));
- std.mem.set(u8, result, byte);
+ @memset(result, byte);
return result;
},
.decl_ref => {
@@ -1278,12 +1278,16 @@ pub const Value = extern union {
///
/// Asserts that buffer.len >= ty.abiSize(). The buffer is allowed to extend past
/// the end of the value in memory.
- pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) error{ReinterpretDeclRef}!void {
+ pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) error{
+ ReinterpretDeclRef,
+ IllDefinedMemoryLayout,
+ Unimplemented,
+ }!void {
const target = mod.getTarget();
const endian = target.cpu.arch.endian();
if (val.isUndef()) {
const size = @intCast(usize, ty.abiSize(target));
- std.mem.set(u8, buffer[0..size], 0xaa);
+ @memset(buffer[0..size], 0xaa);
return;
}
switch (ty.zigTypeTag()) {
@@ -1345,7 +1349,7 @@ pub const Value = extern union {
return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
},
.Struct => switch (ty.containerLayout()) {
- .Auto => unreachable, // Sema is supposed to have emitted a compile error already
+ .Auto => return error.IllDefinedMemoryLayout,
.Extern => {
const fields = ty.structFields().values();
const field_vals = val.castTag(.aggregate).?.data;
@@ -1366,20 +1370,20 @@ pub const Value = extern union {
std.mem.writeInt(Int, buffer[0..@sizeOf(Int)], @intCast(Int, int), endian);
},
.Union => switch (ty.containerLayout()) {
- .Auto => unreachable,
- .Extern => @panic("TODO implement writeToMemory for extern unions"),
+ .Auto => return error.IllDefinedMemoryLayout,
+ .Extern => return error.Unimplemented,
.Packed => {
const byte_count = (@intCast(usize, ty.bitSize(target)) + 7) / 8;
return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
},
},
.Pointer => {
- assert(!ty.isSlice()); // No well defined layout.
+ if (ty.isSlice()) return error.IllDefinedMemoryLayout;
if (val.isDeclRef()) return error.ReinterpretDeclRef;
return val.writeToMemory(Type.usize, mod, buffer);
},
.Optional => {
- assert(ty.isPtrLikeOptional());
+ if (!ty.isPtrLikeOptional()) return error.IllDefinedMemoryLayout;
var buf: Type.Payload.ElemType = undefined;
const child = ty.optionalChild(&buf);
const opt_val = val.optionalValue();
@@ -1389,7 +1393,7 @@ pub const Value = extern union {
return writeToMemory(Value.zero, Type.usize, mod, buffer);
}
},
- else => @panic("TODO implement writeToMemory for more types"),
+ else => return error.Unimplemented,
}
}
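writeToMemory's asserts and panics become recoverable errors here, so a caller that merely probes whether a value has a well-defined byte representation (such as hasRepeatedByteRepr below) can fall back gracefully instead of crashing the compiler. A minimal standalone sketch of the assert-to-error pattern, using stand-in types rather than the compiler's Value/Type/Module:

    const std = @import("std");

    const LayoutError = error{ ReinterpretDeclRef, IllDefinedMemoryLayout, Unimplemented };

    fn writeBytes(buffer: []u8, well_defined: bool) LayoutError!void {
        // What used to be `assert(well_defined)` is now a recoverable error.
        if (!well_defined) return error.IllDefinedMemoryLayout;
        @memset(buffer, 0xaa);
    }

    test "caller recovers instead of crashing" {
        var buf: [4]u8 = undefined;
        writeBytes(&buf, false) catch |err| switch (err) {
            error.IllDefinedMemoryLayout => return, // graceful fallback path
            else => return err,
        };
        unreachable;
    }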
@@ -2785,6 +2789,7 @@ pub const Value = extern union {
.field_ptr => isComptimeMutablePtr(val.castTag(.field_ptr).?.data.container_ptr),
.eu_payload_ptr => isComptimeMutablePtr(val.castTag(.eu_payload_ptr).?.data.container_ptr),
.opt_payload_ptr => isComptimeMutablePtr(val.castTag(.opt_payload_ptr).?.data.container_ptr),
+ .slice => isComptimeMutablePtr(val.castTag(.slice).?.data.ptr),
else => false,
};
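The added .slice case recurses into the slice's pointer field, so a pointer derived from a comptime-mutable slice is itself recognized as comptime-mutable. At the language level, this is the behavior that allows stores like the following (an assumed illustration, not a test from the commit):

    const std = @import("std");

    test "store through a comptime-mutable slice" {
        comptime {
            var array = [_]u8{ 1, 2, 3 };
            const slice: []u8 = &array; // slice of comptime-mutable memory
            slice[1] = 42; // store through the slice's pointer
            std.debug.assert(array[1] == 42);
        }
    }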
@@ -5381,6 +5386,36 @@ pub const Value = extern union {
}
}
+ /// If the value is represented in memory as a series of bytes that all
+ /// have the same value, return that byte value; otherwise null.
+ pub fn hasRepeatedByteRepr(val: Value, ty: Type, mod: *Module, value_buffer: *Payload.U64) !?Value {
+ const target = mod.getTarget();
+ const abi_size = ty.abiSize(target);
+ assert(abi_size >= 1);
+ const byte_buffer = try mod.gpa.alloc(u8, @intCast(usize, abi_size));
+ defer mod.gpa.free(byte_buffer);
+
+ writeToMemory(val, ty, mod, byte_buffer) catch |err| switch (err) {
+ error.ReinterpretDeclRef => return null,
+ // TODO: The writeToMemory function was originally created for the purpose
+ // of comptime pointer casting. However, it is now also used for checking
+ // the actual memory layout that machine code will generate late in
+ // compilation. So this error handling is too aggressive and produces some
+ // false negatives, resulting in less-than-ideal code generation.
+ error.IllDefinedMemoryLayout => return null,
+ error.Unimplemented => return null,
+ };
+ const first_byte = byte_buffer[0];
+ for (byte_buffer[1..]) |byte| {
+ if (byte != first_byte) return null;
+ }
+ value_buffer.* = .{
+ .base = .{ .tag = .int_u64 },
+ .data = first_byte,
+ };
+ return initPayload(&value_buffer.base);
+ }
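The scan itself is simple: serialize the value, then check that every byte equals the first; a backend can then lower the whole aggregate to a single memset. A standalone sketch of that core check (hasRepeatedByteRepr itself operates on compiler Values):

    const std = @import("std");

    fn repeatedByte(bytes: []const u8) ?u8 {
        std.debug.assert(bytes.len >= 1);
        const first = bytes[0];
        for (bytes[1..]) |byte| {
            if (byte != first) return null;
        }
        return first;
    }

    test "repeatedByte" {
        try std.testing.expectEqual(@as(?u8, 0xaa), repeatedByte(&.{ 0xaa, 0xaa, 0xaa }));
        try std.testing.expectEqual(@as(?u8, null), repeatedByte(&.{ 1, 2 }));
    }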
+
/// This type is not copyable since it may contain pointers to its inner data.
pub const Payload = struct {
tag: Tag,