author    Motiejus Jakštys <motiejus@uber.com>  2023-06-09 16:02:18 -0700
committer Andrew Kelley <andrew@ziglang.org>    2023-06-17 12:49:13 -0700
commit    d41111d7ef531f6f55a19c56205d6d2f1134c224 (patch)
tree      14d7b7764a64fa2d4d274c0726a1a587484c4999 /src/link
parent    5baa05664e6dac0f473c8411f6e9d8e0f62555a9 (diff)
mem: rename align*Generic to mem.align*
Anecdote 1: The generic version is far more popular than the non-generic
one in the Zig codebase:

    git grep -w alignForward | wc -l
    56

    git grep -w alignForwardGeneric | wc -l
    149

    git grep -w alignBackward | wc -l
    6

    git grep -w alignBackwardGeneric | wc -l
    15

Anecdote 2: In my project (turbonss), which does a lot of arithmetic and
alignment, I use the Generic functions exclusively.

Anecdote 3: We used only the Generic versions in the Macho Man's linker
workshop.
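For reference, a minimal sketch (not part of the commit) of the renamed
calls as they appear throughout this diff; the arguments mirror the old
Generic signatures, with the integer type still passed explicitly:

    const std = @import("std");
    const mem = std.mem;

    test "renamed alignment helpers" {
        // Previously spelled mem.alignForwardGeneric(u64, 13, 8) and
        // mem.alignBackwardGeneric(u64, 13, 8); after this commit the
        // same calls simply drop the Generic suffix.
        try std.testing.expectEqual(@as(u64, 16), mem.alignForward(u64, 13, 8));
        try std.testing.expectEqual(@as(u64, 8), mem.alignBackward(u64, 13, 8));
    }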
Diffstat (limited to 'src/link')
-rw-r--r--  src/link/Coff.zig                 30
-rw-r--r--  src/link/Dwarf.zig                 2
-rw-r--r--  src/link/Elf.zig                  10
-rw-r--r--  src/link/MachO.zig                50
-rw-r--r--  src/link/MachO/CodeSignature.zig   8
-rw-r--r--  src/link/MachO/DebugSymbols.zig   18
-rw-r--r--  src/link/MachO/load_commands.zig   8
-rw-r--r--  src/link/MachO/thunks.zig          6
-rw-r--r--  src/link/MachO/zld.zig            34
-rw-r--r--  src/link/Wasm.zig                 16
10 files changed, 91 insertions, 91 deletions
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index f7785858dd..202bb71e9b 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -437,10 +437,10 @@ fn allocateSection(self: *Coff, name: []const u8, size: u32, flags: coff.Section
const vaddr = blk: {
if (index == 0) break :blk self.page_size;
const prev_header = self.sections.items(.header)[index - 1];
- break :blk mem.alignForwardGeneric(u32, prev_header.virtual_address + prev_header.virtual_size, self.page_size);
+ break :blk mem.alignForward(u32, prev_header.virtual_address + prev_header.virtual_size, self.page_size);
};
// We commit more memory than needed upfront so that we don't have to reallocate too soon.
- const memsz = mem.alignForwardGeneric(u32, size, self.page_size) * 100;
+ const memsz = mem.alignForward(u32, size, self.page_size) * 100;
log.debug("found {s} free space 0x{x} to 0x{x} (0x{x} - 0x{x})", .{
name,
off,
@@ -505,8 +505,8 @@ fn growSection(self: *Coff, sect_id: u32, needed_size: u32) !void {
fn growSectionVirtualMemory(self: *Coff, sect_id: u32, needed_size: u32) !void {
const header = &self.sections.items(.header)[sect_id];
const increased_size = padToIdeal(needed_size);
- const old_aligned_end = header.virtual_address + mem.alignForwardGeneric(u32, header.virtual_size, self.page_size);
- const new_aligned_end = header.virtual_address + mem.alignForwardGeneric(u32, increased_size, self.page_size);
+ const old_aligned_end = header.virtual_address + mem.alignForward(u32, header.virtual_size, self.page_size);
+ const new_aligned_end = header.virtual_address + mem.alignForward(u32, increased_size, self.page_size);
const diff = new_aligned_end - old_aligned_end;
log.debug("growing {s} in virtual memory by {x}", .{ self.getSectionName(header), diff });
@@ -567,7 +567,7 @@ fn allocateAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignme
const ideal_capacity_end_vaddr = math.add(u32, sym.value, ideal_capacity) catch ideal_capacity;
const capacity_end_vaddr = sym.value + capacity;
const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity;
- const new_start_vaddr = mem.alignBackwardGeneric(u32, new_start_vaddr_unaligned, alignment);
+ const new_start_vaddr = mem.alignBackward(u32, new_start_vaddr_unaligned, alignment);
if (new_start_vaddr < ideal_capacity_end_vaddr) {
// Additional bookkeeping here to notice if this free list node
// should be deleted because the atom that it points to has grown to take up
@@ -596,11 +596,11 @@ fn allocateAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignme
const last_symbol = last.getSymbol(self);
const ideal_capacity = if (header.isCode()) padToIdeal(last.size) else last.size;
const ideal_capacity_end_vaddr = last_symbol.value + ideal_capacity;
- const new_start_vaddr = mem.alignForwardGeneric(u32, ideal_capacity_end_vaddr, alignment);
+ const new_start_vaddr = mem.alignForward(u32, ideal_capacity_end_vaddr, alignment);
atom_placement = last_index;
break :blk new_start_vaddr;
} else {
- break :blk mem.alignForwardGeneric(u32, header.virtual_address, alignment);
+ break :blk mem.alignForward(u32, header.virtual_address, alignment);
}
};
@@ -722,7 +722,7 @@ pub fn createAtom(self: *Coff) !Atom.Index {
fn growAtom(self: *Coff, atom_index: Atom.Index, new_atom_size: u32, alignment: u32) !u32 {
const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
- const align_ok = mem.alignBackwardGeneric(u32, sym.value, alignment) == sym.value;
+ const align_ok = mem.alignBackward(u32, sym.value, alignment) == sym.value;
const need_realloc = !align_ok or new_atom_size > atom.capacity(self);
if (!need_realloc) return sym.value;
return self.allocateAtom(atom_index, new_atom_size, alignment);
@@ -1798,7 +1798,7 @@ fn writeBaseRelocations(self: *Coff) !void {
for (offsets.items) |offset| {
const rva = sym.value + offset;
- const page = mem.alignBackwardGeneric(u32, rva, self.page_size);
+ const page = mem.alignBackward(u32, rva, self.page_size);
const gop = try page_table.getOrPut(page);
if (!gop.found_existing) {
gop.value_ptr.* = std.ArrayList(coff.BaseRelocation).init(gpa);
@@ -1819,7 +1819,7 @@ fn writeBaseRelocations(self: *Coff) !void {
if (sym.section_number == .UNDEFINED) continue;
const rva = @intCast(u32, header.virtual_address + index * self.ptr_width.size());
- const page = mem.alignBackwardGeneric(u32, rva, self.page_size);
+ const page = mem.alignBackward(u32, rva, self.page_size);
const gop = try page_table.getOrPut(page);
if (!gop.found_existing) {
gop.value_ptr.* = std.ArrayList(coff.BaseRelocation).init(gpa);
@@ -1907,7 +1907,7 @@ fn writeImportTables(self: *Coff) !void {
lookup_table_size += @intCast(u32, itable.entries.items.len + 1) * @sizeOf(coff.ImportLookupEntry64.ByName);
for (itable.entries.items) |entry| {
const sym_name = self.getSymbolName(entry);
- names_table_size += 2 + mem.alignForwardGeneric(u32, @intCast(u32, sym_name.len + 1), 2);
+ names_table_size += 2 + mem.alignForward(u32, @intCast(u32, sym_name.len + 1), 2);
}
dll_names_size += @intCast(u32, lib_name.len + ext.len + 1);
}
@@ -2102,7 +2102,7 @@ fn writeHeader(self: *Coff) !void {
};
const subsystem: coff.Subsystem = .WINDOWS_CUI;
const size_of_image: u32 = self.getSizeOfImage();
- const size_of_headers: u32 = mem.alignForwardGeneric(u32, self.getSizeOfHeaders(), default_file_alignment);
+ const size_of_headers: u32 = mem.alignForward(u32, self.getSizeOfHeaders(), default_file_alignment);
const image_base = self.getImageBase();
const base_of_code = self.sections.get(self.text_section_index.?).header.virtual_address;
@@ -2247,7 +2247,7 @@ fn allocatedSize(self: *Coff, start: u32) u32 {
fn findFreeSpace(self: *Coff, object_size: u32, min_alignment: u32) u32 {
var start: u32 = 0;
while (self.detectAllocCollision(start, object_size)) |item_end| {
- start = mem.alignForwardGeneric(u32, item_end, min_alignment);
+ start = mem.alignForward(u32, item_end, min_alignment);
}
return start;
}
@@ -2294,9 +2294,9 @@ inline fn getSectionHeadersOffset(self: Coff) u32 {
}
inline fn getSizeOfImage(self: Coff) u32 {
- var image_size: u32 = mem.alignForwardGeneric(u32, self.getSizeOfHeaders(), self.page_size);
+ var image_size: u32 = mem.alignForward(u32, self.getSizeOfHeaders(), self.page_size);
for (self.sections.items(.header)) |header| {
- image_size += mem.alignForwardGeneric(u32, header.virtual_size, self.page_size);
+ image_size += mem.alignForward(u32, header.virtual_size, self.page_size);
}
return image_size;
}
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index b9b7772260..3cb1c213e9 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -2152,7 +2152,7 @@ pub fn writeDbgAranges(self: *Dwarf, addr: u64, size: u64) !void {
di_buf.appendAssumeCapacity(0); // segment_selector_size
const end_header_offset = di_buf.items.len;
- const begin_entries_offset = mem.alignForward(end_header_offset, ptr_width_bytes * 2);
+ const begin_entries_offset = mem.alignForward(usize, end_header_offset, ptr_width_bytes * 2);
di_buf.appendNTimesAssumeCapacity(0, begin_entries_offset - end_header_offset);
// Currently only one compilation unit is supported, so the address range is simply
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 15ba9ebecc..e0d0dfc75f 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -439,7 +439,7 @@ pub fn allocatedSize(self: *Elf, start: u64) u64 {
pub fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u32) u64 {
var start: u64 = 0;
while (self.detectAllocCollision(start, object_size)) |item_end| {
- start = mem.alignForwardGeneric(u64, item_end, min_alignment);
+ start = mem.alignForward(u64, item_end, min_alignment);
}
return start;
}
@@ -1173,7 +1173,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
phdr_table.p_offset = self.findFreeSpace(needed_size, @intCast(u32, phdr_table.p_align));
}
- phdr_table_load.p_offset = mem.alignBackwardGeneric(u64, phdr_table.p_offset, phdr_table_load.p_align);
+ phdr_table_load.p_offset = mem.alignBackward(u64, phdr_table.p_offset, phdr_table_load.p_align);
const load_align_offset = phdr_table.p_offset - phdr_table_load.p_offset;
phdr_table_load.p_filesz = load_align_offset + needed_size;
phdr_table_load.p_memsz = load_align_offset + needed_size;
@@ -2215,7 +2215,7 @@ fn shrinkAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64) void {
fn growAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignment: u64) !u64 {
const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
- const align_ok = mem.alignBackwardGeneric(u64, sym.st_value, alignment) == sym.st_value;
+ const align_ok = mem.alignBackward(u64, sym.st_value, alignment) == sym.st_value;
const need_realloc = !align_ok or new_block_size > atom.capacity(self);
if (!need_realloc) return sym.st_value;
return self.allocateAtom(atom_index, new_block_size, alignment);
@@ -2269,7 +2269,7 @@ fn allocateAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignme
const ideal_capacity_end_vaddr = std.math.add(u64, big_atom_sym.st_value, ideal_capacity) catch ideal_capacity;
const capacity_end_vaddr = big_atom_sym.st_value + capacity;
const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity;
- const new_start_vaddr = mem.alignBackwardGeneric(u64, new_start_vaddr_unaligned, alignment);
+ const new_start_vaddr = mem.alignBackward(u64, new_start_vaddr_unaligned, alignment);
if (new_start_vaddr < ideal_capacity_end_vaddr) {
// Additional bookkeeping here to notice if this free list node
// should be deleted because the block that it points to has grown to take up
@@ -2298,7 +2298,7 @@ fn allocateAtom(self: *Elf, atom_index: Atom.Index, new_block_size: u64, alignme
const last_sym = last.getSymbol(self);
const ideal_capacity = padToIdeal(last_sym.st_size);
const ideal_capacity_end_vaddr = last_sym.st_value + ideal_capacity;
- const new_start_vaddr = mem.alignForwardGeneric(u64, ideal_capacity_end_vaddr, alignment);
+ const new_start_vaddr = mem.alignForward(u64, ideal_capacity_end_vaddr, alignment);
// Set up the metadata to be updated, after errors are no longer possible.
atom_placement = last_index;
break :blk new_start_vaddr;
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index a3f67bc70a..024fe1f8d9 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -1777,7 +1777,7 @@ fn shrinkAtom(self: *MachO, atom_index: Atom.Index, new_block_size: u64) void {
fn growAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignment: u64) !u64 {
const atom = self.getAtom(atom_index);
const sym = atom.getSymbol(self);
- const align_ok = mem.alignBackwardGeneric(u64, sym.n_value, alignment) == sym.n_value;
+ const align_ok = mem.alignBackward(u64, sym.n_value, alignment) == sym.n_value;
const need_realloc = !align_ok or new_atom_size > atom.capacity(self);
if (!need_realloc) return sym.n_value;
return self.allocateAtom(atom_index, new_atom_size, alignment);
@@ -2598,7 +2598,7 @@ fn populateMissingMetadata(self: *MachO) !void {
// The first __TEXT segment is immovable and covers MachO header and load commands.
self.header_segment_cmd_index = @intCast(u8, self.segments.items.len);
const ideal_size = @max(self.base.options.headerpad_size orelse 0, default_headerpad_size);
- const needed_size = mem.alignForwardGeneric(u64, padToIdeal(ideal_size), self.page_size);
+ const needed_size = mem.alignForward(u64, padToIdeal(ideal_size), self.page_size);
log.debug("found __TEXT segment (header-only) free space 0x{x} to 0x{x}", .{ 0, needed_size });
@@ -2735,7 +2735,7 @@ fn populateMissingMetadata(self: *MachO) !void {
fn calcPagezeroSize(self: *MachO) u64 {
const pagezero_vmsize = self.base.options.pagezero_size orelse default_pagezero_vmsize;
- const aligned_pagezero_vmsize = mem.alignBackwardGeneric(u64, pagezero_vmsize, self.page_size);
+ const aligned_pagezero_vmsize = mem.alignBackward(u64, pagezero_vmsize, self.page_size);
if (self.base.options.output_mode == .Lib) return 0;
if (aligned_pagezero_vmsize == 0) return 0;
if (aligned_pagezero_vmsize != pagezero_vmsize) {
@@ -2759,10 +2759,10 @@ fn allocateSection(self: *MachO, segname: []const u8, sectname: []const u8, opts
const section_id = @intCast(u8, self.sections.slice().len);
const vmaddr = blk: {
const prev_segment = self.segments.items[segment_id - 1];
- break :blk mem.alignForwardGeneric(u64, prev_segment.vmaddr + prev_segment.vmsize, self.page_size);
+ break :blk mem.alignForward(u64, prev_segment.vmaddr + prev_segment.vmsize, self.page_size);
};
// We commit more memory than needed upfront so that we don't have to reallocate too soon.
- const vmsize = mem.alignForwardGeneric(u64, opts.size, self.page_size);
+ const vmsize = mem.alignForward(u64, opts.size, self.page_size);
const off = self.findFreeSpace(opts.size, self.page_size);
log.debug("found {s},{s} free space 0x{x} to 0x{x} (0x{x} - 0x{x})", .{
@@ -2790,8 +2790,8 @@ fn allocateSection(self: *MachO, segname: []const u8, sectname: []const u8, opts
var section = macho.section_64{
.sectname = makeStaticString(sectname),
.segname = makeStaticString(segname),
- .addr = mem.alignForwardGeneric(u64, vmaddr, opts.alignment),
- .offset = mem.alignForwardGeneric(u32, @intCast(u32, off), opts.alignment),
+ .addr = mem.alignForward(u64, vmaddr, opts.alignment),
+ .offset = mem.alignForward(u32, @intCast(u32, off), opts.alignment),
.size = opts.size,
.@"align" = math.log2(opts.alignment),
.flags = opts.flags,
@@ -2846,8 +2846,8 @@ fn growSection(self: *MachO, sect_id: u8, needed_size: u64) !void {
}
header.size = needed_size;
- segment.filesize = mem.alignForwardGeneric(u64, needed_size, self.page_size);
- segment.vmsize = mem.alignForwardGeneric(u64, needed_size, self.page_size);
+ segment.filesize = mem.alignForward(u64, needed_size, self.page_size);
+ segment.vmsize = mem.alignForward(u64, needed_size, self.page_size);
}
fn growSectionVirtualMemory(self: *MachO, sect_id: u8, needed_size: u64) !void {
@@ -2855,7 +2855,7 @@ fn growSectionVirtualMemory(self: *MachO, sect_id: u8, needed_size: u64) !void {
const segment = self.getSegmentPtr(sect_id);
const increased_size = padToIdeal(needed_size);
const old_aligned_end = segment.vmaddr + segment.vmsize;
- const new_aligned_end = segment.vmaddr + mem.alignForwardGeneric(u64, increased_size, self.page_size);
+ const new_aligned_end = segment.vmaddr + mem.alignForward(u64, increased_size, self.page_size);
const diff = new_aligned_end - old_aligned_end;
log.debug("shifting every segment after {s},{s} in virtual memory by {x}", .{
header.segName(),
@@ -2927,7 +2927,7 @@ fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignm
const ideal_capacity_end_vaddr = math.add(u64, sym.n_value, ideal_capacity) catch ideal_capacity;
const capacity_end_vaddr = sym.n_value + capacity;
const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity;
- const new_start_vaddr = mem.alignBackwardGeneric(u64, new_start_vaddr_unaligned, alignment);
+ const new_start_vaddr = mem.alignBackward(u64, new_start_vaddr_unaligned, alignment);
if (new_start_vaddr < ideal_capacity_end_vaddr) {
// Additional bookkeeping here to notice if this free list node
// should be deleted because the atom that it points to has grown to take up
@@ -2956,11 +2956,11 @@ fn allocateAtom(self: *MachO, atom_index: Atom.Index, new_atom_size: u64, alignm
const last_symbol = last.getSymbol(self);
const ideal_capacity = if (requires_padding) padToIdeal(last.size) else last.size;
const ideal_capacity_end_vaddr = last_symbol.n_value + ideal_capacity;
- const new_start_vaddr = mem.alignForwardGeneric(u64, ideal_capacity_end_vaddr, alignment);
+ const new_start_vaddr = mem.alignForward(u64, ideal_capacity_end_vaddr, alignment);
atom_placement = last_index;
break :blk new_start_vaddr;
} else {
- break :blk mem.alignForwardGeneric(u64, segment.vmaddr, alignment);
+ break :blk mem.alignForward(u64, segment.vmaddr, alignment);
}
};
@@ -3034,17 +3034,17 @@ fn writeLinkeditSegmentData(self: *MachO) !void {
for (self.segments.items, 0..) |segment, id| {
if (self.linkedit_segment_cmd_index.? == @intCast(u8, id)) continue;
if (seg.vmaddr < segment.vmaddr + segment.vmsize) {
- seg.vmaddr = mem.alignForwardGeneric(u64, segment.vmaddr + segment.vmsize, self.page_size);
+ seg.vmaddr = mem.alignForward(u64, segment.vmaddr + segment.vmsize, self.page_size);
}
if (seg.fileoff < segment.fileoff + segment.filesize) {
- seg.fileoff = mem.alignForwardGeneric(u64, segment.fileoff + segment.filesize, self.page_size);
+ seg.fileoff = mem.alignForward(u64, segment.fileoff + segment.filesize, self.page_size);
}
}
try self.writeDyldInfoData();
try self.writeSymtabs();
- seg.vmsize = mem.alignForwardGeneric(u64, seg.filesize, self.page_size);
+ seg.vmsize = mem.alignForward(u64, seg.filesize, self.page_size);
}
fn collectRebaseDataFromTableSection(self: *MachO, sect_id: u8, rebase: *Rebase, table: anytype) !void {
@@ -3236,17 +3236,17 @@ fn writeDyldInfoData(self: *MachO) !void {
assert(mem.isAlignedGeneric(u64, link_seg.fileoff, @alignOf(u64)));
const rebase_off = link_seg.fileoff;
const rebase_size = rebase.size();
- const rebase_size_aligned = mem.alignForwardGeneric(u64, rebase_size, @alignOf(u64));
+ const rebase_size_aligned = mem.alignForward(u64, rebase_size, @alignOf(u64));
log.debug("writing rebase info from 0x{x} to 0x{x}", .{ rebase_off, rebase_off + rebase_size_aligned });
const bind_off = rebase_off + rebase_size_aligned;
const bind_size = bind.size();
- const bind_size_aligned = mem.alignForwardGeneric(u64, bind_size, @alignOf(u64));
+ const bind_size_aligned = mem.alignForward(u64, bind_size, @alignOf(u64));
log.debug("writing bind info from 0x{x} to 0x{x}", .{ bind_off, bind_off + bind_size_aligned });
const lazy_bind_off = bind_off + bind_size_aligned;
const lazy_bind_size = lazy_bind.size();
- const lazy_bind_size_aligned = mem.alignForwardGeneric(u64, lazy_bind_size, @alignOf(u64));
+ const lazy_bind_size_aligned = mem.alignForward(u64, lazy_bind_size, @alignOf(u64));
log.debug("writing lazy bind info from 0x{x} to 0x{x}", .{
lazy_bind_off,
lazy_bind_off + lazy_bind_size_aligned,
@@ -3254,7 +3254,7 @@ fn writeDyldInfoData(self: *MachO) !void {
const export_off = lazy_bind_off + lazy_bind_size_aligned;
const export_size = trie.size;
- const export_size_aligned = mem.alignForwardGeneric(u64, export_size, @alignOf(u64));
+ const export_size_aligned = mem.alignForward(u64, export_size, @alignOf(u64));
log.debug("writing export trie from 0x{x} to 0x{x}", .{ export_off, export_off + export_size_aligned });
const needed_size = math.cast(usize, export_off + export_size_aligned - rebase_off) orelse
@@ -3412,7 +3412,7 @@ fn writeStrtab(self: *MachO) !void {
const offset = seg.fileoff + seg.filesize;
assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
const needed_size = self.strtab.buffer.items.len;
- const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64));
+ const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64));
seg.filesize = offset + needed_size_aligned - seg.fileoff;
log.debug("writing string table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned });
@@ -3447,7 +3447,7 @@ fn writeDysymtab(self: *MachO, ctx: SymtabCtx) !void {
const offset = seg.fileoff + seg.filesize;
assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
const needed_size = nindirectsyms * @sizeOf(u32);
- const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64));
+ const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64));
seg.filesize = offset + needed_size_aligned - seg.fileoff;
log.debug("writing indirect symbol table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned });
@@ -3514,10 +3514,10 @@ fn writeCodeSignaturePadding(self: *MachO, code_sig: *CodeSignature) !void {
const seg = self.getLinkeditSegmentPtr();
// Code signature data has to be 16-bytes aligned for Apple tools to recognize the file
// https://github.com/opensource-apple/cctools/blob/fdb4825f303fd5c0751be524babd32958181b3ed/libstuff/checkout.c#L271
- const offset = mem.alignForwardGeneric(u64, seg.fileoff + seg.filesize, 16);
+ const offset = mem.alignForward(u64, seg.fileoff + seg.filesize, 16);
const needed_size = code_sig.estimateSize(offset);
seg.filesize = offset + needed_size - seg.fileoff;
- seg.vmsize = mem.alignForwardGeneric(u64, seg.filesize, self.page_size);
+ seg.vmsize = mem.alignForward(u64, seg.filesize, self.page_size);
log.debug("writing code signature padding from 0x{x} to 0x{x}", .{ offset, offset + needed_size });
// Pad out the space. We need to do this to calculate valid hashes for everything in the file
// except for code signature data.
@@ -3630,7 +3630,7 @@ fn allocatedSize(self: *MachO, start: u64) u64 {
fn findFreeSpace(self: *MachO, object_size: u64, min_alignment: u32) u64 {
var start: u64 = 0;
while (self.detectAllocCollision(start, object_size)) |item_end| {
- start = mem.alignForwardGeneric(u64, item_end, min_alignment);
+ start = mem.alignForward(u64, item_end, min_alignment);
}
return start;
}
diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig
index 4709560ba7..02511dbe29 100644
--- a/src/link/MachO/CodeSignature.zig
+++ b/src/link/MachO/CodeSignature.zig
@@ -282,7 +282,7 @@ pub fn writeAdhocSignature(
self.code_directory.inner.execSegFlags = if (opts.output_mode == .Exe) macho.CS_EXECSEG_MAIN_BINARY else 0;
self.code_directory.inner.codeLimit = opts.file_size;
- const total_pages = @intCast(u32, mem.alignForward(opts.file_size, self.page_size) / self.page_size);
+ const total_pages = @intCast(u32, mem.alignForward(usize, opts.file_size, self.page_size) / self.page_size);
try self.code_directory.code_slots.ensureTotalCapacityPrecise(gpa, total_pages);
self.code_directory.code_slots.items.len = total_pages;
@@ -357,7 +357,7 @@ fn parallelHash(
) !void {
var wg: WaitGroup = .{};
- const total_num_chunks = mem.alignForward(file_size, self.page_size) / self.page_size;
+ const total_num_chunks = mem.alignForward(usize, file_size, self.page_size) / self.page_size;
assert(self.code_directory.code_slots.items.len >= total_num_chunks);
const buffer = try gpa.alloc(u8, self.page_size * total_num_chunks);
@@ -421,7 +421,7 @@ pub fn size(self: CodeSignature) u32 {
pub fn estimateSize(self: CodeSignature, file_size: u64) u32 {
var ssize: u64 = @sizeOf(macho.SuperBlob) + @sizeOf(macho.BlobIndex) + self.code_directory.size();
// Approx code slots
- const total_pages = mem.alignForwardGeneric(u64, file_size, self.page_size) / self.page_size;
+ const total_pages = mem.alignForward(u64, file_size, self.page_size) / self.page_size;
ssize += total_pages * hash_size;
var n_special_slots: u32 = 0;
if (self.requirements) |req| {
@@ -436,7 +436,7 @@ pub fn estimateSize(self: CodeSignature, file_size: u64) u32 {
ssize += @sizeOf(macho.BlobIndex) + sig.size();
}
ssize += n_special_slots * hash_size;
- return @intCast(u32, mem.alignForwardGeneric(u64, ssize, @sizeOf(u64)));
+ return @intCast(u32, mem.alignForward(u64, ssize, @sizeOf(u64)));
}
pub fn clear(self: *CodeSignature, allocator: Allocator) void {
diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig
index 24a0c9ea34..fdb8c9c816 100644
--- a/src/link/MachO/DebugSymbols.zig
+++ b/src/link/MachO/DebugSymbols.zig
@@ -68,7 +68,7 @@ pub fn populateMissingMetadata(self: *DebugSymbols) !void {
const off = @intCast(u64, self.page_size);
const ideal_size: u16 = 200 + 128 + 160 + 250;
- const needed_size = mem.alignForwardGeneric(u64, padToIdeal(ideal_size), self.page_size);
+ const needed_size = mem.alignForward(u64, padToIdeal(ideal_size), self.page_size);
log.debug("found __DWARF segment free space 0x{x} to 0x{x}", .{ off, off + needed_size });
@@ -213,7 +213,7 @@ fn findFreeSpace(self: *DebugSymbols, object_size: u64, min_alignment: u64) u64
const segment = self.getDwarfSegmentPtr();
var offset: u64 = segment.fileoff;
while (self.detectAllocCollision(offset, object_size)) |item_end| {
- offset = mem.alignForwardGeneric(u64, item_end, min_alignment);
+ offset = mem.alignForward(u64, item_end, min_alignment);
}
return offset;
}
@@ -355,18 +355,18 @@ fn finalizeDwarfSegment(self: *DebugSymbols, macho_file: *MachO) void {
file_size = @max(file_size, header.offset + header.size);
}
- const aligned_size = mem.alignForwardGeneric(u64, file_size, self.page_size);
+ const aligned_size = mem.alignForward(u64, file_size, self.page_size);
dwarf_segment.vmaddr = base_vmaddr;
dwarf_segment.filesize = aligned_size;
dwarf_segment.vmsize = aligned_size;
const linkedit = self.getLinkeditSegmentPtr();
- linkedit.vmaddr = mem.alignForwardGeneric(
+ linkedit.vmaddr = mem.alignForward(
u64,
dwarf_segment.vmaddr + aligned_size,
self.page_size,
);
- linkedit.fileoff = mem.alignForwardGeneric(
+ linkedit.fileoff = mem.alignForward(
u64,
dwarf_segment.fileoff + aligned_size,
self.page_size,
@@ -458,7 +458,7 @@ fn writeLinkeditSegmentData(self: *DebugSymbols, macho_file: *MachO) !void {
try self.writeStrtab();
const seg = &self.segments.items[self.linkedit_segment_cmd_index.?];
- const aligned_size = mem.alignForwardGeneric(u64, seg.filesize, self.page_size);
+ const aligned_size = mem.alignForward(u64, seg.filesize, self.page_size);
seg.vmsize = aligned_size;
}
@@ -497,7 +497,7 @@ fn writeSymtab(self: *DebugSymbols, macho_file: *MachO) !void {
const nsyms = nlocals + nexports;
const seg = &self.segments.items[self.linkedit_segment_cmd_index.?];
- const offset = mem.alignForwardGeneric(u64, seg.fileoff, @alignOf(macho.nlist_64));
+ const offset = mem.alignForward(u64, seg.fileoff, @alignOf(macho.nlist_64));
const needed_size = nsyms * @sizeOf(macho.nlist_64);
seg.filesize = offset + needed_size - seg.fileoff;
@@ -522,8 +522,8 @@ fn writeStrtab(self: *DebugSymbols) !void {
const seg = &self.segments.items[self.linkedit_segment_cmd_index.?];
const symtab_size = @intCast(u32, self.symtab_cmd.nsyms * @sizeOf(macho.nlist_64));
- const offset = mem.alignForwardGeneric(u64, self.symtab_cmd.symoff + symtab_size, @alignOf(u64));
- const needed_size = mem.alignForwardGeneric(u64, self.strtab.buffer.items.len, @alignOf(u64));
+ const offset = mem.alignForward(u64, self.symtab_cmd.symoff + symtab_size, @alignOf(u64));
+ const needed_size = mem.alignForward(u64, self.strtab.buffer.items.len, @alignOf(u64));
seg.filesize = offset + needed_size - seg.fileoff;
self.symtab_cmd.stroff = @intCast(u32, offset);
diff --git a/src/link/MachO/load_commands.zig b/src/link/MachO/load_commands.zig
index 228a1ccfaf..5111f53f2a 100644
--- a/src/link/MachO/load_commands.zig
+++ b/src/link/MachO/load_commands.zig
@@ -17,7 +17,7 @@ pub const default_dyld_path: [*:0]const u8 = "/usr/lib/dyld";
fn calcInstallNameLen(cmd_size: u64, name: []const u8, assume_max_path_len: bool) u64 {
const darwin_path_max = 1024;
const name_len = if (assume_max_path_len) darwin_path_max else name.len + 1;
- return mem.alignForwardGeneric(u64, cmd_size + name_len, @alignOf(u64));
+ return mem.alignForward(u64, cmd_size + name_len, @alignOf(u64));
}
const CalcLCsSizeCtx = struct {
@@ -149,7 +149,7 @@ pub fn calcNumOfLCs(lc_buffer: []const u8) u32 {
pub fn writeDylinkerLC(lc_writer: anytype) !void {
const name_len = mem.sliceTo(default_dyld_path, 0).len;
- const cmdsize = @intCast(u32, mem.alignForwardGeneric(
+ const cmdsize = @intCast(u32, mem.alignForward(
u64,
@sizeOf(macho.dylinker_command) + name_len,
@sizeOf(u64),
@@ -176,7 +176,7 @@ const WriteDylibLCCtx = struct {
fn writeDylibLC(ctx: WriteDylibLCCtx, lc_writer: anytype) !void {
const name_len = ctx.name.len + 1;
- const cmdsize = @intCast(u32, mem.alignForwardGeneric(
+ const cmdsize = @intCast(u32, mem.alignForward(
u64,
@sizeOf(macho.dylib_command) + name_len,
@sizeOf(u64),
@@ -253,7 +253,7 @@ pub fn writeRpathLCs(gpa: Allocator, options: *const link.Options, lc_writer: an
while (try it.next()) |rpath| {
const rpath_len = rpath.len + 1;
- const cmdsize = @intCast(u32, mem.alignForwardGeneric(
+ const cmdsize = @intCast(u32, mem.alignForward(
u64,
@sizeOf(macho.rpath_command) + rpath_len,
@sizeOf(u64),
diff --git a/src/link/MachO/thunks.zig b/src/link/MachO/thunks.zig
index 48d1faac6b..7895190005 100644
--- a/src/link/MachO/thunks.zig
+++ b/src/link/MachO/thunks.zig
@@ -109,7 +109,7 @@ pub fn createThunks(zld: *Zld, sect_id: u8) !void {
while (true) {
const atom = zld.getAtom(group_end);
- offset = mem.alignForwardGeneric(u64, offset, try math.powi(u32, 2, atom.alignment));
+ offset = mem.alignForward(u64, offset, try math.powi(u32, 2, atom.alignment));
const sym = zld.getSymbolPtr(atom.getSymbolWithLoc());
sym.n_value = offset;
@@ -153,7 +153,7 @@ pub fn createThunks(zld: *Zld, sect_id: u8) !void {
} else break;
}
- offset = mem.alignForwardGeneric(u64, offset, Thunk.getAlignment());
+ offset = mem.alignForward(u64, offset, Thunk.getAlignment());
allocateThunk(zld, thunk_index, offset, header);
offset += zld.thunks.items[thunk_index].getSize();
@@ -193,7 +193,7 @@ fn allocateThunk(
var offset = base_offset;
while (true) {
const atom = zld.getAtom(atom_index);
- offset = mem.alignForwardGeneric(u64, offset, Thunk.getAlignment());
+ offset = mem.alignForward(u64, offset, Thunk.getAlignment());
const sym = zld.getSymbolPtr(atom.getSymbolWithLoc());
sym.n_value = offset;
diff --git a/src/link/MachO/zld.zig b/src/link/MachO/zld.zig
index 4f7e615c79..7902d67d87 100644
--- a/src/link/MachO/zld.zig
+++ b/src/link/MachO/zld.zig
@@ -1207,7 +1207,7 @@ pub const Zld = struct {
fn createSegments(self: *Zld) !void {
const pagezero_vmsize = self.options.pagezero_size orelse MachO.default_pagezero_vmsize;
- const aligned_pagezero_vmsize = mem.alignBackwardGeneric(u64, pagezero_vmsize, self.page_size);
+ const aligned_pagezero_vmsize = mem.alignBackward(u64, pagezero_vmsize, self.page_size);
if (self.options.output_mode != .Lib and aligned_pagezero_vmsize > 0) {
if (aligned_pagezero_vmsize != pagezero_vmsize) {
log.warn("requested __PAGEZERO size (0x{x}) is not page aligned", .{pagezero_vmsize});
@@ -1466,7 +1466,7 @@ pub const Zld = struct {
while (true) {
const atom = self.getAtom(atom_index);
const atom_alignment = try math.powi(u32, 2, atom.alignment);
- const atom_offset = mem.alignForwardGeneric(u64, header.size, atom_alignment);
+ const atom_offset = mem.alignForward(u64, header.size, atom_alignment);
const padding = atom_offset - header.size;
const sym = self.getSymbolPtr(atom.getSymbolWithLoc());
@@ -1534,7 +1534,7 @@ pub const Zld = struct {
const slice = self.sections.slice();
for (slice.items(.header)[indexes.start..indexes.end], 0..) |*header, sect_id| {
const alignment = try math.powi(u32, 2, header.@"align");
- const start_aligned = mem.alignForwardGeneric(u64, start, alignment);
+ const start_aligned = mem.alignForward(u64, start, alignment);
const n_sect = @intCast(u8, indexes.start + sect_id + 1);
header.offset = if (header.isZerofill())
@@ -1598,8 +1598,8 @@ pub const Zld = struct {
segment.vmsize = start;
}
- segment.filesize = mem.alignForwardGeneric(u64, segment.filesize, self.page_size);
- segment.vmsize = mem.alignForwardGeneric(u64, segment.vmsize, self.page_size);
+ segment.filesize = mem.alignForward(u64, segment.filesize, self.page_size);
+ segment.vmsize = mem.alignForward(u64, segment.vmsize, self.page_size);
}
const InitSectionOpts = struct {
@@ -1709,7 +1709,7 @@ pub const Zld = struct {
try self.writeSymtabs();
const seg = self.getLinkeditSegmentPtr();
- seg.vmsize = mem.alignForwardGeneric(u64, seg.filesize, self.page_size);
+ seg.vmsize = mem.alignForward(u64, seg.filesize, self.page_size);
}
fn collectRebaseDataFromContainer(
@@ -2112,17 +2112,17 @@ pub const Zld = struct {
assert(mem.isAlignedGeneric(u64, link_seg.fileoff, @alignOf(u64)));
const rebase_off = link_seg.fileoff;
const rebase_size = rebase.size();
- const rebase_size_aligned = mem.alignForwardGeneric(u64, rebase_size, @alignOf(u64));
+ const rebase_size_aligned = mem.alignForward(u64, rebase_size, @alignOf(u64));
log.debug("writing rebase info from 0x{x} to 0x{x}", .{ rebase_off, rebase_off + rebase_size_aligned });
const bind_off = rebase_off + rebase_size_aligned;
const bind_size = bind.size();
- const bind_size_aligned = mem.alignForwardGeneric(u64, bind_size, @alignOf(u64));
+ const bind_size_aligned = mem.alignForward(u64, bind_size, @alignOf(u64));
log.debug("writing bind info from 0x{x} to 0x{x}", .{ bind_off, bind_off + bind_size_aligned });
const lazy_bind_off = bind_off + bind_size_aligned;
const lazy_bind_size = lazy_bind.size();
- const lazy_bind_size_aligned = mem.alignForwardGeneric(u64, lazy_bind_size, @alignOf(u64));
+ const lazy_bind_size_aligned = mem.alignForward(u64, lazy_bind_size, @alignOf(u64));
log.debug("writing lazy bind info from 0x{x} to 0x{x}", .{
lazy_bind_off,
lazy_bind_off + lazy_bind_size_aligned,
@@ -2130,7 +2130,7 @@ pub const Zld = struct {
const export_off = lazy_bind_off + lazy_bind_size_aligned;
const export_size = trie.size;
- const export_size_aligned = mem.alignForwardGeneric(u64, export_size, @alignOf(u64));
+ const export_size_aligned = mem.alignForward(u64, export_size, @alignOf(u64));
log.debug("writing export trie from 0x{x} to 0x{x}", .{ export_off, export_off + export_size_aligned });
const needed_size = math.cast(usize, export_off + export_size_aligned - rebase_off) orelse
@@ -2268,7 +2268,7 @@ pub const Zld = struct {
const offset = link_seg.fileoff + link_seg.filesize;
assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
const needed_size = buffer.items.len;
- const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64));
+ const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64));
const padding = math.cast(usize, needed_size_aligned - needed_size) orelse return error.Overflow;
if (padding > 0) {
try buffer.ensureUnusedCapacity(padding);
@@ -2347,7 +2347,7 @@ pub const Zld = struct {
const offset = seg.fileoff + seg.filesize;
assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
const needed_size = out_dice.items.len * @sizeOf(macho.data_in_code_entry);
- const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64));
+ const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64));
seg.filesize = offset + needed_size_aligned - seg.fileoff;
const buffer = try self.gpa.alloc(u8, math.cast(usize, needed_size_aligned) orelse return error.Overflow);
@@ -2480,7 +2480,7 @@ pub const Zld = struct {
const offset = seg.fileoff + seg.filesize;
assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
const needed_size = self.strtab.buffer.items.len;
- const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64));
+ const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64));
seg.filesize = offset + needed_size_aligned - seg.fileoff;
log.debug("writing string table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned });
@@ -2515,7 +2515,7 @@ pub const Zld = struct {
const offset = seg.fileoff + seg.filesize;
assert(mem.isAlignedGeneric(u64, offset, @alignOf(u64)));
const needed_size = nindirectsyms * @sizeOf(u32);
- const needed_size_aligned = mem.alignForwardGeneric(u64, needed_size, @alignOf(u64));
+ const needed_size_aligned = mem.alignForward(u64, needed_size, @alignOf(u64));
seg.filesize = offset + needed_size_aligned - seg.fileoff;
log.debug("writing indirect symbol table from 0x{x} to 0x{x}", .{ offset, offset + needed_size_aligned });
@@ -2690,7 +2690,7 @@ pub const Zld = struct {
for (subsections[0..count]) |cut| {
const size = cut.end - cut.start;
- const num_chunks = mem.alignForward(size, chunk_size) / chunk_size;
+ const num_chunks = mem.alignForward(usize, size, chunk_size) / chunk_size;
var i: usize = 0;
while (i < num_chunks) : (i += 1) {
@@ -2725,10 +2725,10 @@ pub const Zld = struct {
const seg = self.getLinkeditSegmentPtr();
// Code signature data has to be 16-bytes aligned for Apple tools to recognize the file
// https://github.com/opensource-apple/cctools/blob/fdb4825f303fd5c0751be524babd32958181b3ed/libstuff/checkout.c#L271
- const offset = mem.alignForwardGeneric(u64, seg.fileoff + seg.filesize, 16);
+ const offset = mem.alignForward(u64, seg.fileoff + seg.filesize, 16);
const needed_size = code_sig.estimateSize(offset);
seg.filesize = offset + needed_size - seg.fileoff;
- seg.vmsize = mem.alignForwardGeneric(u64, seg.filesize, self.page_size);
+ seg.vmsize = mem.alignForward(u64, seg.filesize, self.page_size);
log.debug("writing code signature padding from 0x{x} to 0x{x}", .{ offset, offset + needed_size });
// Pad out the space. We need to do this to calculate valid hashes for everything in the file
// except for code signature data.
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index f911074473..2d2930be8c 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -2118,7 +2118,7 @@ fn allocateAtoms(wasm: *Wasm) !void {
}
}
}
- offset = std.mem.alignForwardGeneric(u32, offset, atom.alignment);
+ offset = std.mem.alignForward(u32, offset, atom.alignment);
atom.offset = offset;
log.debug("Atom '{s}' allocated from 0x{x:0>8} to 0x{x:0>8} size={d}", .{
symbol_loc.getName(wasm),
@@ -2129,7 +2129,7 @@ fn allocateAtoms(wasm: *Wasm) !void {
offset += atom.size;
atom_index = atom.prev orelse break;
}
- segment.size = std.mem.alignForwardGeneric(u32, offset, segment.alignment);
+ segment.size = std.mem.alignForward(u32, offset, segment.alignment);
}
}
@@ -2731,7 +2731,7 @@ fn setupMemory(wasm: *Wasm) !void {
const is_obj = wasm.base.options.output_mode == .Obj;
if (place_stack_first and !is_obj) {
- memory_ptr = std.mem.alignForwardGeneric(u64, memory_ptr, stack_alignment);
+ memory_ptr = std.mem.alignForward(u64, memory_ptr, stack_alignment);
memory_ptr += stack_size;
// We always put the stack pointer global at index 0
wasm.wasm_globals.items[0].init.i32_const = @bitCast(i32, @intCast(u32, memory_ptr));
@@ -2741,7 +2741,7 @@ fn setupMemory(wasm: *Wasm) !void {
var data_seg_it = wasm.data_segments.iterator();
while (data_seg_it.next()) |entry| {
const segment = &wasm.segments.items[entry.value_ptr.*];
- memory_ptr = std.mem.alignForwardGeneric(u64, memory_ptr, segment.alignment);
+ memory_ptr = std.mem.alignForward(u64, memory_ptr, segment.alignment);
// set TLS-related symbols
if (mem.eql(u8, entry.key_ptr.*, ".tdata")) {
@@ -2779,7 +2779,7 @@ fn setupMemory(wasm: *Wasm) !void {
// create the memory init flag which is used by the init memory function
if (wasm.base.options.shared_memory and wasm.hasPassiveInitializationSegments()) {
// align to pointer size
- memory_ptr = mem.alignForwardGeneric(u64, memory_ptr, 4);
+ memory_ptr = mem.alignForward(u64, memory_ptr, 4);
const loc = try wasm.createSyntheticSymbol("__wasm_init_memory_flag", .data);
const sym = loc.getSymbol(wasm);
sym.virtual_address = @intCast(u32, memory_ptr);
@@ -2787,7 +2787,7 @@ fn setupMemory(wasm: *Wasm) !void {
}
if (!place_stack_first and !is_obj) {
- memory_ptr = std.mem.alignForwardGeneric(u64, memory_ptr, stack_alignment);
+ memory_ptr = std.mem.alignForward(u64, memory_ptr, stack_alignment);
memory_ptr += stack_size;
wasm.wasm_globals.items[0].init.i32_const = @bitCast(i32, @intCast(u32, memory_ptr));
}
@@ -2796,7 +2796,7 @@ fn setupMemory(wasm: *Wasm) !void {
// We must set its virtual address so it can be used in relocations.
if (wasm.findGlobalSymbol("__heap_base")) |loc| {
const symbol = loc.getSymbol(wasm);
- symbol.virtual_address = @intCast(u32, mem.alignForwardGeneric(u64, memory_ptr, heap_alignment));
+ symbol.virtual_address = @intCast(u32, mem.alignForward(u64, memory_ptr, heap_alignment));
}
// Setup the max amount of pages
@@ -2818,7 +2818,7 @@ fn setupMemory(wasm: *Wasm) !void {
}
memory_ptr = initial_memory;
}
- memory_ptr = mem.alignForwardGeneric(u64, memory_ptr, std.wasm.page_size);
+ memory_ptr = mem.alignForward(u64, memory_ptr, std.wasm.page_size);
// In case we do not import memory, but define it ourselves,
// set the minimum amount of pages on the memory section.
wasm.memories.limits.min = @intCast(u32, memory_ptr / page_size);