 CMakeLists.txt | 1
 lib/compiler_rt/comparef.zig | 4
 lib/compiler_rt/extendf.zig | 2
 lib/compiler_rt/extendxftf2.zig | 2
 lib/compiler_rt/subxf3.zig | 4
 lib/compiler_rt/truncf.zig | 2
 lib/compiler_rt/trunctfxf2.zig | 2
 lib/std/Target.zig | 2
 lib/std/array_hash_map.zig | 8
 lib/std/array_list.zig | 8
 lib/std/base64.zig | 39
 lib/std/crypto/ml_kem.zig | 4
 lib/std/elf.zig | 427
 lib/std/fmt.zig | 3
 lib/std/hash_map.zig | 13
 lib/std/heap/general_purpose_allocator.zig | 11
 lib/std/math.zig | 24
 lib/std/math/nextafter.zig | 4
 lib/std/zig/AstGen.zig | 49
 lib/std/zig/BuiltinFn.zig | 2
 lib/std/zig/Zir.zig | 32
 src/Sema.zig | 121
 src/arch/riscv64/CodeGen.zig | 41
 src/codegen/llvm.zig | 141
 src/codegen/llvm/Builder.zig | 14
 src/codegen/llvm/bindings.zig | 9
 src/glibc.zig | 13
 src/link/Dwarf.zig | 144
 src/link/Elf.zig | 633
 src/link/Elf/Atom.zig | 159
 src/link/Elf/AtomList.zig | 208
 src/link/Elf/Object.zig | 123
 src/link/Elf/ZigObject.zig | 645
 src/link/Elf/eh_frame.zig | 36
 src/link/Elf/relocatable.zig | 194
 src/print_zir.zig | 4
 src/target.zig | 4
 src/zig_llvm.cpp | 14
 src/zig_llvm.h | 4
 test/behavior.zig | 1
 test/behavior/cast_int.zig | 4
 test/behavior/decl_literals.zig | 38
 test/behavior/defer.zig | 37
 test/behavior/struct.zig | 1
 test/behavior/try.zig | 19
 test/behavior/while.zig | 1
 test/cases/compile_errors/cast_enum_literal_to_enum_but_it_doesnt_match.zig | 2
 test/cases/compile_errors/comptime_arg_to_generic_fn_callee_error.zig | 2
 test/link/elf.zig | 2
 49 files changed, 1863 insertions(+), 1394 deletions(-)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8348b45cfd..781076ef61 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -600,6 +600,7 @@ set(ZIG_STAGE2_SOURCES
src/link/Elf.zig
src/link/Elf/Archive.zig
src/link/Elf/Atom.zig
+ src/link/Elf/AtomList.zig
src/link/Elf/LdScript.zig
src/link/Elf/LinkerDefined.zig
src/link/Elf/Object.zig
diff --git a/lib/compiler_rt/comparef.zig b/lib/compiler_rt/comparef.zig
index 512eba0594..76f04f430a 100644
--- a/lib/compiler_rt/comparef.zig
+++ b/lib/compiler_rt/comparef.zig
@@ -61,8 +61,8 @@ pub inline fn cmpf2(comptime T: type, comptime RT: type, a: T, b: T) RT {
}
pub inline fn cmp_f80(comptime RT: type, a: f80, b: f80) RT {
- const a_rep = std.math.break_f80(a);
- const b_rep = std.math.break_f80(b);
+ const a_rep = std.math.F80.fromFloat(a);
+ const b_rep = std.math.F80.fromFloat(b);
const sig_bits = std.math.floatMantissaBits(f80);
const int_bit = 0x8000000000000000;
const sign_bit = 0x8000;
diff --git a/lib/compiler_rt/extendf.zig b/lib/compiler_rt/extendf.zig
index 5c7c2fecde..6e546b182c 100644
--- a/lib/compiler_rt/extendf.zig
+++ b/lib/compiler_rt/extendf.zig
@@ -131,7 +131,7 @@ pub inline fn extend_f80(comptime src_t: type, a: std.meta.Int(.unsigned, @typeI
}
dst.exp |= sign;
- return std.math.make_f80(dst);
+ return dst.toFloat();
}
test {
diff --git a/lib/compiler_rt/extendxftf2.zig b/lib/compiler_rt/extendxftf2.zig
index 53de08e686..55324be2b4 100644
--- a/lib/compiler_rt/extendxftf2.zig
+++ b/lib/compiler_rt/extendxftf2.zig
@@ -18,7 +18,7 @@ fn __extendxftf2(a: f80) callconv(.C) f128 {
const dst_min_normal = @as(u128, 1) << dst_sig_bits;
// Break a into a sign and representation of the absolute value
- var a_rep = std.math.break_f80(a);
+ var a_rep = std.math.F80.fromFloat(a);
const sign = a_rep.exp & 0x8000;
a_rep.exp &= 0x7FFF;
var abs_result: u128 = undefined;
diff --git a/lib/compiler_rt/subxf3.zig b/lib/compiler_rt/subxf3.zig
index 815bc1f78f..9dc7625b1e 100644
--- a/lib/compiler_rt/subxf3.zig
+++ b/lib/compiler_rt/subxf3.zig
@@ -8,8 +8,8 @@ comptime {
}
fn __subxf3(a: f80, b: f80) callconv(.C) f80 {
- var b_rep = std.math.break_f80(b);
+ var b_rep = std.math.F80.fromFloat(b);
b_rep.exp ^= 0x8000;
- const neg_b = std.math.make_f80(b_rep);
+ const neg_b = b_rep.toFloat();
return a + neg_b;
}
diff --git a/lib/compiler_rt/truncf.zig b/lib/compiler_rt/truncf.zig
index d8b7c6b682..5c116811dc 100644
--- a/lib/compiler_rt/truncf.zig
+++ b/lib/compiler_rt/truncf.zig
@@ -121,7 +121,7 @@ pub inline fn trunc_f80(comptime dst_t: type, a: f80) dst_t {
const dst_nan_mask = dst_qnan - 1;
// Break a into a sign and representation of the absolute value
- var a_rep = std.math.break_f80(a);
+ var a_rep = std.math.F80.fromFloat(a);
const sign = a_rep.exp & 0x8000;
a_rep.exp &= 0x7FFF;
a_rep.fraction &= 0x7FFFFFFFFFFFFFFF;
diff --git a/lib/compiler_rt/trunctfxf2.zig b/lib/compiler_rt/trunctfxf2.zig
index b7c2b1cb1d..be78200a16 100644
--- a/lib/compiler_rt/trunctfxf2.zig
+++ b/lib/compiler_rt/trunctfxf2.zig
@@ -64,5 +64,5 @@ pub fn __trunctfxf2(a: f128) callconv(.C) f80 {
}
res.exp |= sign;
- return math.make_f80(res);
+ return res.toFloat();
}
diff --git a/lib/std/Target.zig b/lib/std/Target.zig
index 3580813aec..d215e29fc8 100644
--- a/lib/std/Target.zig
+++ b/lib/std/Target.zig
@@ -846,7 +846,7 @@ pub fn toElfMachine(target: Target) std.elf.EM {
.avr => .AVR,
.bpfel, .bpfeb => .BPF,
.csky => .CSKY,
- .hexagon => .HEXAGON,
+ .hexagon => .QDSP6,
.kalimba => .CSR_KALIMBA,
.lanai => .LANAI,
.loongarch32, .loongarch64 => .LOONGARCH,
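
For context: the numeric `e_machine` value for Hexagon (164) is unchanged; this commit only renames the Zig tag to the official `QDSP6` name (see the `lib/std/elf.zig` hunk below), so `toElfMachine` for Hexagon targets now returns `.QDSP6`. A minimal sketch of the caller-visible effect (the test itself is illustrative, not part of this commit):

```zig
const std = @import("std");

test "hexagon maps to the renamed EM tag" {
    // Only the Zig enum tag name moved; the ELF value is still 164.
    try std.testing.expectEqual(@as(u16, 164), @intFromEnum(std.elf.EM.QDSP6));
}
```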
diff --git a/lib/std/array_hash_map.zig b/lib/std/array_hash_map.zig
index eb31d1cae3..1b96be472a 100644
--- a/lib/std/array_hash_map.zig
+++ b/lib/std/array_hash_map.zig
@@ -510,6 +510,8 @@ pub fn ArrayHashMap(
/// `store_hash` is `false` and the number of entries in the map is less than 9,
/// the overhead cost of using `ArrayHashMapUnmanaged` rather than `std.ArrayList` is
/// only a single pointer-sized integer.
+///
+/// Default initialization of this struct is deprecated; use `.empty` instead.
pub fn ArrayHashMapUnmanaged(
comptime K: type,
comptime V: type,
@@ -538,6 +540,12 @@ pub fn ArrayHashMapUnmanaged(
/// Used to detect memory safety violations.
pointer_stability: std.debug.SafetyLock = .{},
+ /// A map containing no keys or values.
+ pub const empty: Self = .{
+ .entries = .{},
+ .index_header = null,
+ };
+
/// Modifying the key is allowed only if it does not change the hash.
/// Modifying the value is allowed.
/// Entry pointers become invalid whenever this ArrayHashMap is modified,
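
With `.empty` available, an unmanaged array hash map can be initialized with a decl literal instead of the now-deprecated `= .{}` default. A short usage sketch (illustrative, not from this commit):

```zig
const std = @import("std");

test "ArrayHashMapUnmanaged starts from .empty" {
    const gpa = std.testing.allocator;
    var map: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .empty;
    defer map.deinit(gpa);
    try map.put(gpa, 1, "one");
    try std.testing.expectEqualStrings("one", map.get(1).?);
}
```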
diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig
index 2510973692..24098a01f6 100644
--- a/lib/std/array_list.zig
+++ b/lib/std/array_list.zig
@@ -618,6 +618,8 @@ pub fn ArrayListUnmanaged(comptime T: type) type {
/// Functions that potentially allocate memory accept an `Allocator` parameter.
/// Initialize directly or with `initCapacity`, and deinitialize with `deinit`
/// or use `toOwnedSlice`.
+///
+/// Default initialization of this struct is deprecated; use `.empty` instead.
pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) type {
if (alignment) |a| {
if (a == @alignOf(T)) {
@@ -638,6 +640,12 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// additional memory.
capacity: usize = 0,
+ /// An ArrayList containing no elements.
+ pub const empty: Self = .{
+ .items = &.{},
+ .capacity = 0,
+ };
+
pub const Slice = if (alignment) |a| ([]align(a) T) else []T;
pub fn SentinelSlice(comptime s: T) type {
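
The same pattern applies to unmanaged lists; `.empty` is the new spelling for a zero-length, zero-capacity list. A short usage sketch (illustrative):

```zig
const std = @import("std");

test "ArrayListUnmanaged starts from .empty" {
    const gpa = std.testing.allocator;
    var list: std.ArrayListUnmanaged(u8) = .empty;
    defer list.deinit(gpa);
    try list.appendSlice(gpa, "abc");
    try std.testing.expectEqual(@as(usize, 3), list.items.len);
}
```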
diff --git a/lib/std/base64.zig b/lib/std/base64.zig
index 2627480295..243f206445 100644
--- a/lib/std/base64.zig
+++ b/lib/std/base64.zig
@@ -5,6 +5,7 @@ const assert = std.debug.assert;
const builtin = @import("builtin");
const testing = std.testing;
const mem = std.mem;
+const window = mem.window;
pub const Error = error{
InvalidCharacter,
@@ -98,6 +99,32 @@ pub const Base64Encoder = struct {
}
}
+ // dest must be compatible with std.io.Writer's writeAll interface
+ pub fn encodeWriter(encoder: *const Base64Encoder, dest: anytype, source: []const u8) !void {
+ var chunker = window(u8, source, 3, 3);
+ while (chunker.next()) |chunk| {
+ var temp: [5]u8 = undefined;
+ const s = encoder.encode(&temp, chunk);
+ try dest.writeAll(s);
+ }
+ }
+
+ // destWriter must be compatible with std.io.Writer's writeAll interface
+ // sourceReader must be compatible with std.io.Reader's read interface
+ pub fn encodeFromReaderToWriter(encoder: *const Base64Encoder, destWriter: anytype, sourceReader: anytype) !void {
+ while (true) {
+ var tempSource: [3]u8 = undefined;
+ const bytesRead = try sourceReader.read(&tempSource);
+ if (bytesRead == 0) {
+ break;
+ }
+
+ var temp: [5]u8 = undefined;
+ const s = encoder.encode(&temp, tempSource[0..bytesRead]);
+ try destWriter.writeAll(s);
+ }
+ }
+
/// dest.len must at least be what you get from ::calcSize.
pub fn encode(encoder: *const Base64Encoder, dest: []u8, source: []const u8) []const u8 {
const out_len = encoder.calcSize(source.len);
@@ -477,9 +504,21 @@ fn testBase64UrlSafeNoPad() !void {
fn testAllApis(codecs: Codecs, expected_decoded: []const u8, expected_encoded: []const u8) !void {
// Base64Encoder
{
+ // raw encode
var buffer: [0x100]u8 = undefined;
const encoded = codecs.Encoder.encode(&buffer, expected_decoded);
try testing.expectEqualSlices(u8, expected_encoded, encoded);
+
+ // stream encode
+ var list = try std.BoundedArray(u8, 0x100).init(0);
+ try codecs.Encoder.encodeWriter(list.writer(), expected_decoded);
+ try testing.expectEqualSlices(u8, expected_encoded, list.slice());
+
+ // reader to writer encode
+ var stream = std.io.fixedBufferStream(expected_decoded);
+ list = try std.BoundedArray(u8, 0x100).init(0);
+ try codecs.Encoder.encodeFromReaderToWriter(list.writer(), stream.reader());
+ try testing.expectEqualSlices(u8, expected_encoded, list.slice());
}
// Base64Decoder
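
The two new entry points let callers stream an encoding without sizing a destination buffer from `calcSize` up front: `encodeWriter` walks the source three bytes at a time, and `encodeFromReaderToWriter` additionally pulls the source from a reader. A usage sketch with a fixed buffer stream standing in for an arbitrary writer (illustrative):

```zig
const std = @import("std");

test "encodeWriter streams base64 into any writer" {
    var buf: [64]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&buf);
    try std.base64.standard.Encoder.encodeWriter(fbs.writer(), "foobar");
    try std.testing.expectEqualStrings("Zm9vYmFy", fbs.getWritten());
}
```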
diff --git a/lib/std/crypto/ml_kem.zig b/lib/std/crypto/ml_kem.zig
index 9a3a35492c..950b72016b 100644
--- a/lib/std/crypto/ml_kem.zig
+++ b/lib/std/crypto/ml_kem.zig
@@ -1677,7 +1677,7 @@ test "Test inner PKE" {
p.* = @as(u8, @intCast(i + 32));
}
inline for (modes) |mode| {
- for (0..100) |i| {
+ for (0..10) |i| {
var pk: mode.InnerPk = undefined;
var sk: mode.InnerSk = undefined;
seed[0] = @as(u8, @intCast(i));
@@ -1696,7 +1696,7 @@ test "Test happy flow" {
s.* = @as(u8, @intCast(i));
}
inline for (modes) |mode| {
- for (0..100) |i| {
+ for (0..10) |i| {
seed[0] = @as(u8, @intCast(i));
const kp = try mode.KeyPair.create(seed);
const sk = try mode.SecretKey.fromBytes(&kp.secret_key.toBytes());
diff --git a/lib/std/elf.zig b/lib/std/elf.zig
index aefaca4bef..d92973c314 100644
--- a/lib/std/elf.zig
+++ b/lib/std/elf.zig
@@ -1101,549 +1101,456 @@ pub const Half = u16;
pub const EM = enum(u16) {
/// No machine
NONE = 0,
-
/// AT&T WE 32100
M32 = 1,
-
- /// SPARC
+ /// SUN SPARC
SPARC = 2,
-
- /// Intel 386
+ /// Intel 80386
@"386" = 3,
-
- /// Motorola 68000
+ /// Motorola m68k family
@"68K" = 4,
-
- /// Motorola 88000
+ /// Motorola m88k family
@"88K" = 5,
-
/// Intel MCU
IAMCU = 6,
-
/// Intel 80860
@"860" = 7,
-
- /// MIPS R3000
+ /// MIPS R3000 (officially, big-endian only)
MIPS = 8,
-
/// IBM System/370
S370 = 9,
-
- /// MIPS RS3000 Little-endian
+ /// MIPS R3000 (and R4000) little-endian, Oct 4 1993 Draft (deprecated)
MIPS_RS3_LE = 10,
-
+ /// Old version of Sparc v9, from before the ABI (deprecated)
+ OLD_SPARCV9 = 11,
/// SPU Mark II
SPU_2 = 13,
-
- /// Hewlett-Packard PA-RISC
+ /// HPPA
PARISC = 15,
-
- /// Fujitsu VPP500
+ /// Fujitsu VPP500 (also old version of PowerPC; deprecated)
VPP500 = 17,
-
- /// Enhanced instruction set SPARC
+ /// Sun's "v8plus"
SPARC32PLUS = 18,
-
/// Intel 80960
@"960" = 19,
-
/// PowerPC
PPC = 20,
-
- /// PowerPC64
+ /// 64-bit PowerPC
PPC64 = 21,
-
- /// IBM System/390
+ /// IBM S/390
S390 = 22,
-
- /// IBM SPU/SPC
+ /// Sony/Toshiba/IBM SPU
SPU = 23,
-
- /// NEC V800
+ /// NEC V800 series
V800 = 36,
-
/// Fujitsu FR20
FR20 = 37,
-
- /// TRW RH-32
+ /// TRW RH32
RH32 = 38,
-
- /// Motorola RCE
- RCE = 39,
-
+ /// Motorola M*Core, aka RCE (also Fujitsu MMA)
+ MCORE = 39,
/// ARM
ARM = 40,
-
- /// DEC Alpha
- ALPHA = 41,
-
- /// Hitachi SH
+ /// Digital Alpha
+ OLD_ALPHA = 41,
+ /// Renesas (formerly Hitachi) / SuperH SH
SH = 42,
-
- /// SPARC V9
+ /// SPARC v9 64-bit
SPARCV9 = 43,
-
- /// Siemens TriCore
+ /// Siemens Tricore embedded processor
TRICORE = 44,
-
- /// Argonaut RISC Core
+ /// ARC Cores
ARC = 45,
-
- /// Hitachi H8/300
+ /// Renesas (formerly Hitachi) H8/300
H8_300 = 46,
-
- /// Hitachi H8/300H
+ /// Renesas (formerly Hitachi) H8/300H
H8_300H = 47,
-
- /// Hitachi H8S
+ /// Renesas (formerly Hitachi) H8S
H8S = 48,
-
- /// Hitachi H8/500
+ /// Renesas (formerly Hitachi) H8/500
H8_500 = 49,
-
- /// Intel IA-64 processor architecture
+ /// Intel IA-64 Processor
IA_64 = 50,
-
/// Stanford MIPS-X
MIPS_X = 51,
-
- /// Motorola ColdFire
+ /// Motorola Coldfire
COLDFIRE = 52,
-
/// Motorola M68HC12
@"68HC12" = 53,
-
- /// Fujitsu MMA Multimedia Accelerator
+ /// Fujitsu Multimedia Accelerator
MMA = 54,
-
/// Siemens PCP
PCP = 55,
-
/// Sony nCPU embedded RISC processor
NCPU = 56,
-
/// Denso NDR1 microprocessor
NDR1 = 57,
-
/// Motorola Star*Core processor
STARCORE = 58,
-
/// Toyota ME16 processor
ME16 = 59,
-
/// STMicroelectronics ST100 processor
ST100 = 60,
-
- /// Advanced Logic Corp. TinyJ embedded processor family
+ /// Advanced Logic Corp. TinyJ embedded processor
TINYJ = 61,
-
- /// AMD x86-64 architecture
+ /// Advanced Micro Devices X86-64 processor
X86_64 = 62,
-
/// Sony DSP Processor
PDSP = 63,
-
/// Digital Equipment Corp. PDP-10
PDP10 = 64,
-
/// Digital Equipment Corp. PDP-11
PDP11 = 65,
-
/// Siemens FX66 microcontroller
FX66 = 66,
-
/// STMicroelectronics ST9+ 8/16 bit microcontroller
ST9PLUS = 67,
-
/// STMicroelectronics ST7 8-bit microcontroller
ST7 = 68,
-
/// Motorola MC68HC16 Microcontroller
@"68HC16" = 69,
-
/// Motorola MC68HC11 Microcontroller
@"68HC11" = 70,
-
/// Motorola MC68HC08 Microcontroller
@"68HC08" = 71,
-
/// Motorola MC68HC05 Microcontroller
@"68HC05" = 72,
-
/// Silicon Graphics SVx
SVX = 73,
-
- /// STMicroelectronics ST19 8-bit microcontroller
+ /// STMicroelectronics ST19 8-bit cpu
ST19 = 74,
-
/// Digital VAX
VAX = 75,
-
/// Axis Communications 32-bit embedded processor
CRIS = 76,
-
- /// Infineon Technologies 32-bit embedded processor
+ /// Infineon Technologies 32-bit embedded cpu
JAVELIN = 77,
-
- /// Element 14 64-bit DSP Processor
+ /// Element 14 64-bit DSP processor
FIREPATH = 78,
-
- /// LSI Logic 16-bit DSP Processor
+ /// LSI Logic's 16-bit DSP processor
ZSP = 79,
-
/// Donald Knuth's educational 64-bit processor
MMIX = 80,
-
- /// Harvard University machine-independent object files
+ /// Harvard's machine-independent format
HUANY = 81,
-
/// SiTera Prism
PRISM = 82,
-
/// Atmel AVR 8-bit microcontroller
AVR = 83,
-
/// Fujitsu FR30
FR30 = 84,
-
/// Mitsubishi D10V
D10V = 85,
-
/// Mitsubishi D30V
D30V = 86,
-
- /// NEC v850
+ /// Renesas V850 (formerly NEC V850)
V850 = 87,
-
- /// Mitsubishi M32R
+ /// Renesas M32R (formerly Mitsubishi M32R)
M32R = 88,
-
/// Matsushita MN10300
MN10300 = 89,
-
/// Matsushita MN10200
MN10200 = 90,
-
/// picoJava
PJ = 91,
-
- /// OpenRISC 32-bit embedded processor
- OPENRISC = 92,
-
- /// ARC International ARCompact processor (old spelling/synonym: EM_ARC_A5)
+ /// OpenRISC 1000 32-bit embedded processor
+ OR1K = 92,
+ /// ARC International ARCompact processor
ARC_COMPACT = 93,
-
/// Tensilica Xtensa Architecture
XTENSA = 94,
-
- /// Alphamosaic VideoCore processor
+ /// Alphamosaic VideoCore processor (also old Sunplus S+core7 backend magic number)
VIDEOCORE = 95,
-
/// Thompson Multimedia General Purpose Processor
TMM_GPP = 96,
-
/// National Semiconductor 32000 series
NS32K = 97,
-
/// Tenor Network TPC processor
TPC = 98,
-
- /// Trebia SNP 1000 processor
+ /// Trebia SNP 1000 processor (also old value for picoJava; deprecated)
SNP1K = 99,
-
- /// STMicroelectronics (www.st.com) ST200
+ /// STMicroelectronics ST200 microcontroller
ST200 = 100,
-
- /// Ubicom IP2xxx microcontroller family
+ /// Ubicom IP2022 micro controller
IP2K = 101,
-
/// MAX Processor
MAX = 102,
-
- /// National Semiconductor CompactRISC microprocessor
+ /// National Semiconductor CompactRISC
CR = 103,
-
/// Fujitsu F2MC16
F2MC16 = 104,
-
- /// Texas Instruments embedded microcontroller msp430
+ /// TI msp430 micro controller
MSP430 = 105,
-
- /// Analog Devices Blackfin (DSP) processor
+ /// ADI Blackfin
BLACKFIN = 106,
-
/// S1C33 Family of Seiko Epson processors
SE_C33 = 107,
-
/// Sharp embedded microprocessor
SEP = 108,
-
/// Arca RISC Microprocessor
ARCA = 109,
-
/// Microprocessor series from PKU-Unity Ltd. and MPRC of Peking University
UNICORE = 110,
-
/// eXcess: 16/32/64-bit configurable embedded CPU
EXCESS = 111,
-
/// Icera Semiconductor Inc. Deep Execution Processor
DXP = 112,
-
/// Altera Nios II soft-core processor
ALTERA_NIOS2 = 113,
-
- /// National Semiconductor CompactRISC CRX
+ /// National Semiconductor CRX
CRX = 114,
-
- /// Motorola XGATE embedded processor
+ /// Motorola XGATE embedded processor (also old value for National Semiconductor CompactRISC; deprecated)
XGATE = 115,
-
/// Infineon C16x/XC16x processor
C166 = 116,
-
/// Renesas M16C series microprocessors
M16C = 117,
-
/// Microchip Technology dsPIC30F Digital Signal Controller
DSPIC30F = 118,
-
/// Freescale Communication Engine RISC core
CE = 119,
-
/// Renesas M32C series microprocessors
M32C = 120,
-
/// Altium TSK3000 core
TSK3000 = 131,
-
/// Freescale RS08 embedded processor
RS08 = 132,
-
/// Analog Devices SHARC family of 32-bit DSP processors
SHARC = 133,
-
/// Cyan Technology eCOG2 microprocessor
ECOG2 = 134,
-
- /// Sunplus S+core7 RISC processor
- SCORE7 = 135,
-
+ /// Sunplus S+core (and S+core7) RISC processor
+ SCORE = 135,
/// New Japan Radio (NJR) 24-bit DSP Processor
DSP24 = 136,
-
/// Broadcom VideoCore III processor
VIDEOCORE3 = 137,
-
/// RISC processor for Lattice FPGA architecture
LATTICEMICO32 = 138,
-
/// Seiko Epson C17 family
SE_C17 = 139,
-
- /// The Texas Instruments TMS320C6000 DSP family
+ /// Texas Instruments TMS320C6000 DSP family
TI_C6000 = 140,
-
- /// The Texas Instruments TMS320C2000 DSP family
+ /// Texas Instruments TMS320C2000 DSP family
TI_C2000 = 141,
-
- /// The Texas Instruments TMS320C55x DSP family
+ /// Texas Instruments TMS320C55x DSP family
TI_C5500 = 142,
-
+ /// Texas Instruments Programmable Realtime Unit
+ TI_PRU = 144,
/// STMicroelectronics 64bit VLIW Data Signal Processor
MMDSP_PLUS = 160,
-
/// Cypress M8C microprocessor
CYPRESS_M8C = 161,
-
/// Renesas R32C series microprocessors
R32C = 162,
-
/// NXP Semiconductors TriMedia architecture family
TRIMEDIA = 163,
-
- /// Qualcomm Hexagon processor
- HEXAGON = 164,
-
+ /// QUALCOMM DSP6 Processor
+ QDSP6 = 164,
/// Intel 8051 and variants
@"8051" = 165,
-
- /// STMicroelectronics STxP7x family of configurable and extensible RISC processors
+ /// STMicroelectronics STxP7x family
STXP7X = 166,
-
/// Andes Technology compact code size embedded RISC processor family
NDS32 = 167,
-
/// Cyan Technology eCOG1X family
ECOG1X = 168,
-
/// Dallas Semiconductor MAXQ30 Core Micro-controllers
MAXQ30 = 169,
-
/// New Japan Radio (NJR) 16-bit DSP Processor
XIMO16 = 170,
-
/// M2000 Reconfigurable RISC Microprocessor
MANIK = 171,
-
/// Cray Inc. NV2 vector architecture
CRAYNV2 = 172,
-
/// Renesas RX family
RX = 173,
-
- /// Imagination Technologies META processor architecture
+ /// Imagination Technologies Meta processor architecture
METAG = 174,
-
/// MCST Elbrus general purpose hardware architecture
MCST_ELBRUS = 175,
-
/// Cyan Technology eCOG16 family
ECOG16 = 176,
-
- /// National Semiconductor CompactRISC CR16 16-bit microprocessor
+ /// National Semiconductor CompactRISC 16-bit processor
CR16 = 177,
-
/// Freescale Extended Time Processing Unit
ETPU = 178,
-
/// Infineon Technologies SLE9X core
SLE9X = 179,
-
/// Intel L10M
L10M = 180,
-
/// Intel K10M
K10M = 181,
-
- /// ARM AArch64
+ /// ARM 64-bit architecture
AARCH64 = 183,
-
/// Atmel Corporation 32-bit microprocessor family
AVR32 = 185,
-
/// STMicroeletronics STM8 8-bit microcontroller
STM8 = 186,
-
/// Tilera TILE64 multicore architecture family
TILE64 = 187,
-
/// Tilera TILEPro multicore architecture family
TILEPRO = 188,
-
- /// Xilinx MicroBlaze
+ /// Xilinx MicroBlaze 32-bit RISC soft processor core
MICROBLAZE = 189,
-
/// NVIDIA CUDA architecture
CUDA = 190,
-
/// Tilera TILE-Gx multicore architecture family
TILEGX = 191,
-
/// CloudShield architecture family
CLOUDSHIELD = 192,
-
/// KIPO-KAIST Core-A 1st generation processor family
COREA_1ST = 193,
-
/// KIPO-KAIST Core-A 2nd generation processor family
COREA_2ND = 194,
-
/// Synopsys ARCompact V2
ARC_COMPACT2 = 195,
-
/// Open8 8-bit RISC soft processor core
OPEN8 = 196,
-
/// Renesas RL78 family
RL78 = 197,
-
/// Broadcom VideoCore V processor
VIDEOCORE5 = 198,
-
- /// Renesas 78KOR family
- @"78KOR" = 199,
-
+ /// Renesas 78K0R
+ @"78K0R" = 199,
/// Freescale 56800EX Digital Signal Controller (DSC)
@"56800EX" = 200,
-
/// Beyond BA1 CPU architecture
BA1 = 201,
-
/// Beyond BA2 CPU architecture
BA2 = 202,
-
/// XMOS xCORE processor family
XCORE = 203,
-
/// Microchip 8-bit PIC(r) family
MCHP_PIC = 204,
-
- /// Reserved by Intel
- INTEL205 = 205,
-
- /// Reserved by Intel
- INTEL206 = 206,
-
- /// Reserved by Intel
- INTEL207 = 207,
-
- /// Reserved by Intel
- INTEL208 = 208,
-
- /// Reserved by Intel
- INTEL209 = 209,
-
+ /// Intel Graphics Technology
+ INTELGT = 205,
/// KM211 KM32 32-bit processor
KM32 = 210,
-
/// KM211 KMX32 32-bit processor
KMX32 = 211,
-
/// KM211 KMX16 16-bit processor
KMX16 = 212,
-
/// KM211 KMX8 8-bit processor
KMX8 = 213,
-
/// KM211 KVARC processor
KVARC = 214,
-
/// Paneve CDP architecture family
CDP = 215,
-
/// Cognitive Smart Memory Processor
COGE = 216,
-
- /// iCelero CoolEngine
+ /// Bluechip Systems CoolEngine
COOL = 217,
-
/// Nanoradio Optimized RISC
NORC = 218,
-
/// CSR Kalimba architecture family
CSR_KALIMBA = 219,
-
+ /// Zilog Z80
+ Z80 = 220,
+ /// Controls and Data Services VISIUMcore processor
+ VISIUM = 221,
+ /// FTDI Chip FT32 high performance 32-bit RISC architecture
+ FT32 = 222,
+ /// Moxie processor family
+ MOXIE = 223,
/// AMD GPU architecture
AMDGPU = 224,
-
/// RISC-V
RISCV = 243,
-
/// Lanai 32-bit processor
LANAI = 244,
-
- /// Linux kernel bpf virtual machine
+ /// CEVA Processor Architecture Family
+ CEVA = 245,
+ /// CEVA X2 Processor Family
+ CEVA_X2 = 246,
+ /// Linux BPF - in-kernel virtual machine
BPF = 247,
-
- /// C-SKY
+ /// Graphcore Intelligent Processing Unit
+ GRAPHCORE_IPU = 248,
+ /// Imagination Technologies
+ IMG1 = 249,
+ /// Netronome Flow Processor
+ NFP = 250,
+ /// NEC Vector Engine
+ VE = 251,
+ /// C-SKY processor family
CSKY = 252,
-
+ /// Synopsys ARCv2.3 64-bit
+ ARC_COMPACT3_64 = 253,
+ /// MOS Technology MCS 6502 processor
+ MCS6502 = 254,
+ /// Synopsys ARCv2.3 32-bit
+ ARC_COMPACT3 = 255,
+ /// Kalray VLIW core of the MPPA processor family
+ KVX = 256,
+ /// WDC 65816/65C816
+ @"65816" = 257,
/// LoongArch
LOONGARCH = 258,
-
- /// Fujitsu FR-V
- FRV = 0x5441,
+ /// ChipON KungFu32
+ KF32 = 259,
+ /// LAPIS nX-U16/U8
+ U16_U8CORE = 260,
+ /// Tachyum
+ TACHYUM = 261,
+ /// NXP 56800EF Digital Signal Controller (DSC)
+ @"56800EF" = 262,
+ /// AVR
+ AVR_OLD = 0x1057,
+ /// MSP430
+ MSP430_OLD = 0x1059,
+ /// Morpho MT
+ MT = 0x2530,
+ /// FR30
+ CYGNUS_FR30 = 0x3330,
+ /// WebAssembly (as used by LLVM)
+ WEBASSEMBLY = 0x4157,
+ /// Infineon Technologies 16-bit microcontroller with C166-V2 core
+ XC16X = 0x4688,
+ /// Freescale S12Z
+ S12Z = 0x4def,
+ /// DLX
+ DLX = 0x5aa5,
+ /// FRV
+ CYGNUS_FRV = 0x5441,
+ /// D10V
+ CYGNUS_D10V = 0x7650,
+ /// D30V
+ CYGNUS_D30V = 0x7676,
+ /// Ubicom IP2xxx
+ IP2K_OLD = 0x8217,
+ /// Cygnus PowerPC ELF
+ CYGNUS_POWERPC = 0x9025,
+ /// Alpha
+ ALPHA = 0x9026,
+ /// Cygnus M32R ELF
+ CYGNUS_M32R = 0x9041,
+ /// V850
+ CYGNUS_V850 = 0x9080,
+ /// Old S/390
+ S390_OLD = 0xa390,
+ /// Old unofficial value for Xtensa
+ XTENSA_OLD = 0xabc7,
+ /// Xstormy16
+ XSTORMY16 = 0xad45,
+ /// MN10300
+ CYGNUS_MN10300 = 0xbeef,
+ /// MN10200
+ CYGNUS_MN10200 = 0xdead,
+ /// Renesas M32C and M16C
+ M32C_OLD = 0xfeb0,
+ /// Vitesse IQ2000
+ IQ2000 = 0xfeba,
+ /// NIOS
+ NIOS32 = 0xfebb,
+ /// Toshiba MeP
+ CYGNUS_MEP = 0xf00d,
+ /// Old unofficial value for Moxie
+ MOXIE_OLD = 0xfeed,
+ /// Old MicroBlaze
+ MICROBLAZE_OLD = 0xbaab,
+ /// Adapteva's Epiphany architecture
+ ADAPTEVA_EPIPHANY = 0x1223,
_,
};
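
Note that `EM` remains non-exhaustive (the trailing `_`), so machine values that still have no named tag continue to round-trip when read from object files. A small sketch (0x9999 is an arbitrary unnamed value):

```zig
const std = @import("std");

test "unknown e_machine values round-trip through EM" {
    const em: std.elf.EM = @enumFromInt(0x9999);
    try std.testing.expectEqual(@as(u16, 0x9999), @intFromEnum(em));
}
```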
diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig
index 7f323248cf..dad39025b6 100644
--- a/lib/std/fmt.zig
+++ b/lib/std/fmt.zig
@@ -950,13 +950,13 @@ fn formatSizeImpl(comptime base: comptime_int) type {
}
};
}
-
const formatSizeDec = formatSizeImpl(1000).formatSizeImpl;
const formatSizeBin = formatSizeImpl(1024).formatSizeImpl;
/// Return a Formatter for a u64 value representing a file size.
/// This formatter represents the number as multiple of 1000 and uses the SI
/// measurement units (kB, MB, GB, ...).
+/// Format option `precision` is ignored when `value` is less than 1kB.
pub fn fmtIntSizeDec(value: u64) std.fmt.Formatter(formatSizeDec) {
return .{ .data = value };
}
@@ -964,6 +964,7 @@ pub fn fmtIntSizeDec(value: u64) std.fmt.Formatter(formatSizeDec) {
/// Return a Formatter for a u64 value representing a file size.
/// This formatter represents the number as multiple of 1024 and uses the IEC
/// measurement units (KiB, MiB, GiB, ...).
+/// Format option `precision` is ignored when `value` is less than 1KiB.
pub fn fmtIntSizeBin(value: u64) std.fmt.Formatter(formatSizeBin) {
return .{ .data = value };
}
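
The two added doc lines describe behavior that is easy to trip over: below one unit the formatter prints the exact byte count and silently drops `precision`. A sketch of both sides of the boundary for the binary formatter (the expected strings assume the current suffix spelling, with no space before the unit):

```zig
const std = @import("std");

test "precision is ignored below one unit" {
    var buf: [32]u8 = undefined;
    // 512 bytes is under 1KiB: the exact count is printed, precision dropped.
    const small = try std.fmt.bufPrint(&buf, "{:.2}", .{std.fmt.fmtIntSizeBin(512)});
    try std.testing.expectEqualStrings("512B", small);
    // 1536 bytes is 1.5KiB: precision applies as usual.
    const big = try std.fmt.bufPrint(&buf, "{:.1}", .{std.fmt.fmtIntSizeBin(1536)});
    try std.testing.expectEqualStrings("1.5KiB", big);
}
```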
diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig
index 1c8a5d78af..a812760172 100644
--- a/lib/std/hash_map.zig
+++ b/lib/std/hash_map.zig
@@ -11,11 +11,11 @@ pub fn getAutoHashFn(comptime K: type, comptime Context: type) (fn (Context, K)
comptime {
assert(@hasDecl(std, "StringHashMap")); // detect when the following message needs updated
if (K == []const u8) {
- @compileError("std.auto_hash.autoHash does not allow slices here (" ++
+ @compileError("std.hash.autoHash does not allow slices here (" ++
@typeName(K) ++
") because the intent is unclear. " ++
"Consider using std.StringHashMap for hashing the contents of []const u8. " ++
- "Alternatively, consider using std.auto_hash.hash or providing your own hash function instead.");
+ "Alternatively, consider using std.hash.autoHashStrat or providing your own hash function instead.");
}
}
@@ -721,6 +721,8 @@ pub fn HashMap(
/// the price of handling size with u32, which should be reasonable enough
/// for almost all uses.
/// Deletions are achieved with tombstones.
+///
+/// Default initialization of this struct is deprecated; use `.empty` instead.
pub fn HashMapUnmanaged(
comptime K: type,
comptime V: type,
@@ -762,6 +764,13 @@ pub fn HashMapUnmanaged(
/// Capacity of the first grow when bootstrapping the hashmap.
const minimal_capacity = 8;
+ /// A map containing no keys or values.
+ pub const empty: Self = .{
+ .metadata = null,
+ .size = 0,
+ .available = 0,
+ };
+
// This hashmap is specially designed for sizes that fit in a u32.
pub const Size = u32;
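
As with the array-backed containers, `.empty` gives `HashMapUnmanaged` a decl-literal initializer. A short usage sketch (illustrative):

```zig
const std = @import("std");

test "HashMapUnmanaged starts from .empty" {
    const gpa = std.testing.allocator;
    var map: std.AutoHashMapUnmanaged(u32, u32) = .empty;
    defer map.deinit(gpa);
    try map.put(gpa, 7, 49);
    try std.testing.expectEqual(@as(u32, 49), map.get(7).?);
}
```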
diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
index 61e256d290..b760c9d85d 100644
--- a/lib/std/heap/general_purpose_allocator.zig
+++ b/lib/std/heap/general_purpose_allocator.zig
@@ -157,6 +157,7 @@ pub const Config = struct {
pub const Check = enum { ok, leak };
+/// Default initialization of this struct is deprecated; use `.init` instead.
pub fn GeneralPurposeAllocator(comptime config: Config) type {
return struct {
backing_allocator: Allocator = std.heap.page_allocator,
@@ -174,6 +175,16 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
const Self = @This();
+ /// The initial state of a `GeneralPurposeAllocator`, containing no allocations and backed by the system page allocator.
+ pub const init: Self = .{
+ .backing_allocator = std.heap.page_allocator,
+ .buckets = [1]Buckets{.{}} ** small_bucket_count,
+ .cur_buckets = [1]?*BucketHeader{null} ** small_bucket_count,
+ .large_allocations = .{},
+ .empty_buckets = if (config.retain_metadata) .{} else {},
+ .bucket_node_pool = .init(std.heap.page_allocator),
+ };
+
const total_requested_bytes_init = if (config.enable_memory_limit) @as(usize, 0) else {};
const requested_memory_limit_init = if (config.enable_memory_limit) @as(usize, math.maxInt(usize)) else {};
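
With `.init`, a GPA can also be created as a decl literal; the old field-default initialization keeps working but is deprecated. A usage sketch (illustrative):

```zig
const std = @import("std");

pub fn main() !void {
    var gpa: std.heap.GeneralPurposeAllocator(.{}) = .init;
    defer _ = gpa.deinit(); // reports .ok or .leak
    const allocator = gpa.allocator();

    const buf = try allocator.alloc(u8, 16);
    defer allocator.free(buf);
}
```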
diff --git a/lib/std/math.zig b/lib/std/math.zig
index 67782bf93b..1e7858aaa9 100644
--- a/lib/std/math.zig
+++ b/lib/std/math.zig
@@ -1720,20 +1720,20 @@ pub fn comptimeMod(num: anytype, comptime denom: comptime_int) IntFittingRange(0
pub const F80 = struct {
fraction: u64,
exp: u16,
-};
-pub fn make_f80(repr: F80) f80 {
- const int = (@as(u80, repr.exp) << 64) | repr.fraction;
- return @as(f80, @bitCast(int));
-}
+ pub fn toFloat(self: F80) f80 {
+ const int = (@as(u80, self.exp) << 64) | self.fraction;
+ return @as(f80, @bitCast(int));
+ }
-pub fn break_f80(x: f80) F80 {
- const int = @as(u80, @bitCast(x));
- return .{
- .fraction = @as(u64, @truncate(int)),
- .exp = @as(u16, @truncate(int >> 64)),
- };
-}
+ pub fn fromFloat(x: f80) F80 {
+ const int = @as(u80, @bitCast(x));
+ return .{
+ .fraction = @as(u64, @truncate(int)),
+ .exp = @as(u16, @truncate(int >> 64)),
+ };
+ }
+};
/// Returns -1, 0, or 1.
/// Supports integer and float types and vectors of integer and float types.
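
Moving the free functions onto `F80` as `toFloat`/`fromFloat` changes only the spelling; the bit-level behavior is identical, as the mechanical compiler-rt updates above show. A round-trip sketch (illustrative):

```zig
const std = @import("std");

test "F80 round-trips through its raw representation" {
    const x: f80 = 1.5;
    const rep = std.math.F80.fromFloat(x);
    try std.testing.expectEqual(x, rep.toFloat());
}
```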
diff --git a/lib/std/math/nextafter.zig b/lib/std/math/nextafter.zig
index 12418b5a1a..399be6cd11 100644
--- a/lib/std/math/nextafter.zig
+++ b/lib/std/math/nextafter.zig
@@ -61,7 +61,7 @@ fn nextAfterFloat(comptime T: type, x: T, y: T) T {
const integer_bit_mask = 1 << math.floatFractionalBits(f80);
const exponent_bits_mask = (1 << math.floatExponentBits(f80)) - 1;
- var x_parts = math.break_f80(x);
+ var x_parts = math.F80.fromFloat(x);
// Bitwise increment/decrement the fractional part while also taking care to update the
// exponent if we overflow the fractional part. This might flip the integer bit; this is
@@ -88,7 +88,7 @@ fn nextAfterFloat(comptime T: type, x: T, y: T) T {
// set to cleared (if the old value was normal) or remained cleared (if the old value was
// subnormal), both of which are the outcomes we want.
- return math.make_f80(x_parts);
+ return x_parts.toFloat();
} else {
const Bits = std.meta.Int(.unsigned, @bitSizeOf(T));
var x_bits: Bits = @bitCast(x);
diff --git a/lib/std/zig/AstGen.zig b/lib/std/zig/AstGen.zig
index aa1ea2498f..44af61048d 100644
--- a/lib/std/zig/AstGen.zig
+++ b/lib/std/zig/AstGen.zig
@@ -1025,7 +1025,18 @@ fn expr(gz: *GenZir, scope: *Scope, ri: ResultInfo, node: Ast.Node.Index) InnerE
const statements = tree.extra_data[node_datas[node].lhs..node_datas[node].rhs];
return blockExpr(gz, scope, ri, node, statements, .normal);
},
- .enum_literal => return simpleStrTok(gz, ri, main_tokens[node], node, .enum_literal),
+ .enum_literal => if (try ri.rl.resultType(gz, node)) |res_ty| {
+ const str_index = try astgen.identAsString(main_tokens[node]);
+ const res = try gz.addPlNode(.decl_literal, node, Zir.Inst.Field{
+ .lhs = res_ty,
+ .field_name_start = str_index,
+ });
+ switch (ri.rl) {
+ .discard, .none, .ref => unreachable, // no result type
+ .ty, .coerced_ty => return res, // `decl_literal` does the coercion for us
+ .ref_coerced_ty, .ptr, .inferred_ptr, .destructure => return rvalue(gz, ri, res, node),
+ }
+ } else return simpleStrTok(gz, ri, main_tokens[node], node, .enum_literal),
.error_value => return simpleStrTok(gz, ri, node_datas[node].rhs, node, .error_value),
// TODO restore this when implementing https://github.com/ziglang/zig/issues/6025
// .anyframe_literal => return rvalue(gz, ri, .anyframe_type, node),
@@ -2783,6 +2794,8 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.err_union_code_ptr,
.ptr_type,
.enum_literal,
+ .decl_literal,
+ .decl_literal_no_coerce,
.merge_error_sets,
.error_union_type,
.bit_not,
@@ -2946,6 +2959,8 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.validate_array_init_result_ty,
.validate_ptr_array_init,
.validate_ref_ty,
+ .try_operand_ty,
+ .try_ref_operand_ty,
=> break :b true,
.@"defer" => unreachable,
@@ -5919,13 +5934,21 @@ fn tryExpr(
}
const try_lc = LineColumn{ astgen.source_line - parent_gz.decl_line, astgen.source_column };
- const operand_ri: ResultInfo = switch (ri.rl) {
- .ref, .ref_coerced_ty => .{ .rl = .ref, .ctx = .error_handling_expr },
- else => .{ .rl = .none, .ctx = .error_handling_expr },
+ const operand_rl: ResultInfo.Loc, const block_tag: Zir.Inst.Tag = switch (ri.rl) {
+ .ref => .{ .ref, .try_ptr },
+ .ref_coerced_ty => |payload_ptr_ty| .{
+ .{ .ref_coerced_ty = try parent_gz.addUnNode(.try_ref_operand_ty, payload_ptr_ty, node) },
+ .try_ptr,
+ },
+ else => if (try ri.rl.resultType(parent_gz, node)) |payload_ty| .{
+ // `coerced_ty` is OK due to the `rvalue` call below
+ .{ .coerced_ty = try parent_gz.addUnNode(.try_operand_ty, payload_ty, node) },
+ .@"try",
+ } else .{ .none, .@"try" },
};
+ const operand_ri: ResultInfo = .{ .rl = operand_rl, .ctx = .error_handling_expr };
// This could be a pointer or value depending on the `ri` parameter.
const operand = try reachableExpr(parent_gz, scope, operand_ri, operand_node, node);
- const block_tag: Zir.Inst.Tag = if (operand_ri.rl == .ref) .try_ptr else .@"try";
const try_inst = try parent_gz.makeBlockInst(block_tag, node);
try parent_gz.instructions.append(astgen.gpa, try_inst);
@@ -9981,7 +10004,7 @@ fn callExpr(
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
- const callee = try calleeExpr(gz, scope, call.ast.fn_expr);
+ const callee = try calleeExpr(gz, scope, ri.rl, call.ast.fn_expr);
const modifier: std.builtin.CallModifier = blk: {
if (gz.is_comptime) {
break :blk .compile_time;
@@ -10109,6 +10132,7 @@ const Callee = union(enum) {
fn calleeExpr(
gz: *GenZir,
scope: *Scope,
+ call_rl: ResultInfo.Loc,
node: Ast.Node.Index,
) InnerError!Callee {
const astgen = gz.astgen;
@@ -10135,6 +10159,19 @@ fn calleeExpr(
.field_name_start = str_index,
} };
},
+ .enum_literal => if (try call_rl.resultType(gz, node)) |res_ty| {
+ // Decl literal call syntax, e.g.
+ // `const foo: T = .init();`
+ // Look up `init` in `T`, but don't try and coerce it.
+ const str_index = try astgen.identAsString(tree.nodes.items(.main_token)[node]);
+ const callee = try gz.addPlNode(.decl_literal_no_coerce, node, Zir.Inst.Field{
+ .lhs = res_ty,
+ .field_name_start = str_index,
+ });
+ return .{ .direct = callee };
+ } else {
+ return .{ .direct = try expr(gz, scope, .{ .rl = .none }, node) };
+ },
else => return .{ .direct = try expr(gz, scope, .{ .rl = .none }, node) },
}
}
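
Taken together, these AstGen changes implement decl literals: an enum-literal expression whose result type is known resolves the name inside that type, and `.name(...)` call syntax resolves the callee the same way without coercing it first. A sketch of the surface syntax (the `Point` type is illustrative):

```zig
const std = @import("std");

const Point = struct {
    x: i32,
    y: i32,

    pub const origin: Point = .{ .x = 0, .y = 0 };

    pub fn init(x: i32, y: i32) Point {
        return .{ .x = x, .y = y };
    }
};

test "decl literals resolve names in the result type" {
    const a: Point = .origin; // lowers to `decl_literal`
    const b: Point = .init(1, 2); // callee lowers to `decl_literal_no_coerce`
    try std.testing.expectEqual(@as(i32, 0), a.x);
    try std.testing.expectEqual(@as(i32, 2), b.y);
}
```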
diff --git a/lib/std/zig/BuiltinFn.zig b/lib/std/zig/BuiltinFn.zig
index 1da3ffb5a7..95c6c7be12 100644
--- a/lib/std/zig/BuiltinFn.zig
+++ b/lib/std/zig/BuiltinFn.zig
@@ -482,7 +482,7 @@ pub const list = list: {
"@errorCast",
.{
.tag = .error_cast,
- .eval_to_error = .always,
+ .eval_to_error = .maybe,
.param_count = 1,
},
},
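
The `.always` to `.maybe` change reflects that `@errorCast` accepts a full error union, in which case the result is not necessarily an error. A sketch (the error set names are illustrative):

```zig
const std = @import("std");

const Narrow = error{Overflow};
const Wide = error{ Overflow, OutOfMemory };

test "@errorCast applies to error unions, not just error sets" {
    const wide: Wide!u32 = 123;
    const narrow: Narrow!u32 = @errorCast(wide);
    try std.testing.expectEqual(@as(u32, 123), try narrow);
}
```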
diff --git a/lib/std/zig/Zir.zig b/lib/std/zig/Zir.zig
index 0186b45f74..a918bb769c 100644
--- a/lib/std/zig/Zir.zig
+++ b/lib/std/zig/Zir.zig
@@ -654,6 +654,14 @@ pub const Inst = struct {
err_union_code_ptr,
/// An enum literal. Uses the `str_tok` union field.
enum_literal,
+ /// A decl literal. This is similar to `field`, but unwraps error unions and optionals,
+ /// and coerces the result to the given type.
+ /// Uses the `pl_node` union field. Payload is `Field`.
+ decl_literal,
+ /// The same as `decl_literal`, but the coercion is omitted. This is used for decl literal
+ /// function call syntax, i.e. `.foo()`.
+ /// Uses the `pl_node` union field. Payload is `Field`.
+ decl_literal_no_coerce,
/// A switch expression. Uses the `pl_node` union field.
/// AST node is the switch, payload is `SwitchBlock`.
switch_block,
@@ -687,6 +695,14 @@ pub const Inst = struct {
/// operator. Emit a compile error if not.
/// Uses the `un_tok` union field. Token is the `&` operator. Operand is the type.
validate_ref_ty,
+ /// Given a type `T`, construct the type `E!T`, where `E` is this function's error set, to be used
+ /// as the result type of a `try` operand. Generic poison is propagated.
+ /// Uses the `un_node` union field. Node is the `try` expression. Operand is the type `T`.
+ try_operand_ty,
+ /// Given a type `*T`, construct the type `*E!T`, where `E` is this function's error set, to be used
+ /// as the result type of a `try` operand whose address is taken with `&`. Generic poison is propagated.
+ /// Uses the `un_node` union field. Node is the `try` expression. Operand is the type `*T`.
+ try_ref_operand_ty,
// The following tags all relate to struct initialization expressions.
@@ -1139,6 +1155,8 @@ pub const Inst = struct {
.err_union_code_ptr,
.ptr_type,
.enum_literal,
+ .decl_literal,
+ .decl_literal_no_coerce,
.merge_error_sets,
.error_union_type,
.bit_not,
@@ -1257,6 +1275,8 @@ pub const Inst = struct {
.array_init_elem_type,
.array_init_elem_ptr,
.validate_ref_ty,
+ .try_operand_ty,
+ .try_ref_operand_ty,
.restore_err_ret_index_unconditional,
.restore_err_ret_index_fn_entry,
=> false,
@@ -1328,6 +1348,8 @@ pub const Inst = struct {
.validate_array_init_result_ty,
.validate_ptr_array_init,
.validate_ref_ty,
+ .try_operand_ty,
+ .try_ref_operand_ty,
=> true,
.param,
@@ -1434,6 +1456,8 @@ pub const Inst = struct {
.err_union_code_ptr,
.ptr_type,
.enum_literal,
+ .decl_literal,
+ .decl_literal_no_coerce,
.merge_error_sets,
.error_union_type,
.bit_not,
@@ -1691,6 +1715,8 @@ pub const Inst = struct {
.err_union_code = .un_node,
.err_union_code_ptr = .un_node,
.enum_literal = .str_tok,
+ .decl_literal = .pl_node,
+ .decl_literal_no_coerce = .pl_node,
.switch_block = .pl_node,
.switch_block_ref = .pl_node,
.switch_block_err_union = .pl_node,
@@ -1704,6 +1730,8 @@ pub const Inst = struct {
.opt_eu_base_ptr_init = .un_node,
.coerce_ptr_elem_ty = .pl_node,
.validate_ref_ty = .un_tok,
+ .try_operand_ty = .un_node,
+ .try_ref_operand_ty = .un_node,
.int_from_ptr = .un_node,
.compile_error = .un_node,
@@ -3840,12 +3868,16 @@ fn findDeclsInner(
.err_union_code,
.err_union_code_ptr,
.enum_literal,
+ .decl_literal,
+ .decl_literal_no_coerce,
.validate_deref,
.validate_destructure,
.field_type_ref,
.opt_eu_base_ptr_init,
.coerce_ptr_elem_ty,
.validate_ref_ty,
+ .try_operand_ty,
+ .try_ref_operand_ty,
.struct_init_empty,
.struct_init_empty_result,
.struct_init_empty_ref_result,
diff --git a/src/Sema.zig b/src/Sema.zig
index 57c33324f5..7ab60adbf2 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -1089,6 +1089,8 @@ fn analyzeBodyInner(
.indexable_ptr_elem_type => try sema.zirIndexablePtrElemType(block, inst),
.vector_elem_type => try sema.zirVectorElemType(block, inst),
.enum_literal => try sema.zirEnumLiteral(block, inst),
+ .decl_literal => try sema.zirDeclLiteral(block, inst, true),
+ .decl_literal_no_coerce => try sema.zirDeclLiteral(block, inst, false),
.int_from_enum => try sema.zirIntFromEnum(block, inst),
.enum_from_int => try sema.zirEnumFromInt(block, inst),
.err_union_code => try sema.zirErrUnionCode(block, inst),
@@ -1194,6 +1196,8 @@ fn analyzeBodyInner(
.validate_array_init_ref_ty => try sema.zirValidateArrayInitRefTy(block, inst),
.opt_eu_base_ptr_init => try sema.zirOptEuBasePtrInit(block, inst),
.coerce_ptr_elem_ty => try sema.zirCoercePtrElemTy(block, inst),
+ .try_operand_ty => try sema.zirTryOperandTy(block, inst, false),
+ .try_ref_operand_ty => try sema.zirTryOperandTy(block, inst, true),
.clz => try sema.zirBitCount(block, inst, .clz, Value.clz),
.ctz => try sema.zirBitCount(block, inst, .ctz, Value.ctz),
@@ -2050,6 +2054,22 @@ fn genericPoisonReason(sema: *Sema, block: *Block, ref: Zir.Inst.Ref) GenericPoi
const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
cur = un_node.operand;
},
+ .try_operand_ty => {
+ // Either the input type was itself poison, or it was a slice, which we cannot translate
+ // to an overall result type.
+ const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
+ const operand_ref = sema.resolveInst(un_node.operand) catch |err| switch (err) {
+ error.GenericPoison => unreachable, // this is a type, not a value
+ };
+ if (operand_ref == .generic_poison_type) {
+ // The input was poison -- keep looking.
+ cur = un_node.operand;
+ continue;
+ }
+ // We got a poison because the result type was a slice. This is a tricky case -- let's just
+ // not bother explaining it to the user for now...
+ return .unknown;
+ },
.struct_init_field_type => {
const pl_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
const extra = sema.code.extraData(Zir.Inst.FieldType, pl_node.payload_index).data;
@@ -4449,6 +4469,59 @@ fn zirCoercePtrElemTy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
}
}
+fn zirTryOperandTy(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_ref: bool) CompileError!Air.Inst.Ref {
+ const pt = sema.pt;
+ const zcu = pt.zcu;
+ const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
+ const src = block.nodeOffset(un_node.src_node);
+
+ const operand_ty = sema.resolveType(block, src, un_node.operand) catch |err| switch (err) {
+ error.GenericPoison => return .generic_poison_type,
+ else => |e| return e,
+ };
+
+ const payload_ty = if (is_ref) ty: {
+ if (!operand_ty.isSinglePointer(zcu)) {
+ return .generic_poison_type; // we can't get a meaningful result type here, since it will be `*E![n]T`, and we don't know `n`.
+ }
+ break :ty operand_ty.childType(zcu);
+ } else operand_ty;
+
+ const err_set_ty = err_set: {
+ // There are awkward cases, like `?E`. Our strategy is to repeatedly unwrap optionals
+ // until we hit an error union or set.
+ var cur_ty = sema.fn_ret_ty;
+ while (true) {
+ switch (cur_ty.zigTypeTag(zcu)) {
+ .error_set => break :err_set cur_ty,
+ .error_union => break :err_set cur_ty.errorUnionSet(zcu),
+ .optional => cur_ty = cur_ty.optionalChild(zcu),
+ else => return sema.failWithOwnedErrorMsg(block, msg: {
+ const msg = try sema.errMsg(src, "expected '{}', found error set", .{sema.fn_ret_ty.fmt(pt)});
+ errdefer msg.destroy(sema.gpa);
+ const ret_ty_src: LazySrcLoc = .{
+ .base_node_inst = sema.getOwnerFuncDeclInst(),
+ .offset = .{ .node_offset_fn_type_ret_ty = 0 },
+ };
+ try sema.errNote(ret_ty_src, msg, "function cannot return an error", .{});
+ break :msg msg;
+ }),
+ }
+ }
+ };
+
+ const eu_ty = try pt.errorUnionType(err_set_ty, payload_ty);
+
+ if (is_ref) {
+ var ptr_info = operand_ty.ptrInfo(zcu);
+ ptr_info.child = eu_ty.toIntern();
+ const eu_ptr_ty = try pt.ptrTypeSema(ptr_info);
+ return Air.internedToRef(eu_ptr_ty.toIntern());
+ } else {
+ return Air.internedToRef(eu_ty.toIntern());
+ }
+}
+
fn zirValidateRefTy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
const pt = sema.pt;
const zcu = pt.zcu;
@@ -8892,6 +8965,54 @@ fn zirEnumLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
})));
}
+fn zirDeclLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index, do_coerce: bool) CompileError!Air.Inst.Ref {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ const pt = sema.pt;
+ const zcu = pt.zcu;
+ const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node;
+ const src = block.nodeOffset(inst_data.src_node);
+ const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data;
+ const name = try zcu.intern_pool.getOrPutString(
+ sema.gpa,
+ pt.tid,
+ sema.code.nullTerminatedString(extra.field_name_start),
+ .no_embedded_nulls,
+ );
+ const orig_ty = sema.resolveType(block, src, extra.lhs) catch |err| switch (err) {
+ error.GenericPoison => {
+ // Treat this as a normal enum literal.
+ return Air.internedToRef(try pt.intern(.{ .enum_literal = name }));
+ },
+ else => |e| return e,
+ };
+
+ var ty = orig_ty;
+ while (true) switch (ty.zigTypeTag(zcu)) {
+ .error_union => ty = ty.errorUnionPayload(zcu),
+ .optional => ty = ty.optionalChild(zcu),
+ .enum_literal, .error_set => {
+ // Treat this as a normal enum literal.
+ return Air.internedToRef(try pt.intern(.{ .enum_literal = name }));
+ },
+ else => break,
+ };
+
+ const result = try sema.fieldVal(block, src, Air.internedToRef(ty.toIntern()), name, src);
+
+ // Decl literals cannot lookup runtime `var`s.
+ if (!try sema.isComptimeKnown(result)) {
+ return sema.fail(block, src, "decl literal must be comptime-known", .{});
+ }
+
+ if (do_coerce) {
+ return sema.coerce(block, orig_ty, result, src);
+ } else {
+ return result;
+ }
+}
+
fn zirIntFromEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
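
`zirTryOperandTy` is what lets `try` forward a result type: given an expected type `T`, the operand is evaluated with result type `E!T`, where `E` is the enclosing function's error set. Combined with `zirDeclLiteral` above, that makes decl literals compose with `try`. A sketch of the pattern this enables (the `Config` type is hypothetical):

```zig
const std = @import("std");

const Config = struct {
    threads: u32,

    pub fn fromEnv() !Config {
        return .{ .threads = 4 };
    }
};

test "try forwards its result type to its operand" {
    // The operand of `try` gets result type `E!Config`, so the
    // decl literal `.fromEnv()` resolves inside `Config`.
    const cfg: Config = try .fromEnv();
    try std.testing.expectEqual(@as(u32, 4), cfg.threads);
}
```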
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index a30f764eb8..1207eed88d 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -3483,9 +3483,48 @@ fn airUnwrapErrPayloadPtr(func: *Func, inst: Air.Inst.Index) !void {
return func.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
+// *(E!T) => *T
fn airErrUnionPayloadPtrSet(func: *Func, inst: Air.Inst.Index) !void {
const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement .errunion_payload_ptr_set for {}", .{func.target.cpu.arch});
+ const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: {
+ const zcu = func.pt.zcu;
+ const src_ty = func.typeOf(ty_op.operand);
+ const src_mcv = try func.resolveInst(ty_op.operand);
+
+ // `src_reg` contains the pointer to the error union
+ const src_reg = switch (src_mcv) {
+ .register => |reg| reg,
+ else => try func.copyToTmpRegister(src_ty, src_mcv),
+ };
+ const src_lock = func.register_manager.lockRegAssumeUnused(src_reg);
+ defer func.register_manager.unlockReg(src_lock);
+
+ // zero out the slot where the error code would have been
+ const eu_ty = src_ty.childType(zcu);
+ const pl_ty = eu_ty.errorUnionPayload(zcu);
+ const err_ty = eu_ty.errorUnionSet(zcu);
+ const err_off: i32 = @intCast(errUnionErrorOffset(pl_ty, zcu));
+ try func.genSetMem(.{ .reg = src_reg }, err_off, err_ty, .{ .immediate = 0 });
+
+ const dst_reg, const dst_lock = if (func.reuseOperand(inst, ty_op.operand, 0, src_mcv))
+ .{ src_reg, null }
+ else
+ try func.allocReg(.int);
+ defer if (dst_lock) |lock| func.register_manager.unlockReg(lock);
+
+ // move the pointer to be at the payload
+ const pl_off = errUnionPayloadOffset(pl_ty, zcu);
+ try func.genBinOp(
+ .add,
+ .{ .register = src_reg },
+ Type.u64,
+ .{ .immediate = pl_off },
+ Type.u64,
+ dst_reg,
+ );
+
+ break :result .{ .register = dst_reg };
+ };
return func.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index e5e43bbbf8..a46d875b34 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -898,58 +898,7 @@ pub const Object = struct {
.{ .optimized = comp.root_mod.optimize_mode != .Debug },
);
- const i32_2 = try builder.intConst(.i32, 2);
- const i32_3 = try builder.intConst(.i32, 3);
- const debug_info_version = try builder.debugModuleFlag(
- try builder.metadataConstant(i32_2),
- try builder.metadataString("Debug Info Version"),
- try builder.metadataConstant(i32_3),
- );
-
- switch (comp.config.debug_format) {
- .strip => unreachable,
- .dwarf => |f| {
- const i32_4 = try builder.intConst(.i32, 4);
- const dwarf_version = try builder.debugModuleFlag(
- try builder.metadataConstant(i32_2),
- try builder.metadataString("Dwarf Version"),
- try builder.metadataConstant(i32_4),
- );
- switch (f) {
- .@"32" => {
- try builder.debugNamed(try builder.metadataString("llvm.module.flags"), &.{
- debug_info_version,
- dwarf_version,
- });
- },
- .@"64" => {
- const dwarf64 = try builder.debugModuleFlag(
- try builder.metadataConstant(i32_2),
- try builder.metadataString("DWARF64"),
- try builder.metadataConstant(.@"1"),
- );
- try builder.debugNamed(try builder.metadataString("llvm.module.flags"), &.{
- debug_info_version,
- dwarf_version,
- dwarf64,
- });
- },
- }
- },
- .code_view => {
- const code_view = try builder.debugModuleFlag(
- try builder.metadataConstant(i32_2),
- try builder.metadataString("CodeView"),
- try builder.metadataConstant(.@"1"),
- );
- try builder.debugNamed(try builder.metadataString("llvm.module.flags"), &.{
- debug_info_version,
- code_view,
- });
- },
- }
-
- try builder.debugNamed(try builder.metadataString("llvm.dbg.cu"), &.{debug_compile_unit});
+ try builder.metadataNamed(try builder.metadataString("llvm.dbg.cu"), &.{debug_compile_unit});
break :debug_info .{ debug_compile_unit, debug_enums_fwd_ref, debug_globals_fwd_ref };
} else .{.none} ** 3;
@@ -1149,6 +1098,84 @@ pub const Object = struct {
}
}
+ {
+ var module_flags = try std.ArrayList(Builder.Metadata).initCapacity(o.gpa, 6);
+ defer module_flags.deinit();
+
+ const behavior_error = try o.builder.metadataConstant(try o.builder.intConst(.i32, 1));
+ const behavior_warning = try o.builder.metadataConstant(try o.builder.intConst(.i32, 2));
+ const behavior_max = try o.builder.metadataConstant(try o.builder.intConst(.i32, 7));
+ const behavior_min = try o.builder.metadataConstant(try o.builder.intConst(.i32, 8));
+
+ const pic_level = target_util.picLevel(comp.root_mod.resolved_target.result);
+ if (comp.root_mod.pic) {
+ module_flags.appendAssumeCapacity(try o.builder.metadataModuleFlag(
+ behavior_min,
+ try o.builder.metadataString("PIC Level"),
+ try o.builder.metadataConstant(try o.builder.intConst(.i32, pic_level)),
+ ));
+ }
+
+ if (comp.config.pie) {
+ module_flags.appendAssumeCapacity(try o.builder.metadataModuleFlag(
+ behavior_max,
+ try o.builder.metadataString("PIE Level"),
+ try o.builder.metadataConstant(try o.builder.intConst(.i32, pic_level)),
+ ));
+ }
+
+ if (comp.root_mod.code_model != .default) {
+ module_flags.appendAssumeCapacity(try o.builder.metadataModuleFlag(
+ behavior_error,
+ try o.builder.metadataString("Code Model"),
+ try o.builder.metadataConstant(try o.builder.intConst(.i32, @as(i32, switch (comp.root_mod.code_model) {
+ .tiny => 0,
+ .small => 1,
+ .kernel => 2,
+ .medium => 3,
+ .large => 4,
+ else => unreachable,
+ }))),
+ ));
+ }
+
+ if (!o.builder.strip) {
+ module_flags.appendAssumeCapacity(try o.builder.metadataModuleFlag(
+ behavior_warning,
+ try o.builder.metadataString("Debug Info Version"),
+ try o.builder.metadataConstant(try o.builder.intConst(.i32, 3)),
+ ));
+
+ switch (comp.config.debug_format) {
+ .strip => unreachable,
+ .dwarf => |f| {
+ module_flags.appendAssumeCapacity(try o.builder.metadataModuleFlag(
+ behavior_max,
+ try o.builder.metadataString("Dwarf Version"),
+ try o.builder.metadataConstant(try o.builder.intConst(.i32, 4)),
+ ));
+
+ if (f == .@"64") {
+ module_flags.appendAssumeCapacity(try o.builder.metadataModuleFlag(
+ behavior_max,
+ try o.builder.metadataString("DWARF64"),
+ try o.builder.metadataConstant(.@"1"),
+ ));
+ }
+ },
+ .code_view => {
+ module_flags.appendAssumeCapacity(try o.builder.metadataModuleFlag(
+ behavior_warning,
+ try o.builder.metadataString("CodeView"),
+ try o.builder.metadataConstant(.@"1"),
+ ));
+ },
+ }
+ }
+
+ try o.builder.metadataNamed(try o.builder.metadataString("llvm.module.flags"), module_flags.items);
+ }
+
const target_triple_sentinel =
try o.gpa.dupeZ(u8, o.builder.target_triple.slice(&o.builder).?);
defer o.gpa.free(target_triple_sentinel);
@@ -1235,14 +1262,13 @@ pub const Object = struct {
}
const optimize_mode = comp.root_mod.optimize_mode;
- const pic = comp.root_mod.pic;
const opt_level: llvm.CodeGenOptLevel = if (optimize_mode == .Debug)
.None
else
.Aggressive;
- const reloc_mode: llvm.RelocMode = if (pic)
+ const reloc_mode: llvm.RelocMode = if (comp.root_mod.pic)
.PIC
else if (comp.config.link_mode == .dynamic)
llvm.RelocMode.DynamicNoPIC
@@ -1276,13 +1302,6 @@ pub const Object = struct {
);
errdefer target_machine.dispose();
- const large_pic = target_util.usesLargePIC(comp.root_mod.resolved_target.result);
-
- if (pic) module.setModulePICLevel(large_pic);
- if (comp.config.pie) module.setModulePIELevel(large_pic);
-
- if (code_model != .Default) module.setModuleCodeModel(code_model);
-
if (comp.llvm_opt_bisect_limit >= 0) {
context.setOptBisectLimit(comp.llvm_opt_bisect_limit);
}
diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig
index d663f21a21..f6bfcab1ad 100644
--- a/src/codegen/llvm/Builder.zig
+++ b/src/codegen/llvm/Builder.zig
@@ -11952,10 +11952,10 @@ pub fn trailingMetadataStringAssumeCapacity(self: *Builder) MetadataString {
return @enumFromInt(gop.index);
}
-pub fn debugNamed(self: *Builder, name: MetadataString, operands: []const Metadata) Allocator.Error!void {
+pub fn metadataNamed(self: *Builder, name: MetadataString, operands: []const Metadata) Allocator.Error!void {
try self.metadata_extra.ensureUnusedCapacity(self.gpa, operands.len);
try self.metadata_named.ensureUnusedCapacity(self.gpa, 1);
- self.debugNamedAssumeCapacity(name, operands);
+ self.metadataNamedAssumeCapacity(name, operands);
}
fn metadataNone(self: *Builder) Allocator.Error!Metadata {
@@ -12266,14 +12266,14 @@ pub fn strTuple(
return self.strTupleAssumeCapacity(str, elements);
}
-pub fn debugModuleFlag(
+pub fn metadataModuleFlag(
self: *Builder,
behavior: Metadata,
name: MetadataString,
constant: Metadata,
) Allocator.Error!Metadata {
try self.ensureUnusedMetadataCapacity(1, Metadata.ModuleFlag, 0);
- return self.debugModuleFlagAssumeCapacity(behavior, name, constant);
+ return self.metadataModuleFlagAssumeCapacity(behavior, name, constant);
}
pub fn debugLocalVar(
@@ -12418,8 +12418,7 @@ fn metadataDistinctAssumeCapacity(self: *Builder, tag: Metadata.Tag, value: anyt
return @enumFromInt(gop.index);
}
-fn debugNamedAssumeCapacity(self: *Builder, name: MetadataString, operands: []const Metadata) void {
- assert(!self.strip);
+fn metadataNamedAssumeCapacity(self: *Builder, name: MetadataString, operands: []const Metadata) void {
assert(name != .none);
const extra_index: u32 = @intCast(self.metadata_extra.items.len);
self.metadata_extra.appendSliceAssumeCapacity(@ptrCast(operands));
@@ -13002,13 +13001,12 @@ fn strTupleAssumeCapacity(
return @enumFromInt(gop.index);
}
-fn debugModuleFlagAssumeCapacity(
+fn metadataModuleFlagAssumeCapacity(
self: *Builder,
behavior: Metadata,
name: MetadataString,
constant: Metadata,
) Metadata {
- assert(!self.strip);
return self.metadataSimpleAssumeCapacity(.module_flag, Metadata.ModuleFlag{
.behavior = behavior,
.name = name,
diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig
index d21fc69ff3..a002dbcd8a 100644
--- a/src/codegen/llvm/bindings.zig
+++ b/src/codegen/llvm/bindings.zig
@@ -51,15 +51,6 @@ pub const Context = opaque {
pub const Module = opaque {
pub const dispose = LLVMDisposeModule;
extern fn LLVMDisposeModule(*Module) void;
-
- pub const setModulePICLevel = ZigLLVMSetModulePICLevel;
- extern fn ZigLLVMSetModulePICLevel(module: *Module, big: bool) void;
-
- pub const setModulePIELevel = ZigLLVMSetModulePIELevel;
- extern fn ZigLLVMSetModulePIELevel(module: *Module, large: bool) void;
-
- pub const setModuleCodeModel = ZigLLVMSetModuleCodeModel;
- extern fn ZigLLVMSetModuleCodeModel(module: *Module, code_model: CodeModel) void;
};
pub const disposeMessage = LLVMDisposeMessage;
diff --git a/src/glibc.zig b/src/glibc.zig
index 94c180f3d4..ee1b208cdd 100644
--- a/src/glibc.zig
+++ b/src/glibc.zig
@@ -16,6 +16,7 @@ const Module = @import("Package/Module.zig");
pub const Lib = struct {
name: []const u8,
sover: u8,
+ removed_in: ?Version = null,
};
pub const ABI = struct {
@@ -34,12 +35,12 @@ pub const ABI = struct {
// The order of the elements in this array defines the linking order.
pub const libs = [_]Lib{
.{ .name = "m", .sover = 6 },
- .{ .name = "pthread", .sover = 0 },
+ .{ .name = "pthread", .sover = 0, .removed_in = .{ .major = 2, .minor = 34, .patch = 0 } },
.{ .name = "c", .sover = 6 },
- .{ .name = "dl", .sover = 2 },
- .{ .name = "rt", .sover = 1 },
+ .{ .name = "dl", .sover = 2, .removed_in = .{ .major = 2, .minor = 34, .patch = 0 } },
+ .{ .name = "rt", .sover = 1, .removed_in = .{ .major = 2, .minor = 34, .patch = 0 } },
.{ .name = "ld", .sover = 2 },
- .{ .name = "util", .sover = 1 },
+ .{ .name = "util", .sover = 1, .removed_in = .{ .major = 2, .minor = 34, .patch = 0 } },
.{ .name = "resolv", .sover = 2 },
};
@@ -797,6 +798,10 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) !voi
defer stubs_asm.deinit();
for (libs, 0..) |lib, lib_i| {
+ if (lib.removed_in) |rem_in| {
+ if (target_version.order(rem_in) != .lt) continue;
+ }
+
stubs_asm.shrinkRetainingCapacity(0);
try stubs_asm.appendSlice(".text\n");
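
The new `removed_in` field lets stub generation skip libraries that glibc 2.34 absorbed into libc proper (libpthread, libdl, librt, libutil) whenever the requested target version is 2.34 or newer. A sketch of the version comparison the loop relies on (assuming `Version` in this file is `std.SemanticVersion`):

```zig
const std = @import("std");

test "removed_in check skips libs at or past glibc 2.34" {
    const removed_in: std.SemanticVersion = .{ .major = 2, .minor = 34, .patch = 0 };
    const target: std.SemanticVersion = .{ .major = 2, .minor = 35, .patch = 0 };
    // Mirrors `target_version.order(rem_in) != .lt`: true means "skip the stub".
    try std.testing.expect(target.order(removed_in) != .lt);
}
```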
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index c3197f7651..bff33ecf14 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -261,7 +261,6 @@ pub const Section = struct {
index: u32,
first: Unit.Index.Optional,
last: Unit.Index.Optional,
- off: u64,
len: u64,
units: std.ArrayListUnmanaged(Unit),
@@ -284,9 +283,8 @@ pub const Section = struct {
.index = std.math.maxInt(u32),
.first = .none,
.last = .none,
- .off = 0,
- .len = 0,
.units = .{},
+ .len = 0,
};
fn deinit(sec: *Section, gpa: std.mem.Allocator) void {
@@ -295,6 +293,20 @@ pub const Section = struct {
sec.* = undefined;
}
+ fn off(sec: Section, dwarf: *Dwarf) u64 {
+ if (dwarf.bin_file.cast(.elf)) |elf_file| {
+ const zo = elf_file.zigObjectPtr().?;
+ const atom = zo.symbol(sec.index).atom(elf_file).?;
+ return atom.offset(elf_file);
+ } else if (dwarf.bin_file.cast(.macho)) |macho_file| {
+ const header = if (macho_file.d_sym) |d_sym|
+ d_sym.sections.items[sec.index]
+ else
+ macho_file.sections.items(.header)[sec.index];
+ return header.offset;
+ } else unreachable;
+ }
+
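Replacing the cached Section.off field with this accessor means the file offset is recomputed from the owning atom (ELF) or section header (Mach-O) on every use, so it can no longer go stale when the linker moves the section. Call sites change mechanically throughout the rest of this file:

    // before: try dwarf.getFile().?.pwriteAll(contents, sec.off + unit.off);
    // after:  try dwarf.getFile().?.pwriteAll(contents, sec.off(dwarf) + unit.off);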
fn addUnit(sec: *Section, header_len: u32, trailer_len: u32, dwarf: *Dwarf) UpdateError!Unit.Index {
const unit: Unit.Index = @enumFromInt(sec.units.items.len);
const unit_ptr = try sec.units.addOne(dwarf.gpa);
@@ -306,9 +318,9 @@ pub const Section = struct {
.next = .none,
.first = .none,
.last = .none,
- .off = 0,
.header_len = aligned_header_len,
.trailer_len = aligned_trailer_len,
+ .off = 0,
.len = aligned_header_len + aligned_trailer_len,
.entries = .{},
.cross_unit_relocs = .{},
@@ -375,12 +387,16 @@ pub const Section = struct {
fn resize(sec: *Section, dwarf: *Dwarf, len: u64) UpdateError!void {
if (len <= sec.len) return;
if (dwarf.bin_file.cast(.elf)) |elf_file| {
+ const zo = elf_file.zigObjectPtr().?;
+ const atom = zo.symbol(sec.index).atom(elf_file).?;
+ const shndx = atom.output_section_index;
if (sec == &dwarf.debug_frame.section)
- try elf_file.growAllocSection(sec.index, len)
+ try elf_file.growAllocSection(shndx, len, sec.alignment.toByteUnits().?)
else
- try elf_file.growNonAllocSection(sec.index, len, @intCast(sec.alignment.toByteUnits().?), true);
- const shdr = &elf_file.sections.items(.shdr)[sec.index];
- sec.off = shdr.sh_offset;
+ try elf_file.growNonAllocSection(shndx, len, sec.alignment.toByteUnits().?, true);
+ const shdr = elf_file.sections.items(.shdr)[shndx];
+ atom.size = shdr.sh_size;
+ atom.alignment = InternPool.Alignment.fromNonzeroByteUnits(shdr.sh_addralign);
sec.len = shdr.sh_size;
} else if (dwarf.bin_file.cast(.macho)) |macho_file| {
const header = if (macho_file.d_sym) |*d_sym| header: {
@@ -390,7 +406,6 @@ pub const Section = struct {
try macho_file.growSection(@intCast(sec.index), len);
break :header &macho_file.sections.items(.header)[sec.index];
};
- sec.off = header.offset;
sec.len = header.size;
}
}
@@ -399,18 +414,21 @@ pub const Section = struct {
const len = sec.getUnit(sec.first.unwrap() orelse return).off;
if (len == 0) return;
for (sec.units.items) |*unit| unit.off -= len;
- sec.off += len;
sec.len -= len;
if (dwarf.bin_file.cast(.elf)) |elf_file| {
- const shdr = &elf_file.sections.items(.shdr)[sec.index];
- shdr.sh_offset = sec.off;
+ const zo = elf_file.zigObjectPtr().?;
+ const atom = zo.symbol(sec.index).atom(elf_file).?;
+ const shndx = atom.output_section_index;
+ const shdr = &elf_file.sections.items(.shdr)[shndx];
+ atom.size = sec.len;
+ shdr.sh_offset += len;
shdr.sh_size = sec.len;
} else if (dwarf.bin_file.cast(.macho)) |macho_file| {
const header = if (macho_file.d_sym) |*d_sym|
&d_sym.sections.items[sec.index]
else
&macho_file.sections.items(.header)[sec.index];
- header.offset = @intCast(sec.off);
+ header.offset += @intCast(len);
header.size = sec.len;
}
}
@@ -539,9 +557,9 @@ const Unit = struct {
fn move(unit: *Unit, sec: *Section, dwarf: *Dwarf, new_off: u32) UpdateError!void {
if (unit.off == new_off) return;
if (try dwarf.getFile().?.copyRangeAll(
- sec.off + unit.off,
+ sec.off(dwarf) + unit.off,
dwarf.getFile().?,
- sec.off + new_off,
+ sec.off(dwarf) + new_off,
unit.len,
) != unit.len) return error.InputOutput;
unit.off = new_off;
@@ -573,7 +591,7 @@ const Unit = struct {
fn replaceHeader(unit: *Unit, sec: *Section, dwarf: *Dwarf, contents: []const u8) UpdateError!void {
assert(contents.len == unit.header_len);
- try dwarf.getFile().?.pwriteAll(contents, sec.off + unit.off);
+ try dwarf.getFile().?.pwriteAll(contents, sec.off(dwarf) + unit.off);
}
fn writeTrailer(unit: *Unit, sec: *Section, dwarf: *Dwarf) UpdateError!void {
@@ -605,7 +623,7 @@ const Unit = struct {
assert(fbs.pos == extended_op_bytes + op_len_bytes);
writer.writeByte(DW.LNE.padding) catch unreachable;
assert(fbs.pos >= unit.trailer_len and fbs.pos <= len);
- return dwarf.getFile().?.pwriteAll(fbs.getWritten(), sec.off + start);
+ return dwarf.getFile().?.pwriteAll(fbs.getWritten(), sec.off(dwarf) + start);
}
var trailer = try std.ArrayList(u8).initCapacity(dwarf.gpa, len);
defer trailer.deinit();
@@ -664,11 +682,11 @@ const Unit = struct {
assert(trailer.items.len == unit.trailer_len);
trailer.appendNTimesAssumeCapacity(fill_byte, len - unit.trailer_len);
assert(trailer.items.len == len);
- try dwarf.getFile().?.pwriteAll(trailer.items, sec.off + start);
+ try dwarf.getFile().?.pwriteAll(trailer.items, sec.off(dwarf) + start);
}
fn resolveRelocs(unit: *Unit, sec: *Section, dwarf: *Dwarf) RelocError!void {
- const unit_off = sec.off + unit.off;
+ const unit_off = sec.off(dwarf) + unit.off;
for (unit.cross_unit_relocs.items) |reloc| {
const target_unit = sec.getUnit(reloc.target_unit);
try dwarf.resolveReloc(
@@ -755,12 +773,12 @@ const Entry = struct {
dwarf.writeInt(unit_len[0..dwarf.sectionOffsetBytes()], len - dwarf.unitLengthBytes());
try dwarf.getFile().?.pwriteAll(
unit_len[0..dwarf.sectionOffsetBytes()],
- sec.off + unit.off + unit.header_len + entry.off,
+ sec.off(dwarf) + unit.off + unit.header_len + entry.off,
);
const buf = try dwarf.gpa.alloc(u8, len - entry.len);
defer dwarf.gpa.free(buf);
@memset(buf, DW.CFA.nop);
- try dwarf.getFile().?.pwriteAll(buf, sec.off + unit.off + unit.header_len + start);
+ try dwarf.getFile().?.pwriteAll(buf, sec.off(dwarf) + unit.off + unit.header_len + start);
return;
}
const len = unit.getEntry(entry.next.unwrap() orelse return).off - start;
@@ -816,7 +834,7 @@ const Entry = struct {
},
} else assert(!sec.pad_to_ideal and len == 0);
assert(fbs.pos <= len);
- try dwarf.getFile().?.pwriteAll(fbs.getWritten(), sec.off + unit.off + unit.header_len + start);
+ try dwarf.getFile().?.pwriteAll(fbs.getWritten(), sec.off(dwarf) + unit.off + unit.header_len + start);
}
fn resize(entry_ptr: *Entry, unit: *Unit, sec: *Section, dwarf: *Dwarf, len: u32) UpdateError!void {
@@ -851,15 +869,15 @@ const Entry = struct {
fn replace(entry_ptr: *Entry, unit: *Unit, sec: *Section, dwarf: *Dwarf, contents: []const u8) UpdateError!void {
assert(contents.len == entry_ptr.len);
- try dwarf.getFile().?.pwriteAll(contents, sec.off + unit.off + unit.header_len + entry_ptr.off);
+ try dwarf.getFile().?.pwriteAll(contents, sec.off(dwarf) + unit.off + unit.header_len + entry_ptr.off);
if (false) {
const buf = try dwarf.gpa.alloc(u8, sec.len);
defer dwarf.gpa.free(buf);
- _ = try dwarf.getFile().?.preadAll(buf, sec.off);
+ _ = try dwarf.getFile().?.preadAll(buf, sec.off(dwarf));
log.info("Section{{ .first = {}, .last = {}, .off = 0x{x}, .len = 0x{x} }}", .{
@intFromEnum(sec.first),
@intFromEnum(sec.last),
- sec.off,
+ sec.off(dwarf),
sec.len,
});
for (sec.units.items) |*unit_ptr| {
@@ -891,9 +909,11 @@ const Entry = struct {
if (std.debug.runtime_safety) {
log.err("missing {} from {s}", .{
@as(Entry.Index, @enumFromInt(entry - unit.entries.items.ptr)),
- std.mem.sliceTo(if (dwarf.bin_file.cast(.elf)) |elf_file|
- elf_file.shstrtab.items[elf_file.sections.items(.shdr)[sec.index].sh_name..]
- else if (dwarf.bin_file.cast(.macho)) |macho_file|
+ std.mem.sliceTo(if (dwarf.bin_file.cast(.elf)) |elf_file| sh_name: {
+ const zo = elf_file.zigObjectPtr().?;
+ const shndx = zo.symbol(sec.index).atom(elf_file).?.output_section_index;
+ break :sh_name elf_file.shstrtab.items[elf_file.sections.items(.shdr)[shndx].sh_name..];
+ } else if (dwarf.bin_file.cast(.macho)) |macho_file|
if (macho_file.d_sym) |*d_sym|
&d_sym.sections.items[sec.index].segname
else
@@ -924,7 +944,7 @@ const Entry = struct {
}
fn resolveRelocs(entry: *Entry, unit: *Unit, sec: *Section, dwarf: *Dwarf) RelocError!void {
- const entry_off = sec.off + unit.off + unit.header_len + entry.off;
+ const entry_off = sec.off(dwarf) + unit.off + unit.header_len + entry.off;
for (entry.cross_entry_relocs.items) |reloc| {
try dwarf.resolveReloc(
entry_off + reloc.source_off,
@@ -961,7 +981,8 @@ const Entry = struct {
.none, .debug_frame => {},
.eh_frame => return if (dwarf.bin_file.cast(.elf)) |elf_file| {
const zo = elf_file.zigObjectPtr().?;
- const entry_addr: i64 = @intCast(entry_off - sec.off + elf_file.shdrs.items[sec.index].sh_addr);
+ const shndx = zo.symbol(sec.index).atom(elf_file).?.output_section_index;
+ const entry_addr: i64 = @intCast(entry_off - sec.off(dwarf) + elf_file.shdrs.items[shndx].sh_addr);
for (entry.external_relocs.items) |reloc| {
const symbol = zo.symbol(reloc.target_sym);
try dwarf.resolveReloc(
@@ -1877,34 +1898,7 @@ pub fn init(lf: *link.File, format: DW.Format) Dwarf {
}
pub fn reloadSectionMetadata(dwarf: *Dwarf) void {
- if (dwarf.bin_file.cast(.elf)) |elf_file| {
- for ([_]*Section{
- &dwarf.debug_abbrev.section,
- &dwarf.debug_aranges.section,
- &dwarf.debug_frame.section,
- &dwarf.debug_info.section,
- &dwarf.debug_line.section,
- &dwarf.debug_line_str.section,
- &dwarf.debug_loclists.section,
- &dwarf.debug_rnglists.section,
- &dwarf.debug_str.section,
- }, [_]u32{
- elf_file.debug_abbrev_section_index.?,
- elf_file.debug_aranges_section_index.?,
- elf_file.eh_frame_section_index.?,
- elf_file.debug_info_section_index.?,
- elf_file.debug_line_section_index.?,
- elf_file.debug_line_str_section_index.?,
- elf_file.debug_loclists_section_index.?,
- elf_file.debug_rnglists_section_index.?,
- elf_file.debug_str_section_index.?,
- }) |sec, section_index| {
- const shdr = &elf_file.sections.items(.shdr)[section_index];
- sec.index = section_index;
- sec.off = shdr.sh_offset;
- sec.len = shdr.sh_size;
- }
- } else if (dwarf.bin_file.cast(.macho)) |macho_file| {
+ if (dwarf.bin_file.cast(.macho)) |macho_file| {
if (macho_file.d_sym) |*d_sym| {
for ([_]*Section{
&dwarf.debug_abbrev.section,
@@ -1927,7 +1921,6 @@ pub fn reloadSectionMetadata(dwarf: *Dwarf) void {
}) |sec, sect_index| {
const header = &d_sym.sections.items[sect_index];
sec.index = sect_index;
- sec.off = header.offset;
sec.len = header.size;
}
} else {
@@ -1952,7 +1945,6 @@ pub fn reloadSectionMetadata(dwarf: *Dwarf) void {
}) |sec, sect_index| {
const header = &macho_file.sections.items(.header)[sect_index];
sec.index = sect_index;
- sec.off = header.offset;
sec.len = header.size;
}
}
@@ -1960,6 +1952,32 @@ pub fn reloadSectionMetadata(dwarf: *Dwarf) void {
}
pub fn initMetadata(dwarf: *Dwarf) UpdateError!void {
+ if (dwarf.bin_file.cast(.elf)) |elf_file| {
+ const zo = elf_file.zigObjectPtr().?;
+ for ([_]*Section{
+ &dwarf.debug_abbrev.section,
+ &dwarf.debug_aranges.section,
+ &dwarf.debug_frame.section,
+ &dwarf.debug_info.section,
+ &dwarf.debug_line.section,
+ &dwarf.debug_line_str.section,
+ &dwarf.debug_loclists.section,
+ &dwarf.debug_rnglists.section,
+ &dwarf.debug_str.section,
+ }, [_]u32{
+ zo.debug_abbrev_index.?,
+ zo.debug_aranges_index.?,
+ zo.eh_frame_index.?,
+ zo.debug_info_index.?,
+ zo.debug_line_index.?,
+ zo.debug_line_str_index.?,
+ zo.debug_loclists_index.?,
+ zo.debug_rnglists_index.?,
+ zo.debug_str_index.?,
+ }) |sec, sym_index| {
+ sec.index = sym_index;
+ }
+ }
dwarf.reloadSectionMetadata();
dwarf.debug_abbrev.section.pad_to_ideal = false;
@@ -2523,7 +2541,7 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool
var abbrev_code_buf: [AbbrevCode.decl_bytes]u8 = undefined;
if (try dwarf.getFile().?.preadAll(
&abbrev_code_buf,
- dwarf.debug_info.section.off + unit_ptr.off + unit_ptr.header_len + entry_ptr.off,
+ dwarf.debug_info.section.off(dwarf) + unit_ptr.off + unit_ptr.header_len + entry_ptr.off,
) != abbrev_code_buf.len) return error.InputOutput;
var abbrev_code_fbs = std.io.fixedBufferStream(&abbrev_code_buf);
const abbrev_code: AbbrevCode = @enumFromInt(
@@ -3934,7 +3952,7 @@ pub fn flushModule(dwarf: *Dwarf, pt: Zcu.PerThread) FlushError!void {
if (dwarf.debug_str.section.dirty) {
const contents = dwarf.debug_str.contents.items;
try dwarf.debug_str.section.resize(dwarf, contents.len);
- try dwarf.getFile().?.pwriteAll(contents, dwarf.debug_str.section.off);
+ try dwarf.getFile().?.pwriteAll(contents, dwarf.debug_str.section.off(dwarf));
dwarf.debug_str.section.dirty = false;
}
if (dwarf.debug_line.section.dirty) {
@@ -4040,7 +4058,7 @@ pub fn flushModule(dwarf: *Dwarf, pt: Zcu.PerThread) FlushError!void {
if (dwarf.debug_line_str.section.dirty) {
const contents = dwarf.debug_line_str.contents.items;
try dwarf.debug_line_str.section.resize(dwarf, contents.len);
- try dwarf.getFile().?.pwriteAll(contents, dwarf.debug_line_str.section.off);
+ try dwarf.getFile().?.pwriteAll(contents, dwarf.debug_line_str.section.off(dwarf));
dwarf.debug_line_str.section.dirty = false;
}
if (dwarf.debug_loclists.section.dirty) {
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index e577b8d45a..333501b29f 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -54,16 +54,6 @@ shdr_table_offset: ?u64 = null,
/// Same order as in the file.
phdrs: std.ArrayListUnmanaged(elf.Elf64_Phdr) = .{},
-/// Tracked loadable segments during incremental linking.
-/// The index into the program headers of a PT_LOAD program header with Read and Execute flags
-phdr_zig_load_re_index: ?u16 = null,
-/// The index into the program headers of a PT_LOAD program header with Read flag
-phdr_zig_load_ro_index: ?u16 = null,
-/// The index into the program headers of a PT_LOAD program header with Write flag
-phdr_zig_load_rw_index: ?u16 = null,
-/// The index into the program headers of a PT_LOAD program header with zerofill data.
-phdr_zig_load_zerofill_index: ?u16 = null,
-
/// Special program headers
/// PT_PHDR
phdr_table_index: ?u16 = null,
@@ -124,22 +114,6 @@ rela_plt: std.ArrayListUnmanaged(elf.Elf64_Rela) = .{},
/// Applies only to a relocatable.
comdat_group_sections: std.ArrayListUnmanaged(ComdatGroupSection) = .{},
-/// Tracked section headers with incremental updates to Zig object.
-/// .rela.* sections are only used when emitting a relocatable object file.
-zig_text_section_index: ?u32 = null,
-zig_data_rel_ro_section_index: ?u32 = null,
-zig_data_section_index: ?u32 = null,
-zig_bss_section_index: ?u32 = null,
-
-debug_info_section_index: ?u32 = null,
-debug_abbrev_section_index: ?u32 = null,
-debug_str_section_index: ?u32 = null,
-debug_aranges_section_index: ?u32 = null,
-debug_line_section_index: ?u32 = null,
-debug_line_str_section_index: ?u32 = null,
-debug_loclists_section_index: ?u32 = null,
-debug_rnglists_section_index: ?u32 = null,
-
copy_rel_section_index: ?u32 = null,
dynamic_section_index: ?u32 = null,
dynstrtab_section_index: ?u32 = null,
@@ -419,7 +393,8 @@ pub fn deinit(self: *Elf) void {
self.objects.deinit(gpa);
self.shared_objects.deinit(gpa);
- for (self.sections.items(.atom_list), self.sections.items(.free_list)) |*atoms, *free_list| {
+ for (self.sections.items(.atom_list_2), self.sections.items(.atom_list), self.sections.items(.free_list)) |*atom_list, *atoms, *free_list| {
+ atom_list.deinit(gpa);
atoms.deinit(gpa);
free_list.deinit(gpa);
}
@@ -554,7 +529,7 @@ pub fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u64) !u64 {
return start;
}
-pub fn growAllocSection(self: *Elf, shdr_index: u32, needed_size: u64) !void {
+pub fn growAllocSection(self: *Elf, shdr_index: u32, needed_size: u64, min_alignment: u64) !void {
const slice = self.sections.slice();
const shdr = &slice.items(.shdr)[shdr_index];
assert(shdr.sh_flags & elf.SHF_ALLOC != 0);
@@ -573,8 +548,7 @@ pub fn growAllocSection(self: *Elf, shdr_index: u32, needed_size: u64) !void {
const existing_size = shdr.sh_size;
shdr.sh_size = 0;
// Must move the entire section.
- const alignment = if (maybe_phdr) |phdr| phdr.p_align else shdr.sh_addralign;
- const new_offset = try self.findFreeSpace(needed_size, alignment);
+ const new_offset = try self.findFreeSpace(needed_size, min_alignment);
log.debug("new '{s}' file offset 0x{x} to 0x{x}", .{
self.getShString(shdr.sh_name),
@@ -614,7 +588,7 @@ pub fn growNonAllocSection(
self: *Elf,
shdr_index: u32,
needed_size: u64,
- min_alignment: u32,
+ min_alignment: u64,
requires_file_copy: bool,
) !void {
const shdr = &self.sections.items(.shdr)[shdr_index];
@@ -648,33 +622,124 @@ pub fn growNonAllocSection(
try self.base.file.?.setEndPos(shdr.sh_offset + needed_size);
}
shdr.sh_size = needed_size;
-
self.markDirty(shdr_index);
}
pub fn markDirty(self: *Elf, shdr_index: u32) void {
- const zig_object = self.zigObjectPtr().?;
- if (zig_object.dwarf) |_| {
- if (self.debug_info_section_index.? == shdr_index) {
- zig_object.debug_info_section_dirty = true;
- } else if (self.debug_abbrev_section_index.? == shdr_index) {
- zig_object.debug_abbrev_section_dirty = true;
- } else if (self.debug_str_section_index.? == shdr_index) {
- zig_object.debug_str_section_dirty = true;
- } else if (self.debug_aranges_section_index.? == shdr_index) {
- zig_object.debug_aranges_section_dirty = true;
- } else if (self.debug_line_section_index.? == shdr_index) {
- zig_object.debug_line_section_dirty = true;
- } else if (self.debug_line_str_section_index.? == shdr_index) {
- zig_object.debug_line_str_section_dirty = true;
- } else if (self.debug_loclists_section_index.? == shdr_index) {
- zig_object.debug_loclists_section_dirty = true;
- } else if (self.debug_rnglists_section_index.? == shdr_index) {
- zig_object.debug_rnglists_section_dirty = true;
+ if (self.zigObjectPtr()) |zo| {
+ for ([_]?Symbol.Index{
+ zo.debug_info_index,
+ zo.debug_abbrev_index,
+ zo.debug_aranges_index,
+ zo.debug_str_index,
+ zo.debug_line_index,
+ zo.debug_line_str_index,
+ zo.debug_loclists_index,
+ zo.debug_rnglists_index,
+ }, [_]*bool{
+ &zo.debug_info_section_dirty,
+ &zo.debug_abbrev_section_dirty,
+ &zo.debug_aranges_section_dirty,
+ &zo.debug_str_section_dirty,
+ &zo.debug_line_section_dirty,
+ &zo.debug_line_str_section_dirty,
+ &zo.debug_loclists_section_dirty,
+ &zo.debug_rnglists_section_dirty,
+ }) |maybe_sym_index, dirty| {
+ const sym_index = maybe_sym_index orelse continue;
+ if (zo.symbol(sym_index).atom(self).?.output_section_index == shdr_index) {
+ dirty.* = true;
+ break;
+ }
}
}
}
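The rewritten markDirty walks two parallel arrays, one of optional section symbols and one of dirty-flag pointers, instead of a chain of if/else comparisons against section indices. A self-contained sketch of the same multi-object loop shape, with illustrative values rather than the linker's types:

    const std = @import("std");

    test "paired-array dirty marking" {
        const section_indices = [_]?u32{ null, 7, 3 }; // null: section not allocated yet
        var dirty = [_]bool{ false, false, false };
        const changed_shndx: u32 = 3;
        for (section_indices, &dirty) |maybe_index, *flag| {
            const index = maybe_index orelse continue;
            if (index == changed_shndx) {
                flag.* = true;
                break;
            }
        }
        try std.testing.expect(dirty[2]);
    }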
+const AllocateChunkResult = struct {
+ value: u64,
+ placement: Ref,
+};
+
+pub fn allocateChunk(self: *Elf, args: struct {
+ size: u64,
+ shndx: u32,
+ alignment: Atom.Alignment,
+ requires_padding: bool = true,
+}) !AllocateChunkResult {
+ const slice = self.sections.slice();
+ const shdr = &slice.items(.shdr)[args.shndx];
+ const free_list = &slice.items(.free_list)[args.shndx];
+ const last_atom_ref = &slice.items(.last_atom)[args.shndx];
+ const new_atom_ideal_capacity = if (args.requires_padding) padToIdeal(args.size) else args.size;
+
+ // First we look for an appropriately sized free list node.
+ // The list is unordered. We'll just take the first thing that works.
+ const res: AllocateChunkResult = blk: {
+ var i: usize = if (self.base.child_pid == null) 0 else free_list.items.len;
+ while (i < free_list.items.len) {
+ const big_atom_ref = free_list.items[i];
+ const big_atom = self.atom(big_atom_ref).?;
+ // We now have a pointer to a live atom that has too much capacity.
+ // Is it enough that we could fit this new atom?
+ const cap = big_atom.capacity(self);
+ const ideal_capacity = if (args.requires_padding) padToIdeal(cap) else cap;
+ const ideal_capacity_end_vaddr = std.math.add(u64, @intCast(big_atom.value), ideal_capacity) catch ideal_capacity;
+ const capacity_end_vaddr = @as(u64, @intCast(big_atom.value)) + cap;
+ const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity;
+ const new_start_vaddr = args.alignment.backward(new_start_vaddr_unaligned);
+ if (new_start_vaddr < ideal_capacity_end_vaddr) {
+ // Additional bookkeeping here to notice if this free list node
+ // should be deleted because the block that it points to has grown to take up
+ // more of the extra capacity.
+ if (!big_atom.freeListEligible(self)) {
+ _ = free_list.swapRemove(i);
+ } else {
+ i += 1;
+ }
+ continue;
+ }
+        // At this point we know that we will place the new block here. But the
+        // remaining question is whether there is still enough capacity left
+        // over to keep a free list node.
+ const remaining_capacity = new_start_vaddr - ideal_capacity_end_vaddr;
+ const keep_free_list_node = remaining_capacity >= min_text_capacity;
+
+ if (!keep_free_list_node) {
+ _ = free_list.swapRemove(i);
+ }
+ break :blk .{ .value = new_start_vaddr, .placement = big_atom_ref };
+ } else if (self.atom(last_atom_ref.*)) |last_atom| {
+ const ideal_capacity = if (args.requires_padding) padToIdeal(last_atom.size) else last_atom.size;
+ const ideal_capacity_end_vaddr = @as(u64, @intCast(last_atom.value)) + ideal_capacity;
+ const new_start_vaddr = args.alignment.forward(ideal_capacity_end_vaddr);
+ break :blk .{ .value = new_start_vaddr, .placement = last_atom.ref() };
+ } else {
+ break :blk .{ .value = 0, .placement = .{} };
+ }
+ };
+
+ log.debug("allocated chunk (size({x}),align({x})) at 0x{x} (file(0x{x}))", .{
+ args.size,
+ args.alignment.toByteUnits().?,
+ shdr.sh_addr + res.value,
+ shdr.sh_offset + res.value,
+ });
+
+ const expand_section = if (self.atom(res.placement)) |placement_atom|
+ placement_atom.nextAtom(self) == null
+ else
+ true;
+ if (expand_section) {
+ const needed_size = res.value + args.size;
+ if (shdr.sh_flags & elf.SHF_ALLOC != 0)
+ try self.growAllocSection(args.shndx, needed_size, args.alignment.toByteUnits().?)
+ else
+ try self.growNonAllocSection(args.shndx, needed_size, args.alignment.toByteUnits().?, true);
+ }
+
+ return res;
+}
+
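allocateChunk keeps the first-fit strategy of the old per-atom allocator: claim the tail of an oversized atom's capacity and align the new chunk's start downward so it still ends at the capacity boundary. The placement arithmetic in isolation, with std.mem standing in for the Alignment helper type and illustrative numbers:

    const std = @import("std");

    test "backward-aligned placement in surplus capacity" {
        const big_atom_value: u64 = 0x1000; // start of the atom with spare room
        const capacity: u64 = 0x300; // bytes it currently owns
        const new_size: u64 = 0x48;
        const alignment: u64 = 16;

        const capacity_end = big_atom_value + capacity;
        const unaligned_start = capacity_end - new_size;
        const new_start = std.mem.alignBackward(u64, unaligned_start, alignment);

        // The chunk fits inside the surplus and respects its alignment.
        try std.testing.expect(new_start + new_size <= capacity_end);
        try std.testing.expect(new_start % alignment == 0);
    }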
pub fn flush(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
const use_lld = build_options.have_llvm and self.base.comp.config.use_lld;
if (use_lld) {
@@ -840,6 +905,10 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod
} else if (target.isGnuLibC()) {
try system_libs.ensureUnusedCapacity(glibc.libs.len + 1);
for (glibc.libs) |lib| {
+ if (lib.removed_in) |rem_in| {
+ if (target.os.version_range.linux.glibc.order(rem_in) != .lt) continue;
+ }
+
const lib_path = try std.fmt.allocPrint(arena, "{s}{c}lib{s}.so.{d}", .{
comp.glibc_so_files.?.dir_path, fs.path.sep, lib.name, lib.sover,
});
@@ -968,14 +1037,13 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod
try self.initSyntheticSections();
try self.initSpecialPhdrs();
try self.sortShdrs();
- for (self.objects.items) |index| {
- try self.file(index).?.object.addAtomsToOutputSections(self);
- }
- try self.sortInitFini();
+
try self.setDynamicSection(rpath_table.keys());
self.sortDynamicSymtab();
try self.setHashSections();
try self.setVersionSymtab();
+
+ try self.sortInitFini();
try self.updateMergeSectionSizes();
try self.updateSectionSizes();
@@ -1006,7 +1074,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod
if (shdr.sh_type == elf.SHT_NOBITS) continue;
const code = try zo.codeAlloc(self, atom_index);
defer gpa.free(code);
- const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value));
+ const file_offset = atom_ptr.offset(self);
atom_ptr.resolveRelocsAlloc(self, code) catch |err| switch (err) {
error.RelocFailure, error.RelaxFailure => has_reloc_errors = true,
error.UnsupportedCpuArch => {
@@ -1286,6 +1354,10 @@ fn dumpArgv(self: *Elf, comp: *Compilation) !void {
if (needs_grouping) try argv.append("--end-group");
} else if (target.isGnuLibC()) {
for (glibc.libs) |lib| {
+ if (lib.removed_in) |rem_in| {
+ if (target.os.version_range.linux.glibc.order(rem_in) != .lt) continue;
+ }
+
const lib_path = try std.fmt.allocPrint(arena, "{s}{c}lib{s}.so.{d}", .{
comp.glibc_so_files.?.dir_path, fs.path.sep, lib.name, lib.sover,
});
@@ -1742,6 +1814,59 @@ fn scanRelocs(self: *Elf) !void {
}
}
+pub fn initOutputSection(self: *Elf, args: struct {
+ name: [:0]const u8,
+ flags: u64,
+ type: u32,
+}) error{OutOfMemory}!u32 {
+ const name = blk: {
+ if (self.base.isRelocatable()) break :blk args.name;
+ if (args.flags & elf.SHF_MERGE != 0) break :blk args.name;
+ const name_prefixes: []const [:0]const u8 = &.{
+ ".text", ".data.rel.ro", ".data", ".rodata", ".bss.rel.ro", ".bss",
+ ".init_array", ".fini_array", ".tbss", ".tdata", ".gcc_except_table", ".ctors",
+ ".dtors", ".gnu.warning",
+ };
+ inline for (name_prefixes) |prefix| {
+ if (std.mem.eql(u8, args.name, prefix) or std.mem.startsWith(u8, args.name, prefix ++ ".")) {
+ break :blk prefix;
+ }
+ }
+ break :blk args.name;
+ };
+ const @"type" = tt: {
+ if (self.getTarget().cpu.arch == .x86_64 and args.type == elf.SHT_X86_64_UNWIND)
+ break :tt elf.SHT_PROGBITS;
+ switch (args.type) {
+ elf.SHT_NULL => unreachable,
+ elf.SHT_PROGBITS => {
+ if (std.mem.eql(u8, args.name, ".init_array") or std.mem.startsWith(u8, args.name, ".init_array."))
+ break :tt elf.SHT_INIT_ARRAY;
+ if (std.mem.eql(u8, args.name, ".fini_array") or std.mem.startsWith(u8, args.name, ".fini_array."))
+ break :tt elf.SHT_FINI_ARRAY;
+ break :tt args.type;
+ },
+ else => break :tt args.type,
+ }
+ };
+ const flags = blk: {
+ var flags = args.flags;
+ if (!self.base.isRelocatable()) {
+ flags &= ~@as(u64, elf.SHF_COMPRESSED | elf.SHF_GROUP | elf.SHF_GNU_RETAIN);
+ }
+ break :blk switch (@"type") {
+ elf.SHT_INIT_ARRAY, elf.SHT_FINI_ARRAY => flags | elf.SHF_WRITE,
+ else => flags,
+ };
+ };
+ const out_shndx = self.sectionByName(name) orelse try self.addSection(.{
+ .type = @"type",
+ .flags = flags,
+ .name = try self.insertShString(name),
+ });
+ return out_shndx;
+}
+
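initOutputSection folds input section names into a canonical output prefix (".text.unlikely.main" lands in ".text"), but only when producing a final binary; relocatable output and SHF_MERGE sections keep their exact names. A minimal sketch of the prefix rule using only std:

    const std = @import("std");

    test "output section name folding" {
        const prefixes = [_][]const u8{ ".text", ".data.rel.ro", ".data", ".rodata" };
        const input: []const u8 = ".text.unlikely.main";
        const folded = for (prefixes) |prefix| {
            // Exact match, or prefix followed by '.' (".textual" must not fold).
            if (std.mem.eql(u8, input, prefix) or
                (std.mem.startsWith(u8, input, prefix) and
                    input.len > prefix.len and input[prefix.len] == '.'))
            {
                break prefix;
            }
        } else input;
        try std.testing.expectEqualStrings(".text", folded);
    }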
fn linkWithLLD(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void {
dev.check(.lld_linker);
@@ -2288,6 +2413,10 @@ fn linkWithLLD(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: s
if (needs_grouping) try argv.append("--end-group");
} else if (target.isGnuLibC()) {
for (glibc.libs) |lib| {
+ if (lib.removed_in) |rem_in| {
+ if (target.os.version_range.linux.glibc.order(rem_in) != .lt) continue;
+ }
+
const lib_path = try std.fmt.allocPrint(arena, "{s}{c}lib{s}.so.{d}", .{
comp.glibc_so_files.?.dir_path, fs.path.sep, lib.name, lib.sover,
});
@@ -2771,12 +2900,16 @@ fn initSyntheticSections(self: *Elf) !void {
const target = self.getTarget();
const ptr_size = self.ptrWidthBytes();
- const needs_eh_frame = for (self.objects.items) |index| {
- if (self.file(index).?.object.cies.items.len > 0) break true;
- } else false;
+ const needs_eh_frame = blk: {
+ if (self.zigObjectPtr()) |zo|
+ if (zo.eh_frame_index != null) break :blk true;
+ break :blk for (self.objects.items) |index| {
+ if (self.file(index).?.object.cies.items.len > 0) break true;
+ } else false;
+ };
if (needs_eh_frame) {
if (self.eh_frame_section_index == null) {
- self.eh_frame_section_index = try self.addSection(.{
+ self.eh_frame_section_index = self.sectionByName(".eh_frame") orelse try self.addSection(.{
.name = try self.insertShString(".eh_frame"),
.type = if (target.cpu.arch == .x86_64)
elf.SHT_X86_64_UNWIND
@@ -3072,8 +3205,9 @@ fn sortInitFini(self: *Elf) !void {
}
};
- for (slice.items(.shdr), slice.items(.atom_list)) |shdr, *atom_list| {
+ for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, *atom_list| {
if (shdr.sh_flags & elf.SHF_ALLOC == 0) continue;
+ if (atom_list.atoms.items.len == 0) continue;
var is_init_fini = false;
var is_ctor_dtor = false;
@@ -3087,15 +3221,13 @@ fn sortInitFini(self: *Elf) !void {
is_ctor_dtor = mem.indexOf(u8, name, ".ctors") != null or mem.indexOf(u8, name, ".dtors") != null;
},
}
-
if (!is_init_fini and !is_ctor_dtor) continue;
- if (atom_list.items.len == 0) continue;
var entries = std.ArrayList(Entry).init(gpa);
- try entries.ensureTotalCapacityPrecise(atom_list.items.len);
+ try entries.ensureTotalCapacityPrecise(atom_list.atoms.items.len);
defer entries.deinit();
- for (atom_list.items) |ref| {
+ for (atom_list.atoms.items) |ref| {
const atom_ptr = self.atom(ref).?;
const object = atom_ptr.file(self).?.object;
const priority = blk: {
@@ -3114,9 +3246,9 @@ fn sortInitFini(self: *Elf) !void {
mem.sort(Entry, entries.items, self, Entry.lessThan);
- atom_list.clearRetainingCapacity();
+ atom_list.atoms.clearRetainingCapacity();
for (entries.items) |entry| {
- atom_list.appendAssumeCapacity(entry.atom_ref);
+ atom_list.atoms.appendAssumeCapacity(entry.atom_ref);
}
}
}
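sortInitFini now goes through atom_list.atoms, but the ordering itself is unchanged: collect (priority, atom) entries, sort, and write the refs back in place. A reduced sketch of that sort with a hypothetical Entry; by ELF convention lower-numbered .init_array.N sections run first and unsuffixed sections get the maximum priority:

    const std = @import("std");

    const Entry = struct {
        priority: u32,
        atom_ref: u32, // stand-in for Elf.Ref
        fn lessThan(_: void, lhs: Entry, rhs: Entry) bool {
            return lhs.priority < rhs.priority;
        }
    };

    test "init_array entries sort by priority" {
        var entries = [_]Entry{
            .{ .priority = 65535, .atom_ref = 0 }, // plain .init_array
            .{ .priority = 101, .atom_ref = 1 }, // .init_array.101
        };
        std.mem.sort(Entry, &entries, {}, Entry.lessThan);
        try std.testing.expectEqual(@as(u32, 1), entries[0].atom_ref);
    }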
@@ -3221,9 +3353,6 @@ fn sortPhdrs(self: *Elf) error{OutOfMemory}!void {
}
for (&[_]*?u16{
- &self.phdr_zig_load_re_index,
- &self.phdr_zig_load_ro_index,
- &self.phdr_zig_load_zerofill_index,
&self.phdr_table_index,
&self.phdr_table_load_index,
&self.phdr_interp_index,
@@ -3260,33 +3389,36 @@ fn shdrRank(self: *Elf, shndx: u32) u8 {
elf.SHT_PREINIT_ARRAY,
elf.SHT_INIT_ARRAY,
elf.SHT_FINI_ARRAY,
- => return 0xf2,
+ => return 0xf1,
- elf.SHT_DYNAMIC => return 0xf3,
+ elf.SHT_DYNAMIC => return 0xf2,
elf.SHT_RELA, elf.SHT_GROUP => return 0xf,
elf.SHT_PROGBITS => if (flags & elf.SHF_ALLOC != 0) {
if (flags & elf.SHF_EXECINSTR != 0) {
- return 0xf1;
+ return 0xf0;
} else if (flags & elf.SHF_WRITE != 0) {
- return if (flags & elf.SHF_TLS != 0) 0xf4 else 0xf6;
+ return if (flags & elf.SHF_TLS != 0) 0xf3 else 0xf5;
} else if (mem.eql(u8, name, ".interp")) {
return 1;
+ } else if (mem.startsWith(u8, name, ".eh_frame")) {
+ return 0xe1;
} else {
- return 0xf0;
+ return 0xe0;
}
} else {
if (mem.startsWith(u8, name, ".debug")) {
- return 0xf8;
+ return 0xf7;
} else {
- return 0xf9;
+ return 0xf8;
}
},
+ elf.SHT_X86_64_UNWIND => return 0xe1,
- elf.SHT_NOBITS => return if (flags & elf.SHF_TLS != 0) 0xf5 else 0xf7,
- elf.SHT_SYMTAB => return 0xfa,
- elf.SHT_STRTAB => return if (mem.eql(u8, name, ".dynstr")) 0x4 else 0xfb,
+ elf.SHT_NOBITS => return if (flags & elf.SHF_TLS != 0) 0xf4 else 0xf6,
+ elf.SHT_SYMTAB => return 0xf9,
+ elf.SHT_STRTAB => return if (mem.eql(u8, name, ".dynstr")) 0x4 else 0xfa,
else => return 0xff,
}
}
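The rank renumbering preserves the overall section ordering while giving .eh_frame (and SHT_X86_64_UNWIND) its own slot just after plain read-only PROGBITS. The new table, reconstructed from the branches above:

    // 0x01 .interp
    // 0x04 .dynstr
    // 0x0f SHT_RELA / SHT_GROUP
    // 0xe0 read-only alloc PROGBITS (.rodata, ...)
    // 0xe1 .eh_frame / SHT_X86_64_UNWIND
    // 0xf0 executable PROGBITS (.text)
    // 0xf1 SHT_PREINIT_ARRAY / SHT_INIT_ARRAY / SHT_FINI_ARRAY
    // 0xf2 SHT_DYNAMIC
    // 0xf3 writable TLS PROGBITS (.tdata)
    // 0xf4 TLS NOBITS (.tbss)
    // 0xf5 writable PROGBITS (.data)
    // 0xf6 NOBITS (.bss)
    // 0xf7 non-alloc .debug*
    // 0xf8 other non-alloc sections
    // 0xf9 SHT_SYMTAB
    // 0xfa SHT_STRTAB (other than .dynstr)
    // 0xff everything else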
@@ -3349,18 +3481,6 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void {
&self.copy_rel_section_index,
&self.versym_section_index,
&self.verneed_section_index,
- &self.zig_text_section_index,
- &self.zig_data_rel_ro_section_index,
- &self.zig_data_section_index,
- &self.zig_bss_section_index,
- &self.debug_info_section_index,
- &self.debug_abbrev_section_index,
- &self.debug_str_section_index,
- &self.debug_aranges_section_index,
- &self.debug_line_section_index,
- &self.debug_line_str_section_index,
- &self.debug_loclists_section_index,
- &self.debug_rnglists_section_index,
}) |maybe_index| {
if (maybe_index.*) |*index| {
index.* = backlinks[index.*];
@@ -3371,13 +3491,19 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void {
msec.output_section_index = backlinks[msec.output_section_index];
}
- for (self.sections.items(.shdr)) |*shdr| {
- if (shdr.sh_type != elf.SHT_RELA) continue;
- // FIXME:JK we should spin up .symtab potentially earlier, or set all non-dynamic RELA sections
- // to point at symtab
- // shdr.sh_link = backlinks[shdr.sh_link];
- shdr.sh_link = self.symtab_section_index.?;
- shdr.sh_info = backlinks[shdr.sh_info];
+ const slice = self.sections.slice();
+ for (slice.items(.shdr), slice.items(.atom_list_2)) |*shdr, *atom_list| {
+ atom_list.output_section_index = backlinks[atom_list.output_section_index];
+ for (atom_list.atoms.items) |ref| {
+ self.atom(ref).?.output_section_index = atom_list.output_section_index;
+ }
+ if (shdr.sh_type == elf.SHT_RELA) {
+ // FIXME:JK we should spin up .symtab potentially earlier, or set all non-dynamic RELA sections
+ // to point at symtab
+ // shdr.sh_link = backlinks[shdr.sh_link];
+ shdr.sh_link = self.symtab_section_index.?;
+ shdr.sh_info = backlinks[shdr.sh_info];
+ }
}
if (self.zigObjectPtr()) |zo| {
@@ -3385,7 +3511,6 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void {
const atom_ptr = zo.atom(atom_index) orelse continue;
atom_ptr.output_section_index = backlinks[atom_ptr.output_section_index];
}
- if (zo.dwarf) |*dwarf| dwarf.reloadSectionMetadata();
}
for (self.comdat_group_sections.items) |*cg| {
@@ -3393,53 +3518,53 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void {
}
if (self.symtab_section_index) |index| {
- const shdr = &self.sections.items(.shdr)[index];
+ const shdr = &slice.items(.shdr)[index];
shdr.sh_link = self.strtab_section_index.?;
}
if (self.dynamic_section_index) |index| {
- const shdr = &self.sections.items(.shdr)[index];
+ const shdr = &slice.items(.shdr)[index];
shdr.sh_link = self.dynstrtab_section_index.?;
}
if (self.dynsymtab_section_index) |index| {
- const shdr = &self.sections.items(.shdr)[index];
+ const shdr = &slice.items(.shdr)[index];
shdr.sh_link = self.dynstrtab_section_index.?;
}
if (self.hash_section_index) |index| {
- const shdr = &self.sections.items(.shdr)[index];
+ const shdr = &slice.items(.shdr)[index];
shdr.sh_link = self.dynsymtab_section_index.?;
}
if (self.gnu_hash_section_index) |index| {
- const shdr = &self.sections.items(.shdr)[index];
+ const shdr = &slice.items(.shdr)[index];
shdr.sh_link = self.dynsymtab_section_index.?;
}
if (self.versym_section_index) |index| {
- const shdr = &self.sections.items(.shdr)[index];
+ const shdr = &slice.items(.shdr)[index];
shdr.sh_link = self.dynsymtab_section_index.?;
}
if (self.verneed_section_index) |index| {
- const shdr = &self.sections.items(.shdr)[index];
+ const shdr = &slice.items(.shdr)[index];
shdr.sh_link = self.dynstrtab_section_index.?;
}
if (self.rela_dyn_section_index) |index| {
- const shdr = &self.sections.items(.shdr)[index];
+ const shdr = &slice.items(.shdr)[index];
shdr.sh_link = self.dynsymtab_section_index orelse 0;
}
if (self.rela_plt_section_index) |index| {
- const shdr = &self.sections.items(.shdr)[index];
+ const shdr = &slice.items(.shdr)[index];
shdr.sh_link = self.dynsymtab_section_index.?;
shdr.sh_info = self.plt_section_index.?;
}
if (self.eh_frame_rela_section_index) |index| {
- const shdr = &self.sections.items(.shdr)[index];
+ const shdr = &slice.items(.shdr)[index];
shdr.sh_link = self.symtab_section_index.?;
shdr.sh_info = self.eh_frame_section_index.?;
}
@@ -3447,37 +3572,32 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void {
fn updateSectionSizes(self: *Elf) !void {
const slice = self.sections.slice();
- for (slice.items(.shdr), slice.items(.atom_list)) |*shdr, atom_list| {
- if (atom_list.items.len == 0) continue;
+ for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, *atom_list| {
+ if (atom_list.atoms.items.len == 0) continue;
if (self.requiresThunks() and shdr.sh_flags & elf.SHF_EXECINSTR != 0) continue;
- for (atom_list.items) |ref| {
- const atom_ptr = self.atom(ref) orelse continue;
- if (!atom_ptr.alive) continue;
- const offset = atom_ptr.alignment.forward(shdr.sh_size);
- const padding = offset - shdr.sh_size;
- atom_ptr.value = @intCast(offset);
- shdr.sh_size += padding + atom_ptr.size;
- shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits() orelse 1);
- }
+ atom_list.updateSize(self);
+ try atom_list.allocate(self);
}
if (self.requiresThunks()) {
- for (slice.items(.shdr), slice.items(.atom_list), 0..) |*shdr, atom_list, shndx| {
+ for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, *atom_list| {
if (shdr.sh_flags & elf.SHF_EXECINSTR == 0) continue;
- if (atom_list.items.len == 0) continue;
+ if (atom_list.atoms.items.len == 0) continue;
// Create jump/branch range extenders if needed.
- try self.createThunks(shdr, @intCast(shndx));
+ try self.createThunks(atom_list);
+ try atom_list.allocate(self);
+ }
+
+ // FIXME:JK this will hopefully not be needed once we create a link from Atom/Thunk to AtomList.
+ for (self.thunks.items) |*th| {
+ th.value += slice.items(.atom_list_2)[th.output_section_index].value;
}
}
const shdrs = slice.items(.shdr);
if (self.eh_frame_section_index) |index| {
- shdrs[index].sh_size = existing_size: {
- const zo = self.zigObjectPtr() orelse break :existing_size 0;
- const sym = zo.symbol(zo.eh_frame_index orelse break :existing_size 0);
- break :existing_size sym.atom(self).?.size;
- } + try eh_frame.calcEhFrameSize(self);
+ shdrs[index].sh_size = try eh_frame.calcEhFrameSize(self);
}
if (self.eh_frame_hdr_section_index) |index| {
@@ -3575,13 +3695,11 @@ fn shdrToPhdrFlags(sh_flags: u64) u32 {
 /// (This is an upper bound so that we can reserve enough space for the header and program header
/// table without running out of space and being forced to move things around.)
fn getMaxNumberOfPhdrs() u64 {
- // First, assume we compile Zig's source incrementally, this gives us:
- var num: u64 = number_of_zig_segments;
- // Next, the estimated maximum number of segments the linker can emit for input sections are:
- num += max_number_of_object_segments;
- // Next, any other non-loadable program headers, including TLS, DYNAMIC, GNU_STACK, GNU_EH_FRAME, INTERP:
+    // The estimated maximum number of segments the linker can emit for input sections is:
+ var num: u64 = max_number_of_object_segments;
+ // Any other non-loadable program headers, including TLS, DYNAMIC, GNU_STACK, GNU_EH_FRAME, INTERP:
num += max_number_of_special_phdrs;
- // Finally, PHDR program header and corresponding read-only load segment:
+ // PHDR program header and corresponding read-only load segment:
num += 2;
return num;
}
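With the four Zig-specific load segments no longer pre-allocated, the reserve works out to max_number_of_object_segments (9) + max_number_of_special_phdrs (5) + 2 for PHDR and its read-only load segment, i.e. room for 16 program headers.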
@@ -3591,10 +3709,9 @@ fn getMaxNumberOfPhdrs() u64 {
 /// We permit a maximum of 3**2 segments.
fn calcNumberOfSegments(self: *Elf) usize {
var covers: [9]bool = [_]bool{false} ** 9;
- for (self.sections.items(.shdr), 0..) |shdr, shndx| {
+ for (self.sections.items(.shdr)) |shdr| {
if (shdr.sh_type == elf.SHT_NULL) continue;
if (shdr.sh_flags & elf.SHF_ALLOC == 0) continue;
- if (self.isZigSection(@intCast(shndx))) continue;
const flags = shdrToPhdrFlags(shdr.sh_flags);
covers[flags - 1] = true;
}
@@ -3692,7 +3809,6 @@ pub fn allocateAllocSections(self: *Elf) !void {
for (slice.items(.shdr), 0..) |shdr, shndx| {
if (shdr.sh_type == elf.SHT_NULL) continue;
if (shdr.sh_flags & elf.SHF_ALLOC == 0) continue;
- if (self.isZigSection(@intCast(shndx))) continue;
const flags = shdrToPhdrFlags(shdr.sh_flags);
try covers[flags - 1].append(@intCast(shndx));
}
@@ -3782,10 +3898,20 @@ pub fn allocateAllocSections(self: *Elf) !void {
}
new_offset = alignment.@"align"(shndx, shdr.sh_addralign, new_offset);
- if (shndx == self.eh_frame_section_index) eh_frame: {
- const zo = self.zigObjectPtr() orelse break :eh_frame;
- const sym = zo.symbol(zo.eh_frame_index orelse break :eh_frame);
- const existing_size = sym.atom(self).?.size;
+ if (self.zigObjectPtr()) |zo| blk: {
+ const existing_size = for ([_]?Symbol.Index{
+ zo.text_index,
+ zo.rodata_index,
+ zo.data_relro_index,
+ zo.data_index,
+ zo.tdata_index,
+ zo.eh_frame_index,
+ }) |maybe_sym_index| {
+ const sect_sym_index = maybe_sym_index orelse continue;
+ const sect_atom_ptr = zo.symbol(sect_sym_index).atom(self).?;
+ if (sect_atom_ptr.output_section_index != shndx) continue;
+ break sect_atom_ptr.size;
+ } else break :blk;
log.debug("moving {s} from 0x{x} to 0x{x}", .{
self.getShString(shdr.sh_name),
shdr.sh_offset,
@@ -3818,27 +3944,27 @@ pub fn allocateNonAllocSections(self: *Elf) !void {
shdr.sh_size = 0;
const new_offset = try self.findFreeSpace(needed_size, shdr.sh_addralign);
- if (self.isDebugSection(@intCast(shndx))) {
+ if (self.zigObjectPtr()) |zo| blk: {
+ const existing_size = for ([_]?Symbol.Index{
+ zo.debug_info_index,
+ zo.debug_abbrev_index,
+ zo.debug_aranges_index,
+ zo.debug_str_index,
+ zo.debug_line_index,
+ zo.debug_line_str_index,
+ zo.debug_loclists_index,
+ zo.debug_rnglists_index,
+ }) |maybe_sym_index| {
+ const sym_index = maybe_sym_index orelse continue;
+ const sym = zo.symbol(sym_index);
+ const atom_ptr = sym.atom(self).?;
+ if (atom_ptr.output_section_index == shndx) break atom_ptr.size;
+ } else break :blk;
log.debug("moving {s} from 0x{x} to 0x{x}", .{
self.getShString(shdr.sh_name),
shdr.sh_offset,
new_offset,
});
- const zo = self.zigObjectPtr().?;
- const existing_size = for ([_]Symbol.Index{
- zo.debug_info_index.?,
- zo.debug_abbrev_index.?,
- zo.debug_aranges_index.?,
- zo.debug_str_index.?,
- zo.debug_line_index.?,
- zo.debug_line_str_index.?,
- zo.debug_loclists_index.?,
- zo.debug_rnglists_index.?,
- }) |sym_index| {
- const sym = zo.symbol(sym_index);
- const atom_ptr = sym.atom(self).?;
- if (atom_ptr.output_section_index == shndx) break atom_ptr.size;
- } else 0;
const amt = try self.base.file.?.copyRangeAll(
shdr.sh_offset,
self.base.file.?,
@@ -3922,91 +4048,28 @@ fn writeAtoms(self: *Elf) !void {
undefs.deinit();
}
- var has_reloc_errors = false;
+ var buffer = std.ArrayList(u8).init(gpa);
+ defer buffer.deinit();
+
const slice = self.sections.slice();
- for (slice.items(.shdr), slice.items(.atom_list), 0..) |shdr, atom_list, shndx| {
- if (shdr.sh_type == elf.SHT_NULL) continue;
+ var has_reloc_errors = false;
+ for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, atom_list| {
if (shdr.sh_type == elf.SHT_NOBITS) continue;
- if (atom_list.items.len == 0) continue;
-
- log.debug("writing atoms in '{s}' section", .{self.getShString(shdr.sh_name)});
-
- // TODO really, really handle debug section separately
- const base_offset = if (self.isDebugSection(@intCast(shndx))) base_offset: {
- const zo = self.zigObjectPtr().?;
- for ([_]Symbol.Index{
- zo.debug_info_index.?,
- zo.debug_abbrev_index.?,
- zo.debug_aranges_index.?,
- zo.debug_str_index.?,
- zo.debug_line_index.?,
- zo.debug_line_str_index.?,
- zo.debug_loclists_index.?,
- zo.debug_rnglists_index.?,
- }) |sym_index| {
- const sym = zo.symbol(sym_index);
- const atom_ptr = sym.atom(self).?;
- if (atom_ptr.output_section_index == shndx) break :base_offset atom_ptr.size;
- }
- break :base_offset 0;
- } else if (@as(u32, @intCast(shndx)) == self.eh_frame_section_index) base_offset: {
- const zo = self.zigObjectPtr() orelse break :base_offset 0;
- const sym = zo.symbol(zo.eh_frame_index orelse break :base_offset 0);
- break :base_offset sym.atom(self).?.size;
- } else 0;
- const sh_offset = shdr.sh_offset + base_offset;
- const sh_size = math.cast(usize, shdr.sh_size - base_offset) orelse return error.Overflow;
-
- const buffer = try gpa.alloc(u8, sh_size);
- defer gpa.free(buffer);
- const padding_byte: u8 = if (shdr.sh_type == elf.SHT_PROGBITS and
- shdr.sh_flags & elf.SHF_EXECINSTR != 0 and self.getTarget().cpu.arch == .x86_64)
- 0xcc // int3
- else
- 0;
- @memset(buffer, padding_byte);
-
- for (atom_list.items) |ref| {
- const atom_ptr = self.atom(ref).?;
- assert(atom_ptr.alive);
-
- const offset = math.cast(usize, atom_ptr.value - @as(i64, @intCast(base_offset))) orelse
- return error.Overflow;
- const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow;
-
- log.debug("writing atom({}) at 0x{x}", .{ ref, sh_offset + offset });
-
- // TODO decompress directly into provided buffer
- const out_code = buffer[offset..][0..size];
- const in_code = switch (atom_ptr.file(self).?) {
- .object => |x| try x.codeDecompressAlloc(self, ref.index),
- .zig_object => |x| try x.codeAlloc(self, ref.index),
- else => unreachable,
- };
- defer gpa.free(in_code);
- @memcpy(out_code, in_code);
-
- const res = if (shdr.sh_flags & elf.SHF_ALLOC == 0)
- atom_ptr.resolveRelocsNonAlloc(self, out_code, &undefs)
- else
- atom_ptr.resolveRelocsAlloc(self, out_code);
- _ = res catch |err| switch (err) {
- error.UnsupportedCpuArch => {
- try self.reportUnsupportedCpuArch();
- return error.FlushFailure;
- },
- error.RelocFailure, error.RelaxFailure => has_reloc_errors = true,
- else => |e| return e,
- };
- }
-
- try self.base.file.?.pwriteAll(buffer, sh_offset);
+ if (atom_list.atoms.items.len == 0) continue;
+ atom_list.write(&buffer, &undefs, self) catch |err| switch (err) {
+ error.UnsupportedCpuArch => {
+ try self.reportUnsupportedCpuArch();
+ return error.FlushFailure;
+ },
+ error.RelocFailure, error.RelaxFailure => has_reloc_errors = true,
+ else => |e| return e,
+ };
}
- if (self.requiresThunks()) {
- var buffer = std.ArrayList(u8).init(gpa);
- defer buffer.deinit();
+ try self.reportUndefinedSymbols(&undefs);
+ if (has_reloc_errors) return error.FlushFailure;
+ if (self.requiresThunks()) {
for (self.thunks.items) |th| {
const thunk_size = th.size(self);
try buffer.ensureUnusedCapacity(thunk_size);
@@ -4018,10 +4081,6 @@ fn writeAtoms(self: *Elf) !void {
buffer.clearRetainingCapacity();
}
}
-
- try self.reportUndefinedSymbols(&undefs);
-
- if (has_reloc_errors) return error.FlushFailure;
}
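writeAtoms now hands each section to AtomList.write with a single reusable buffer instead of allocating a fresh buffer per section; the inline padding policy of the old loop (int3 between atoms of x86_64 executable sections, zeroes elsewhere) is presumably carried over into AtomList. A self-contained restatement of the selection logic that stood here:

    const std = @import("std");
    const elf = std.elf;

    test "executable-section padding byte" {
        const sh_type: u32 = elf.SHT_PROGBITS;
        const sh_flags: u64 = elf.SHF_ALLOC | elf.SHF_EXECINSTR;
        const target_arch: std.Target.Cpu.Arch = .x86_64;
        const padding_byte: u8 = if (sh_type == elf.SHT_PROGBITS and
            sh_flags & elf.SHF_EXECINSTR != 0 and target_arch == .x86_64)
            0xcc // int3: stray execution traps instead of sliding into the next atom
        else
            0;
        try std.testing.expectEqual(@as(u8, 0xcc), padding_byte);
    }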
pub fn updateSymtabSize(self: *Elf) !void {
@@ -4655,34 +4714,6 @@ pub fn isEffectivelyDynLib(self: Elf) bool {
};
}
-pub fn isZigSection(self: Elf, shndx: u32) bool {
- inline for (&[_]?u32{
- self.zig_text_section_index,
- self.zig_data_rel_ro_section_index,
- self.zig_data_section_index,
- self.zig_bss_section_index,
- }) |index| {
- if (index == shndx) return true;
- }
- return false;
-}
-
-pub fn isDebugSection(self: Elf, shndx: u32) bool {
- inline for (&[_]?u32{
- self.debug_info_section_index,
- self.debug_abbrev_section_index,
- self.debug_str_section_index,
- self.debug_aranges_section_index,
- self.debug_line_section_index,
- self.debug_line_str_section_index,
- self.debug_loclists_section_index,
- self.debug_rnglists_section_index,
- }) |index| {
- if (index == shndx) return true;
- }
- return false;
-}
-
pub fn addPhdr(self: *Elf, opts: struct {
type: u32 = 0,
flags: u32 = 0,
@@ -5058,7 +5089,7 @@ fn reportMissingLibraryError(
}
}
-pub fn reportUnsupportedCpuArch(self: *Elf) error{OutOfMemory}!void {
+fn reportUnsupportedCpuArch(self: *Elf) error{OutOfMemory}!void {
var err = try self.base.addErrorWithNotes(0);
try err.addMsg("fatal linker error: unsupported CPU architecture {s}", .{
@tagName(self.getTarget().cpu.arch),
@@ -5270,6 +5301,14 @@ fn fmtDumpState(
try writer.print("{}\n", .{linker_defined.fmtSymtab(self)});
}
+ const slice = self.sections.slice();
+ {
+ try writer.writeAll("atom lists\n");
+ for (slice.items(.shdr), slice.items(.atom_list_2), 0..) |shdr, atom_list, shndx| {
+ try writer.print("shdr({d}) : {s} : {}", .{ shndx, self.getShString(shdr.sh_name), atom_list.fmt(self) });
+ }
+ }
+
if (self.requiresThunks()) {
try writer.writeAll("thunks\n");
for (self.thunks.items, 0..) |th, index| {
@@ -5291,7 +5330,7 @@ fn fmtDumpState(
}
try writer.writeAll("\nOutput shdrs\n");
- for (self.sections.items(.shdr), self.sections.items(.phndx), 0..) |shdr, phndx, shndx| {
+ for (slice.items(.shdr), slice.items(.phndx), 0..) |shdr, phndx, shndx| {
try writer.print(" shdr({d}) : phdr({?d}) : {}\n", .{
shndx,
phndx,
@@ -5361,7 +5400,6 @@ fn requiresThunks(self: Elf) bool {
/// so that we reserve enough space for the program header table up-front.
 /// Bump these numbers when adding or deleting a Zig-specific pre-allocated segment, or adding
/// more special-purpose program headers.
-pub const number_of_zig_segments = 4;
const max_number_of_object_segments = 9;
const max_number_of_special_phdrs = 5;
@@ -5546,8 +5584,14 @@ const Section = struct {
phndx: ?u32 = null,
/// List of atoms contributing to this section.
+    /// TODO currently this is only used for relocation tracking in relocatable mode
+ /// but will be merged with atom_list_2.
atom_list: std.ArrayListUnmanaged(Ref) = .{},
+ /// List of atoms contributing to this section.
+ /// This can be used by sections that require special handling such as init/fini array, etc.
+ atom_list_2: AtomList = .{},
+
/// Index of the last allocated atom in this section.
last_atom: Ref = .{ .index = 0, .file = 0 },
@@ -5576,9 +5620,10 @@ fn defaultEntrySymbolName(cpu_arch: std.Target.Cpu.Arch) []const u8 {
};
}
-fn createThunks(elf_file: *Elf, shdr: *elf.Elf64_Shdr, shndx: u32) !void {
+fn createThunks(elf_file: *Elf, atom_list: *AtomList) !void {
const gpa = elf_file.base.comp.gpa;
const cpu_arch = elf_file.getTarget().cpu.arch;
+
     // A branch will need an extender if the distance to its target is larger than
     // `2^(jump_bits - 1) - margin` where margin is some arbitrary number.
const max_distance = switch (cpu_arch) {
@@ -5586,36 +5631,44 @@ fn createThunks(elf_file: *Elf, shdr: *elf.Elf64_Shdr, shndx: u32) !void {
.x86_64, .riscv64 => unreachable,
else => @panic("unhandled arch"),
};
- const atoms = elf_file.sections.items(.atom_list)[shndx].items;
- assert(atoms.len > 0);
- for (atoms) |ref| {
+ const advance = struct {
+ fn advance(list: *AtomList, size: u64, alignment: Atom.Alignment) !i64 {
+ const offset = alignment.forward(list.size);
+ const padding = offset - list.size;
+ list.size += padding + size;
+ list.alignment = list.alignment.max(alignment);
+ return @intCast(offset);
+ }
+ }.advance;
+
+ for (atom_list.atoms.items) |ref| {
elf_file.atom(ref).?.value = -1;
}
var i: usize = 0;
- while (i < atoms.len) {
+ while (i < atom_list.atoms.items.len) {
const start = i;
- const start_atom = elf_file.atom(atoms[start]).?;
+ const start_atom = elf_file.atom(atom_list.atoms.items[start]).?;
assert(start_atom.alive);
- start_atom.value = try advanceSection(shdr, start_atom.size, start_atom.alignment);
+ start_atom.value = try advance(atom_list, start_atom.size, start_atom.alignment);
i += 1;
- while (i < atoms.len) : (i += 1) {
- const atom_ptr = elf_file.atom(atoms[i]).?;
+ while (i < atom_list.atoms.items.len) : (i += 1) {
+ const atom_ptr = elf_file.atom(atom_list.atoms.items[i]).?;
assert(atom_ptr.alive);
- if (@as(i64, @intCast(atom_ptr.alignment.forward(shdr.sh_size))) - start_atom.value >= max_distance)
+ if (@as(i64, @intCast(atom_ptr.alignment.forward(atom_list.size))) - start_atom.value >= max_distance)
break;
- atom_ptr.value = try advanceSection(shdr, atom_ptr.size, atom_ptr.alignment);
+ atom_ptr.value = try advance(atom_list, atom_ptr.size, atom_ptr.alignment);
}
// Insert a thunk at the group end
const thunk_index = try elf_file.addThunk();
const thunk_ptr = elf_file.thunk(thunk_index);
- thunk_ptr.output_section_index = shndx;
+ thunk_ptr.output_section_index = atom_list.output_section_index;
// Scan relocs in the group and create trampolines for any unreachable callsite
- for (atoms[start..i]) |ref| {
+ for (atom_list.atoms.items[start..i]) |ref| {
const atom_ptr = elf_file.atom(ref).?;
const file_ptr = atom_ptr.file(elf_file).?;
log.debug("atom({}) {s}", .{ ref, atom_ptr.name(elf_file) });
@@ -5645,18 +5698,11 @@ fn createThunks(elf_file: *Elf, shdr: *elf.Elf64_Shdr, shndx: u32) !void {
atom_ptr.addExtra(.{ .thunk = thunk_index }, elf_file);
}
- thunk_ptr.value = try advanceSection(shdr, thunk_ptr.size(elf_file), Atom.Alignment.fromNonzeroByteUnits(2));
+ thunk_ptr.value = try advance(atom_list, thunk_ptr.size(elf_file), Atom.Alignment.fromNonzeroByteUnits(2));
log.debug("thunk({d}) : {}", .{ thunk_index, thunk_ptr.fmt(elf_file) });
}
}
-fn advanceSection(shdr: *elf.Elf64_Shdr, adv_size: u64, alignment: Atom.Alignment) !i64 {
- const offset = alignment.forward(shdr.sh_size);
- const padding = offset - shdr.sh_size;
- shdr.sh_size += padding + adv_size;
- shdr.sh_addralign = @max(shdr.sh_addralign, alignment.toByteUnits() orelse 1);
- return @intCast(offset);
-}
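The removed advanceSection and the new advance helper inside createThunks do the same bump allocation, just against the AtomList's running size instead of the section header. In isolation, with std.mem standing in for the Alignment helper type:

    const std = @import("std");

    test "advance bump allocation" {
        var list_size: u64 = 10;
        var list_alignment: u64 = 1;
        const atom_size: u64 = 7;
        const atom_alignment: u64 = 8;

        const offset = std.mem.alignForward(u64, list_size, atom_alignment);
        const padding = offset - list_size;
        list_size += padding + atom_size;
        list_alignment = @max(list_alignment, atom_alignment);

        try std.testing.expectEqual(@as(u64, 16), offset); // 10 rounded up to 8
        try std.testing.expectEqual(@as(u64, 23), list_size); // 16 + 7
        try std.testing.expectEqual(@as(u64, 8), list_alignment);
    }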
const std = @import("std");
const build_options = @import("build_options");
@@ -5687,6 +5733,7 @@ const Air = @import("../Air.zig");
const Allocator = std.mem.Allocator;
const Archive = @import("Elf/Archive.zig");
pub const Atom = @import("Elf/Atom.zig");
+const AtomList = @import("Elf/AtomList.zig");
const Cache = std.Build.Cache;
const Path = Cache.Path;
const Compilation = @import("../Compilation.zig");
diff --git a/src/link/Elf/Atom.zig b/src/link/Elf/Atom.zig
index ef2301f1cd..ab0e98440b 100644
--- a/src/link/Elf/Atom.zig
+++ b/src/link/Elf/Atom.zig
@@ -51,6 +51,11 @@ pub fn address(self: Atom, elf_file: *Elf) i64 {
return @as(i64, @intCast(shdr.sh_addr)) + self.value;
}
+pub fn offset(self: Atom, elf_file: *Elf) u64 {
+ const shdr = elf_file.sections.items(.shdr)[self.output_section_index];
+ return shdr.sh_offset + @as(u64, @intCast(self.value));
+}
+
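The new Atom.offset helper folds the section's sh_offset and the atom's value into one call, which call sites in this change previously spelled out by hand:

    // before: const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value));
    // after:  const file_offset = atom_ptr.offset(self);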
pub fn ref(self: Atom) Elf.Ref {
return .{ .index = self.atom_index, .file = self.file_index };
}
@@ -123,140 +128,6 @@ pub fn freeListEligible(self: Atom, elf_file: *Elf) bool {
return surplus >= Elf.min_text_capacity;
}
-pub fn allocate(self: *Atom, elf_file: *Elf) !void {
- const slice = elf_file.sections.slice();
- const shdr = &slice.items(.shdr)[self.output_section_index];
- const free_list = &slice.items(.free_list)[self.output_section_index];
- const last_atom_ref = &slice.items(.last_atom)[self.output_section_index];
- const new_atom_ideal_capacity = Elf.padToIdeal(self.size);
-
- // We use these to indicate our intention to update metadata, placing the new atom,
- // and possibly removing a free list node.
- // It would be simpler to do it inside the for loop below, but that would cause a
- // problem if an error was returned later in the function. So this action
- // is actually carried out at the end of the function, when errors are no longer possible.
- var atom_placement: ?Elf.Ref = null;
- var free_list_removal: ?usize = null;
-
- // First we look for an appropriately sized free list node.
- // The list is unordered. We'll just take the first thing that works.
- self.value = blk: {
- var i: usize = if (elf_file.base.child_pid == null) 0 else free_list.items.len;
- while (i < free_list.items.len) {
- const big_atom_ref = free_list.items[i];
- const big_atom = elf_file.atom(big_atom_ref).?;
- // We now have a pointer to a live atom that has too much capacity.
- // Is it enough that we could fit this new atom?
- const cap = big_atom.capacity(elf_file);
- const ideal_capacity = Elf.padToIdeal(cap);
- const ideal_capacity_end_vaddr = std.math.add(u64, @intCast(big_atom.value), ideal_capacity) catch ideal_capacity;
- const capacity_end_vaddr = @as(u64, @intCast(big_atom.value)) + cap;
- const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity;
- const new_start_vaddr = self.alignment.backward(new_start_vaddr_unaligned);
- if (new_start_vaddr < ideal_capacity_end_vaddr) {
- // Additional bookkeeping here to notice if this free list node
- // should be deleted because the block that it points to has grown to take up
- // more of the extra capacity.
- if (!big_atom.freeListEligible(elf_file)) {
- _ = free_list.swapRemove(i);
- } else {
- i += 1;
- }
- continue;
- }
- // At this point we know that we will place the new block here. But the
- // remaining question is whether there is still yet enough capacity left
- // over for there to still be a free list node.
- const remaining_capacity = new_start_vaddr - ideal_capacity_end_vaddr;
- const keep_free_list_node = remaining_capacity >= Elf.min_text_capacity;
-
- // Set up the metadata to be updated, after errors are no longer possible.
- atom_placement = big_atom_ref;
- if (!keep_free_list_node) {
- free_list_removal = i;
- }
- break :blk @intCast(new_start_vaddr);
- } else if (elf_file.atom(last_atom_ref.*)) |last_atom| {
- const ideal_capacity = Elf.padToIdeal(last_atom.size);
- const ideal_capacity_end_vaddr = @as(u64, @intCast(last_atom.value)) + ideal_capacity;
- const new_start_vaddr = self.alignment.forward(ideal_capacity_end_vaddr);
- // Set up the metadata to be updated, after errors are no longer possible.
- atom_placement = last_atom.ref();
- break :blk @intCast(new_start_vaddr);
- } else {
- break :blk 0;
- }
- };
-
- log.debug("allocated atom({}) : '{s}' at 0x{x} to 0x{x}", .{
- self.ref(),
- self.name(elf_file),
- self.address(elf_file),
- self.address(elf_file) + @as(i64, @intCast(self.size)),
- });
-
- const expand_section = if (atom_placement) |placement_ref|
- elf_file.atom(placement_ref).?.nextAtom(elf_file) == null
- else
- true;
- if (expand_section) {
- const needed_size: u64 = @intCast(self.value + @as(i64, @intCast(self.size)));
- try elf_file.growAllocSection(self.output_section_index, needed_size);
- last_atom_ref.* = self.ref();
-
- switch (self.file(elf_file).?) {
- .zig_object => |zo| if (zo.dwarf) |_| {
- // The .debug_info section has `low_pc` and `high_pc` values which is the virtual address
- // range of the compilation unit. When we expand the text section, this range changes,
- // so the DW_TAG.compile_unit tag of the .debug_info section becomes dirty.
- zo.debug_info_section_dirty = true;
- // This becomes dirty for the same reason. We could potentially make this more
- // fine-grained with the addition of support for more compilation units. It is planned to
- // model each package as a different compilation unit.
- zo.debug_aranges_section_dirty = true;
- zo.debug_rnglists_section_dirty = true;
- },
- else => {},
- }
- }
- shdr.sh_addralign = @max(shdr.sh_addralign, self.alignment.toByteUnits().?);
-
- // This function can also reallocate an atom.
- // In this case we need to "unplug" it from its previous location before
- // plugging it in to its new location.
- if (self.prevAtom(elf_file)) |prev| {
- prev.next_atom_ref = self.next_atom_ref;
- }
- if (self.nextAtom(elf_file)) |next| {
- next.prev_atom_ref = self.prev_atom_ref;
- }
-
- if (atom_placement) |big_atom_ref| {
- const big_atom = elf_file.atom(big_atom_ref).?;
- self.prev_atom_ref = big_atom_ref;
- self.next_atom_ref = big_atom.next_atom_ref;
- big_atom.next_atom_ref = self.ref();
- } else {
- self.prev_atom_ref = .{ .index = 0, .file = 0 };
- self.next_atom_ref = .{ .index = 0, .file = 0 };
- }
- if (free_list_removal) |i| {
- _ = free_list.swapRemove(i);
- }
-
- self.alive = true;
-}
-
-pub fn shrink(self: *Atom, elf_file: *Elf) void {
- _ = self;
- _ = elf_file;
-}
-
-pub fn grow(self: *Atom, elf_file: *Elf) !void {
- if (!self.alignment.check(@intCast(self.value)) or self.size > self.capacity(elf_file))
- try self.allocate(elf_file);
-}
-
pub fn free(self: *Atom, elf_file: *Elf) void {
log.debug("freeAtom atom({}) ({s})", .{ self.ref(), self.name(elf_file) });
@@ -1807,7 +1678,7 @@ const aarch64 = struct {
=> {
// TODO: NC means no overflow check
const taddr = @as(u64, @intCast(S + A));
- const offset: u12 = switch (r_type) {
+ const off: u12 = switch (r_type) {
.LDST8_ABS_LO12_NC => @truncate(taddr),
.LDST16_ABS_LO12_NC => @divExact(@as(u12, @truncate(taddr)), 2),
.LDST32_ABS_LO12_NC => @divExact(@as(u12, @truncate(taddr)), 4),
@@ -1815,7 +1686,7 @@ const aarch64 = struct {
.LDST128_ABS_LO12_NC => @divExact(@as(u12, @truncate(taddr)), 16),
else => unreachable,
};
- aarch64_util.writeLoadStoreRegInst(offset, code);
+ aarch64_util.writeLoadStoreRegInst(off, code);
},
.TLSLE_ADD_TPREL_HI12 => {
@@ -1839,8 +1710,8 @@ const aarch64 = struct {
.TLSIE_LD64_GOTTPREL_LO12_NC => {
const S_ = target.gotTpAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
- const offset: u12 = try math.divExact(u12, @truncate(@as(u64, @bitCast(S_ + A))), 8);
- aarch64_util.writeLoadStoreRegInst(offset, code);
+ const off: u12 = try math.divExact(u12, @truncate(@as(u64, @bitCast(S_ + A))), 8);
+ aarch64_util.writeLoadStoreRegInst(off, code);
},
.TLSGD_ADR_PAGE21 => {
@@ -1853,8 +1724,8 @@ const aarch64 = struct {
.TLSGD_ADD_LO12_NC => {
const S_ = target.tlsGdAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
- const offset: u12 = @truncate(@as(u64, @bitCast(S_ + A)));
- aarch64_util.writeAddImmInst(offset, code);
+ const off: u12 = @truncate(@as(u64, @bitCast(S_ + A)));
+ aarch64_util.writeAddImmInst(off, code);
},
.TLSDESC_ADR_PAGE21 => {
@@ -1873,8 +1744,8 @@ const aarch64 = struct {
if (target.flags.has_tlsdesc) {
const S_ = target.tlsDescAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
- const offset: u12 = try math.divExact(u12, @truncate(@as(u64, @bitCast(S_ + A))), 8);
- aarch64_util.writeLoadStoreRegInst(offset, code);
+ const off: u12 = try math.divExact(u12, @truncate(@as(u64, @bitCast(S_ + A))), 8);
+ aarch64_util.writeLoadStoreRegInst(off, code);
} else {
relocs_log.debug(" relaxing ldr => nop", .{});
mem.writeInt(u32, code, Instruction.nop().toU32(), .little);
@@ -1885,8 +1756,8 @@ const aarch64 = struct {
if (target.flags.has_tlsdesc) {
const S_ = target.tlsDescAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
- const offset: u12 = @truncate(@as(u64, @bitCast(S_ + A)));
- aarch64_util.writeAddImmInst(offset, code);
+ const off: u12 = @truncate(@as(u64, @bitCast(S_ + A)));
+ aarch64_util.writeAddImmInst(off, code);
} else {
const old_inst = Instruction{
.add_subtract_immediate = mem.bytesToValue(std.meta.TagPayload(
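
Reviewer note: the Atom.allocate logic removed above implemented a first-fit scan over an unordered free list: take the first node whose surplus capacity fits the new atom, place the atom at the end of that capacity, and keep the leftover as a free-list node only if it stays above a minimum threshold. The following standalone sketch illustrates that idea under simplified assumptions (alignment handling omitted; Node and min_keep are hypothetical names, with min_keep playing the role of Elf.min_text_capacity):

    const std = @import("std");

    const Node = struct {
        start: u64, // start of the surplus capacity
        len: u64, // surplus capacity in bytes
    };

    /// First-fit: returns the chosen address, or null if no node fits.
    /// A leftover smaller than `min_keep` is not worth tracking, so the
    /// node is dropped instead of trimmed (hypothetical simplification).
    fn takeFirstFit(free_list: *std.ArrayList(Node), size: u64, min_keep: u64) ?u64 {
        var i: usize = 0;
        while (i < free_list.items.len) : (i += 1) {
            const node = free_list.items[i];
            if (node.len < size) continue;
            const addr = node.start + node.len - size; // place at the end of the hole
            if (node.len - size >= min_keep) {
                free_list.items[i].len = node.len - size; // keep the trimmed node
            } else {
                _ = free_list.swapRemove(i); // too small to track; drop it
            }
            return addr;
        }
        return null;
    }

    test "takeFirstFit" {
        var list = std.ArrayList(Node).init(std.testing.allocator);
        defer list.deinit();
        try list.append(.{ .start = 0x1000, .len = 0x100 });
        try std.testing.expectEqual(@as(?u64, 0x10c0), takeFirstFit(&list, 0x40, 0x80));
        try std.testing.expectEqual(@as(u64, 0xc0), list.items[0].len);
    }
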
diff --git a/src/link/Elf/AtomList.zig b/src/link/Elf/AtomList.zig
new file mode 100644
index 0000000000..51407ca6d9
--- /dev/null
+++ b/src/link/Elf/AtomList.zig
@@ -0,0 +1,208 @@
+value: i64 = 0,
+size: u64 = 0,
+alignment: Atom.Alignment = .@"1",
+output_section_index: u32 = 0,
+atoms: std.ArrayListUnmanaged(Elf.Ref) = .{},
+
+pub fn deinit(list: *AtomList, allocator: Allocator) void {
+ list.atoms.deinit(allocator);
+}
+
+pub fn address(list: AtomList, elf_file: *Elf) i64 {
+ const shdr = elf_file.sections.items(.shdr)[list.output_section_index];
+ return @as(i64, @intCast(shdr.sh_addr)) + list.value;
+}
+
+pub fn offset(list: AtomList, elf_file: *Elf) u64 {
+ const shdr = elf_file.sections.items(.shdr)[list.output_section_index];
+ return shdr.sh_offset + @as(u64, @intCast(list.value));
+}
+
+pub fn updateSize(list: *AtomList, elf_file: *Elf) void {
+ for (list.atoms.items) |ref| {
+ const atom_ptr = elf_file.atom(ref).?;
+ assert(atom_ptr.alive);
+ const off = atom_ptr.alignment.forward(list.size);
+ const padding = off - list.size;
+ atom_ptr.value = @intCast(off);
+ list.size += padding + atom_ptr.size;
+ list.alignment = list.alignment.max(atom_ptr.alignment);
+ }
+}
+
+pub fn allocate(list: *AtomList, elf_file: *Elf) !void {
+ const alloc_res = try elf_file.allocateChunk(.{
+ .shndx = list.output_section_index,
+ .size = list.size,
+ .alignment = list.alignment,
+ .requires_padding = false,
+ });
+ list.value = @intCast(alloc_res.value);
+
+ const slice = elf_file.sections.slice();
+ const shdr = &slice.items(.shdr)[list.output_section_index];
+ const last_atom_ref = &slice.items(.last_atom)[list.output_section_index];
+
+ const expand_section = if (elf_file.atom(alloc_res.placement)) |placement_atom|
+ placement_atom.nextAtom(elf_file) == null
+ else
+ true;
+ if (expand_section) last_atom_ref.* = list.lastAtom(elf_file).ref();
+ shdr.sh_addralign = @max(shdr.sh_addralign, list.alignment.toByteUnits().?);
+
+ // FIXME:JK this currently ignores Thunks as valid chunks.
+ {
+ var idx: usize = 0;
+ while (idx < list.atoms.items.len) : (idx += 1) {
+ const curr_atom_ptr = elf_file.atom(list.atoms.items[idx]).?;
+ if (idx > 0) {
+ curr_atom_ptr.prev_atom_ref = list.atoms.items[idx - 1];
+ }
+ if (idx + 1 < list.atoms.items.len) {
+ curr_atom_ptr.next_atom_ref = list.atoms.items[idx + 1];
+ }
+ }
+ }
+
+ if (elf_file.atom(alloc_res.placement)) |placement_atom| {
+ list.firstAtom(elf_file).prev_atom_ref = placement_atom.ref();
+ list.lastAtom(elf_file).next_atom_ref = placement_atom.next_atom_ref;
+ placement_atom.next_atom_ref = list.firstAtom(elf_file).ref();
+ }
+
+    // FIXME:JK if we had a link from Atom to its parent AtomList, we would not need to update Atom's value or osec index.
+ for (list.atoms.items) |ref| {
+ const atom_ptr = elf_file.atom(ref).?;
+ atom_ptr.output_section_index = list.output_section_index;
+ atom_ptr.value += list.value;
+ }
+}
+
+pub fn write(list: AtomList, buffer: *std.ArrayList(u8), undefs: anytype, elf_file: *Elf) !void {
+ const gpa = elf_file.base.comp.gpa;
+ const osec = elf_file.sections.items(.shdr)[list.output_section_index];
+ assert(osec.sh_type != elf.SHT_NOBITS);
+
+ log.debug("writing atoms in section '{s}'", .{elf_file.getShString(osec.sh_name)});
+
+ const list_size = math.cast(usize, list.size) orelse return error.Overflow;
+ try buffer.ensureUnusedCapacity(list_size);
+ buffer.appendNTimesAssumeCapacity(0, list_size);
+
+ for (list.atoms.items) |ref| {
+ const atom_ptr = elf_file.atom(ref).?;
+ assert(atom_ptr.alive);
+
+ const off = math.cast(usize, atom_ptr.value - list.value) orelse return error.Overflow;
+ const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow;
+
+ log.debug(" atom({}) at 0x{x}", .{ ref, list.offset(elf_file) + off });
+
+ const object = atom_ptr.file(elf_file).?.object;
+ const code = try object.codeDecompressAlloc(elf_file, ref.index);
+ defer gpa.free(code);
+ const out_code = buffer.items[off..][0..size];
+ @memcpy(out_code, code);
+
+ if (osec.sh_flags & elf.SHF_ALLOC == 0)
+ try atom_ptr.resolveRelocsNonAlloc(elf_file, out_code, undefs)
+ else
+ try atom_ptr.resolveRelocsAlloc(elf_file, out_code);
+ }
+
+ try elf_file.base.file.?.pwriteAll(buffer.items, list.offset(elf_file));
+ buffer.clearRetainingCapacity();
+}
+
+pub fn writeRelocatable(list: AtomList, buffer: *std.ArrayList(u8), elf_file: *Elf) !void {
+ const gpa = elf_file.base.comp.gpa;
+ const osec = elf_file.sections.items(.shdr)[list.output_section_index];
+ assert(osec.sh_type != elf.SHT_NOBITS);
+
+ log.debug("writing atoms in section '{s}'", .{elf_file.getShString(osec.sh_name)});
+
+ const list_size = math.cast(usize, list.size) orelse return error.Overflow;
+ try buffer.ensureUnusedCapacity(list_size);
+ buffer.appendNTimesAssumeCapacity(0, list_size);
+
+ for (list.atoms.items) |ref| {
+ const atom_ptr = elf_file.atom(ref).?;
+ assert(atom_ptr.alive);
+
+ const off = math.cast(usize, atom_ptr.value - list.value) orelse return error.Overflow;
+ const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow;
+
+ log.debug(" atom({}) at 0x{x}", .{ ref, list.offset(elf_file) + off });
+
+ const object = atom_ptr.file(elf_file).?.object;
+ const code = try object.codeDecompressAlloc(elf_file, ref.index);
+ defer gpa.free(code);
+ const out_code = buffer.items[off..][0..size];
+ @memcpy(out_code, code);
+ }
+
+ try elf_file.base.file.?.pwriteAll(buffer.items, list.offset(elf_file));
+ buffer.clearRetainingCapacity();
+}
+
+pub fn firstAtom(list: AtomList, elf_file: *Elf) *Atom {
+ assert(list.atoms.items.len > 0);
+ return elf_file.atom(list.atoms.items[0]).?;
+}
+
+pub fn lastAtom(list: AtomList, elf_file: *Elf) *Atom {
+ assert(list.atoms.items.len > 0);
+ return elf_file.atom(list.atoms.items[list.atoms.items.len - 1]).?;
+}
+
+pub fn format(
+ list: AtomList,
+ comptime unused_fmt_string: []const u8,
+ options: std.fmt.FormatOptions,
+ writer: anytype,
+) !void {
+ _ = list;
+ _ = unused_fmt_string;
+ _ = options;
+ _ = writer;
+ @compileError("do not format AtomList directly");
+}
+
+const FormatCtx = struct { AtomList, *Elf };
+
+pub fn fmt(list: AtomList, elf_file: *Elf) std.fmt.Formatter(format2) {
+ return .{ .data = .{ list, elf_file } };
+}
+
+fn format2(
+ ctx: FormatCtx,
+ comptime unused_fmt_string: []const u8,
+ options: std.fmt.FormatOptions,
+ writer: anytype,
+) !void {
+ _ = unused_fmt_string;
+ _ = options;
+ const list, const elf_file = ctx;
+ try writer.print("list : @{x} : shdr({d}) : align({x}) : size({x})", .{
+ list.address(elf_file), list.output_section_index,
+ list.alignment.toByteUnits() orelse 0, list.size,
+ });
+ try writer.writeAll(" : atoms{ ");
+ for (list.atoms.items, 0..) |ref, i| {
+ try writer.print("{}", .{ref});
+ if (i < list.atoms.items.len - 1) try writer.writeAll(", ");
+ }
+ try writer.writeAll(" }");
+}
+
+const assert = std.debug.assert;
+const elf = std.elf;
+const log = std.log.scoped(.link);
+const math = std.math;
+const std = @import("std");
+
+const Allocator = std.mem.Allocator;
+const Atom = @import("Atom.zig");
+const AtomList = @This();
+const Elf = @import("../Elf.zig");
+const Object = @import("Object.zig");
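
Reviewer note: AtomList.updateSize above is a plain layout pass: each atom's offset is bumped forward to the next address satisfying its alignment, the padding plus the atom's size is added to the running total, and the list's alignment is the maximum over its atoms. A minimal sketch of the same pass, with Chunk as a hypothetical stand-in for Atom:

    const std = @import("std");

    const Chunk = struct {
        size: u64,
        alignment: u64, // power of two, in bytes
        offset: u64 = 0, // assigned relative to the list start
    };

    fn layout(chunks: []Chunk) struct { size: u64, alignment: u64 } {
        var size: u64 = 0;
        var max_align: u64 = 1;
        for (chunks) |*c| {
            const off = std.mem.alignForward(u64, size, c.alignment);
            c.offset = off; // mirrors atom_ptr.value = @intCast(off)
            size = off + c.size; // padding + size, as in updateSize
            max_align = @max(max_align, c.alignment);
        }
        return .{ .size = size, .alignment = max_align };
    }

    test "layout" {
        var chunks = [_]Chunk{
            .{ .size = 3, .alignment = 1 },
            .{ .size = 8, .alignment = 8 },
        };
        const res = layout(&chunks);
        try std.testing.expectEqual(@as(u64, 8), chunks[1].offset); // padded from 3 to 8
        try std.testing.expectEqual(@as(u64, 16), res.size);
        try std.testing.expectEqual(@as(u64, 8), res.alignment);
    }
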
diff --git a/src/link/Elf/Object.zig b/src/link/Elf/Object.zig
index 40d09c8ec4..18c7a91c8f 100644
--- a/src/link/Elf/Object.zig
+++ b/src/link/Elf/Object.zig
@@ -311,58 +311,6 @@ fn initAtoms(self: *Object, allocator: Allocator, handle: std.fs.File, elf_file:
};
}
-fn initOutputSection(self: Object, elf_file: *Elf, shdr: elf.Elf64_Shdr) error{OutOfMemory}!u32 {
- const name = blk: {
- const name = self.getString(shdr.sh_name);
- if (elf_file.base.isRelocatable()) break :blk name;
- if (shdr.sh_flags & elf.SHF_MERGE != 0) break :blk name;
- const sh_name_prefixes: []const [:0]const u8 = &.{
- ".text", ".data.rel.ro", ".data", ".rodata", ".bss.rel.ro", ".bss",
- ".init_array", ".fini_array", ".tbss", ".tdata", ".gcc_except_table", ".ctors",
- ".dtors", ".gnu.warning",
- };
- inline for (sh_name_prefixes) |prefix| {
- if (std.mem.eql(u8, name, prefix) or std.mem.startsWith(u8, name, prefix ++ ".")) {
- break :blk prefix;
- }
- }
- break :blk name;
- };
- const @"type" = tt: {
- if (elf_file.getTarget().cpu.arch == .x86_64 and
- shdr.sh_type == elf.SHT_X86_64_UNWIND) break :tt elf.SHT_PROGBITS;
-
- const @"type" = switch (shdr.sh_type) {
- elf.SHT_NULL => unreachable,
- elf.SHT_PROGBITS => blk: {
- if (std.mem.eql(u8, name, ".init_array") or std.mem.startsWith(u8, name, ".init_array."))
- break :blk elf.SHT_INIT_ARRAY;
- if (std.mem.eql(u8, name, ".fini_array") or std.mem.startsWith(u8, name, ".fini_array."))
- break :blk elf.SHT_FINI_ARRAY;
- break :blk shdr.sh_type;
- },
- else => shdr.sh_type,
- };
- break :tt @"type";
- };
- const flags = blk: {
- var flags = shdr.sh_flags;
- if (!elf_file.base.isRelocatable()) {
- flags &= ~@as(u64, elf.SHF_COMPRESSED | elf.SHF_GROUP | elf.SHF_GNU_RETAIN);
- }
- break :blk switch (@"type") {
- elf.SHT_INIT_ARRAY, elf.SHT_FINI_ARRAY => flags | elf.SHF_WRITE,
- else => flags,
- };
- };
- const out_shndx = elf_file.sectionByName(name) orelse try elf_file.addSection(.{
- .type = @"type",
- .flags = flags,
- .name = try elf_file.insertShString(name),
- });
- return out_shndx;
-}
-
fn skipShdr(self: *Object, index: u32, elf_file: *Elf) bool {
const comp = elf_file.base.comp;
const shdr = self.shdrs.items[index];
@@ -438,15 +386,24 @@ fn parseEhFrame(self: *Object, allocator: Allocator, handle: std.fs.File, shndx:
.input_section_index = shndx,
.file_index = self.index,
}),
- .fde => try self.fdes.append(allocator, .{
- .offset = data_start + rec.offset,
- .size = rec.size,
- .cie_index = undefined,
- .rel_index = rel_start + @as(u32, @intCast(rel_range.start)),
- .rel_num = @as(u32, @intCast(rel_range.len)),
- .input_section_index = shndx,
- .file_index = self.index,
- }),
+ .fde => {
+ if (rel_range.len == 0) {
+                        // No relocs for an FDE means we cannot associate this FDE with an Atom,
+                        // so we skip it. According to the mold source code
+                        // (https://github.com/rui314/mold/blob/a3e69502b0eaf1126d6093e8ea5e6fdb95219811/src/input-files.cc#L525-L528)
+                        // this can happen for object files produced by the linker with the -r flag.
+ continue;
+ }
+ try self.fdes.append(allocator, .{
+ .offset = data_start + rec.offset,
+ .size = rec.size,
+ .cie_index = undefined,
+ .rel_index = rel_start + @as(u32, @intCast(rel_range.start)),
+ .rel_num = @as(u32, @intCast(rel_range.len)),
+ .input_section_index = shndx,
+ .file_index = self.index,
+ });
+ },
}
}
@@ -622,7 +579,7 @@ pub fn claimUnresolved(self: *Object, elf_file: *Elf) void {
}
}
-pub fn claimUnresolvedObject(self: *Object, elf_file: *Elf) void {
+pub fn claimUnresolvedRelocatable(self: *Object, elf_file: *Elf) void {
const first_global = self.first_global orelse return;
for (self.globals(), 0..) |*sym, i| {
const esym_index = @as(u32, @intCast(first_global + i));
@@ -985,21 +942,14 @@ pub fn initOutputSections(self: *Object, elf_file: *Elf) !void {
const atom_ptr = self.atom(atom_index) orelse continue;
if (!atom_ptr.alive) continue;
const shdr = atom_ptr.inputShdr(elf_file);
- _ = try self.initOutputSection(elf_file, shdr);
- }
-}
-
-pub fn addAtomsToOutputSections(self: *Object, elf_file: *Elf) !void {
- for (self.atoms_indexes.items) |atom_index| {
- const atom_ptr = self.atom(atom_index) orelse continue;
- if (!atom_ptr.alive) continue;
- const shdr = atom_ptr.inputShdr(elf_file);
- atom_ptr.output_section_index = self.initOutputSection(elf_file, shdr) catch unreachable;
-
- const comp = elf_file.base.comp;
- const gpa = comp.gpa;
- const atom_list = &elf_file.sections.items(.atom_list)[atom_ptr.output_section_index];
- try atom_list.append(gpa, .{ .index = atom_index, .file = self.index });
+ const osec = try elf_file.initOutputSection(.{
+ .name = self.getString(shdr.sh_name),
+ .flags = shdr.sh_flags,
+ .type = shdr.sh_type,
+ });
+ const atom_list = &elf_file.sections.items(.atom_list_2)[osec];
+ atom_list.output_section_index = osec;
+ try atom_list.atoms.append(elf_file.base.comp.gpa, atom_ptr.ref());
}
}
@@ -1007,9 +957,14 @@ pub fn initRelaSections(self: *Object, elf_file: *Elf) !void {
for (self.atoms_indexes.items) |atom_index| {
const atom_ptr = self.atom(atom_index) orelse continue;
if (!atom_ptr.alive) continue;
+ if (atom_ptr.output_section_index == elf_file.eh_frame_section_index) continue;
const shndx = atom_ptr.relocsShndx() orelse continue;
const shdr = self.shdrs.items[shndx];
- const out_shndx = try self.initOutputSection(elf_file, shdr);
+ const out_shndx = try elf_file.initOutputSection(.{
+ .name = self.getString(shdr.sh_name),
+ .flags = shdr.sh_flags,
+ .type = shdr.sh_type,
+ });
const out_shdr = &elf_file.sections.items(.shdr)[out_shndx];
out_shdr.sh_type = elf.SHT_RELA;
out_shdr.sh_addralign = @alignOf(elf.Elf64_Rela);
@@ -1022,10 +977,15 @@ pub fn addAtomsToRelaSections(self: *Object, elf_file: *Elf) !void {
for (self.atoms_indexes.items) |atom_index| {
const atom_ptr = self.atom(atom_index) orelse continue;
if (!atom_ptr.alive) continue;
+ if (atom_ptr.output_section_index == elf_file.eh_frame_section_index) continue;
const shndx = blk: {
const shndx = atom_ptr.relocsShndx() orelse continue;
const shdr = self.shdrs.items[shndx];
- break :blk self.initOutputSection(elf_file, shdr) catch unreachable;
+ break :blk elf_file.initOutputSection(.{
+ .name = self.getString(shdr.sh_name),
+ .flags = shdr.sh_flags,
+ .type = shdr.sh_type,
+ }) catch unreachable;
};
const slice = elf_file.sections.slice();
const shdr = &slice.items(.shdr)[shndx];
@@ -1538,12 +1498,12 @@ fn formatComdatGroups(
}
}
-pub fn fmtPath(self: *Object) std.fmt.Formatter(formatPath) {
+pub fn fmtPath(self: Object) std.fmt.Formatter(formatPath) {
return .{ .data = self };
}
fn formatPath(
- object: *Object,
+ object: Object,
comptime unused_fmt_string: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
@@ -1578,6 +1538,7 @@ const mem = std.mem;
const Allocator = mem.Allocator;
const Archive = @import("Archive.zig");
const Atom = @import("Atom.zig");
+const AtomList = @import("AtomList.zig");
const Cie = eh_frame.Cie;
const Elf = @import("../Elf.zig");
const Fde = eh_frame.Fde;
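
Reviewer note: the initOutputSection body removed from Object.zig (its replacement now lives on elf_file.initOutputSection) coalesces input section names by well-known prefix, so ".text.main" and ".text.startup" both land in the ".text" output section. A self-contained sketch of just the name-mapping step, assuming the same prefix table as the removed code:

    const std = @import("std");

    const prefixes = [_][]const u8{
        ".text",       ".data.rel.ro",      ".data",       ".rodata",
        ".bss.rel.ro", ".bss",              ".init_array", ".fini_array",
        ".tbss",       ".tdata",            ".gcc_except_table",
        ".ctors",      ".dtors",            ".gnu.warning",
    };

    fn outputSectionName(name: []const u8) []const u8 {
        for (prefixes) |prefix| {
            // Match the prefix exactly, or followed by a '.' separator,
            // so ".textual" does not collapse into ".text".
            if (std.mem.eql(u8, name, prefix)) return prefix;
            if (std.mem.startsWith(u8, name, prefix) and
                name.len > prefix.len and name[prefix.len] == '.') return prefix;
        }
        return name; // unknown names pass through unchanged
    }

    test "outputSectionName" {
        try std.testing.expectEqualStrings(".text", outputSectionName(".text.main"));
        try std.testing.expectEqualStrings(".data.rel.ro", outputSectionName(".data.rel.ro.foo"));
        try std.testing.expectEqualStrings(".custom", outputSectionName(".custom"));
    }
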
diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig
index 549657800c..0dc4bd9dae 100644
--- a/src/link/Elf/ZigObject.zig
+++ b/src/link/Elf/ZigObject.zig
@@ -51,6 +51,14 @@ debug_loclists_section_dirty: bool = false,
debug_rnglists_section_dirty: bool = false,
eh_frame_section_dirty: bool = false,
+text_index: ?Symbol.Index = null,
+rodata_index: ?Symbol.Index = null,
+data_relro_index: ?Symbol.Index = null,
+data_index: ?Symbol.Index = null,
+bss_index: ?Symbol.Index = null,
+tdata_index: ?Symbol.Index = null,
+tbss_index: ?Symbol.Index = null,
+eh_frame_index: ?Symbol.Index = null,
debug_info_index: ?Symbol.Index = null,
debug_abbrev_index: ?Symbol.Index = null,
debug_aranges_index: ?Symbol.Index = null,
@@ -59,7 +67,6 @@ debug_line_index: ?Symbol.Index = null,
debug_line_str_index: ?Symbol.Index = null,
debug_loclists_index: ?Symbol.Index = null,
debug_rnglists_index: ?Symbol.Index = null,
-eh_frame_index: ?Symbol.Index = null,
pub const global_symbol_bit: u32 = 0x80000000;
pub const symbol_mask: u32 = 0x7fffffff;
@@ -71,6 +78,7 @@ const InitOptions = struct {
};
pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void {
+ _ = options;
const comp = elf_file.base.comp;
const gpa = comp.gpa;
const ptr_size = elf_file.ptrWidthBytes();
@@ -88,190 +96,13 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void {
esym.st_shndx = elf.SHN_ABS;
}
- const fillSection = struct {
- fn fillSection(ef: *Elf, shdr: *elf.Elf64_Shdr, size: u64, phndx: ?u16) !void {
- if (ef.base.isRelocatable()) {
- const off = try ef.findFreeSpace(size, shdr.sh_addralign);
- shdr.sh_offset = off;
- shdr.sh_size = size;
- } else {
- const phdr = ef.phdrs.items[phndx.?];
- shdr.sh_addr = phdr.p_vaddr;
- shdr.sh_offset = phdr.p_offset;
- shdr.sh_size = phdr.p_memsz;
- }
- }
- }.fillSection;
-
- comptime assert(Elf.number_of_zig_segments == 4);
-
- if (!elf_file.base.isRelocatable()) {
- if (elf_file.phdr_zig_load_re_index == null) {
- const filesz = options.program_code_size_hint;
- const off = try elf_file.findFreeSpace(filesz, elf_file.page_size);
- elf_file.phdr_zig_load_re_index = try elf_file.addPhdr(.{
- .type = elf.PT_LOAD,
- .offset = off,
- .filesz = filesz,
- .addr = if (ptr_size >= 4) 0x4000000 else 0x4000,
- .memsz = filesz,
- .@"align" = elf_file.page_size,
- .flags = elf.PF_X | elf.PF_R | elf.PF_W,
- });
- }
-
- if (elf_file.phdr_zig_load_ro_index == null) {
- const alignment = elf_file.page_size;
- const filesz: u64 = 1024;
- const off = try elf_file.findFreeSpace(filesz, alignment);
- elf_file.phdr_zig_load_ro_index = try elf_file.addPhdr(.{
- .type = elf.PT_LOAD,
- .offset = off,
- .filesz = filesz,
- .addr = if (ptr_size >= 4) 0xc000000 else 0xa000,
- .memsz = filesz,
- .@"align" = alignment,
- .flags = elf.PF_R | elf.PF_W,
- });
- }
-
- if (elf_file.phdr_zig_load_rw_index == null) {
- const alignment = elf_file.page_size;
- const filesz: u64 = 1024;
- const off = try elf_file.findFreeSpace(filesz, alignment);
- elf_file.phdr_zig_load_rw_index = try elf_file.addPhdr(.{
- .type = elf.PT_LOAD,
- .offset = off,
- .filesz = filesz,
- .addr = if (ptr_size >= 4) 0x10000000 else 0xc000,
- .memsz = filesz,
- .@"align" = alignment,
- .flags = elf.PF_R | elf.PF_W,
- });
- }
-
- if (elf_file.phdr_zig_load_zerofill_index == null) {
- const alignment = elf_file.page_size;
- elf_file.phdr_zig_load_zerofill_index = try elf_file.addPhdr(.{
- .type = elf.PT_LOAD,
- .addr = if (ptr_size >= 4) 0x14000000 else 0xf000,
- .memsz = 1024,
- .@"align" = alignment,
- .flags = elf.PF_R | elf.PF_W,
- });
- }
- }
-
- if (elf_file.zig_text_section_index == null) {
- elf_file.zig_text_section_index = try elf_file.addSection(.{
- .name = try elf_file.insertShString(".text.zig"),
- .type = elf.SHT_PROGBITS,
- .flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR,
- .addralign = 1,
- .offset = std.math.maxInt(u64),
- });
- const shdr = &elf_file.sections.items(.shdr)[elf_file.zig_text_section_index.?];
- const phndx = &elf_file.sections.items(.phndx)[elf_file.zig_text_section_index.?];
- try fillSection(elf_file, shdr, options.program_code_size_hint, elf_file.phdr_zig_load_re_index);
- if (elf_file.base.isRelocatable()) {
- _ = try elf_file.addRelaShdr(
- try elf_file.insertShString(".rela.text.zig"),
- elf_file.zig_text_section_index.?,
- );
- } else {
- phndx.* = elf_file.phdr_zig_load_re_index.?;
- }
- }
-
- if (elf_file.zig_data_rel_ro_section_index == null) {
- elf_file.zig_data_rel_ro_section_index = try elf_file.addSection(.{
- .name = try elf_file.insertShString(".data.rel.ro.zig"),
- .type = elf.SHT_PROGBITS,
- .addralign = 1,
- .flags = elf.SHF_ALLOC | elf.SHF_WRITE,
- .offset = std.math.maxInt(u64),
- });
- const shdr = &elf_file.sections.items(.shdr)[elf_file.zig_data_rel_ro_section_index.?];
- const phndx = &elf_file.sections.items(.phndx)[elf_file.zig_data_rel_ro_section_index.?];
- try fillSection(elf_file, shdr, 1024, elf_file.phdr_zig_load_ro_index);
- if (elf_file.base.isRelocatable()) {
- _ = try elf_file.addRelaShdr(
- try elf_file.insertShString(".rela.data.rel.ro.zig"),
- elf_file.zig_data_rel_ro_section_index.?,
- );
- } else {
- phndx.* = elf_file.phdr_zig_load_ro_index.?;
- }
- }
-
- if (elf_file.zig_data_section_index == null) {
- elf_file.zig_data_section_index = try elf_file.addSection(.{
- .name = try elf_file.insertShString(".data.zig"),
- .type = elf.SHT_PROGBITS,
- .addralign = ptr_size,
- .flags = elf.SHF_ALLOC | elf.SHF_WRITE,
- .offset = std.math.maxInt(u64),
- });
- const shdr = &elf_file.sections.items(.shdr)[elf_file.zig_data_section_index.?];
- const phndx = &elf_file.sections.items(.phndx)[elf_file.zig_data_section_index.?];
- try fillSection(elf_file, shdr, 1024, elf_file.phdr_zig_load_rw_index);
- if (elf_file.base.isRelocatable()) {
- _ = try elf_file.addRelaShdr(
- try elf_file.insertShString(".rela.data.zig"),
- elf_file.zig_data_section_index.?,
- );
- } else {
- phndx.* = elf_file.phdr_zig_load_rw_index.?;
- }
- }
-
- if (elf_file.zig_bss_section_index == null) {
- elf_file.zig_bss_section_index = try elf_file.addSection(.{
- .name = try elf_file.insertShString(".bss.zig"),
- .type = elf.SHT_NOBITS,
- .addralign = ptr_size,
- .flags = elf.SHF_ALLOC | elf.SHF_WRITE,
- .offset = 0,
- });
- const shdr = &elf_file.sections.items(.shdr)[elf_file.zig_bss_section_index.?];
- const phndx = &elf_file.sections.items(.phndx)[elf_file.zig_bss_section_index.?];
- if (elf_file.base.isRelocatable()) {
- shdr.sh_size = 1024;
- } else {
- phndx.* = elf_file.phdr_zig_load_zerofill_index.?;
- const phdr = elf_file.phdrs.items[phndx.*.?];
- shdr.sh_addr = phdr.p_vaddr;
- shdr.sh_size = phdr.p_memsz;
- }
- }
-
switch (comp.config.debug_format) {
.strip => {},
.dwarf => |v| {
var dwarf = Dwarf.init(&elf_file.base, v);
- const addSectionSymbol = struct {
- fn addSectionSymbol(
- zig_object: *ZigObject,
- alloc: Allocator,
- name: [:0]const u8,
- alignment: Atom.Alignment,
- shndx: u32,
- ) !Symbol.Index {
- const name_off = try zig_object.addString(alloc, name);
- const index = try zig_object.newSymbolWithAtom(alloc, name_off);
- const sym = zig_object.symbol(index);
- const esym = &zig_object.symtab.items(.elf_sym)[sym.esym_index];
- esym.st_info |= elf.STT_SECTION;
- const atom_ptr = zig_object.atom(sym.ref.index).?;
- atom_ptr.alignment = alignment;
- atom_ptr.output_section_index = shndx;
- return index;
- }
- }.addSectionSymbol;
-
- if (elf_file.debug_str_section_index == null) {
- elf_file.debug_str_section_index = try elf_file.addSection(.{
+ if (self.debug_str_index == null) {
+ const osec = try elf_file.addSection(.{
.name = try elf_file.insertShString(".debug_str"),
.flags = elf.SHF_MERGE | elf.SHF_STRINGS,
.entsize = 1,
@@ -279,51 +110,56 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void {
.addralign = 1,
});
self.debug_str_section_dirty = true;
- self.debug_str_index = try addSectionSymbol(self, gpa, ".debug_str", .@"1", elf_file.debug_str_section_index.?);
+ self.debug_str_index = try self.addSectionSymbol(gpa, ".debug_str", .@"1", osec);
+ elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_str_index.?).ref;
}
- if (elf_file.debug_info_section_index == null) {
- elf_file.debug_info_section_index = try elf_file.addSection(.{
+ if (self.debug_info_index == null) {
+ const osec = try elf_file.addSection(.{
.name = try elf_file.insertShString(".debug_info"),
.type = elf.SHT_PROGBITS,
.addralign = 1,
});
self.debug_info_section_dirty = true;
- self.debug_info_index = try addSectionSymbol(self, gpa, ".debug_info", .@"1", elf_file.debug_info_section_index.?);
+ self.debug_info_index = try self.addSectionSymbol(gpa, ".debug_info", .@"1", osec);
+ elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_info_index.?).ref;
}
- if (elf_file.debug_abbrev_section_index == null) {
- elf_file.debug_abbrev_section_index = try elf_file.addSection(.{
+ if (self.debug_abbrev_index == null) {
+ const osec = try elf_file.addSection(.{
.name = try elf_file.insertShString(".debug_abbrev"),
.type = elf.SHT_PROGBITS,
.addralign = 1,
});
self.debug_abbrev_section_dirty = true;
- self.debug_abbrev_index = try addSectionSymbol(self, gpa, ".debug_abbrev", .@"1", elf_file.debug_abbrev_section_index.?);
+ self.debug_abbrev_index = try self.addSectionSymbol(gpa, ".debug_abbrev", .@"1", osec);
+ elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_abbrev_index.?).ref;
}
- if (elf_file.debug_aranges_section_index == null) {
- elf_file.debug_aranges_section_index = try elf_file.addSection(.{
+ if (self.debug_aranges_index == null) {
+ const osec = try elf_file.addSection(.{
.name = try elf_file.insertShString(".debug_aranges"),
.type = elf.SHT_PROGBITS,
.addralign = 16,
});
self.debug_aranges_section_dirty = true;
- self.debug_aranges_index = try addSectionSymbol(self, gpa, ".debug_aranges", .@"16", elf_file.debug_aranges_section_index.?);
+ self.debug_aranges_index = try self.addSectionSymbol(gpa, ".debug_aranges", .@"16", osec);
+ elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_aranges_index.?).ref;
}
- if (elf_file.debug_line_section_index == null) {
- elf_file.debug_line_section_index = try elf_file.addSection(.{
+ if (self.debug_line_index == null) {
+ const osec = try elf_file.addSection(.{
.name = try elf_file.insertShString(".debug_line"),
.type = elf.SHT_PROGBITS,
.addralign = 1,
});
self.debug_line_section_dirty = true;
- self.debug_line_index = try addSectionSymbol(self, gpa, ".debug_line", .@"1", elf_file.debug_line_section_index.?);
+ self.debug_line_index = try self.addSectionSymbol(gpa, ".debug_line", .@"1", osec);
+ elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_line_index.?).ref;
}
- if (elf_file.debug_line_str_section_index == null) {
- elf_file.debug_line_str_section_index = try elf_file.addSection(.{
+ if (self.debug_line_str_index == null) {
+ const osec = try elf_file.addSection(.{
.name = try elf_file.insertShString(".debug_line_str"),
.flags = elf.SHF_MERGE | elf.SHF_STRINGS,
.entsize = 1,
@@ -331,31 +167,34 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void {
.addralign = 1,
});
self.debug_line_str_section_dirty = true;
- self.debug_line_str_index = try addSectionSymbol(self, gpa, ".debug_line_str", .@"1", elf_file.debug_line_str_section_index.?);
+ self.debug_line_str_index = try self.addSectionSymbol(gpa, ".debug_line_str", .@"1", osec);
+ elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_line_str_index.?).ref;
}
- if (elf_file.debug_loclists_section_index == null) {
- elf_file.debug_loclists_section_index = try elf_file.addSection(.{
+ if (self.debug_loclists_index == null) {
+ const osec = try elf_file.addSection(.{
.name = try elf_file.insertShString(".debug_loclists"),
.type = elf.SHT_PROGBITS,
.addralign = 1,
});
self.debug_loclists_section_dirty = true;
- self.debug_loclists_index = try addSectionSymbol(self, gpa, ".debug_loclists", .@"1", elf_file.debug_loclists_section_index.?);
+ self.debug_loclists_index = try self.addSectionSymbol(gpa, ".debug_loclists", .@"1", osec);
+ elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_loclists_index.?).ref;
}
- if (elf_file.debug_rnglists_section_index == null) {
- elf_file.debug_rnglists_section_index = try elf_file.addSection(.{
+ if (self.debug_rnglists_index == null) {
+ const osec = try elf_file.addSection(.{
.name = try elf_file.insertShString(".debug_rnglists"),
.type = elf.SHT_PROGBITS,
.addralign = 1,
});
self.debug_rnglists_section_dirty = true;
- self.debug_rnglists_index = try addSectionSymbol(self, gpa, ".debug_rnglists", .@"1", elf_file.debug_rnglists_section_index.?);
+ self.debug_rnglists_index = try self.addSectionSymbol(gpa, ".debug_rnglists", .@"1", osec);
+ elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_rnglists_index.?).ref;
}
- if (elf_file.eh_frame_section_index == null) {
- elf_file.eh_frame_section_index = try elf_file.addSection(.{
+ if (self.eh_frame_index == null) {
+ const osec = try elf_file.addSection(.{
.name = try elf_file.insertShString(".eh_frame"),
.type = if (elf_file.getTarget().cpu.arch == .x86_64)
elf.SHT_X86_64_UNWIND
@@ -365,7 +204,8 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void {
.addralign = ptr_size,
});
self.eh_frame_section_dirty = true;
- self.eh_frame_index = try addSectionSymbol(self, gpa, ".eh_frame", Atom.Alignment.fromNonzeroByteUnits(ptr_size), elf_file.eh_frame_section_index.?);
+ self.eh_frame_index = try self.addSectionSymbol(gpa, ".eh_frame", Atom.Alignment.fromNonzeroByteUnits(ptr_size), osec);
+ elf_file.sections.items(.last_atom)[osec] = self.symbol(self.eh_frame_index.?).ref;
}
try dwarf.initMetadata();
@@ -404,10 +244,6 @@ pub fn deinit(self: *ZigObject, allocator: Allocator) void {
meta.exports.deinit(allocator);
}
self.uavs.deinit(allocator);
-
- for (self.tls_variables.values()) |*tlv| {
- tlv.deinit(allocator);
- }
self.tls_variables.deinit(allocator);
if (self.dwarf) |*dwarf| {
@@ -499,12 +335,6 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi
const sym = self.symbol(sym_index);
const atom_ptr = self.atom(sym.ref.index).?;
if (!atom_ptr.alive) continue;
- const shndx = sym.outputShndx(elf_file).?;
- const shdr = elf_file.sections.items(.shdr)[shndx];
- const esym = &self.symtab.items(.elf_sym)[sym.esym_index];
- esym.st_size = shdr.sh_size;
- atom_ptr.size = shdr.sh_size;
- atom_ptr.alignment = Atom.Alignment.fromNonzeroByteUnits(shdr.sh_addralign);
log.debug("parsing relocs in {s}", .{sym.name(elf_file)});
@@ -665,14 +495,6 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi
}
}
}
-
- if (elf_file.base.isRelocatable() and relocs.items.len > 0) {
- const rela_sect_name = try std.fmt.allocPrintZ(gpa, ".rela{s}", .{elf_file.getShString(shdr.sh_name)});
- defer gpa.free(rela_sect_name);
- if (elf_file.sectionByName(rela_sect_name) == null) {
- _ = try elf_file.addRelaShdr(try elf_file.insertShString(rela_sect_name), shndx);
- }
- }
}
self.debug_abbrev_section_dirty = false;
@@ -835,7 +657,7 @@ pub fn claimUnresolved(self: *ZigObject, elf_file: *Elf) void {
}
}
-pub fn claimUnresolvedObject(self: ZigObject, elf_file: *Elf) void {
+pub fn claimUnresolvedRelocatable(self: ZigObject, elf_file: *Elf) void {
for (self.global_symbols.items, 0..) |index, i| {
const global = &self.symbols.items[index];
const esym = self.symtab.items(.elf_sym)[index];
@@ -990,21 +812,48 @@ pub fn writeAr(self: ZigObject, writer: anytype) !void {
try writer.writeAll(self.data.items);
}
+pub fn initRelaSections(self: *ZigObject, elf_file: *Elf) !void {
+ const gpa = elf_file.base.comp.gpa;
+ for (self.atoms_indexes.items) |atom_index| {
+ const atom_ptr = self.atom(atom_index) orelse continue;
+ if (!atom_ptr.alive) continue;
+ if (atom_ptr.output_section_index == elf_file.eh_frame_section_index) continue;
+ const rela_shndx = atom_ptr.relocsShndx() orelse continue;
+ // TODO this check will become obsolete when we rework our relocs mechanism at the ZigObject level
+ if (self.relocs.items[rela_shndx].items.len == 0) continue;
+ const out_shndx = atom_ptr.output_section_index;
+ const out_shdr = elf_file.sections.items(.shdr)[out_shndx];
+ if (out_shdr.sh_type == elf.SHT_NOBITS) continue;
+ const rela_sect_name = try std.fmt.allocPrintZ(gpa, ".rela{s}", .{
+ elf_file.getShString(out_shdr.sh_name),
+ });
+ defer gpa.free(rela_sect_name);
+ _ = elf_file.sectionByName(rela_sect_name) orelse
+ try elf_file.addRelaShdr(try elf_file.insertShString(rela_sect_name), out_shndx);
+ }
+}
+
pub fn addAtomsToRelaSections(self: *ZigObject, elf_file: *Elf) !void {
+ const gpa = elf_file.base.comp.gpa;
for (self.atoms_indexes.items) |atom_index| {
const atom_ptr = self.atom(atom_index) orelse continue;
if (!atom_ptr.alive) continue;
+ if (atom_ptr.output_section_index == elf_file.eh_frame_section_index) continue;
const rela_shndx = atom_ptr.relocsShndx() orelse continue;
// TODO this check will become obsolete when we rework our relocs mechanism at the ZigObject level
if (self.relocs.items[rela_shndx].items.len == 0) continue;
const out_shndx = atom_ptr.output_section_index;
const out_shdr = elf_file.sections.items(.shdr)[out_shndx];
if (out_shdr.sh_type == elf.SHT_NOBITS) continue;
- const out_rela_shndx = for (elf_file.sections.items(.shdr), 0..) |out_rela_shdr, out_rela_shndx| {
- if (out_rela_shdr.sh_type == elf.SHT_RELA and out_rela_shdr.sh_info == out_shndx) break out_rela_shndx;
- } else unreachable;
+ const rela_sect_name = try std.fmt.allocPrintZ(gpa, ".rela{s}", .{
+ elf_file.getShString(out_shdr.sh_name),
+ });
+ defer gpa.free(rela_sect_name);
+ const out_rela_shndx = elf_file.sectionByName(rela_sect_name).?;
+ const out_rela_shdr = &elf_file.sections.items(.shdr)[out_rela_shndx];
+ out_rela_shdr.sh_info = out_shndx;
+ out_rela_shdr.sh_link = elf_file.symtab_section_index.?;
const atom_list = &elf_file.sections.items(.atom_list)[out_rela_shndx];
- const gpa = elf_file.base.comp.gpa;
try atom_list.append(gpa, .{ .index = atom_index, .file = self.index });
}
}
@@ -1075,15 +924,7 @@ pub fn writeSymtab(self: ZigObject, elf_file: *Elf) void {
pub fn codeAlloc(self: *ZigObject, elf_file: *Elf, atom_index: Atom.Index) ![]u8 {
const gpa = elf_file.base.comp.gpa;
const atom_ptr = self.atom(atom_index).?;
- const shdr = &elf_file.sections.items(.shdr)[atom_ptr.output_section_index];
-
- if (shdr.sh_flags & elf.SHF_TLS != 0) {
- const tlv = self.tls_variables.get(atom_index).?;
- const code = try gpa.dupe(u8, tlv.code);
- return code;
- }
-
- const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value));
+ const file_offset = atom_ptr.offset(elf_file);
const size = std.math.cast(usize, atom_ptr.size) orelse return error.Overflow;
const code = try gpa.alloc(u8, size);
errdefer gpa.free(code);
@@ -1168,6 +1009,20 @@ pub fn lowerUav(
return .{ .mcv = .{ .load_symbol = metadata.symbol_index } };
}
+ const osec = if (self.data_relro_index) |sym_index|
+ self.symbol(sym_index).atom(elf_file).?.output_section_index
+ else osec: {
+ const osec = try elf_file.addSection(.{
+ .name = try elf_file.insertShString(".data.rel.ro"),
+ .type = elf.SHT_PROGBITS,
+ .addralign = 1,
+ .flags = elf.SHF_ALLOC | elf.SHF_WRITE,
+ .offset = std.math.maxInt(u64),
+ });
+ self.data_relro_index = try self.addSectionSymbol(gpa, ".data.rel.ro", .@"1", osec);
+ break :osec osec;
+ };
+
var name_buf: [32]u8 = undefined;
const name = std.fmt.bufPrint(&name_buf, "__anon_{d}", .{
@intFromEnum(uav),
@@ -1178,7 +1033,7 @@ pub fn lowerUav(
name,
val,
uav_alignment,
- elf_file.zig_data_rel_ro_section_index.?,
+ osec,
src_loc,
) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
@@ -1270,6 +1125,27 @@ pub fn getOrCreateMetadataForNav(
return gop.value_ptr.symbol_index;
}
+// FIXME: we always create an atom just to store size and alignment; however, this only holds for
+// sections that have a single atom, like the debug sections. It may be better to decouple this
+// concept from the atom.
+fn addSectionSymbol(
+ self: *ZigObject,
+ allocator: Allocator,
+ name: [:0]const u8,
+ alignment: Atom.Alignment,
+ shndx: u32,
+) !Symbol.Index {
+ const name_off = try self.addString(allocator, name);
+ const index = try self.newSymbolWithAtom(allocator, name_off);
+ const sym = self.symbol(index);
+ const esym = &self.symtab.items(.elf_sym)[sym.esym_index];
+ esym.st_info |= elf.STT_SECTION;
+ const atom_ptr = self.atom(sym.ref.index).?;
+ atom_ptr.alignment = alignment;
+ atom_ptr.output_section_index = shndx;
+ return index;
+}
+
fn getNavShdrIndex(
self: *ZigObject,
elf_file: *Elf,
@@ -1278,10 +1154,24 @@ fn getNavShdrIndex(
sym_index: Symbol.Index,
code: []const u8,
) error{OutOfMemory}!u32 {
+ const gpa = elf_file.base.comp.gpa;
+ const ptr_size = elf_file.ptrWidthBytes();
const ip = &zcu.intern_pool;
const any_non_single_threaded = elf_file.base.comp.config.any_non_single_threaded;
const nav_val = zcu.navValue(nav_index);
- if (ip.isFunctionType(nav_val.typeOf(zcu).toIntern())) return elf_file.zig_text_section_index.?;
+ if (ip.isFunctionType(nav_val.typeOf(zcu).toIntern())) {
+ if (self.text_index) |symbol_index|
+ return self.symbol(symbol_index).atom(elf_file).?.output_section_index;
+ const osec = try elf_file.addSection(.{
+ .type = elf.SHT_PROGBITS,
+ .flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR,
+ .name = try elf_file.insertShString(".text"),
+ .addralign = 1,
+ .offset = std.math.maxInt(u64),
+ });
+ self.text_index = try self.addSectionSymbol(gpa, ".text", .@"1", osec);
+ return osec;
+ }
const is_const, const is_threadlocal, const nav_init = switch (ip.indexToKey(nav_val.toIntern())) {
.variable => |variable| .{ false, variable.is_threadlocal, variable.init },
.@"extern" => |@"extern"| .{ @"extern".is_const, @"extern".is_threadlocal, .none },
@@ -1292,30 +1182,107 @@ fn getNavShdrIndex(
const is_bss = !has_relocs and for (code) |byte| {
if (byte != 0) break false;
} else true;
- if (is_bss) return elf_file.sectionByName(".tbss") orelse try elf_file.addSection(.{
- .type = elf.SHT_NOBITS,
+ if (is_bss) {
+ if (self.tbss_index) |symbol_index|
+ return self.symbol(symbol_index).atom(elf_file).?.output_section_index;
+ const osec = try elf_file.addSection(.{
+ .name = try elf_file.insertShString(".tbss"),
+ .flags = elf.SHF_ALLOC | elf.SHF_WRITE | elf.SHF_TLS,
+ .type = elf.SHT_NOBITS,
+ .addralign = 1,
+ });
+ self.tbss_index = try self.addSectionSymbol(gpa, ".tbss", .@"1", osec);
+ return osec;
+ }
+ if (self.tdata_index) |symbol_index|
+ return self.symbol(symbol_index).atom(elf_file).?.output_section_index;
+ const osec = try elf_file.addSection(.{
+ .type = elf.SHT_PROGBITS,
.flags = elf.SHF_ALLOC | elf.SHF_WRITE | elf.SHF_TLS,
- .name = try elf_file.insertShString(".tbss"),
+ .name = try elf_file.insertShString(".tdata"),
+ .addralign = 1,
.offset = std.math.maxInt(u64),
});
- return elf_file.sectionByName(".tdata") orelse try elf_file.addSection(.{
+ self.tdata_index = try self.addSectionSymbol(gpa, ".tdata", .@"1", osec);
+ return osec;
+ }
+ if (is_const) {
+ if (self.data_relro_index) |symbol_index|
+ return self.symbol(symbol_index).atom(elf_file).?.output_section_index;
+ const osec = try elf_file.addSection(.{
+ .name = try elf_file.insertShString(".data.rel.ro"),
.type = elf.SHT_PROGBITS,
- .flags = elf.SHF_ALLOC | elf.SHF_WRITE | elf.SHF_TLS,
- .name = try elf_file.insertShString(".tdata"),
+ .addralign = 1,
+ .flags = elf.SHF_ALLOC | elf.SHF_WRITE,
.offset = std.math.maxInt(u64),
});
+ self.data_relro_index = try self.addSectionSymbol(gpa, ".data.rel.ro", .@"1", osec);
+ return osec;
}
- if (is_const) return elf_file.zig_data_rel_ro_section_index.?;
if (nav_init != .none and Value.fromInterned(nav_init).isUndefDeep(zcu))
return switch (zcu.navFileScope(nav_index).mod.optimize_mode) {
- .Debug, .ReleaseSafe => elf_file.zig_data_section_index.?,
- .ReleaseFast, .ReleaseSmall => elf_file.zig_bss_section_index.?,
+ .Debug, .ReleaseSafe => {
+ if (self.data_index) |symbol_index|
+ return self.symbol(symbol_index).atom(elf_file).?.output_section_index;
+ const osec = try elf_file.addSection(.{
+ .name = try elf_file.insertShString(".data"),
+ .type = elf.SHT_PROGBITS,
+ .addralign = ptr_size,
+ .flags = elf.SHF_ALLOC | elf.SHF_WRITE,
+ .offset = std.math.maxInt(u64),
+ });
+ self.data_index = try self.addSectionSymbol(
+ gpa,
+ ".data",
+ Atom.Alignment.fromNonzeroByteUnits(ptr_size),
+ osec,
+ );
+ return osec;
+ },
+ .ReleaseFast, .ReleaseSmall => {
+ if (self.bss_index) |symbol_index|
+ return self.symbol(symbol_index).atom(elf_file).?.output_section_index;
+ const osec = try elf_file.addSection(.{
+ .type = elf.SHT_NOBITS,
+ .flags = elf.SHF_ALLOC | elf.SHF_WRITE,
+ .name = try elf_file.insertShString(".bss"),
+ .addralign = 1,
+ });
+ self.bss_index = try self.addSectionSymbol(gpa, ".bss", .@"1", osec);
+ return osec;
+ },
};
const is_bss = !has_relocs and for (code) |byte| {
if (byte != 0) break false;
} else true;
- if (is_bss) return elf_file.zig_bss_section_index.?;
- return elf_file.zig_data_section_index.?;
+ if (is_bss) {
+ if (self.bss_index) |symbol_index|
+ return self.symbol(symbol_index).atom(elf_file).?.output_section_index;
+ const osec = try elf_file.addSection(.{
+ .type = elf.SHT_NOBITS,
+ .flags = elf.SHF_ALLOC | elf.SHF_WRITE,
+ .name = try elf_file.insertShString(".bss"),
+ .addralign = 1,
+ });
+ self.bss_index = try self.addSectionSymbol(gpa, ".bss", .@"1", osec);
+ return osec;
+ }
+ if (self.data_index) |symbol_index|
+ return self.symbol(symbol_index).atom(elf_file).?.output_section_index;
+ const osec = try elf_file.addSection(.{
+ .name = try elf_file.insertShString(".data"),
+ .type = elf.SHT_PROGBITS,
+ .addralign = ptr_size,
+ .flags = elf.SHF_ALLOC | elf.SHF_WRITE,
+ .offset = std.math.maxInt(u64),
+ });
+ self.data_index = try self.addSectionSymbol(
+ gpa,
+ ".data",
+ Atom.Alignment.fromNonzeroByteUnits(ptr_size),
+ osec,
+ );
+ return osec;
}
fn updateNavCode(
@@ -1362,19 +1329,18 @@ fn updateNavCode(
const capacity = atom_ptr.capacity(elf_file);
const need_realloc = code.len > capacity or !required_alignment.check(@intCast(atom_ptr.value));
if (need_realloc) {
- try atom_ptr.grow(elf_file);
+ try self.growAtom(atom_ptr, elf_file);
log.debug("growing {} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), old_vaddr, atom_ptr.value });
if (old_vaddr != atom_ptr.value) {
sym.value = 0;
esym.st_value = 0;
}
} else if (code.len < old_size) {
- atom_ptr.shrink(elf_file);
+ // TODO shrink section size
}
} else {
- try atom_ptr.allocate(elf_file);
+ try self.allocateAtom(atom_ptr, elf_file);
errdefer self.freeNavMetadata(elf_file, sym_index);
-
sym.value = 0;
esym.st_value = 0;
}
@@ -1404,7 +1370,7 @@ fn updateNavCode(
const shdr = elf_file.sections.items(.shdr)[shdr_index];
if (shdr.sh_type != elf.SHT_NOBITS) {
- const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value));
+ const file_offset = atom_ptr.offset(elf_file);
try elf_file.base.file.?.pwriteAll(code, file_offset);
log.debug("writing {} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), file_offset, file_offset + code.len });
}
@@ -1433,15 +1399,11 @@ fn updateTlv(
const atom_ptr = sym.atom(elf_file).?;
const name_offset = try self.strtab.insert(gpa, nav.fqn.toSlice(ip));
- sym.value = 0;
- sym.name_offset = name_offset;
-
- atom_ptr.output_section_index = shndx;
atom_ptr.alive = true;
atom_ptr.name_offset = name_offset;
+ atom_ptr.output_section_index = shndx;
sym.name_offset = name_offset;
- esym.st_value = 0;
esym.st_name = name_offset;
esym.st_info = elf.STT_TLS;
esym.st_size = code.len;
@@ -1449,21 +1411,25 @@ fn updateTlv(
atom_ptr.alignment = required_alignment;
atom_ptr.size = code.len;
- self.navs.getPtr(nav_index).?.allocated = true;
+ const gop = try self.tls_variables.getOrPut(gpa, atom_ptr.atom_index);
+ assert(!gop.found_existing); // TODO incremental updates
- {
- const gop = try self.tls_variables.getOrPut(gpa, atom_ptr.atom_index);
- assert(!gop.found_existing); // TODO incremental updates
- gop.value_ptr.* = .{ .symbol_index = sym_index };
+ try self.allocateAtom(atom_ptr, elf_file);
+ sym.value = 0;
+ esym.st_value = 0;
- // We only store the data for the TLV if it's non-zerofill.
- if (elf_file.sections.items(.shdr)[shndx].sh_type != elf.SHT_NOBITS) {
- gop.value_ptr.code = try gpa.dupe(u8, code);
- }
- }
+ self.navs.getPtr(nav_index).?.allocated = true;
- const atom_list = &elf_file.sections.items(.atom_list)[atom_ptr.output_section_index];
- try atom_list.append(gpa, .{ .index = atom_ptr.atom_index, .file = self.index });
+ const shdr = elf_file.sections.items(.shdr)[shndx];
+ if (shdr.sh_type != elf.SHT_NOBITS) {
+ const file_offset = atom_ptr.offset(elf_file);
+ try elf_file.base.file.?.pwriteAll(code, file_offset);
+ log.debug("writing TLV {s} from 0x{x} to 0x{x}", .{
+ atom_ptr.name(elf_file),
+ file_offset,
+ file_offset + code.len,
+ });
+ }
}
pub fn updateFunc(
@@ -1558,6 +1524,19 @@ pub fn updateFunc(
self.symbol(sym_index).name(elf_file),
});
defer gpa.free(name);
+ const osec = if (self.text_index) |sect_sym_index|
+ self.symbol(sect_sym_index).atom(elf_file).?.output_section_index
+ else osec: {
+ const osec = try elf_file.addSection(.{
+ .name = try elf_file.insertShString(".text"),
+ .flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR,
+ .type = elf.SHT_PROGBITS,
+ .addralign = 1,
+ .offset = std.math.maxInt(u64),
+ });
+ self.text_index = try self.addSectionSymbol(gpa, ".text", .@"1", osec);
+ break :osec osec;
+ };
const name_off = try self.addString(gpa, name);
const tr_size = trampolineSize(elf_file.getTarget().cpu.arch);
const tr_sym_index = try self.newSymbolWithAtom(gpa, name_off);
@@ -1569,7 +1548,7 @@ pub fn updateFunc(
tr_atom_ptr.value = old_rva;
tr_atom_ptr.alive = true;
tr_atom_ptr.alignment = old_alignment;
- tr_atom_ptr.output_section_index = elf_file.zig_text_section_index.?;
+ tr_atom_ptr.output_section_index = osec;
tr_atom_ptr.size = tr_size;
const target_sym = self.symbol(sym_index);
target_sym.addExtra(.{ .trampoline = tr_sym_index }, elf_file);
@@ -1723,8 +1702,32 @@ fn updateLazySymbol(
};
const output_section_index = switch (sym.kind) {
- .code => elf_file.zig_text_section_index.?,
- .const_data => elf_file.zig_data_rel_ro_section_index.?,
+ .code => if (self.text_index) |sym_index|
+ self.symbol(sym_index).atom(elf_file).?.output_section_index
+ else osec: {
+ const osec = try elf_file.addSection(.{
+ .name = try elf_file.insertShString(".text"),
+ .type = elf.SHT_PROGBITS,
+ .addralign = 1,
+ .flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR,
+ .offset = std.math.maxInt(u64),
+ });
+ self.text_index = try self.addSectionSymbol(gpa, ".text", .@"1", osec);
+ break :osec osec;
+ },
+ .const_data => if (self.rodata_index) |sym_index|
+ self.symbol(sym_index).atom(elf_file).?.output_section_index
+ else osec: {
+ const osec = try elf_file.addSection(.{
+ .name = try elf_file.insertShString(".rodata"),
+ .type = elf.SHT_PROGBITS,
+ .addralign = 1,
+ .flags = elf.SHF_ALLOC,
+ .offset = std.math.maxInt(u64),
+ });
+ self.rodata_index = try self.addSectionSymbol(gpa, ".rodata", .@"1", osec);
+ break :osec osec;
+ },
};
const local_sym = self.symbol(symbol_index);
local_sym.name_offset = name_str_index;
@@ -1739,15 +1742,13 @@ fn updateLazySymbol(
atom_ptr.size = code.len;
atom_ptr.output_section_index = output_section_index;
- try atom_ptr.allocate(elf_file);
+ try self.allocateAtom(atom_ptr, elf_file);
errdefer self.freeNavMetadata(elf_file, symbol_index);
local_sym.value = 0;
local_esym.st_value = 0;
- const shdr = elf_file.sections.items(.shdr)[output_section_index];
- const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value));
- try elf_file.base.file.?.pwriteAll(code, file_offset);
+ try elf_file.base.file.?.pwriteAll(code, atom_ptr.offset(elf_file));
}
const LowerConstResult = union(enum) {
@@ -1797,13 +1798,10 @@ fn lowerConst(
atom_ptr.size = code.len;
atom_ptr.output_section_index = output_section_index;
- try atom_ptr.allocate(elf_file);
- // TODO rename and re-audit this method
+ try self.allocateAtom(atom_ptr, elf_file);
errdefer self.freeNavMetadata(elf_file, sym_index);
- const shdr = elf_file.sections.items(.shdr)[output_section_index];
- const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value));
- try elf_file.base.file.?.pwriteAll(code, file_offset);
+ try elf_file.base.file.?.pwriteAll(code, atom_ptr.offset(elf_file));
return .{ .ok = sym_index };
}
@@ -1965,8 +1963,7 @@ fn trampolineSize(cpu_arch: std.Target.Cpu.Arch) u64 {
fn writeTrampoline(tr_sym: Symbol, target: Symbol, elf_file: *Elf) !void {
const atom_ptr = tr_sym.atom(elf_file).?;
- const shdr = elf_file.sections.items(.shdr)[atom_ptr.output_section_index];
- const fileoff = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value));
+ const fileoff = atom_ptr.offset(elf_file);
const source_addr = tr_sym.address(.{}, elf_file);
const target_addr = target.address(.{ .trampoline = false }, elf_file);
var buf: [max_trampoline_len]u8 = undefined;
@@ -1998,6 +1995,80 @@ fn writeTrampoline(tr_sym: Symbol, target: Symbol, elf_file: *Elf) !void {
}
}
+fn allocateAtom(self: *ZigObject, atom_ptr: *Atom, elf_file: *Elf) !void {
+ const alloc_res = try elf_file.allocateChunk(.{
+ .shndx = atom_ptr.output_section_index,
+ .size = atom_ptr.size,
+ .alignment = atom_ptr.alignment,
+ });
+ atom_ptr.value = @intCast(alloc_res.value);
+
+ const slice = elf_file.sections.slice();
+ const shdr = &slice.items(.shdr)[atom_ptr.output_section_index];
+ const last_atom_ref = &slice.items(.last_atom)[atom_ptr.output_section_index];
+
+ const expand_section = if (elf_file.atom(alloc_res.placement)) |placement_atom|
+ placement_atom.nextAtom(elf_file) == null
+ else
+ true;
+ if (expand_section) {
+ last_atom_ref.* = atom_ptr.ref();
+ if (self.dwarf) |_| {
+            // The .debug_info section has `low_pc` and `high_pc` values, which describe the virtual
+            // address range of the compilation unit. When we expand the text section, this range
+            // changes, so the DW_TAG.compile_unit tag of the .debug_info section becomes dirty.
+ self.debug_info_section_dirty = true;
+ // This becomes dirty for the same reason. We could potentially make this more
+ // fine-grained with the addition of support for more compilation units. It is planned to
+ // model each package as a different compilation unit.
+ self.debug_aranges_section_dirty = true;
+ self.debug_rnglists_section_dirty = true;
+ }
+ }
+ shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits().?);
+
+ const sect_atom_ptr = for ([_]?Symbol.Index{
+ self.text_index,
+ self.rodata_index,
+ self.data_relro_index,
+ self.data_index,
+ self.tdata_index,
+ }) |maybe_sym_index| {
+ const sect_sym_index = maybe_sym_index orelse continue;
+ const sect_atom_ptr = self.symbol(sect_sym_index).atom(elf_file).?;
+ if (sect_atom_ptr.output_section_index == atom_ptr.output_section_index) break sect_atom_ptr;
+ } else null;
+ if (sect_atom_ptr) |sap| {
+ sap.size = shdr.sh_size;
+ sap.alignment = Atom.Alignment.fromNonzeroByteUnits(shdr.sh_addralign);
+ }
+
+    // This function can also reallocate an atom.
+    // In that case we need to "unplug" it from its previous location before
+    // plugging it into its new location.
+ if (atom_ptr.prevAtom(elf_file)) |prev| {
+ prev.next_atom_ref = atom_ptr.next_atom_ref;
+ }
+ if (atom_ptr.nextAtom(elf_file)) |next| {
+ next.prev_atom_ref = atom_ptr.prev_atom_ref;
+ }
+
+ if (elf_file.atom(alloc_res.placement)) |big_atom| {
+ atom_ptr.prev_atom_ref = alloc_res.placement;
+ atom_ptr.next_atom_ref = big_atom.next_atom_ref;
+ big_atom.next_atom_ref = atom_ptr.ref();
+ } else {
+ atom_ptr.prev_atom_ref = .{ .index = 0, .file = 0 };
+ atom_ptr.next_atom_ref = .{ .index = 0, .file = 0 };
+ }
+}
+
+fn growAtom(self: *ZigObject, atom_ptr: *Atom, elf_file: *Elf) !void {
+ if (!atom_ptr.alignment.check(@intCast(atom_ptr.value)) or atom_ptr.size > atom_ptr.capacity(elf_file)) {
+ try self.allocateAtom(atom_ptr, elf_file);
+ }
+}
+
pub fn asFile(self: *ZigObject) File {
return .{ .zig_object = self };
}
@@ -2271,7 +2342,7 @@ const AtomList = std.ArrayListUnmanaged(Atom.Index);
const NavTable = std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, AvMetadata);
const UavTable = std.AutoArrayHashMapUnmanaged(InternPool.Index, AvMetadata);
const LazySymbolTable = std.AutoArrayHashMapUnmanaged(InternPool.Index, LazySymbolMetadata);
-const TlsTable = std.AutoArrayHashMapUnmanaged(Atom.Index, TlsVariable);
+const TlsTable = std.AutoArrayHashMapUnmanaged(Atom.Index, void);
const x86_64 = struct {
fn writeTrampolineCode(source_addr: i64, target_addr: i64, buf: *[max_trampoline_len]u8) ![]u8 {
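
Reviewer note: allocateAtom above keeps atoms in a per-section doubly-linked chain: a reallocated atom is first unplugged from its old neighbors and then spliced in after its new placement. A minimal sketch of those two pointer operations, with Node standing in for Atom and optional pointers standing in for prev_atom_ref/next_atom_ref:

    const std = @import("std");

    const Node = struct {
        prev: ?*Node = null,
        next: ?*Node = null,

        // "Unplug" from the previous location: the neighbors bypass this node.
        fn unplug(self: *Node) void {
            if (self.prev) |p| p.next = self.next;
            if (self.next) |n| n.prev = self.prev;
            self.prev = null;
            self.next = null;
        }

        // Splice in right after `placement`, as after the placement atom above.
        fn insertAfter(self: *Node, placement: *Node) void {
            self.prev = placement;
            self.next = placement.next;
            if (placement.next) |n| n.prev = self;
            placement.next = self;
        }
    };

    test "unplug and replug" {
        var a: Node = .{};
        var b: Node = .{};
        var c: Node = .{};
        a.next = &b;
        b.prev = &a;
        b.next = &c;
        c.prev = &b;
        b.unplug(); // chain is now a <-> c
        b.insertAfter(&c); // chain is now a <-> c <-> b
        try std.testing.expect(a.next == &c and c.prev == &a);
        try std.testing.expect(c.next == &b and b.prev == &c);
    }
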
diff --git a/src/link/Elf/eh_frame.zig b/src/link/Elf/eh_frame.zig
index d660486010..b520c94aaf 100644
--- a/src/link/Elf/eh_frame.zig
+++ b/src/link/Elf/eh_frame.zig
@@ -233,7 +233,10 @@ pub fn calcEhFrameSize(elf_file: *Elf) !usize {
const comp = elf_file.base.comp;
const gpa = comp.gpa;
- var offset: usize = 0;
+ var offset: usize = if (elf_file.zigObjectPtr()) |zo| blk: {
+ const sym = zo.symbol(zo.eh_frame_index orelse break :blk 0);
+ break :blk math.cast(usize, sym.atom(elf_file).?.size) orelse return error.Overflow;
+ } else 0;
var cies = std.ArrayList(Cie).init(gpa);
defer cies.deinit();
@@ -288,6 +291,13 @@ pub fn calcEhFrameHdrSize(elf_file: *Elf) usize {
pub fn calcEhFrameRelocs(elf_file: *Elf) usize {
var count: usize = 0;
+ if (elf_file.zigObjectPtr()) |zo| zo: {
+ const sym_index = zo.eh_frame_index orelse break :zo;
+ const sym = zo.symbol(sym_index);
+ const atom_ptr = zo.atom(sym.ref.index).?;
+ if (!atom_ptr.alive) break :zo;
+ count += atom_ptr.relocs(elf_file).len;
+ }
for (elf_file.objects.items) |index| {
const object = elf_file.file(index).?.object;
for (object.cies.items) |cie| {
@@ -386,7 +396,7 @@ pub fn writeEhFrame(elf_file: *Elf, writer: anytype) !void {
if (has_reloc_errors) return error.RelocFailure;
}
-pub fn writeEhFrameObject(elf_file: *Elf, writer: anytype) !void {
+pub fn writeEhFrameRelocatable(elf_file: *Elf, writer: anytype) !void {
for (elf_file.objects.items) |index| {
const object = elf_file.file(index).?.object;
@@ -416,9 +426,8 @@ pub fn writeEhFrameObject(elf_file: *Elf, writer: anytype) !void {
}
}
-fn emitReloc(elf_file: *Elf, rec: anytype, sym: *const Symbol, rel: elf.Elf64_Rela) elf.Elf64_Rela {
+fn emitReloc(elf_file: *Elf, r_offset: u64, sym: *const Symbol, rel: elf.Elf64_Rela) elf.Elf64_Rela {
const cpu_arch = elf_file.getTarget().cpu.arch;
- const r_offset = rec.address(elf_file) + rel.r_offset - rec.offset;
const r_type = rel.r_type();
var r_addend = rel.r_addend;
var r_sym: u32 = 0;
@@ -452,6 +461,19 @@ pub fn writeEhFrameRelocs(elf_file: *Elf, writer: anytype) !void {
elf_file.sections.items(.shdr)[elf_file.eh_frame_section_index.?].sh_addr,
});
+ if (elf_file.zigObjectPtr()) |zo| zo: {
+ const sym_index = zo.eh_frame_index orelse break :zo;
+ const sym = zo.symbol(sym_index);
+ const atom_ptr = zo.atom(sym.ref.index).?;
+ if (!atom_ptr.alive) break :zo;
+ for (atom_ptr.relocs(elf_file)) |rel| {
+ const ref = zo.resolveSymbol(rel.r_sym(), elf_file);
+ const target = elf_file.symbol(ref).?;
+ const out_rel = emitReloc(elf_file, rel.r_offset, target, rel);
+ try writer.writeStruct(out_rel);
+ }
+ }
+
for (elf_file.objects.items) |index| {
const object = elf_file.file(index).?.object;
@@ -460,7 +482,8 @@ pub fn writeEhFrameRelocs(elf_file: *Elf, writer: anytype) !void {
for (cie.relocs(elf_file)) |rel| {
const ref = object.resolveSymbol(rel.r_sym(), elf_file);
const sym = elf_file.symbol(ref).?;
- const out_rel = emitReloc(elf_file, cie, sym, rel);
+ const r_offset = cie.address(elf_file) + rel.r_offset - cie.offset;
+ const out_rel = emitReloc(elf_file, r_offset, sym, rel);
try writer.writeStruct(out_rel);
}
}
@@ -470,7 +493,8 @@ pub fn writeEhFrameRelocs(elf_file: *Elf, writer: anytype) !void {
for (fde.relocs(elf_file)) |rel| {
const ref = object.resolveSymbol(rel.r_sym(), elf_file);
const sym = elf_file.symbol(ref).?;
- const out_rel = emitReloc(elf_file, fde, sym, rel);
+ const r_offset = fde.address(elf_file) + rel.r_offset - fde.offset;
+ const out_rel = emitReloc(elf_file, r_offset, sym, rel);
try writer.writeStruct(out_rel);
}
}
diff --git a/src/link/Elf/relocatable.zig b/src/link/Elf/relocatable.zig
index 8f5ea8e25b..5fc0d5790b 100644
--- a/src/link/Elf/relocatable.zig
+++ b/src/link/Elf/relocatable.zig
@@ -18,7 +18,7 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation, module_obj_path: ?[]co
}
for (positionals.items) |obj| {
- parsePositional(elf_file, obj.path) catch |err| switch (err) {
+ parsePositionalStaticLib(elf_file, obj.path) catch |err| switch (err) {
error.MalformedObject,
error.MalformedArchive,
error.InvalidMachineType,
@@ -38,17 +38,12 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation, module_obj_path: ?[]co
// First, we flush the relocatable object file generated with our backends.
if (elf_file.zigObjectPtr()) |zig_object| {
try zig_object.resolveSymbols(elf_file);
+ elf_file.markEhFrameAtomsDead();
try elf_file.addCommentString();
try elf_file.finalizeMergeSections();
- zig_object.claimUnresolvedObject(elf_file);
+ zig_object.claimUnresolvedRelocatable(elf_file);
- for (elf_file.merge_sections.items) |*msec| {
- if (msec.finalized_subsections.items.len == 0) continue;
- try msec.initOutputSection(elf_file);
- }
-
- try elf_file.initSymtab();
- try elf_file.initShStrtab();
+ try initSections(elf_file);
try elf_file.sortShdrs();
try zig_object.addAtomsToRelaSections(elf_file);
try elf_file.updateMergeSectionSizes();
@@ -208,7 +203,6 @@ pub fn flushObject(elf_file: *Elf, comp: *Compilation, module_obj_path: ?[]const
}
for (elf_file.objects.items) |index| {
const object = elf_file.file(index).?.object;
- try object.addAtomsToOutputSections(elf_file);
try object.addAtomsToRelaSections(elf_file);
}
try elf_file.updateMergeSectionSizes();
@@ -230,17 +224,17 @@ pub fn flushObject(elf_file: *Elf, comp: *Compilation, module_obj_path: ?[]const
if (elf_file.base.hasErrors()) return error.FlushFailure;
}
-fn parsePositional(elf_file: *Elf, path: []const u8) Elf.ParseError!void {
+fn parsePositionalStaticLib(elf_file: *Elf, path: []const u8) Elf.ParseError!void {
if (try Object.isObject(path)) {
- try parseObject(elf_file, path);
+ try parseObjectStaticLib(elf_file, path);
} else if (try Archive.isArchive(path)) {
- try parseArchive(elf_file, path);
+ try parseArchiveStaticLib(elf_file, path);
} else return error.UnknownFileType;
// TODO: should we check for LD script?
// Actually, should we even unpack an archive?
}
-fn parseObject(elf_file: *Elf, path: []const u8) Elf.ParseError!void {
+fn parseObjectStaticLib(elf_file: *Elf, path: []const u8) Elf.ParseError!void {
const gpa = elf_file.base.comp.gpa;
const handle = try std.fs.cwd().openFile(path, .{});
const fh = try elf_file.addFileHandle(handle);
@@ -257,7 +251,7 @@ fn parseObject(elf_file: *Elf, path: []const u8) Elf.ParseError!void {
try object.parseAr(elf_file);
}
-fn parseArchive(elf_file: *Elf, path: []const u8) Elf.ParseError!void {
+fn parseArchiveStaticLib(elf_file: *Elf, path: []const u8) Elf.ParseError!void {
const gpa = elf_file.base.comp.gpa;
const handle = try std.fs.cwd().openFile(path, .{});
const fh = try elf_file.addFileHandle(handle);
@@ -281,14 +275,17 @@ fn parseArchive(elf_file: *Elf, path: []const u8) Elf.ParseError!void {
fn claimUnresolved(elf_file: *Elf) void {
if (elf_file.zigObjectPtr()) |zig_object| {
- zig_object.claimUnresolvedObject(elf_file);
+ zig_object.claimUnresolvedRelocatable(elf_file);
}
for (elf_file.objects.items) |index| {
- elf_file.file(index).?.object.claimUnresolvedObject(elf_file);
+ elf_file.file(index).?.object.claimUnresolvedRelocatable(elf_file);
}
}
fn initSections(elf_file: *Elf) !void {
+ if (elf_file.zigObjectPtr()) |zo| {
+ try zo.initRelaSections(elf_file);
+ }
for (elf_file.objects.items) |index| {
const object = elf_file.file(index).?.object;
try object.initOutputSections(elf_file);
@@ -300,12 +297,17 @@ fn initSections(elf_file: *Elf) !void {
try msec.initOutputSection(elf_file);
}
- const needs_eh_frame = for (elf_file.objects.items) |index| {
- if (elf_file.file(index).?.object.cies.items.len > 0) break true;
- } else false;
+ const needs_eh_frame = blk: {
+ if (elf_file.zigObjectPtr()) |zo|
+ if (zo.eh_frame_index != null) break :blk true;
+ break :blk for (elf_file.objects.items) |index| {
+ if (elf_file.file(index).?.object.cies.items.len > 0) break true;
+ } else false;
+ };
if (needs_eh_frame) {
if (elf_file.eh_frame_section_index == null) {
- elf_file.eh_frame_section_index = try elf_file.addSection(.{
+ elf_file.eh_frame_section_index = elf_file.sectionByName(".eh_frame") orelse
+ try elf_file.addSection(.{
.name = try elf_file.insertShString(".eh_frame"),
.type = if (elf_file.getTarget().cpu.arch == .x86_64)
elf.SHT_X86_64_UNWIND
@@ -316,7 +318,8 @@ fn initSections(elf_file: *Elf) !void {
.offset = std.math.maxInt(u64),
});
}
- elf_file.eh_frame_rela_section_index = try elf_file.addRelaShdr(
+ elf_file.eh_frame_rela_section_index = elf_file.sectionByName(".rela.eh_frame") orelse
+ try elf_file.addRelaShdr(
try elf_file.insertShString(".rela.eh_frame"),
elf_file.eh_frame_section_index.?,
);
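
Both section lookups now go through sectionByName before creating anything, because in incremental mode .eh_frame and .rela.eh_frame may survive from a previous flush. A sketch of the lookup-or-create pattern, with a string-keyed map standing in for the section table (assumed shapes, not the linker's API):

    const std = @import("std");

    fn getOrCreateSection(map: *std.StringHashMap(u32), name: []const u8, next_index: *u32) !u32 {
        if (map.get(name)) |index| return index; // reuse a section from a prior flush
        const index = next_index.*;
        next_index.* += 1;
        try map.put(name, index);
        return index;
    }

    test "a section is created once, then reused" {
        var map = std.StringHashMap(u32).init(std.testing.allocator);
        defer map.deinit();
        var next: u32 = 1;
        const first = try getOrCreateSection(&map, ".eh_frame", &next);
        const second = try getOrCreateSection(&map, ".eh_frame", &next);
        try std.testing.expectEqual(first, second);
    }
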
@@ -351,36 +354,28 @@ fn initComdatGroups(elf_file: *Elf) !void {
fn updateSectionSizes(elf_file: *Elf) !void {
const slice = elf_file.sections.slice();
+ for (slice.items(.atom_list_2)) |*atom_list| {
+ if (atom_list.atoms.items.len == 0) continue;
+ atom_list.updateSize(elf_file);
+ try atom_list.allocate(elf_file);
+ }
+
for (slice.items(.shdr), 0..) |*shdr, shndx| {
const atom_list = slice.items(.atom_list)[shndx];
- if (shdr.sh_type != elf.SHT_RELA) {
- for (atom_list.items) |ref| {
- const atom_ptr = elf_file.atom(ref) orelse continue;
- if (!atom_ptr.alive) continue;
- const offset = atom_ptr.alignment.forward(shdr.sh_size);
- const padding = offset - shdr.sh_size;
- atom_ptr.value = @intCast(offset);
- shdr.sh_size += padding + atom_ptr.size;
- shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits() orelse 1);
- }
- } else {
- for (atom_list.items) |ref| {
- const atom_ptr = elf_file.atom(ref) orelse continue;
- if (!atom_ptr.alive) continue;
- const relocs = atom_ptr.relocs(elf_file);
- shdr.sh_size += shdr.sh_entsize * relocs.len;
- }
-
- if (shdr.sh_size == 0) shdr.sh_offset = 0;
+ if (shdr.sh_type != elf.SHT_RELA) continue;
+ if (@as(u32, @intCast(shndx)) == elf_file.eh_frame_section_index) continue;
+ for (atom_list.items) |ref| {
+ const atom_ptr = elf_file.atom(ref) orelse continue;
+ if (!atom_ptr.alive) continue;
+ const relocs = atom_ptr.relocs(elf_file);
+ shdr.sh_size += shdr.sh_entsize * relocs.len;
}
+
+ if (shdr.sh_size == 0) shdr.sh_offset = 0;
}
if (elf_file.eh_frame_section_index) |index| {
- slice.items(.shdr)[index].sh_size = existing_size: {
- const zo = elf_file.zigObjectPtr() orelse break :existing_size 0;
- const sym = zo.symbol(zo.eh_frame_index orelse break :existing_size 0);
- break :existing_size sym.atom(elf_file).?.size;
- } + try eh_frame.calcEhFrameSize(elf_file);
+ slice.items(.shdr)[index].sh_size = try eh_frame.calcEhFrameSize(elf_file);
}
if (elf_file.eh_frame_rela_section_index) |index| {
const shdr = &slice.items(.shdr)[index];
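
The per-atom placement loop that used to live here (align the running sh_size forward, place the atom, grow the section) moved behind atom_list.updateSize/allocate. The arithmetic it performs, as a standalone sketch:

    const std = @import("std");

    // Place one atom at the next offset satisfying its alignment and report
    // the section's new size (assumed shapes).
    fn appendAtom(sh_size: u64, atom_size: u64, atom_align: u64) struct { value: u64, new_size: u64 } {
        const offset = std.mem.alignForward(u64, sh_size, atom_align);
        return .{ .value = offset, .new_size = offset + atom_size };
    }

    test "atoms are packed with alignment padding" {
        const first = appendAtom(0, 10, 8);
        try std.testing.expectEqual(@as(u64, 0), first.value);
        const second = appendAtom(first.new_size, 4, 8);
        try std.testing.expectEqual(@as(u64, 16), second.value); // 10 rounded up to 16
        try std.testing.expectEqual(@as(u64, 20), second.new_size);
    }
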
@@ -405,7 +400,7 @@ fn updateComdatGroupsSizes(elf_file: *Elf) void {
/// Allocates alloc sections when merging relocatable object files together.
fn allocateAllocSections(elf_file: *Elf) !void {
- for (elf_file.sections.items(.shdr)) |*shdr| {
+ for (elf_file.sections.items(.shdr), 0..) |*shdr, shndx| {
if (shdr.sh_type == elf.SHT_NULL) continue;
if (shdr.sh_flags & elf.SHF_ALLOC == 0) continue;
if (shdr.sh_type == elf.SHT_NOBITS) {
@@ -416,6 +411,34 @@ fn allocateAllocSections(elf_file: *Elf) !void {
if (needed_size > elf_file.allocatedSize(shdr.sh_offset)) {
shdr.sh_size = 0;
const new_offset = try elf_file.findFreeSpace(needed_size, shdr.sh_addralign);
+
+ if (elf_file.zigObjectPtr()) |zo| blk: {
+ const existing_size = for ([_]?Symbol.Index{
+ zo.text_index,
+ zo.rodata_index,
+ zo.data_relro_index,
+ zo.data_index,
+ zo.tdata_index,
+ zo.eh_frame_index,
+ }) |maybe_sym_index| {
+ const sect_sym_index = maybe_sym_index orelse continue;
+ const sect_atom_ptr = zo.symbol(sect_sym_index).atom(elf_file).?;
+ if (sect_atom_ptr.output_section_index == shndx) break sect_atom_ptr.size;
+ } else break :blk;
+ log.debug("moving {s} from 0x{x} to 0x{x}", .{
+ elf_file.getShString(shdr.sh_name),
+ shdr.sh_offset,
+ new_offset,
+ });
+ const amt = try elf_file.base.file.?.copyRangeAll(
+ shdr.sh_offset,
+ elf_file.base.file.?,
+ new_offset,
+ existing_size,
+ );
+ if (amt != existing_size) return error.InputOutput;
+ }
+
shdr.sh_offset = new_offset;
shdr.sh_size = needed_size;
}
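
When a section has to move to a larger free span, the new hunk copies the ZigObject's live bytes to the new offset before sh_offset is updated; otherwise incrementally written code would be left behind at the old location. A sketch of that move using the same copyRangeAll primitive (the file layout here is made up):

    const std = @import("std");

    // Relocate `len` live bytes of a section within one file.
    fn moveSection(file: std.fs.File, old_offset: u64, new_offset: u64, len: u64) !void {
        const amt = try file.copyRangeAll(old_offset, file, new_offset, len);
        if (amt != len) return error.InputOutput;
    }

    test "moving a section preserves its bytes" {
        var tmp = std.testing.tmpDir(.{});
        defer tmp.cleanup();
        const file = try tmp.dir.createFile("section.bin", .{ .read = true });
        defer file.close();
        try file.pwriteAll("live-bytes", 0);
        try moveSection(file, 0, 64, 10);
        var buf: [10]u8 = undefined;
        _ = try file.preadAll(&buf, 64);
        try std.testing.expectEqualStrings("live-bytes", &buf);
    }
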
@@ -424,73 +447,15 @@ fn allocateAllocSections(elf_file: *Elf) !void {
fn writeAtoms(elf_file: *Elf) !void {
const gpa = elf_file.base.comp.gpa;
- const slice = elf_file.sections.slice();
-
- // TODO iterate over `output_sections` directly
- for (slice.items(.shdr), slice.items(.atom_list), 0..) |shdr, atom_list, shndx| {
- if (shdr.sh_type == elf.SHT_NULL) continue;
- if (shdr.sh_type == elf.SHT_NOBITS) continue;
- if (shdr.sh_type == elf.SHT_RELA) continue;
- if (atom_list.items.len == 0) continue;
-
- log.debug("writing atoms in '{s}' section", .{elf_file.getShString(shdr.sh_name)});
-
- // TODO really, really handle debug section separately
- const base_offset = if (elf_file.isDebugSection(@intCast(shndx))) blk: {
- const zo = elf_file.zigObjectPtr().?;
- break :blk for ([_]Symbol.Index{
- zo.debug_info_index.?,
- zo.debug_abbrev_index.?,
- zo.debug_aranges_index.?,
- zo.debug_str_index.?,
- zo.debug_line_index.?,
- zo.debug_line_str_index.?,
- zo.debug_loclists_index.?,
- zo.debug_rnglists_index.?,
- }) |sym_index| {
- const sym = zo.symbol(sym_index);
- const atom_ptr = sym.atom(elf_file).?;
- if (atom_ptr.output_section_index == shndx) break atom_ptr.size;
- } else 0;
- } else 0;
- const sh_offset = shdr.sh_offset + base_offset;
- const sh_size = math.cast(usize, shdr.sh_size - base_offset) orelse return error.Overflow;
-
- const buffer = try gpa.alloc(u8, sh_size);
- defer gpa.free(buffer);
- const padding_byte: u8 = if (shdr.sh_type == elf.SHT_PROGBITS and
- shdr.sh_flags & elf.SHF_EXECINSTR != 0)
- 0xcc // int3
- else
- 0;
- @memset(buffer, padding_byte);
-
- for (atom_list.items) |ref| {
- const atom_ptr = elf_file.atom(ref).?;
- assert(atom_ptr.alive);
-
- const offset = math.cast(usize, atom_ptr.value - @as(i64, @intCast(shdr.sh_addr - base_offset))) orelse
- return error.Overflow;
- const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow;
- log.debug("writing atom({}) from 0x{x} to 0x{x}", .{
- ref,
- sh_offset + offset,
- sh_offset + offset + size,
- });
-
- // TODO decompress directly into provided buffer
- const out_code = buffer[offset..][0..size];
- const in_code = switch (atom_ptr.file(elf_file).?) {
- .object => |x| try x.codeDecompressAlloc(elf_file, ref.index),
- .zig_object => |x| try x.codeAlloc(elf_file, ref.index),
- else => unreachable,
- };
- defer gpa.free(in_code);
- @memcpy(out_code, in_code);
- }
+ var buffer = std.ArrayList(u8).init(gpa);
+ defer buffer.deinit();
- try elf_file.base.file.?.pwriteAll(buffer, sh_offset);
+ const slice = elf_file.sections.slice();
+ for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, atom_list| {
+ if (shdr.sh_type == elf.SHT_NOBITS) continue;
+ if (atom_list.atoms.items.len == 0) continue;
+ try atom_list.writeRelocatable(&buffer, elf_file);
}
}
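
writeAtoms shrinks to a loop over atom_list_2: the buffering, padding, and decompress-and-copy work now lives in AtomList.writeRelocatable, and a single ArrayList is reused across sections instead of one allocation per section header. The reuse pattern in isolation:

    const std = @import("std");

    // One buffer serves every section: clear it between uses, keep capacity.
    fn renderSection(buffer: *std.ArrayList(u8), payloads: []const []const u8) ![]const u8 {
        buffer.clearRetainingCapacity();
        for (payloads) |payload| try buffer.appendSlice(payload);
        return buffer.items;
    }

    test "one buffer serves many sections" {
        var buffer = std.ArrayList(u8).init(std.testing.allocator);
        defer buffer.deinit();
        try std.testing.expectEqualStrings("ab", try renderSection(&buffer, &.{ "a", "b" }));
        try std.testing.expectEqualStrings("cd", try renderSection(&buffer, &.{ "c", "d" }));
    }
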
@@ -498,9 +463,10 @@ fn writeSyntheticSections(elf_file: *Elf) !void {
const gpa = elf_file.base.comp.gpa;
const slice = elf_file.sections.slice();
- for (slice.items(.shdr), slice.items(.atom_list)) |shdr, atom_list| {
+ for (slice.items(.shdr), slice.items(.atom_list), 0..) |shdr, atom_list, shndx| {
if (shdr.sh_type != elf.SHT_RELA) continue;
if (atom_list.items.len == 0) continue;
+ if (@as(u32, @intCast(shndx)) == elf_file.eh_frame_section_index) continue;
const num_relocs = math.cast(usize, @divExact(shdr.sh_size, shdr.sh_entsize)) orelse
return error.Overflow;
@@ -542,7 +508,7 @@ fn writeSyntheticSections(elf_file: *Elf) !void {
const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow;
var buffer = try std.ArrayList(u8).initCapacity(gpa, @intCast(sh_size - existing_size));
defer buffer.deinit();
- try eh_frame.writeEhFrameObject(elf_file, buffer.writer());
+ try eh_frame.writeEhFrameRelocatable(elf_file, buffer.writer());
log.debug("writing .eh_frame from 0x{x} to 0x{x}", .{
shdr.sh_offset + existing_size,
shdr.sh_offset + sh_size,
diff --git a/src/print_zir.zig b/src/print_zir.zig
index 8ae63df650..f5c83c98e2 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -277,6 +277,8 @@ const Writer = struct {
.opt_eu_base_ptr_init,
.restore_err_ret_index_unconditional,
.restore_err_ret_index_fn_entry,
+ .try_operand_ty,
+ .try_ref_operand_ty,
=> try self.writeUnNode(stream, inst),
.ref,
@@ -461,6 +463,8 @@ const Writer = struct {
.field_val,
.field_ptr,
+ .decl_literal,
+ .decl_literal_no_coerce,
=> try self.writePlNodeField(stream, inst),
.field_ptr_named,
diff --git a/src/target.zig b/src/target.zig
index 9147347f93..bc2ba4c831 100644
--- a/src/target.zig
+++ b/src/target.zig
@@ -49,10 +49,10 @@ pub fn requiresPIC(target: std.Target, linking_libc: bool) bool {
(target.abi == .ohos and target.cpu.arch == .aarch64);
}
-pub fn usesLargePIC(target: std.Target) bool {
+pub fn picLevel(target: std.Target) u32 {
// MIPS always uses PIC level 1; other platforms vary in their default PIC levels, but they
// support both levels 1 and 2, in which case we prefer level 2.
- return !target.cpu.arch.isMIPS();
+ return if (target.cpu.arch.isMIPS()) 1 else 2;
}
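
Returning the numeric level instead of a "large?" boolean lets callers hand the value straight to whatever consumes a PIC level. The helper restated standalone:

    const std = @import("std");

    // MIPS is pinned to PIC level 1; everywhere else level 2 is preferred.
    fn picLevel(arch: std.Target.Cpu.Arch) u32 {
        return if (arch.isMIPS()) 1 else 2;
    }

    test "PIC levels" {
        try std.testing.expectEqual(@as(u32, 1), picLevel(.mipsel));
        try std.testing.expectEqual(@as(u32, 2), picLevel(.x86_64));
    }
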
/// This is not whether the target supports Position Independent Code, but whether the -fPIC
diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp
index dd0243b660..351b28df6b 100644
--- a/src/zig_llvm.cpp
+++ b/src/zig_llvm.cpp
@@ -435,20 +435,6 @@ void ZigLLVMParseCommandLineOptions(size_t argc, const char *const *argv) {
cl::ParseCommandLineOptions(argc, argv);
}
-void ZigLLVMSetModulePICLevel(LLVMModuleRef module, bool big) {
- unwrap(module)->setPICLevel(big ? PICLevel::Level::BigPIC : PICLevel::Level::SmallPIC);
-}
-
-void ZigLLVMSetModulePIELevel(LLVMModuleRef module, bool large) {
- unwrap(module)->setPIELevel(large ? PIELevel::Level::Large : PIELevel::Level::Small);
-}
-
-void ZigLLVMSetModuleCodeModel(LLVMModuleRef module, LLVMCodeModel code_model) {
- bool JIT;
- unwrap(module)->setCodeModel(*unwrap(code_model, JIT));
- assert(!JIT);
-}
-
bool ZigLLVMWriteImportLibrary(const char *def_path, const ZigLLVM_ArchType arch,
const char *output_lib_path, bool kill_at)
{
diff --git a/src/zig_llvm.h b/src/zig_llvm.h
index a88183c2cc..e831e9cf8b 100644
--- a/src/zig_llvm.h
+++ b/src/zig_llvm.h
@@ -155,10 +155,6 @@ enum ZigLLVM_CallingConv {
ZigLLVM_MaxID = 1023,
};
-ZIG_EXTERN_C void ZigLLVMSetModulePICLevel(LLVMModuleRef module, bool big);
-ZIG_EXTERN_C void ZigLLVMSetModulePIELevel(LLVMModuleRef module, bool large);
-ZIG_EXTERN_C void ZigLLVMSetModuleCodeModel(LLVMModuleRef module, LLVMCodeModel code_model);
-
ZIG_EXTERN_C void ZigLLVMParseCommandLineOptions(size_t argc, const char *const *argv);
// synchronize with llvm/include/ADT/Triple.h::ArchType
diff --git a/test/behavior.zig b/test/behavior.zig
index 650be5a91a..f0d05a146a 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -21,6 +21,7 @@ test {
_ = @import("behavior/cast_int.zig");
_ = @import("behavior/comptime_memory.zig");
_ = @import("behavior/const_slice_child.zig");
+ _ = @import("behavior/decl_literals.zig");
_ = @import("behavior/decltest.zig");
_ = @import("behavior/duplicated_test_names.zig");
_ = @import("behavior/defer.zig");
diff --git a/test/behavior/cast_int.zig b/test/behavior/cast_int.zig
index 9faa123a62..10bb445ca7 100644
--- a/test/behavior/cast_int.zig
+++ b/test/behavior/cast_int.zig
@@ -164,8 +164,6 @@ const Piece = packed struct {
};
test "load non byte-sized optional value" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
// Originally reported at https://github.com/ziglang/zig/issues/14200
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
@@ -181,8 +179,6 @@ test "load non byte-sized optional value" {
}
test "load non byte-sized value in struct" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
if (builtin.cpu.arch.endian() != .little) return error.SkipZigTest; // packed struct TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
diff --git a/test/behavior/decl_literals.zig b/test/behavior/decl_literals.zig
new file mode 100644
index 0000000000..7956689122
--- /dev/null
+++ b/test/behavior/decl_literals.zig
@@ -0,0 +1,38 @@
+const builtin = @import("builtin");
+const std = @import("std");
+const expect = std.testing.expect;
+
+test "decl literal" {
+ const S = struct {
+ x: u32,
+ const foo: @This() = .{ .x = 123 };
+ };
+
+ const val: S = .foo;
+ try expect(val.x == 123);
+}
+
+test "call decl literal" {
+ const S = struct {
+ x: u32,
+ fn init() @This() {
+ return .{ .x = 123 };
+ }
+ };
+
+ const val: S = .init();
+ try expect(val.x == 123);
+}
+
+test "call decl literal with error union" {
+ const S = struct {
+ x: u32,
+ fn init(err: bool) !@This() {
+ if (err) return error.Bad;
+ return .{ .x = 123 };
+ }
+ };
+
+ const val: S = try .init(false);
+ try expect(val.x == 123);
+}
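
For readers new to the feature these tests introduce: a decl literal `.name` resolves `name` inside the namespace of the expression's result type, so it works for constants and for functions returning the type (including through an error union, as the last test shows). A minimal sketch of the shorthand:

    const std = @import("std");

    const Color = struct {
        r: u8,
        const red: Color = .{ .r = 255 };
    };

    test "decl literal is shorthand for a namespaced decl" {
        const a: Color = .red; // decl literal
        const b: Color = Color.red; // explicit spelling
        try std.testing.expectEqual(a.r, b.r);
    }
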
diff --git a/test/behavior/defer.zig b/test/behavior/defer.zig
index 64bd1a5e0d..07519f16b5 100644
--- a/test/behavior/defer.zig
+++ b/test/behavior/defer.zig
@@ -197,3 +197,40 @@ const defer_assign = switch (block: {
comptime {
if (defer_assign != 0) @compileError("defer_assign failed!");
}
+
+test "errdefer capture" {
+ const S = struct {
+ fail: bool = undefined,
+ fn bar0(self: *@This()) error{a}!void {
+ self.fail = false;
+ errdefer |err| if (@TypeOf(err) != error{a}) {
+ self.fail = true;
+ };
+ return error.a;
+ }
+ fn bar1(self: *@This()) error{a}!void {
+ self.fail = false;
+ errdefer |err| if (@TypeOf(err) != error{a}) {
+ self.fail = true;
+ };
+ const rv: error{a}!void = @errorCast(@as(error{a}!void, error.a));
+ return rv;
+ }
+ // https://github.com/ziglang/zig/issues/20371
+ fn bar2(self: *@This()) error{a}!void {
+ self.fail = false;
+ errdefer |err| if (@TypeOf(err) != error{a}) {
+ self.fail = true;
+ };
+ return @errorCast(@as(error{a}!void, error.a));
+ }
+ };
+
+ var s: S = .{};
+ s.bar0() catch {};
+ if (s.fail) return error.TestExpectedError;
+ s.bar1() catch {};
+ if (s.fail) return error.TestExpectedError;
+ s.bar2() catch {};
+ if (s.fail) return error.TestExpectedError;
+}
diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig
index 44f035f46f..4694758e19 100644
--- a/test/behavior/struct.zig
+++ b/test/behavior/struct.zig
@@ -1214,7 +1214,6 @@ test "anon init through error union" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
a: u32,
diff --git a/test/behavior/try.zig b/test/behavior/try.zig
index 53fdc48934..f17133fabe 100644
--- a/test/behavior/try.zig
+++ b/test/behavior/try.zig
@@ -67,3 +67,22 @@ test "`try`ing an if/else expression" {
try std.testing.expectError(error.Test, S.getError2());
}
+
+test "try forwards result location" {
+ if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
+ const S = struct {
+ fn foo(err: bool) error{Foo}!u32 {
+ const result: error{ Foo, Bar }!u32 = if (err) error.Foo else 123;
+ const res_int: u32 = try @errorCast(result);
+ return res_int;
+ }
+ };
+
+ try expect((S.foo(false) catch return error.TestUnexpectedResult) == 123);
+ try std.testing.expectError(error.Foo, S.foo(true));
+}
diff --git a/test/behavior/while.zig b/test/behavior/while.zig
index 532bac258d..7f4d025d63 100644
--- a/test/behavior/while.zig
+++ b/test/behavior/while.zig
@@ -347,7 +347,6 @@ test "try terminating an infinite loop" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
// Test coverage for https://github.com/ziglang/zig/issues/13546
const Foo = struct {
diff --git a/test/cases/compile_errors/cast_enum_literal_to_enum_but_it_doesnt_match.zig b/test/cases/compile_errors/cast_enum_literal_to_enum_but_it_doesnt_match.zig
index bd89dc402a..2275e9dc92 100644
--- a/test/cases/compile_errors/cast_enum_literal_to_enum_but_it_doesnt_match.zig
+++ b/test/cases/compile_errors/cast_enum_literal_to_enum_but_it_doesnt_match.zig
@@ -11,5 +11,5 @@ export fn entry() void {
// backend=stage2
// target=native
//
-// :6:21: error: no field named 'c' in enum 'tmp.Foo'
+// :6:21: error: enum 'tmp.Foo' has no member named 'c'
// :1:13: note: enum declared here
diff --git a/test/cases/compile_errors/comptime_arg_to_generic_fn_callee_error.zig b/test/cases/compile_errors/comptime_arg_to_generic_fn_callee_error.zig
index efc3f556a9..e9f356ca78 100644
--- a/test/cases/compile_errors/comptime_arg_to_generic_fn_callee_error.zig
+++ b/test/cases/compile_errors/comptime_arg_to_generic_fn_callee_error.zig
@@ -17,5 +17,5 @@ pub export fn entry() void {
// backend=stage2
// target=native
//
-// :7:28: error: no field named 'c' in enum 'meta.FieldEnum(tmp.MyStruct)'
+// :7:28: error: enum 'meta.FieldEnum(tmp.MyStruct)' has no member named 'c'
// :?:?: note: enum declared here
diff --git a/test/link/elf.zig b/test/link/elf.zig
index 159703bbb3..9d169faf1d 100644
--- a/test/link/elf.zig
+++ b/test/link/elf.zig
@@ -55,6 +55,7 @@ pub fn testAll(b: *Build, build_opts: BuildOptions) *Step {
// Exercise linker in ar mode
elf_step.dependOn(testEmitStaticLib(b, .{ .target = musl_target }));
+ elf_step.dependOn(testEmitStaticLibZig(b, .{ .target = musl_target }));
// Exercise linker with LLVM backend
// musl tests
@@ -66,6 +67,7 @@ pub fn testAll(b: *Build, build_opts: BuildOptions) *Step {
elf_step.dependOn(testEmptyObject(b, .{ .target = musl_target }));
elf_step.dependOn(testEntryPoint(b, .{ .target = musl_target }));
elf_step.dependOn(testGcSections(b, .{ .target = musl_target }));
+ elf_step.dependOn(testGcSectionsZig(b, .{ .target = musl_target }));
elf_step.dependOn(testImageBase(b, .{ .target = musl_target }));
elf_step.dependOn(testInitArrayOrder(b, .{ .target = musl_target }));
elf_step.dependOn(testLargeAlignmentExe(b, .{ .target = musl_target }));