-rw-r--r--  lib/std/Build.zig                  |   4
-rw-r--r--  lib/std/Build/OptionsStep.zig      |  96
-rw-r--r--  lib/std/os/windows.zig             |  17
-rw-r--r--  lib/std/wasm.zig                   | 108
-rw-r--r--  src/arch/wasm/CodeGen.zig          |  14
-rw-r--r--  src/arch/wasm/Emit.zig             |  18
-rw-r--r--  src/arch/wasm/Mir.zig              |  23
-rw-r--r--  src/link/Elf.zig                   |   2
-rw-r--r--  src/link/Wasm.zig                  | 531
-rw-r--r--  src/link/Wasm/Atom.zig             |   7
-rw-r--r--  src/link/Wasm/Object.zig           |  49
-rw-r--r--  src/link/Wasm/Symbol.zig           |   4
-rw-r--r--  src/link/Wasm/types.zig            |  53
-rw-r--r--  test/link/wasm/bss/build.zig       | 108
-rw-r--r--  test/link/wasm/bss/lib2.zig        |   5
-rw-r--r--  test/standalone.zig                |   5
-rw-r--r--  test/standalone/options/build.zig  |   7
17 files changed, 886 insertions, 165 deletions
diff --git a/lib/std/Build.zig b/lib/std/Build.zig
index 056a7ec639..5b974bb816 100644
--- a/lib/std/Build.zig
+++ b/lib/std/Build.zig
@@ -1737,7 +1737,7 @@ pub fn makeTempPath(b: *Build) []const u8 {
const rand_int = std.crypto.random.int(u64);
const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ hex64(rand_int);
const result_path = b.cache_root.join(b.allocator, &.{tmp_dir_sub_path}) catch @panic("OOM");
- fs.cwd().makePath(result_path) catch |err| {
+ b.cache_root.handle.makePath(tmp_dir_sub_path) catch |err| {
std.debug.print("unable to make tmp path '{s}': {s}\n", .{
result_path, @errorName(err),
});
@@ -1747,7 +1747,7 @@ pub fn makeTempPath(b: *Build) []const u8 {
/// There are a few copies of this function in miscellaneous places. Would be nice to find
/// a home for them.
-fn hex64(x: u64) [16]u8 {
+pub fn hex64(x: u64) [16]u8 {
const hex_charset = "0123456789abcdef";
var result: [16]u8 = undefined;
var i: usize = 0;
diff --git a/lib/std/Build/OptionsStep.zig b/lib/std/Build/OptionsStep.zig
index 859d0b68c9..a0e72e3695 100644
--- a/lib/std/Build/OptionsStep.zig
+++ b/lib/std/Build/OptionsStep.zig
@@ -241,33 +241,75 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
);
}
- var options_dir = try b.cache_root.handle.makeOpenPath("options", .{});
- defer options_dir.close();
-
- const basename = self.hashContentsToFileName();
-
- try options_dir.writeFile(&basename, self.contents.items);
-
- self.generated_file.path = try b.cache_root.join(b.allocator, &.{ "options", &basename });
-}
-
-fn hashContentsToFileName(self: *OptionsStep) [64]u8 {
- // TODO update to use the cache system instead of this
- // This implementation is copied from `WriteFileStep.make`
-
- var hash = std.crypto.hash.blake2.Blake2b384.init(.{});
-
- // Random bytes to make OptionsStep unique. Refresh this with
- // new random bytes when OptionsStep implementation is modified
- // in a non-backwards-compatible way.
- hash.update("yL0Ya4KkmcCjBlP8");
- hash.update(self.contents.items);
-
- var digest: [48]u8 = undefined;
- hash.final(&digest);
- var hash_basename: [64]u8 = undefined;
- _ = fs.base64_encoder.encode(&hash_basename, &digest);
- return hash_basename;
+ const basename = "options.zig";
+
+ // Hash contents to file name.
+ var hash = b.cache.hash;
+ // Random bytes to make unique. Refresh this with new random bytes when
+ // implementation is modified in a non-backwards-compatible way.
+ hash.add(@as(u32, 0x38845ef8));
+ hash.addBytes(self.contents.items);
+ const sub_path = "c" ++ fs.path.sep_str ++ hash.final() ++ fs.path.sep_str ++ basename;
+
+ self.generated_file.path = try b.cache_root.join(b.allocator, &.{sub_path});
+
+ // Optimize for the hot path. Stat the file, and if it already exists,
+ // cache hit.
+ if (b.cache_root.handle.access(sub_path, .{})) |_| {
+ // This is the hot path, success.
+ step.result_cached = true;
+ return;
+ } else |outer_err| switch (outer_err) {
+ error.FileNotFound => {
+ const sub_dirname = fs.path.dirname(sub_path).?;
+ b.cache_root.handle.makePath(sub_dirname) catch |e| {
+ return step.fail("unable to make path '{}{s}': {s}", .{
+ b.cache_root, sub_dirname, @errorName(e),
+ });
+ };
+
+ const rand_int = std.crypto.random.int(u64);
+ const tmp_sub_path = "tmp" ++ fs.path.sep_str ++
+ std.Build.hex64(rand_int) ++ fs.path.sep_str ++
+ basename;
+ const tmp_sub_path_dirname = fs.path.dirname(tmp_sub_path).?;
+
+ b.cache_root.handle.makePath(tmp_sub_path_dirname) catch |err| {
+ return step.fail("unable to make temporary directory '{}{s}': {s}", .{
+ b.cache_root, tmp_sub_path_dirname, @errorName(err),
+ });
+ };
+
+ b.cache_root.handle.writeFile(tmp_sub_path, self.contents.items) catch |err| {
+ return step.fail("unable to write options to '{}{s}': {s}", .{
+ b.cache_root, tmp_sub_path, @errorName(err),
+ });
+ };
+
+ b.cache_root.handle.rename(tmp_sub_path, sub_path) catch |err| switch (err) {
+ error.PathAlreadyExists => {
+ // Other process beat us to it. Clean up the temp file.
+ b.cache_root.handle.deleteFile(tmp_sub_path) catch |e| {
+ try step.addError("warning: unable to delete temp file '{}{s}': {s}", .{
+ b.cache_root, tmp_sub_path, @errorName(e),
+ });
+ };
+ step.result_cached = true;
+ return;
+ },
+ else => {
+ return step.fail("unable to rename options from '{}{s}' to '{}{s}': {s}", .{
+ b.cache_root, tmp_sub_path,
+ b.cache_root, sub_path,
+ @errorName(err),
+ });
+ },
+ };
+ },
+ else => |e| return step.fail("unable to access options file '{}{s}': {s}", .{
+ b.cache_root, sub_path, @errorName(e),
+ }),
+ }
}
const OptionArtifactArg = struct {
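Note: the new `make` above follows the usual populate-the-cache pattern: hash the option contents into a cache sub-path, treat an existing file as a hit, otherwise write to a random temporary path and rename it into place, with `error.PathAlreadyExists` meaning another process won the race. A minimal standalone sketch of that pattern (hypothetical `publishToCache` helper, plain `std.fs.Dir` calls instead of the build system's `cache_root`):

```zig
const std = @import("std");

/// Sketch: atomically publish `contents` at `cache_dir/sub_path`.
/// Losing a race against another process is still a success (cache hit).
fn publishToCache(cache_dir: std.fs.Dir, sub_path: []const u8, contents: []const u8) !void {
    // Hot path: if the file already exists, the cache entry is valid.
    if (cache_dir.access(sub_path, .{})) |_| {
        return;
    } else |err| switch (err) {
        error.FileNotFound => {},
        else => |e| return e,
    }

    if (std.fs.path.dirname(sub_path)) |dirname| try cache_dir.makePath(dirname);

    // Write to a unique temporary path first so readers never observe a
    // partially written file at the final location.
    var tmp_buf: [32]u8 = undefined;
    const tmp_path = try std.fmt.bufPrint(&tmp_buf, "tmp-{x}", .{std.crypto.random.int(u64)});
    try cache_dir.writeFile(tmp_path, contents);

    // Rename is the atomic "publish" step.
    cache_dir.rename(tmp_path, sub_path) catch |err| switch (err) {
        error.PathAlreadyExists => cache_dir.deleteFile(tmp_path) catch {},
        else => |e| return e,
    };
}
```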
diff --git a/lib/std/os/windows.zig b/lib/std/os/windows.zig
index 5576200ea5..28dac40c9a 100644
--- a/lib/std/os/windows.zig
+++ b/lib/std/os/windows.zig
@@ -1230,23 +1230,6 @@ test "GetFinalPathNameByHandle" {
_ = try GetFinalPathNameByHandle(handle, .{ .volume_name = .Dos }, buffer[0..required_len_in_u16]);
}
-pub const QueryInformationFileError = error{Unexpected};
-
-pub fn QueryInformationFile(
- handle: HANDLE,
- info_class: FILE_INFORMATION_CLASS,
- out_buffer: []u8,
-) QueryInformationFileError!void {
- var io: IO_STATUS_BLOCK = undefined;
- const len_bytes = std.math.cast(u32, out_buffer.len) orelse unreachable;
- const rc = ntdll.NtQueryInformationFile(handle, &io, out_buffer.ptr, len_bytes, info_class);
- switch (rc) {
- .SUCCESS => {},
- .INVALID_PARAMETER => unreachable,
- else => return unexpectedStatus(rc),
- }
-}
-
pub const GetFileSizeError = error{Unexpected};
pub fn GetFileSizeEx(hFile: HANDLE) GetFileSizeError!u64 {
diff --git a/lib/std/wasm.zig b/lib/std/wasm.zig
index 25a0bb7abf..d54e998b67 100644
--- a/lib/std/wasm.zig
+++ b/lib/std/wasm.zig
@@ -189,7 +189,9 @@ pub const Opcode = enum(u8) {
i64_extend16_s = 0xC3,
i64_extend32_s = 0xC4,
- prefixed = 0xFC,
+ misc_prefix = 0xFC,
+ simd_prefix = 0xFD,
+ atomics_prefix = 0xFE,
_,
};
@@ -217,7 +219,7 @@ test "Wasm - opcodes" {
/// Opcodes that require a prefix `0xFC`
/// Each opcode represents a varuint32, meaning
/// they are encoded as leb128 in binary.
-pub const PrefixedOpcode = enum(u32) {
+pub const MiscOpcode = enum(u32) {
i32_trunc_sat_f32_s = 0x00,
i32_trunc_sat_f32_u = 0x01,
i32_trunc_sat_f64_s = 0x02,
@@ -239,6 +241,12 @@ pub const PrefixedOpcode = enum(u32) {
_,
};
+/// Returns the integer value of a `MiscOpcode`. Used by the Zig compiler
+/// to write instructions to the wasm binary file.
+pub fn miscOpcode(op: MiscOpcode) u32 {
+ return @enumToInt(op);
+}
+
/// Simd opcodes that require a prefix `0xFD`.
/// Each opcode represents a varuint32, meaning
/// they are encoded as leb128 in binary.
@@ -510,6 +518,86 @@ pub fn simdOpcode(op: SimdOpcode) u32 {
return @enumToInt(op);
}
+/// Atomics opcodes that require a prefix `0xFE`.
+/// Each opcode represents a varuint32, meaning
+/// they are encoded as leb128 in binary.
+pub const AtomicsOpcode = enum(u32) {
+ memory_atomic_notify = 0x00,
+ memory_atomic_wait32 = 0x01,
+ memory_atomic_wait64 = 0x02,
+ atomic_fence = 0x03,
+ i32_atomic_load = 0x10,
+ i64_atomic_load = 0x11,
+ i32_atomic_load8_u = 0x12,
+ i32_atomic_load16_u = 0x13,
+ i64_atomic_load8_u = 0x14,
+ i64_atomic_load16_u = 0x15,
+ i64_atomic_load32_u = 0x16,
+ i32_atomic_store = 0x17,
+ i64_atomic_store = 0x18,
+ i32_atomic_store8 = 0x19,
+ i32_atomic_store16 = 0x1A,
+ i64_atomic_store8 = 0x1B,
+ i64_atomic_store16 = 0x1C,
+ i64_atomic_store32 = 0x1D,
+ i32_atomic_rmw_add = 0x1E,
+ i64_atomic_rmw_add = 0x1F,
+ i32_atomic_rmw8_add_u = 0x20,
+ i32_atomic_rmw16_add_u = 0x21,
+ i64_atomic_rmw8_add_u = 0x22,
+ i64_atomic_rmw16_add_u = 0x23,
+ i64_atomic_rmw32_add_u = 0x24,
+ i32_atomic_rmw_sub = 0x25,
+ i64_atomic_rmw_sub = 0x26,
+ i32_atomic_rmw8_sub_u = 0x27,
+ i32_atomic_rmw16_sub_u = 0x28,
+ i64_atomic_rmw8_sub_u = 0x29,
+ i64_atomic_rmw16_sub_u = 0x2A,
+ i64_atomic_rmw32_sub_u = 0x2B,
+ i32_atomic_rmw_and = 0x2C,
+ i64_atomic_rmw_and = 0x2D,
+ i32_atomic_rmw8_and_u = 0x2E,
+ i32_atomic_rmw16_and_u = 0x2F,
+ i64_atomic_rmw8_and_u = 0x30,
+ i64_atomic_rmw16_and_u = 0x31,
+ i64_atomic_rmw32_and_u = 0x32,
+ i32_atomic_rmw_or = 0x33,
+ i64_atomic_rmw_or = 0x34,
+ i32_atomic_rmw8_or_u = 0x35,
+ i32_atomic_rmw16_or_u = 0x36,
+ i64_atomic_rmw8_or_u = 0x37,
+ i64_atomic_rmw16_or_u = 0x38,
+ i64_atomic_rmw32_or_u = 0x39,
+ i32_atomic_rmw_xor = 0x3A,
+ i64_atomic_rmw_xor = 0x3B,
+ i32_atomic_rmw8_xor_u = 0x3C,
+ i32_atomic_rmw16_xor_u = 0x3D,
+ i64_atomic_rmw8_xor_u = 0x3E,
+ i64_atomic_rmw16_xor_u = 0x3F,
+ i64_atomic_rmw32_xor_u = 0x40,
+ i32_atomic_rmw_xchg = 0x41,
+ i64_atomic_rmw_xchg = 0x42,
+ i32_atomic_rmw8_xchg_u = 0x43,
+ i32_atomic_rmw16_xchg_u = 0x44,
+ i64_atomic_rmw8_xchg_u = 0x45,
+ i64_atomic_rmw16_xchg_u = 0x46,
+ i64_atomic_rmw32_xchg_u = 0x47,
+
+ i32_atomic_rmw_cmpxchg = 0x48,
+ i64_atomic_rmw_cmpxchg = 0x49,
+ i32_atomic_rmw8_cmpxchg_u = 0x4A,
+ i32_atomic_rmw16_cmpxchg_u = 0x4B,
+ i64_atomic_rmw8_cmpxchg_u = 0x4C,
+ i64_atomic_rmw16_cmpxchg_u = 0x4D,
+ i64_atomic_rmw32_cmpxchg_u = 0x4E,
+};
+
+/// Returns the integer value of an `AtomicsOpcode`. Used by the Zig compiler
+/// to write instructions to the wasm binary file
+pub fn atomicsOpcode(op: AtomicsOpcode) u32 {
+ return @enumToInt(op);
+}
+
/// Enum representing all Wasm value types as per spec:
/// https://webassembly.github.io/spec/core/binary/types.html
pub const Valtype = enum(u8) {
@@ -551,8 +639,22 @@ test "Wasm - valtypes" {
/// Limits classify the size range of resizeable storage associated with memory types and table types.
pub const Limits = struct {
+ flags: u8,
min: u32,
- max: ?u32,
+ max: u32,
+
+ pub const Flags = enum(u8) {
+ WASM_LIMITS_FLAG_HAS_MAX = 0x1,
+ WASM_LIMITS_FLAG_IS_SHARED = 0x2,
+ };
+
+ pub fn hasFlag(limits: Limits, flag: Flags) bool {
+ return limits.flags & @enumToInt(flag) != 0;
+ }
+
+ pub fn setFlag(limits: *Limits, flag: Flags) void {
+ limits.flags |= @enumToInt(flag);
+ }
};
/// Initialization expressions are used to set the initial value on an object
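Note: `Limits` now mirrors the binary encoding directly: a flags byte (bit 0 = has max, bit 1 = shared), the minimum, and a maximum that is only meaningful when the corresponding flag bit is set. A small sketch of the new `hasFlag`/`setFlag` helpers together with the LEB128 encoding, assuming the updated struct above:

```zig
const std = @import("std");

test "limits flags and encoding" {
    // A shared, bounded memory (threads proposal) needs both flag bits set.
    var limits: std.wasm.Limits = .{ .flags = 0, .min = 1, .max = 16 };
    limits.setFlag(.WASM_LIMITS_FLAG_HAS_MAX);
    limits.setFlag(.WASM_LIMITS_FLAG_IS_SHARED);
    try std.testing.expectEqual(@as(u8, 0x3), limits.flags);

    // Binary form: one flags byte, LEB128 min, then LEB128 max only when
    // the HAS_MAX bit is set.
    var buf = std.ArrayList(u8).init(std.testing.allocator);
    defer buf.deinit();
    const writer = buf.writer();
    try writer.writeByte(limits.flags);
    try std.leb.writeULEB128(writer, limits.min);
    if (limits.hasFlag(.WASM_LIMITS_FLAG_HAS_MAX)) try std.leb.writeULEB128(writer, limits.max);
    try std.testing.expectEqualSlices(u8, &.{ 0x03, 0x01, 0x10 }, buf.items);
}
```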
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index e79129ddb8..c05f07a602 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -895,10 +895,10 @@ fn addTag(func: *CodeGen, tag: Mir.Inst.Tag) error{OutOfMemory}!void {
try func.addInst(.{ .tag = tag, .data = .{ .tag = {} } });
}
-fn addExtended(func: *CodeGen, opcode: wasm.PrefixedOpcode) error{OutOfMemory}!void {
+fn addExtended(func: *CodeGen, opcode: wasm.MiscOpcode) error{OutOfMemory}!void {
const extra_index = @intCast(u32, func.mir_extra.items.len);
try func.mir_extra.append(func.gpa, @enumToInt(opcode));
- try func.addInst(.{ .tag = .extended, .data = .{ .payload = extra_index } });
+ try func.addInst(.{ .tag = .misc_prefix, .data = .{ .payload = extra_index } });
}
fn addLabel(func: *CodeGen, tag: Mir.Inst.Tag, label: u32) error{OutOfMemory}!void {
@@ -925,7 +925,7 @@ fn addImm128(func: *CodeGen, index: u32) error{OutOfMemory}!void {
try func.mir_extra.ensureUnusedCapacity(func.gpa, 5);
func.mir_extra.appendAssumeCapacity(std.wasm.simdOpcode(.v128_const));
func.mir_extra.appendSliceAssumeCapacity(@alignCast(4, mem.bytesAsSlice(u32, &simd_values)));
- try func.addInst(.{ .tag = .simd, .data = .{ .payload = extra_index } });
+ try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
}
fn addFloat64(func: *CodeGen, float: f64) error{OutOfMemory}!void {
@@ -2310,7 +2310,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
offset + lhs.offset(),
ty.abiAlignment(func.target),
});
- return func.addInst(.{ .tag = .simd, .data = .{ .payload = extra_index } });
+ return func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
},
},
.Pointer => {
@@ -2420,7 +2420,7 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
offset + operand.offset(),
ty.abiAlignment(func.target),
});
- try func.addInst(.{ .tag = .simd, .data = .{ .payload = extra_index } });
+ try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
return WValue{ .stack = {} };
}
@@ -4477,7 +4477,7 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
operand.offset(),
elem_ty.abiAlignment(func.target),
});
- try func.addInst(.{ .tag = .simd, .data = .{ .payload = extra_index } });
+ try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
try func.addLabel(.local_set, result.local.value);
return func.finishAir(inst, result, &.{ty_op.operand});
},
@@ -4493,7 +4493,7 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.emitWValue(operand);
const extra_index = @intCast(u32, func.mir_extra.items.len);
try func.mir_extra.append(func.gpa, opcode);
- try func.addInst(.{ .tag = .simd, .data = .{ .payload = extra_index } });
+ try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
try func.addLabel(.local_set, result.local.value);
return func.finishAir(inst, result, &.{ty_op.operand});
},
diff --git a/src/arch/wasm/Emit.zig b/src/arch/wasm/Emit.zig
index 7d44d3622f..5982d3b48c 100644
--- a/src/arch/wasm/Emit.zig
+++ b/src/arch/wasm/Emit.zig
@@ -239,8 +239,9 @@ pub fn emitMir(emit: *Emit) InnerError!void {
.i64_clz => try emit.emitTag(tag),
.i64_ctz => try emit.emitTag(tag),
- .extended => try emit.emitExtended(inst),
- .simd => try emit.emitSimd(inst),
+ .misc_prefix => try emit.emitExtended(inst),
+ .simd_prefix => try emit.emitSimd(inst),
+ .atomics_prefix => try emit.emitAtomic(inst),
}
}
}
@@ -433,9 +434,9 @@ fn emitExtended(emit: *Emit, inst: Mir.Inst.Index) !void {
const extra_index = emit.mir.instructions.items(.data)[inst].payload;
const opcode = emit.mir.extra[extra_index];
const writer = emit.code.writer();
- try emit.code.append(0xFC);
+ try emit.code.append(std.wasm.opcode(.misc_prefix));
try leb128.writeULEB128(writer, opcode);
- switch (@intToEnum(std.wasm.PrefixedOpcode, opcode)) {
+ switch (@intToEnum(std.wasm.MiscOpcode, opcode)) {
// bulk-memory opcodes
.data_drop => {
const segment = emit.mir.extra[extra_index + 1];
@@ -472,7 +473,7 @@ fn emitSimd(emit: *Emit, inst: Mir.Inst.Index) !void {
const extra_index = emit.mir.instructions.items(.data)[inst].payload;
const opcode = emit.mir.extra[extra_index];
const writer = emit.code.writer();
- try emit.code.append(0xFD);
+ try emit.code.append(std.wasm.opcode(.simd_prefix));
try leb128.writeULEB128(writer, opcode);
switch (@intToEnum(std.wasm.SimdOpcode, opcode)) {
.v128_store,
@@ -496,10 +497,15 @@ fn emitSimd(emit: *Emit, inst: Mir.Inst.Index) !void {
.f32x4_splat,
.f64x2_splat,
=> {}, // opcode already written
- else => |tag| return emit.fail("TODO: Implement simd instruction: {s}\n", .{@tagName(tag)}),
+ else => |tag| return emit.fail("TODO: Implement simd instruction: {s}", .{@tagName(tag)}),
}
}
+fn emitAtomic(emit: *Emit, inst: Mir.Inst.Index) !void {
+ _ = inst;
+ return emit.fail("TODO: Implement atomics instructions", .{});
+}
+
fn emitMemFill(emit: *Emit) !void {
try emit.code.append(0xFC);
try emit.code.append(0x0B);
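Note: all three prefixed instruction families (0xFC misc, 0xFD simd, 0xFE atomics) share one encoding shape: a single prefix byte followed by the sub-opcode as a ULEB128-encoded u32, then any immediates. A minimal sketch, assuming the renamed `misc_prefix`/`miscOpcode` helpers introduced in this change:

```zig
const std = @import("std");

test "encoding a prefixed instruction" {
    var code = std.ArrayList(u8).init(std.testing.allocator);
    defer code.deinit();
    const writer = code.writer();

    // memory.fill: misc prefix (0xFC) + sub-opcode 0x0B + memory index 0.
    try writer.writeByte(std.wasm.opcode(.misc_prefix));
    try std.leb.writeULEB128(writer, std.wasm.miscOpcode(.memory_fill));
    try std.leb.writeULEB128(writer, @as(u32, 0));

    try std.testing.expectEqualSlices(u8, &.{ 0xFC, 0x0B, 0x00 }, code.items);
}
```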
diff --git a/src/arch/wasm/Mir.zig b/src/arch/wasm/Mir.zig
index 2d59c09e18..4c550d8637 100644
--- a/src/arch/wasm/Mir.zig
+++ b/src/arch/wasm/Mir.zig
@@ -87,6 +87,13 @@ pub const Inst = struct {
///
/// Uses `label`
call_indirect = 0x11,
+ /// Contains a symbol to a function pointer
+ /// uses `label`
+ ///
+ /// Note: This uses `0x16` as its value, which is reserved but currently unused by the
+ /// WebAssembly specification, meaning we must update this if the specification were to
+ /// use this value.
+ function_index = 0x16,
/// Pops three values from the stack and pushes
/// the first or second value dependent on the third value.
/// Uses `tag`
@@ -510,24 +517,24 @@ pub const Inst = struct {
i64_extend16_s = 0xC3,
/// Uses `tag`
i64_extend32_s = 0xC4,
- /// The instruction consists of an extension opcode.
+ /// The instruction consists of a prefixed opcode.
/// The prefixed opcode can be found at payload's index.
///
/// The `data` field depends on the extension instruction and
/// may contain additional data.
- extended = 0xFC,
+ misc_prefix = 0xFC,
/// The instruction consists of a simd opcode.
/// The actual simd-opcode is found at payload's index.
///
/// The `data` field depends on the simd instruction and
/// may contain additional data.
- simd = 0xFD,
- /// Contains a symbol to a function pointer
- /// uses `label`
+ simd_prefix = 0xFD,
+ /// The instruction consists of an atomics opcode.
+ /// The actual atomics-opcode is found at payload's index.
///
- /// Note: This uses `0xFE` as value as it is unused and not reserved
- /// by the wasm specification, making it safe to use.
- function_index = 0xFE,
+ /// The `data` field depends on the atomics instruction and
+ /// may contain additional data.
+ atomics_prefix = 0xFE,
/// Contains a symbol to a memory address
/// Uses `label`
///
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index f1ab98372e..fcab34bf5e 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -1636,7 +1636,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) !v
}
for (self.base.options.objects) |obj| {
if (Compilation.classifyFileExt(obj.path) == .shared_library) {
- const lib_dir_path = std.fs.path.dirname(obj.path).?;
+ const lib_dir_path = std.fs.path.dirname(obj.path) orelse continue;
if ((try rpath_table.fetchPut(lib_dir_path, {})) == null) {
try argv.append("-rpath");
try argv.append(lib_dir_path);
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index e998a8d50e..3561c86ce8 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -111,7 +111,11 @@ functions: std.AutoArrayHashMapUnmanaged(struct { file: ?u16, index: u32 }, std.
/// Output global section
wasm_globals: std.ArrayListUnmanaged(std.wasm.Global) = .{},
/// Memory section
-memories: std.wasm.Memory = .{ .limits = .{ .min = 0, .max = null } },
+memories: std.wasm.Memory = .{ .limits = .{
+ .min = 0,
+ .max = undefined,
+ .flags = 0,
+} },
/// Output table section
tables: std.ArrayListUnmanaged(std.wasm.Table) = .{},
/// Output export section
@@ -135,6 +139,8 @@ archives: std.ArrayListUnmanaged(Archive) = .{},
/// A map of global names (read: offset into string table) to their symbol location
globals: std.AutoHashMapUnmanaged(u32, SymbolLoc) = .{},
+/// The list of GOT symbols and their location
+got_symbols: std.ArrayListUnmanaged(SymbolLoc) = .{},
/// Maps discarded symbols and their positions to the location of the symbol
/// it was resolved to
discarded: std.AutoHashMapUnmanaged(SymbolLoc, SymbolLoc) = .{},
@@ -176,6 +182,24 @@ pub const Segment = struct {
alignment: u32,
size: u32,
offset: u32,
+ flags: u32,
+
+ pub const Flag = enum(u32) {
+ WASM_DATA_SEGMENT_IS_PASSIVE = 0x01,
+ WASM_DATA_SEGMENT_HAS_MEMINDEX = 0x02,
+ };
+
+ pub fn isPassive(segment: Segment) bool {
+ return segment.flags & @enumToInt(Flag.WASM_DATA_SEGMENT_IS_PASSIVE) != 0;
+ }
+
+ /// For a given segment, determines if it needs passive initialization
+ fn needsPassiveInitialization(segment: Segment, import_mem: bool, name: []const u8) bool {
+ if (import_mem and !std.mem.eql(u8, name, ".bss")) {
+ return true;
+ }
+ return segment.isPassive();
+ }
};
pub const Export = struct {
@@ -396,7 +420,7 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
const loc = try wasm_bin.createSyntheticSymbol("__indirect_function_table", .table);
const symbol = loc.getSymbol(wasm_bin);
const table: std.wasm.Table = .{
- .limits = .{ .min = 0, .max = null }, // will be overwritten during `mapFunctionTable`
+ .limits = .{ .flags = 0, .min = 0, .max = undefined }, // will be overwritten during `mapFunctionTable`
.reftype = .funcref,
};
if (options.output_mode == .Obj or options.import_table) {
@@ -429,6 +453,30 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
// at the end during `initializeCallCtorsFunction`.
}
+ // shared-memory symbols for TLS support
+ if (wasm_bin.base.options.shared_memory) {
+ {
+ const loc = try wasm_bin.createSyntheticSymbol("__tls_base", .global);
+ const symbol = loc.getSymbol(wasm_bin);
+ symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN);
+ }
+ {
+ const loc = try wasm_bin.createSyntheticSymbol("__tls_size", .global);
+ const symbol = loc.getSymbol(wasm_bin);
+ symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN);
+ }
+ {
+ const loc = try wasm_bin.createSyntheticSymbol("__tls_align", .global);
+ const symbol = loc.getSymbol(wasm_bin);
+ symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN);
+ }
+ {
+ const loc = try wasm_bin.createSyntheticSymbol("__wasm_tls_init", .function);
+ const symbol = loc.getSymbol(wasm_bin);
+ symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN);
+ }
+ }
+
// if (!options.strip and options.module != null) {
// wasm_bin.dwarf = Dwarf.init(allocator, &wasm_bin.base, options.target);
// try wasm_bin.initDebugSections();
@@ -597,6 +645,15 @@ fn parseArchive(wasm: *Wasm, path: []const u8, force_load: bool) !bool {
return true;
}
+fn requiresTLSReloc(wasm: *const Wasm) bool {
+ for (wasm.got_symbols.items) |loc| {
+ if (loc.getSymbol(wasm).isTLS()) {
+ return true;
+ }
+ }
+ return false;
+}
+
fn resolveSymbolsInObject(wasm: *Wasm, object_index: u16) !void {
const object: Object = wasm.objects.items[object_index];
log.debug("Resolving symbols in object: '{s}'", .{object.name});
@@ -775,6 +832,220 @@ fn resolveSymbolsInArchives(wasm: *Wasm) !void {
}
}
+fn setupInitMemoryFunction(wasm: *Wasm) !void {
+ // Passive segments are used to avoid memory being reinitialized on each
+ // thread's instantiation. These passive segments are initialized and
+ // dropped in __wasm_init_memory, which is registered as the start function.
+ // We also initialize bss segments (using memory.fill) as part of this
+ // function.
+ if (!wasm.hasPassiveInitializationSegments()) {
+ return;
+ }
+
+ const flag_address: u32 = if (wasm.base.options.shared_memory) address: {
+ // when we have passive initialization segments and shared memory
+ // `setupMemory` will create this symbol and set its virtual address.
+ const loc = wasm.findGlobalSymbol("__wasm_init_memory_flag").?;
+ break :address loc.getSymbol(wasm).virtual_address;
+ } else 0;
+
+ var function_body = std.ArrayList(u8).init(wasm.base.allocator);
+ defer function_body.deinit();
+ const writer = function_body.writer();
+
+ // we have 0 locals
+ try leb.writeULEB128(writer, @as(u32, 0));
+
+ if (wasm.base.options.shared_memory) {
+ // destination blocks
+ // based on values we jump to corresponding label
+ try writer.writeByte(std.wasm.opcode(.block)); // $drop
+ try writer.writeByte(std.wasm.block_empty); // block type
+
+ try writer.writeByte(std.wasm.opcode(.block)); // $wait
+ try writer.writeByte(std.wasm.block_empty); // block type
+
+ try writer.writeByte(std.wasm.opcode(.block)); // $init
+ try writer.writeByte(std.wasm.block_empty); // block type
+
+ // atomically check
+ try writer.writeByte(std.wasm.opcode(.i32_const));
+ try leb.writeULEB128(writer, flag_address);
+ try writer.writeByte(std.wasm.opcode(.i32_const));
+ try leb.writeULEB128(writer, @as(u32, 0));
+ try writer.writeByte(std.wasm.opcode(.i32_const));
+ try leb.writeULEB128(writer, @as(u32, 1));
+ try writer.writeByte(std.wasm.opcode(.atomics_prefix));
+ try leb.writeULEB128(writer, std.wasm.atomicsOpcode(.i32_atomic_rmw_cmpxchg));
+ try leb.writeULEB128(writer, @as(u32, 2)); // alignment
+ try leb.writeULEB128(writer, @as(u32, 0)); // offset
+
+ // based on the value from the atomic check, jump to the label.
+ try writer.writeByte(std.wasm.opcode(.br_table));
+ try leb.writeULEB128(writer, @as(u32, 2)); // length of the table (we have 3 blocks but because of the mandatory default the length is 2).
+ try leb.writeULEB128(writer, @as(u32, 0)); // $init
+ try leb.writeULEB128(writer, @as(u32, 1)); // $wait
+ try leb.writeULEB128(writer, @as(u32, 2)); // $drop
+ try writer.writeByte(std.wasm.opcode(.end));
+ }
+
+ var it = wasm.data_segments.iterator();
+ var segment_index: u32 = 0;
+ while (it.next()) |entry| : (segment_index += 1) {
+ const segment: Segment = wasm.segments.items[entry.value_ptr.*];
+ if (segment.needsPassiveInitialization(wasm.base.options.import_memory, entry.key_ptr.*)) {
+ // For passive BSS segments we can simply issue a memory.fill(0).
+ // For non-BSS segments we do a memory.init. Both these
+ // instructions take as their first argument the destination
+ // address.
+ try writer.writeByte(std.wasm.opcode(.i32_const));
+ try leb.writeULEB128(writer, segment.offset);
+
+ if (wasm.base.options.shared_memory and std.mem.eql(u8, entry.key_ptr.*, ".tdata")) {
+ // When we initialize the TLS segment we also set the `__tls_base`
+ // global. This allows the runtime to use this static copy of the
+ // TLS data for the first/main thread.
+ try writer.writeByte(std.wasm.opcode(.i32_const));
+ try leb.writeULEB128(writer, segment.offset);
+ try writer.writeByte(std.wasm.opcode(.global_set));
+ const loc = wasm.findGlobalSymbol("__tls_base").?;
+ try leb.writeULEB128(writer, loc.getSymbol(wasm).index);
+ }
+
+ try writer.writeByte(std.wasm.opcode(.i32_const));
+ try leb.writeULEB128(writer, @as(u32, 0));
+ try writer.writeByte(std.wasm.opcode(.i32_const));
+ try leb.writeULEB128(writer, segment.size);
+ try writer.writeByte(std.wasm.opcode(.misc_prefix));
+ if (std.mem.eql(u8, entry.key_ptr.*, ".bss")) {
+ // fill bss segment with zeroes
+ try leb.writeULEB128(writer, std.wasm.miscOpcode(.memory_fill));
+ } else {
+ // initialize the segment
+ try leb.writeULEB128(writer, std.wasm.miscOpcode(.memory_init));
+ try leb.writeULEB128(writer, segment_index);
+ }
+ try writer.writeByte(0); // memory index immediate
+ }
+ }
+
+ if (wasm.base.options.shared_memory) {
+ // we set the init memory flag to value '2'
+ try writer.writeByte(std.wasm.opcode(.i32_const));
+ try leb.writeULEB128(writer, flag_address);
+ try writer.writeByte(std.wasm.opcode(.i32_const));
+ try leb.writeULEB128(writer, @as(u32, 2));
+ try writer.writeByte(std.wasm.opcode(.atomics_prefix));
+ try leb.writeULEB128(writer, std.wasm.atomicsOpcode(.i32_atomic_store));
+ try leb.writeULEB128(writer, @as(u32, 2)); // alignment
+ try leb.writeULEB128(writer, @as(u32, 0)); // offset
+
+ // notify any waiters for segment initialization completion
+ try writer.writeByte(std.wasm.opcode(.i32_const));
+ try leb.writeULEB128(writer, flag_address);
+ try writer.writeByte(std.wasm.opcode(.i32_const));
+ try leb.writeILEB128(writer, @as(i32, -1)); // number of waiters
+ try writer.writeByte(std.wasm.opcode(.atomics_prefix));
+ try leb.writeULEB128(writer, std.wasm.atomicsOpcode(.memory_atomic_notify));
+ try leb.writeULEB128(writer, @as(u32, 2)); // alignment
+ try leb.writeULEB128(writer, @as(u32, 0)); // offset
+ try writer.writeByte(std.wasm.opcode(.drop));
+
+ // branch and drop segments
+ try writer.writeByte(std.wasm.opcode(.br));
+ try leb.writeULEB128(writer, @as(u32, 1));
+
+ // wait for thread to initialize memory segments
+ try writer.writeByte(std.wasm.opcode(.end)); // end $wait
+ try writer.writeByte(std.wasm.opcode(.i32_const));
+ try leb.writeULEB128(writer, flag_address);
+ try writer.writeByte(std.wasm.opcode(.i32_const));
+ try leb.writeULEB128(writer, @as(u32, 1)); // expected flag value
+ try writer.writeByte(std.wasm.opcode(.i32_const));
+ try leb.writeILEB128(writer, @as(i32, -1)); // timeout
+ try writer.writeByte(std.wasm.opcode(.atomics_prefix));
+ try leb.writeULEB128(writer, std.wasm.atomicsOpcode(.memory_atomic_wait32));
+ try leb.writeULEB128(writer, @as(u32, 2)); // alignment
+ try leb.writeULEB128(writer, @as(u32, 0)); // offset
+ try writer.writeByte(std.wasm.opcode(.drop));
+
+ try writer.writeByte(std.wasm.opcode(.end)); // end $drop
+ }
+
+ it.reset();
+ segment_index = 0;
+ while (it.next()) |entry| : (segment_index += 1) {
+ const name = entry.key_ptr.*;
+ const segment: Segment = wasm.segments.items[entry.value_ptr.*];
+ if (segment.needsPassiveInitialization(wasm.base.options.import_memory, name) and
+ !std.mem.eql(u8, name, ".bss"))
+ {
+ // The TLS region should not be dropped since it is needed
+ // during the initialization of each thread (__wasm_init_tls).
+ if (wasm.base.options.shared_memory and std.mem.eql(u8, name, ".tdata")) {
+ continue;
+ }
+
+ try writer.writeByte(std.wasm.opcode(.misc_prefix));
+ try leb.writeULEB128(writer, std.wasm.miscOpcode(.data_drop));
+ try leb.writeULEB128(writer, segment_index);
+ }
+ }
+
+ // End of the function body
+ try writer.writeByte(std.wasm.opcode(.end));
+
+ try wasm.createSyntheticFunction(
+ "__wasm_init_memory",
+ std.wasm.Type{ .params = &.{}, .returns = &.{} },
+ &function_body,
+ );
+}
+
+/// Constructs a synthetic function that performs runtime relocations for
+/// TLS symbols. This function is called by `__wasm_init_tls`.
+fn setupTLSRelocationsFunction(wasm: *Wasm) !void {
+ // This function is only created when shared memory is enabled and there are
+ // TLS GOT entries that require runtime relocation.
+ if (!wasm.base.options.shared_memory or !wasm.requiresTLSReloc()) {
+ return;
+ }
+
+ // const loc = try wasm.createSyntheticSymbol("__wasm_apply_global_tls_relocs");
+ var function_body = std.ArrayList(u8).init(wasm.base.allocator);
+ defer function_body.deinit();
+ const writer = function_body.writer();
+
+ // locals (we have none)
+ try writer.writeByte(0);
+ for (wasm.got_symbols.items, 0..) |got_loc, got_index| {
+ const sym: *Symbol = got_loc.getSymbol(wasm);
+ if (!sym.isTLS()) continue; // only relocate TLS symbols
+ if (sym.tag == .data and sym.isDefined()) {
+ // get __tls_base
+ try writer.writeByte(std.wasm.opcode(.global_get));
+ try leb.writeULEB128(writer, wasm.findGlobalSymbol("__tls_base").?.getSymbol(wasm).index);
+
+ // add the virtual address of the symbol
+ try writer.writeByte(std.wasm.opcode(.i32_const));
+ try leb.writeULEB128(writer, sym.virtual_address);
+ } else if (sym.tag == .function) {
+ @panic("TODO: relocate GOT entry of function");
+ } else continue;
+
+ try writer.writeByte(std.wasm.opcode(.i32_add));
+ try writer.writeByte(std.wasm.opcode(.global_set));
+ try leb.writeULEB128(writer, wasm.imported_globals_count + @intCast(u32, wasm.wasm_globals.items.len + got_index));
+ }
+ try writer.writeByte(std.wasm.opcode(.end));
+
+ try wasm.createSyntheticFunction(
+ "__wasm_apply_global_tls_relocs",
+ std.wasm.Type{ .params = &.{}, .returns = &.{} },
+ &function_body,
+ );
+}
+
fn validateFeatures(
wasm: *const Wasm,
to_emit: *[@typeInfo(types.Feature.Tag).Enum.fields.len]bool,
@@ -791,6 +1062,8 @@ fn validateFeatures(
// when false, we fail linking. We only verify this after a loop to catch all invalid features.
var valid_feature_set = true;
+ // will be set to true when there's any TLS segment found in any of the object files
+ var has_tls = false;
// When the user has given an explicit list of features to enable,
// we extract them and insert each into the 'allowed' list.
@@ -821,6 +1094,12 @@ fn validateFeatures(
},
}
}
+
+ for (object.segment_info) |segment| {
+ if (segment.isTLS()) {
+ has_tls = true;
+ }
+ }
}
// when we infer the features, we allow each feature found in the 'used' set
@@ -832,7 +1111,7 @@ fn validateFeatures(
allowed[used_index] = is_enabled;
emit_features_count.* += @boolToInt(is_enabled);
} else if (is_enabled and !allowed[used_index]) {
- log.err("feature '{s}' not allowed, but used by linked object", .{(@intToEnum(types.Feature.Tag, used_index)).toString()});
+ log.err("feature '{}' not allowed, but used by linked object", .{@intToEnum(types.Feature.Tag, used_index)});
log.err(" defined in '{s}'", .{wasm.objects.items[used_set >> 1].name});
valid_feature_set = false;
}
@@ -842,6 +1121,30 @@ fn validateFeatures(
return error.InvalidFeatureSet;
}
+ if (wasm.base.options.shared_memory) {
+ const disallowed_feature = disallowed[@enumToInt(types.Feature.Tag.shared_mem)];
+ if (@truncate(u1, disallowed_feature) != 0) {
+ log.err(
+ "shared-memory is disallowed by '{s}' because it wasn't compiled with 'atomics' and 'bulk-memory' features enabled",
+ .{wasm.objects.items[disallowed_feature >> 1].name},
+ );
+ valid_feature_set = false;
+ }
+
+ for ([_]types.Feature.Tag{ .atomics, .bulk_memory }) |feature| {
+ if (!allowed[@enumToInt(feature)]) {
+ log.err("feature '{}' is not used but is required for shared-memory", .{feature});
+ }
+ }
+ }
+
+ if (has_tls) {
+ for ([_]types.Feature.Tag{ .atomics, .bulk_memory }) |feature| {
+ if (!allowed[@enumToInt(feature)]) {
+ log.err("feature '{}' is not used but is required for thread-local storage", .{feature});
+ }
+ }
+ }
// For each linked object, validate the required and disallowed features
for (wasm.objects.items) |object| {
var object_used_features = [_]bool{false} ** known_features_count;
@@ -850,7 +1153,7 @@ fn validateFeatures(
// from here a feature is always used
const disallowed_feature = disallowed[@enumToInt(feature.tag)];
if (@truncate(u1, disallowed_feature) != 0) {
- log.err("feature '{s}' is disallowed, but used by linked object", .{feature.tag.toString()});
+ log.err("feature '{}' is disallowed, but used by linked object", .{feature.tag});
log.err(" disallowed by '{s}'", .{wasm.objects.items[disallowed_feature >> 1].name});
log.err(" used in '{s}'", .{object.name});
valid_feature_set = false;
@@ -863,7 +1166,7 @@ fn validateFeatures(
for (required, 0..) |required_feature, feature_index| {
const is_required = @truncate(u1, required_feature) != 0;
if (is_required and !object_used_features[feature_index]) {
- log.err("feature '{s}' is required but not used in linked object", .{(@intToEnum(types.Feature.Tag, feature_index)).toString()});
+ log.err("feature '{}' is required but not used in linked object", .{@intToEnum(types.Feature.Tag, feature_index)});
log.err(" required by '{s}'", .{wasm.objects.items[required_feature >> 1].name});
log.err(" missing in '{s}'", .{object.name});
valid_feature_set = false;
@@ -894,6 +1197,13 @@ fn resolveLazySymbols(wasm: *Wasm) !void {
try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc);
_ = wasm.resolved_symbols.swapRemove(loc);
}
+
+ if (!wasm.base.options.shared_memory) {
+ if (wasm.undefs.fetchSwapRemove("__tls_base")) |kv| {
+ const loc = try wasm.createSyntheticSymbol("__tls_base", .global);
+ try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc);
+ }
+ }
}
// Tries to find a global symbol by its name. Returns null when not found,
@@ -1517,7 +1827,7 @@ fn mapFunctionTable(wasm: *Wasm) void {
const sym_loc = wasm.findGlobalSymbol("__indirect_function_table").?;
const symbol = sym_loc.getSymbol(wasm);
const table = &wasm.tables.items[symbol.index - wasm.imported_tables_count];
- table.limits = .{ .min = index, .max = index };
+ table.limits = .{ .min = index, .max = index, .flags = 0x1 };
}
}
@@ -1630,6 +1940,7 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void {
.alignment = atom.alignment,
.size = atom.size,
.offset = 0,
+ .flags = 0,
});
}
@@ -1668,10 +1979,15 @@ fn parseAtom(wasm: *Wasm, atom_index: Atom.Index, kind: Kind) !void {
break :result index;
} else {
const index = @intCast(u32, wasm.segments.items.len);
+ var flags: u32 = 0;
+ if (wasm.base.options.shared_memory) {
+ flags |= @enumToInt(Segment.Flag.WASM_DATA_SEGMENT_IS_PASSIVE);
+ }
try wasm.segments.append(wasm.base.allocator, .{
.alignment = atom.alignment,
.size = 0,
.offset = 0,
+ .flags = flags,
});
gop.value_ptr.* = index;
@@ -1907,10 +2223,23 @@ fn initializeCallCtorsFunction(wasm: *Wasm) !void {
try writer.writeByte(std.wasm.opcode(.end));
}
- const loc = wasm.findGlobalSymbol("__wasm_call_ctors").?;
+ try wasm.createSyntheticFunction(
+ "__wasm_call_ctors",
+ std.wasm.Type{ .params = &.{}, .returns = &.{} },
+ &function_body,
+ );
+}
+
+fn createSyntheticFunction(
+ wasm: *Wasm,
+ symbol_name: []const u8,
+ func_ty: std.wasm.Type,
+ function_body: *std.ArrayList(u8),
+) !void {
+ const loc = wasm.findGlobalSymbol(symbol_name) orelse
+ try wasm.createSyntheticSymbol(symbol_name, .function);
const symbol = loc.getSymbol(wasm);
- // create type (() -> nil) as we do not have any parameters or return value.
- const ty_index = try wasm.putOrGetFuncType(.{ .params = &[_]std.wasm.Valtype{}, .returns = &[_]std.wasm.Valtype{} });
+ const ty_index = try wasm.putOrGetFuncType(func_ty);
// create function with above type
const func_index = wasm.imported_functions_count + @intCast(u32, wasm.functions.count());
try wasm.functions.putNoClobber(
@@ -1942,6 +2271,68 @@ fn initializeCallCtorsFunction(wasm: *Wasm) !void {
atom.offset = prev_atom.offset + prev_atom.size;
}
+fn initializeTLSFunction(wasm: *Wasm) !void {
+ if (!wasm.base.options.shared_memory) return;
+
+ var function_body = std.ArrayList(u8).init(wasm.base.allocator);
+ defer function_body.deinit();
+ const writer = function_body.writer();
+
+ // locals
+ try writer.writeByte(0);
+
+ // If there's a TLS segment, initialize it during runtime using the bulk-memory feature
+ if (wasm.data_segments.getIndex(".tdata")) |data_index| {
+ const segment_index = wasm.data_segments.entries.items(.value)[data_index];
+ const segment = wasm.segments.items[segment_index];
+
+ const param_local: u32 = 0;
+
+ try writer.writeByte(std.wasm.opcode(.local_get));
+ try leb.writeULEB128(writer, param_local);
+
+ const tls_base_loc = wasm.findGlobalSymbol("__tls_base").?;
+ try writer.writeByte(std.wasm.opcode(.global_get));
+ try leb.writeULEB128(writer, tls_base_loc.getSymbol(wasm).index);
+
+ // load stack values for the bulk-memory operation
+ {
+ try writer.writeByte(std.wasm.opcode(.local_get));
+ try leb.writeULEB128(writer, param_local);
+
+ try writer.writeByte(std.wasm.opcode(.i32_const));
+ try leb.writeULEB128(writer, @as(u32, 0)); //segment offset
+
+ try writer.writeByte(std.wasm.opcode(.i32_const));
+ try leb.writeULEB128(writer, @as(u32, segment.size)); // segment size
+ }
+
+ // perform the bulk-memory operation to initialize the data segment
+ try writer.writeByte(std.wasm.opcode(.misc_prefix));
+ try leb.writeULEB128(writer, std.wasm.miscOpcode(.memory_init));
+ // segment immediate
+ try leb.writeULEB128(writer, @intCast(u32, data_index));
+ // memory index immediate (always 0)
+ try leb.writeULEB128(writer, @as(u32, 0));
+ }
+
+ // If we have to perform any TLS relocations, call the corresponding function
+ // which performs all runtime TLS relocations. This is a synthetic function,
+ // generated by the linker.
+ if (wasm.findGlobalSymbol("__wasm_apply_global_tls_relocs")) |loc| {
+ try writer.writeByte(std.wasm.opcode(.call));
+ try leb.writeULEB128(writer, loc.getSymbol(wasm).index);
+ }
+
+ try writer.writeByte(std.wasm.opcode(.end));
+
+ try wasm.createSyntheticFunction(
+ "__wasm_init_tls",
+ std.wasm.Type{ .params = &.{.i32}, .returns = &.{} },
+ &function_body,
+ );
+}
+
fn setupImports(wasm: *Wasm) !void {
log.debug("Merging imports", .{});
var discarded_it = wasm.discarded.keyIterator();
@@ -2224,11 +2615,50 @@ fn setupMemory(wasm: *Wasm) !void {
while (data_seg_it.next()) |entry| {
const segment = &wasm.segments.items[entry.value_ptr.*];
memory_ptr = std.mem.alignForwardGeneric(u64, memory_ptr, segment.alignment);
+
+ // set TLS-related symbols
+ if (mem.eql(u8, entry.key_ptr.*, ".tdata")) {
+ if (wasm.findGlobalSymbol("__tls_size")) |loc| {
+ const sym = loc.getSymbol(wasm);
+ sym.index = @intCast(u32, wasm.wasm_globals.items.len) + wasm.imported_globals_count;
+ try wasm.wasm_globals.append(wasm.base.allocator, .{
+ .global_type = .{ .valtype = .i32, .mutable = false },
+ .init = .{ .i32_const = @intCast(i32, segment.size) },
+ });
+ }
+ if (wasm.findGlobalSymbol("__tls_align")) |loc| {
+ const sym = loc.getSymbol(wasm);
+ sym.index = @intCast(u32, wasm.wasm_globals.items.len) + wasm.imported_globals_count;
+ try wasm.wasm_globals.append(wasm.base.allocator, .{
+ .global_type = .{ .valtype = .i32, .mutable = false },
+ .init = .{ .i32_const = @intCast(i32, segment.alignment) },
+ });
+ }
+ if (wasm.findGlobalSymbol("__tls_base")) |loc| {
+ const sym = loc.getSymbol(wasm);
+ sym.index = @intCast(u32, wasm.wasm_globals.items.len) + wasm.imported_globals_count;
+ try wasm.wasm_globals.append(wasm.base.allocator, .{
+ .global_type = .{ .valtype = .i32, .mutable = wasm.base.options.shared_memory },
+ .init = .{ .i32_const = if (wasm.base.options.shared_memory) @as(u32, 0) else @intCast(i32, memory_ptr) },
+ });
+ }
+ }
+
memory_ptr += segment.size;
segment.offset = offset;
offset += segment.size;
}
+ // create the memory init flag which is used by the init memory function
+ if (wasm.base.options.shared_memory and wasm.hasPassiveInitializationSegments()) {
+ // align to pointer size
+ memory_ptr = mem.alignForwardGeneric(u64, memory_ptr, 4);
+ const loc = try wasm.createSyntheticSymbol("__wasm_init_memory_flag", .data);
+ const sym = loc.getSymbol(wasm);
+ sym.virtual_address = @intCast(u32, memory_ptr);
+ memory_ptr += 4;
+ }
+
if (!place_stack_first and !is_obj) {
memory_ptr = std.mem.alignForwardGeneric(u64, memory_ptr, stack_alignment);
memory_ptr += stack_size;
@@ -2286,6 +2716,10 @@ fn setupMemory(wasm: *Wasm) !void {
return error.MemoryTooBig;
}
wasm.memories.limits.max = @intCast(u32, max_memory / page_size);
+ wasm.memories.limits.setFlag(.WASM_LIMITS_FLAG_HAS_MAX);
+ if (wasm.base.options.shared_memory) {
+ wasm.memories.limits.setFlag(.WASM_LIMITS_FLAG_IS_SHARED);
+ }
log.debug("Maximum memory pages: {?d}", .{wasm.memories.limits.max});
}
}
@@ -2305,7 +2739,16 @@ pub fn getMatchingSegment(wasm: *Wasm, object_index: u16, relocatable_index: u32
const result = try wasm.data_segments.getOrPut(wasm.base.allocator, segment_info.outputName(merge_segment));
if (!result.found_existing) {
result.value_ptr.* = index;
- try wasm.appendDummySegment();
+ var flags: u32 = 0;
+ if (wasm.base.options.shared_memory) {
+ flags |= @enumToInt(Segment.Flag.WASM_DATA_SEGMENT_IS_PASSIVE);
+ }
+ try wasm.segments.append(wasm.base.allocator, .{
+ .alignment = 1,
+ .size = 0,
+ .offset = 0,
+ .flags = flags,
+ });
return index;
} else return result.value_ptr.*;
},
@@ -2379,6 +2822,7 @@ fn appendDummySegment(wasm: *Wasm) !void {
.alignment = 1,
.size = 0,
.offset = 0,
+ .flags = 0,
});
}
@@ -2746,6 +3190,9 @@ fn linkWithZld(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) l
try wasm.mergeSections();
try wasm.mergeTypes();
try wasm.initializeCallCtorsFunction();
+ try wasm.setupInitMemoryFunction();
+ try wasm.setupTLSRelocationsFunction();
+ try wasm.initializeTLSFunction();
try wasm.setupExports();
try wasm.writeToFile(enabled_features, emit_features_count, arena);
@@ -2828,22 +3275,31 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
const decl = mod.declPtr(entry.key_ptr.*);
if (decl.isExtern()) continue;
const atom_index = entry.value_ptr.*;
+ const atom = wasm.getAtomPtr(atom_index);
if (decl.ty.zigTypeTag() == .Fn) {
try wasm.parseAtom(atom_index, .function);
} else if (decl.getVariable()) |variable| {
if (!variable.is_mutable) {
try wasm.parseAtom(atom_index, .{ .data = .read_only });
} else if (variable.init.isUndefDeep()) {
- try wasm.parseAtom(atom_index, .{ .data = .uninitialized });
+ // for safe build modes, we store the atom in the data segment,
+ // whereas for unsafe build modes we store it in bss.
+ const is_initialized = wasm.base.options.optimize_mode == .Debug or
+ wasm.base.options.optimize_mode == .ReleaseSafe;
+ try wasm.parseAtom(atom_index, .{ .data = if (is_initialized) .initialized else .uninitialized });
} else {
- try wasm.parseAtom(atom_index, .{ .data = .initialized });
+ // when the decl is all zeroes, we store the atom in the bss segment,
+ // in all other cases it will be in the data segment.
+ const is_zeroes = for (atom.code.items) |byte| {
+ if (byte != 0) break false;
+ } else true;
+ try wasm.parseAtom(atom_index, .{ .data = if (is_zeroes) .uninitialized else .initialized });
}
} else {
try wasm.parseAtom(atom_index, .{ .data = .read_only });
}
// also parse atoms for a decl's locals
- const atom = wasm.getAtomPtr(atom_index);
for (atom.locals.items) |local_atom_index| {
try wasm.parseAtom(local_atom_index, .{ .data = .read_only });
}
@@ -2865,6 +3321,9 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
try wasm.mergeSections();
try wasm.mergeTypes();
try wasm.initializeCallCtorsFunction();
+ try wasm.setupInitMemoryFunction();
+ try wasm.setupTLSRelocationsFunction();
+ try wasm.initializeTLSFunction();
try wasm.setupExports();
try wasm.writeToFile(enabled_features, emit_features_count, arena);
}
@@ -3087,6 +3546,19 @@ fn writeToFile(
section_count += 1;
}
+ // When the shared-memory option is enabled, we *must* emit the 'data count' section.
+ const data_segments_count = wasm.data_segments.count() - @boolToInt(wasm.data_segments.contains(".bss") and import_memory);
+ if (data_segments_count != 0 and wasm.base.options.shared_memory) {
+ const header_offset = try reserveVecSectionHeader(&binary_bytes);
+ try writeVecSectionHeader(
+ binary_bytes.items,
+ header_offset,
+ .data_count,
+ @intCast(u32, binary_bytes.items.len - header_offset - header_size),
+ @intCast(u32, data_segments_count),
+ );
+ }
+
// Code section
var code_section_size: u32 = 0;
if (wasm.code_section_index) |code_index| {
@@ -3137,7 +3609,7 @@ fn writeToFile(
}
// Data section
- if (wasm.data_segments.count() != 0) {
+ if (data_segments_count != 0) {
const header_offset = try reserveVecSectionHeader(&binary_bytes);
var it = wasm.data_segments.iterator();
@@ -3152,10 +3624,15 @@ fn writeToFile(
segment_count += 1;
var atom_index = wasm.atoms.get(segment_index).?;
- // flag and index to memory section (currently, there can only be 1 memory section in wasm)
- try leb.writeULEB128(binary_writer, @as(u32, 0));
+ try leb.writeULEB128(binary_writer, segment.flags);
+ if (segment.flags & @enumToInt(Wasm.Segment.Flag.WASM_DATA_SEGMENT_HAS_MEMINDEX) != 0) {
+ try leb.writeULEB128(binary_writer, @as(u32, 0)); // memory is always index 0 as we only have 1 memory entry
+ }
+ // when a segment is passive, it's initialized during runtime.
+ if (!segment.isPassive()) {
+ try emitInit(binary_writer, .{ .i32_const = @bitCast(i32, segment.offset) });
+ }
// offset into data section
- try emitInit(binary_writer, .{ .i32_const = @bitCast(i32, segment.offset) });
try leb.writeULEB128(binary_writer, segment.size);
// fill in the offset table and the data segments
@@ -3404,7 +3881,8 @@ fn emitFeaturesSection(binary_bytes: *std.ArrayList(u8), enabled_features: []con
if (enabled) {
const feature: types.Feature = .{ .prefix = .used, .tag = @intToEnum(types.Feature.Tag, feature_index) };
try leb.writeULEB128(writer, @enumToInt(feature.prefix));
- const string = feature.tag.toString();
+ var buf: [100]u8 = undefined;
+ const string = try std.fmt.bufPrint(&buf, "{}", .{feature.tag});
try leb.writeULEB128(writer, @intCast(u32, string.len));
try writer.writeAll(string);
}
@@ -3498,10 +3976,10 @@ fn emitNameSubsection(wasm: *Wasm, section_id: std.wasm.NameSubsection, names: a
}
fn emitLimits(writer: anytype, limits: std.wasm.Limits) !void {
- try leb.writeULEB128(writer, @boolToInt(limits.max != null));
+ try writer.writeByte(limits.flags);
try leb.writeULEB128(writer, limits.min);
- if (limits.max) |max| {
- try leb.writeULEB128(writer, max);
+ if (limits.hasFlag(.WASM_LIMITS_FLAG_HAS_MAX)) {
+ try leb.writeULEB128(writer, limits.max);
}
}
@@ -4196,6 +4674,17 @@ fn emitDataRelocations(
try writeCustomSectionHeader(binary_bytes.items, header_offset, size);
}
+fn hasPassiveInitializationSegments(wasm: *const Wasm) bool {
+ var it = wasm.data_segments.iterator();
+ while (it.next()) |entry| {
+ const segment: Segment = wasm.segments.items[entry.value_ptr.*];
+ if (segment.needsPassiveInitialization(wasm.base.options.import_memory, entry.key_ptr.*)) {
+ return true;
+ }
+ }
+ return false;
+}
+
pub fn getTypeIndex(wasm: *const Wasm, func_type: std.wasm.Type) ?u32 {
var index: u32 = 0;
while (index < wasm.func_types.items.len) : (index += 1) {
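Note: `setupInitMemoryFunction` above encodes a three-state handshake in `__wasm_init_memory_flag`: 0 = uninitialized, 1 = some thread is initializing, 2 = initialization complete. The thread that wins the cmpxchg runs the `memory.init`/`memory.fill` instructions, stores 2 and notifies; every other thread waits on the flag; finally each thread drops the passive segments it no longer needs. For intuition only, a rough host-side analogue of that protocol using Zig atomics and a futex (the linker itself emits the wasm `i32.atomic.rmw.cmpxchg`, `memory.atomic.wait32` and `memory.atomic.notify` instructions shown above; `initOnce` is a hypothetical name):

```zig
const std = @import("std");

/// Host-side sketch of the __wasm_init_memory handshake:
/// the flag moves 0 -> 1 (one thread wins and initializes) -> 2 (done),
/// and the losers block on the flag until the winner notifies them.
fn initOnce(flag: *std.atomic.Atomic(u32), initialize: *const fn () void) void {
    // cmpxchg 0 -> 1: the thread that succeeds runs the initializer ($init).
    if (flag.compareAndSwap(0, 1, .SeqCst, .SeqCst) == null) {
        initialize();
        flag.store(2, .SeqCst);
        // memory.atomic.notify with -1 waiters: wake everyone.
        std.Thread.Futex.wake(flag, std.math.maxInt(u32));
        return;
    }
    // Everyone else waits until the flag reaches 2 ($wait), then proceeds ($drop).
    while (flag.load(.SeqCst) != 2) {
        std.Thread.Futex.wait(flag, 1); // returns immediately if flag != 1
    }
}
```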
diff --git a/src/link/Wasm/Atom.zig b/src/link/Wasm/Atom.zig
index 0c9d761f05..7d2f5a6696 100644
--- a/src/link/Wasm/Atom.zig
+++ b/src/link/Wasm/Atom.zig
@@ -126,10 +126,12 @@ pub fn resolveRelocs(atom: *Atom, wasm_bin: *const Wasm) void {
.R_WASM_TABLE_INDEX_SLEB,
.R_WASM_TABLE_NUMBER_LEB,
.R_WASM_TYPE_INDEX_LEB,
+ .R_WASM_MEMORY_ADDR_TLS_SLEB,
=> leb.writeUnsignedFixed(5, atom.code.items[reloc.offset..][0..5], @intCast(u32, value)),
.R_WASM_MEMORY_ADDR_LEB64,
.R_WASM_MEMORY_ADDR_SLEB64,
.R_WASM_TABLE_INDEX_SLEB64,
+ .R_WASM_MEMORY_ADDR_TLS_SLEB64,
=> leb.writeUnsignedFixed(10, atom.code.items[reloc.offset..][0..10], value),
}
}
@@ -190,5 +192,10 @@ fn relocationValue(atom: Atom, relocation: types.Relocation, wasm_bin: *const Wa
const rel_value = @intCast(i32, target_atom.offset + offset) + relocation.addend;
return @intCast(u32, rel_value);
},
+ .R_WASM_MEMORY_ADDR_TLS_SLEB,
+ .R_WASM_MEMORY_ADDR_TLS_SLEB64,
+ => {
+ @panic("TODO: Implement TLS relocations");
+ },
}
}
diff --git a/src/link/Wasm/Object.zig b/src/link/Wasm/Object.zig
index 45c9464ec8..363648971a 100644
--- a/src/link/Wasm/Object.zig
+++ b/src/link/Wasm/Object.zig
@@ -601,8 +601,8 @@ fn Parser(comptime ReaderType: type) type {
});
for (relocations) |*relocation| {
- const rel_type = try leb.readULEB128(u8, reader);
- const rel_type_enum = @intToEnum(types.Relocation.RelocationType, rel_type);
+ const rel_type = try reader.readByte();
+ const rel_type_enum = std.meta.intToEnum(types.Relocation.RelocationType, rel_type) catch return error.MalformedSection;
relocation.* = .{
.relocation_type = rel_type_enum,
.offset = try leb.readULEB128(u32, reader),
@@ -674,6 +674,12 @@ fn Parser(comptime ReaderType: type) type {
segment.alignment,
segment.flags,
});
+
+ // Support legacy object files that mark a segment as TLS through its name instead of the TLS flag.
+ if (!segment.isTLS() and (std.mem.startsWith(u8, segment.name, ".tdata") or std.mem.startsWith(u8, segment.name, ".tbss"))) {
+ // set the flag so we can simply check for the flag in the rest of the linker.
+ segment.flags |= @enumToInt(types.Segment.Flags.WASM_SEG_FLAG_TLS);
+ }
}
parser.object.segment_info = segments;
},
@@ -846,12 +852,17 @@ fn readEnum(comptime T: type, reader: anytype) !T {
}
fn readLimits(reader: anytype) !std.wasm.Limits {
- const flags = try readLeb(u1, reader);
+ const flags = try reader.readByte();
const min = try readLeb(u32, reader);
- return std.wasm.Limits{
+ var limits: std.wasm.Limits = .{
+ .flags = flags,
.min = min,
- .max = if (flags == 0) null else try readLeb(u32, reader),
+ .max = undefined,
};
+ if (limits.hasFlag(.WASM_LIMITS_FLAG_HAS_MAX)) {
+ limits.max = try readLeb(u32, reader);
+ }
+ return limits;
}
fn readInit(reader: anytype) !std.wasm.InitExpression {
@@ -919,11 +930,29 @@ pub fn parseIntoAtoms(object: *Object, gpa: Allocator, object_index: u16, wasm_b
reloc.offset -= relocatable_data.offset;
try atom.relocs.append(gpa, reloc);
- if (relocation.isTableIndex()) {
- try wasm_bin.function_table.put(gpa, .{
- .file = object_index,
- .index = relocation.index,
- }, 0);
+ switch (relocation.relocation_type) {
+ .R_WASM_TABLE_INDEX_I32,
+ .R_WASM_TABLE_INDEX_I64,
+ .R_WASM_TABLE_INDEX_SLEB,
+ .R_WASM_TABLE_INDEX_SLEB64,
+ => {
+ try wasm_bin.function_table.put(gpa, .{
+ .file = object_index,
+ .index = relocation.index,
+ }, 0);
+ },
+ .R_WASM_GLOBAL_INDEX_I32,
+ .R_WASM_GLOBAL_INDEX_LEB,
+ => {
+ const sym = object.symtable[relocation.index];
+ if (sym.tag != .global) {
+ try wasm_bin.got_symbols.append(
+ wasm_bin.base.allocator,
+ .{ .file = object_index, .index = relocation.index },
+ );
+ }
+ },
+ else => {},
}
}
}
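Note: the relocation-parsing change above replaces `@intToEnum` (which is illegal behavior for a byte that does not correspond to a named tag of the exhaustive `RelocationType` enum) with `std.meta.intToEnum`, so a corrupt object file now surfaces as `error.MalformedSection` instead. A tiny illustration of the difference, using a hypothetical enum:

```zig
const std = @import("std");

test "rejecting an out-of-range enum tag" {
    // Hypothetical exhaustive enum standing in for RelocationType.
    const Kind = enum(u8) { a = 0, b = 1, c = 2 };

    // std.meta.intToEnum reports invalid values as a recoverable error
    // instead of invoking illegal behavior the way @intToEnum would.
    try std.testing.expectEqual(Kind.b, try std.meta.intToEnum(Kind, 1));
    try std.testing.expectError(error.InvalidEnumTag, std.meta.intToEnum(Kind, 0xFF));
}
```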
diff --git a/src/link/Wasm/Symbol.zig b/src/link/Wasm/Symbol.zig
index 156b507a32..8a1c4c5fdb 100644
--- a/src/link/Wasm/Symbol.zig
+++ b/src/link/Wasm/Symbol.zig
@@ -90,6 +90,10 @@ pub fn requiresImport(symbol: Symbol) bool {
return true;
}
+pub fn isTLS(symbol: Symbol) bool {
+ return symbol.flags & @enumToInt(Flag.WASM_SYM_TLS) != 0;
+}
+
pub fn hasFlag(symbol: Symbol, flag: Flag) bool {
return symbol.flags & @enumToInt(flag) != 0;
}
diff --git a/src/link/Wasm/types.zig b/src/link/Wasm/types.zig
index 964ba04ba0..801c25e9d9 100644
--- a/src/link/Wasm/types.zig
+++ b/src/link/Wasm/types.zig
@@ -38,6 +38,8 @@ pub const Relocation = struct {
R_WASM_TABLE_INDEX_SLEB64 = 18,
R_WASM_TABLE_INDEX_I64 = 19,
R_WASM_TABLE_NUMBER_LEB = 20,
+ R_WASM_MEMORY_ADDR_TLS_SLEB = 21,
+ R_WASM_MEMORY_ADDR_TLS_SLEB64 = 25,
/// Returns true for relocation types where the `addend` field is present.
pub fn addendIsPresent(self: RelocationType) bool {
@@ -48,6 +50,8 @@ pub const Relocation = struct {
.R_WASM_MEMORY_ADDR_LEB64,
.R_WASM_MEMORY_ADDR_SLEB64,
.R_WASM_MEMORY_ADDR_I64,
+ .R_WASM_MEMORY_ADDR_TLS_SLEB,
+ .R_WASM_MEMORY_ADDR_TLS_SLEB64,
.R_WASM_FUNCTION_OFFSET_I32,
.R_WASM_SECTION_OFFSET_I32,
=> true,
@@ -67,18 +71,6 @@ pub const Relocation = struct {
};
}
- /// Returns true when the relocation represents a table index relocatable
- pub fn isTableIndex(self: Relocation) bool {
- return switch (self.relocation_type) {
- .R_WASM_TABLE_INDEX_I32,
- .R_WASM_TABLE_INDEX_I64,
- .R_WASM_TABLE_INDEX_SLEB,
- .R_WASM_TABLE_INDEX_SLEB64,
- => true,
- else => false,
- };
- }
-
pub fn format(self: Relocation, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
_ = fmt;
_ = options;
@@ -125,23 +117,34 @@ pub const Segment = struct {
/// Bitfield containing flags for a segment
flags: u32,
+ pub fn isTLS(segment: Segment) bool {
+ return segment.flags & @enumToInt(Flags.WASM_SEG_FLAG_TLS) != 0;
+ }
+
/// Returns the name as how it will be output into the final object
/// file or binary. When `merge_segments` is true, this will return the
/// short name. i.e. ".rodata". When false, it returns the entire name instead.
- pub fn outputName(self: Segment, merge_segments: bool) []const u8 {
- if (std.mem.startsWith(u8, self.name, ".synthetic")) return ".synthetic"; // always merge
- if (!merge_segments) return self.name;
- if (std.mem.startsWith(u8, self.name, ".rodata.")) {
+ pub fn outputName(segment: Segment, merge_segments: bool) []const u8 {
+ if (segment.isTLS()) {
+ return ".tdata";
+ } else if (!merge_segments) {
+ return segment.name;
+ } else if (std.mem.startsWith(u8, segment.name, ".rodata.")) {
return ".rodata";
- } else if (std.mem.startsWith(u8, self.name, ".text.")) {
+ } else if (std.mem.startsWith(u8, segment.name, ".text.")) {
return ".text";
- } else if (std.mem.startsWith(u8, self.name, ".data.")) {
+ } else if (std.mem.startsWith(u8, segment.name, ".data.")) {
return ".data";
- } else if (std.mem.startsWith(u8, self.name, ".bss.")) {
+ } else if (std.mem.startsWith(u8, segment.name, ".bss.")) {
return ".bss";
}
- return self.name;
+ return segment.name;
}
+
+ pub const Flags = enum(u32) {
+ WASM_SEG_FLAG_STRINGS = 0x1,
+ WASM_SEG_FLAG_TLS = 0x2,
+ };
};
pub const InitFunc = struct {
@@ -205,8 +208,10 @@ pub const Feature = struct {
return @intToEnum(Tag, @enumToInt(feature));
}
- pub fn toString(tag: Tag) []const u8 {
- return switch (tag) {
+ pub fn format(tag: Tag, comptime fmt: []const u8, opt: std.fmt.FormatOptions, writer: anytype) !void {
+ _ = fmt;
+ _ = opt;
+ try writer.writeAll(switch (tag) {
.atomics => "atomics",
.bulk_memory => "bulk-memory",
.exception_handling => "exception-handling",
@@ -220,7 +225,7 @@ pub const Feature = struct {
.simd128 => "simd128",
.tail_call => "tail-call",
.shared_mem => "shared-mem",
- };
+ });
}
};
@@ -233,7 +238,7 @@ pub const Feature = struct {
pub fn format(feature: Feature, comptime fmt: []const u8, opt: std.fmt.FormatOptions, writer: anytype) !void {
_ = opt;
_ = fmt;
- try writer.print("{c} {s}", .{ feature.prefix, feature.tag.toString() });
+ try writer.print("{c} {}", .{ feature.prefix, feature.tag });
}
};
diff --git a/test/link/wasm/bss/build.zig b/test/link/wasm/bss/build.zig
index bba2e7c602..4a26e78a12 100644
--- a/test/link/wasm/bss/build.zig
+++ b/test/link/wasm/bss/build.zig
@@ -6,38 +6,78 @@ pub fn build(b: *std.Build) void {
const test_step = b.step("test", "Test");
b.default_step = test_step;
- const lib = b.addSharedLibrary(.{
- .name = "lib",
- .root_source_file = .{ .path = "lib.zig" },
- .target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
- .optimize = .Debug,
- });
- lib.use_llvm = false;
- lib.use_lld = false;
- lib.strip = false;
- // to make sure the bss segment is emitted, we must import memory
- lib.import_memory = true;
- lib.install();
-
- const check_lib = lib.checkObject();
-
- // since we import memory, make sure it exists with the correct naming
- check_lib.checkStart("Section import");
- check_lib.checkNext("entries 1");
- check_lib.checkNext("module env"); // default module name is "env"
- check_lib.checkNext("name memory"); // as per linker specification
-
- // since we are importing memory, ensure it's not exported
- check_lib.checkNotPresent("Section export");
-
- // validate the name of the stack pointer
- check_lib.checkStart("Section custom");
- check_lib.checkNext("type data_segment");
- check_lib.checkNext("names 2");
- check_lib.checkNext("index 0");
- check_lib.checkNext("name .rodata");
- check_lib.checkNext("index 1"); // bss section always last
- check_lib.checkNext("name .bss");
-
- test_step.dependOn(&check_lib.step);
+ add(b, test_step, .Debug, true);
+ add(b, test_step, .ReleaseFast, false);
+ add(b, test_step, .ReleaseSmall, false);
+ add(b, test_step, .ReleaseSafe, true);
+}
+
+fn add(b: *std.Build, test_step: *std.Build.Step, optimize_mode: std.builtin.OptimizeMode, is_safe: bool) void {
+ {
+ const lib = b.addSharedLibrary(.{
+ .name = "lib",
+ .root_source_file = .{ .path = "lib.zig" },
+ .target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
+ .optimize = optimize_mode,
+ });
+ lib.use_llvm = false;
+ lib.use_lld = false;
+ lib.strip = false;
+ // to make sure the bss segment is emitted, we must import memory
+ lib.import_memory = true;
+
+ const check_lib = lib.checkObject();
+
+ // since we import memory, make sure it exists with the correct naming
+ check_lib.checkStart("Section import");
+ check_lib.checkNext("entries 1");
+ check_lib.checkNext("module env"); // default module name is "env"
+ check_lib.checkNext("name memory"); // as per linker specification
+
+ // since we are importing memory, ensure it's not exported
+ check_lib.checkNotPresent("Section export");
+
+ // validate the name of the stack pointer
+ check_lib.checkStart("Section custom");
+ check_lib.checkNext("type data_segment");
+ check_lib.checkNext("names 2");
+ check_lib.checkNext("index 0");
+ check_lib.checkNext("name .rodata");
+ // for safe optimization modes `undefined` is stored in data instead of bss.
+ if (is_safe) {
+ check_lib.checkNext("index 1");
+ check_lib.checkNext("name .data");
+ check_lib.checkNotPresent("name .bss");
+ } else {
+ check_lib.checkNext("index 1"); // bss section always last
+ check_lib.checkNext("name .bss");
+ }
+ test_step.dependOn(&check_lib.step);
+ }
+
+ // verify that a zeroed declaration is stored in bss for all optimization modes.
+ {
+ const lib = b.addSharedLibrary(.{
+ .name = "lib",
+ .root_source_file = .{ .path = "lib2.zig" },
+ .target = .{ .cpu_arch = .wasm32, .os_tag = .freestanding },
+ .optimize = optimize_mode,
+ });
+ lib.use_llvm = false;
+ lib.use_lld = false;
+ lib.strip = false;
+ // to make sure the bss segment is emitted, we must import memory
+ lib.import_memory = true;
+
+ const check_lib = lib.checkObject();
+ check_lib.checkStart("Section custom");
+ check_lib.checkNext("type data_segment");
+ check_lib.checkNext("names 2");
+ check_lib.checkNext("index 0");
+ check_lib.checkNext("name .rodata");
+ check_lib.checkNext("index 1");
+ check_lib.checkNext("name .bss");
+
+ test_step.dependOn(&check_lib.step);
+ }
}
diff --git a/test/link/wasm/bss/lib2.zig b/test/link/wasm/bss/lib2.zig
new file mode 100644
index 0000000000..9f43128880
--- /dev/null
+++ b/test/link/wasm/bss/lib2.zig
@@ -0,0 +1,5 @@
+pub var bss: u32 = 0;
+
+export fn foo() void {
+ _ = bss;
+}
diff --git a/test/standalone.zig b/test/standalone.zig
index 4cf795a85f..98297e9578 100644
--- a/test/standalone.zig
+++ b/test/standalone.zig
@@ -213,6 +213,11 @@ pub const build_cases = [_]BuildCase{
.build_root = "test/standalone/issue_13030",
.import = @import("standalone/issue_13030/build.zig"),
},
+ // TODO restore this test
+ //.{
+ // .build_root = "test/standalone/options",
+ // .import = @import("standalone/options/build.zig"),
+ //},
};
const std = @import("std");
diff --git a/test/standalone/options/build.zig b/test/standalone/options/build.zig
index 5e894102a7..28e7e31eb7 100644
--- a/test/standalone/options/build.zig
+++ b/test/standalone/options/build.zig
@@ -1,13 +1,10 @@
const std = @import("std");
pub fn build(b: *std.Build) void {
- const target = b.standardTargetOptions(.{});
- const optimize = b.standardOptimizeOption(.{});
-
const main = b.addTest(.{
.root_source_file = .{ .path = "src/main.zig" },
- .target = target,
- .optimize = optimize,
+ .target = .{},
+ .optimize = .Debug,
});
const options = b.addOptions();