-rw-r--r--  lib/std/Progress.zig                                          | 142
-rw-r--r--  lib/std/Target.zig                                            |  13
-rw-r--r--  lib/std/builtin.zig                                           |   3
-rw-r--r--  src/Sema.zig                                                  |  33
-rw-r--r--  src/Sema/comptime_ptr_access.zig                              |   9
-rw-r--r--  src/Type.zig                                                  |   5
-rw-r--r--  src/Zcu.zig                                                   |   1
-rw-r--r--  src/codegen/c.zig                                             |  42
-rw-r--r--  src/codegen/llvm.zig                                          |   4
-rw-r--r--  src/target.zig                                                |   1
-rw-r--r--  test/behavior/comptime_memory.zig                             |  63
-rw-r--r--  test/behavior/tuple.zig                                       |  18
-rw-r--r--  test/cases/compile_errors/@import_zon_bad_type.zig            |   6
-rw-r--r--  test/cases/compile_errors/anytype_param_requires_comptime.zig |   2
-rw-r--r--  test/cases/compile_errors/bogus_method_call_on_slice.zig      |   2
-rw-r--r--  test/cases/compile_errors/coerce_anon_struct.zig              |   2
-rw-r--r--  test/cases/compile_errors/redundant_try.zig                   |   4
-rw-r--r--  test/cases/compile_errors/runtime_store_to_comptime_field.zig |  19
18 files changed, 271 insertions, 98 deletions
diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig
index 2213bd9b11..d9ff03a3fe 100644
--- a/lib/std/Progress.zig
+++ b/lib/std/Progress.zig
@@ -39,10 +39,20 @@ draw_buffer: []u8,
/// CPU cache.
node_parents: []Node.Parent,
node_storage: []Node.Storage,
-node_freelist: []Node.OptionalIndex,
-node_freelist_first: Node.OptionalIndex,
+node_freelist_next: []Node.OptionalIndex,
+node_freelist: Freelist,
+/// The number of elements in the node arrays that have been used so far. Nodes before this
+/// index are either active or on the freelist; the remaining nodes are implicitly free. This
+/// value may at times temporarily exceed the node count.
node_end_index: u32,
+const Freelist = packed struct(u32) {
+ head: Node.OptionalIndex,
+ /// Whenever `node_freelist` is added to, this generation is incremented
+ /// to avoid ABA bugs when acquiring nodes. Wrapping arithmetic is used.
+ generation: u24,
+};
+
pub const TerminalMode = union(enum) {
off,
ansi_escape_codes,
@@ -112,7 +122,7 @@ pub const Node = struct {
// causes `completed_count` to be treated as a file descriptor, so
// the order here matters.
@atomicStore(u32, &s.completed_count, integer, .monotonic);
- @atomicStore(u32, &s.estimated_total_count, std.math.maxInt(u32), .release);
+ @atomicStore(u32, &s.estimated_total_count, std.math.maxInt(u32), .release); // synchronizes with acquire in `serialize`
}
/// Not thread-safe.
@@ -184,12 +194,24 @@ pub const Node = struct {
const node_index = node.index.unwrap() orelse return Node.none;
const parent = node_index.toParent();
- const freelist_head = &global_progress.node_freelist_first;
- var opt_free_index = @atomicLoad(Node.OptionalIndex, freelist_head, .seq_cst);
- while (opt_free_index.unwrap()) |free_index| {
- const freelist_ptr = freelistByIndex(free_index);
- const next = @atomicLoad(Node.OptionalIndex, freelist_ptr, .seq_cst);
- opt_free_index = @cmpxchgWeak(Node.OptionalIndex, freelist_head, opt_free_index, next, .seq_cst, .seq_cst) orelse {
+ const freelist = &global_progress.node_freelist;
+ var old_freelist = @atomicLoad(Freelist, freelist, .acquire); // acquire to ensure we have the correct "next" entry
+ while (old_freelist.head.unwrap()) |free_index| {
+ const next_ptr = freelistNextByIndex(free_index);
+ const new_freelist: Freelist = .{
+ .head = @atomicLoad(Node.OptionalIndex, next_ptr, .monotonic),
+ // We don't need to increment the generation when removing nodes from the free list,
+ // only when adding them. (This choice is arbitrary; the opposite would also work.)
+ .generation = old_freelist.generation,
+ };
+ old_freelist = @cmpxchgWeak(
+ Freelist,
+ freelist,
+ old_freelist,
+ new_freelist,
+ .acquire, // not theoretically necessary, but not allowed to be weaker than the failure order
+ .acquire, // ensure we have the correct `node_freelist_next` entry on the next iteration
+ ) orelse {
// We won the allocation race.
return init(free_index, parent, name, estimated_total_items);
};
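To make the ABA hazard concrete: without the generation tag, thread A can read head = X and next[X] = Y, get preempted while thread B pops X, pops Y, and pushes X back, and A's compare-and-swap on the bare head still succeeds, installing the now-live Y as the list head. Tagging the head with a generation that is bumped on every push makes A's stale CAS fail. Here is a minimal sketch of the pop side in the same style as the hunk above, using hypothetical names (`Freelist` with a `u8` head, `NONE`, `pop`) rather than the actual `std.Progress` code:

```zig
const Freelist = packed struct(u32) {
    head: u8, // index of the first free node; NONE means the list is empty
    generation: u24, // bumped on every push; see the push sketch below
};
const NONE: u8 = 0xff;

fn pop(list: *Freelist, next: []u8) ?u8 {
    var old = @atomicLoad(Freelist, list, .acquire);
    while (old.head != NONE) {
        const new: Freelist = .{
            .head = @atomicLoad(u8, &next[old.head], .monotonic),
            .generation = old.generation, // pops leave the generation alone
        };
        // On failure `@cmpxchgWeak` returns the observed value and we retry;
        // on success (null) we own `old.head`.
        old = @cmpxchgWeak(Freelist, list, old, new, .acquire, .acquire) orelse
            return old.head;
    }
    return null;
}
```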
@@ -243,18 +265,28 @@ pub const Node = struct {
}
const index = n.index.unwrap() orelse return;
const parent_ptr = parentByIndex(index);
- if (parent_ptr.unwrap()) |parent_index| {
+ if (@atomicLoad(Node.Parent, parent_ptr, .monotonic).unwrap()) |parent_index| {
_ = @atomicRmw(u32, &storageByIndex(parent_index).completed_count, .Add, 1, .monotonic);
- @atomicStore(Node.Parent, parent_ptr, .unused, .seq_cst);
+ @atomicStore(Node.Parent, parent_ptr, .unused, .monotonic);
- const freelist_head = &global_progress.node_freelist_first;
- var first = @atomicLoad(Node.OptionalIndex, freelist_head, .seq_cst);
+ const freelist = &global_progress.node_freelist;
+ var old_freelist = @atomicLoad(Freelist, freelist, .monotonic);
while (true) {
- @atomicStore(Node.OptionalIndex, freelistByIndex(index), first, .seq_cst);
- first = @cmpxchgWeak(Node.OptionalIndex, freelist_head, first, index.toOptional(), .seq_cst, .seq_cst) orelse break;
+ @atomicStore(Node.OptionalIndex, freelistNextByIndex(index), old_freelist.head, .monotonic);
+ old_freelist = @cmpxchgWeak(
+ Freelist,
+ freelist,
+ old_freelist,
+ .{ .head = index.toOptional(), .generation = old_freelist.generation +% 1 },
+ .release, // ensure a matching `start` sees the freelist link written above
+ .monotonic, // our write above is irrelevant if we need to retry
+ ) orelse {
+ // We won the race.
+ return;
+ };
}
} else {
- @atomicStore(bool, &global_progress.done, true, .seq_cst);
+ @atomicStore(bool, &global_progress.done, true, .monotonic);
global_progress.redraw_event.set();
if (global_progress.update_thread) |thread| thread.join();
}
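The matching push side, under the same hypothetical names; the release success ordering publishes the `next` link written just before the CAS, pairing with the acquire loads in the pop sketch:

```zig
fn push(list: *Freelist, next: []u8, index: u8) void {
    var old = @atomicLoad(Freelist, list, .monotonic);
    while (true) {
        // Publish our link first; if the CAS fails, the retry overwrites it.
        @atomicStore(u8, &next[index], old.head, .monotonic);
        old = @cmpxchgWeak(Freelist, list, old, .{
            .head = index,
            .generation = old.generation +% 1, // wrapping add, as in `Node.end`
        }, .release, .monotonic) orelse return;
    }
}
```

A `u24` generation can wrap, but a false CAS success would then require a multiple of 2^24 pushes to land between one thread's load and its CAS, which is far outside anything this API will see.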
@@ -291,8 +323,8 @@ pub const Node = struct {
return &global_progress.node_parents[@intFromEnum(index)];
}
- fn freelistByIndex(index: Node.Index) *Node.OptionalIndex {
- return &global_progress.node_freelist[@intFromEnum(index)];
+ fn freelistNextByIndex(index: Node.Index) *Node.OptionalIndex {
+ return &global_progress.node_freelist_next[@intFromEnum(index)];
}
fn init(free_index: Index, parent: Parent, name: []const u8, estimated_total_items: usize) Node {
@@ -307,8 +339,10 @@ pub const Node = struct {
@atomicStore(u8, &storage.name[name_len], 0, .monotonic);
const parent_ptr = parentByIndex(free_index);
- assert(parent_ptr.* == .unused);
- @atomicStore(Node.Parent, parent_ptr, parent, .release);
+ if (std.debug.runtime_safety) {
+ assert(@atomicLoad(Node.Parent, parent_ptr, .monotonic) == .unused);
+ }
+ @atomicStore(Node.Parent, parent_ptr, parent, .monotonic);
return .{ .index = free_index.toOptional() };
}
@@ -329,15 +363,15 @@ var global_progress: Progress = .{
.node_parents = &node_parents_buffer,
.node_storage = &node_storage_buffer,
- .node_freelist = &node_freelist_buffer,
- .node_freelist_first = .none,
+ .node_freelist_next = &node_freelist_next_buffer,
+ .node_freelist = .{ .head = .none, .generation = 0 },
.node_end_index = 0,
};
const node_storage_buffer_len = 83;
var node_parents_buffer: [node_storage_buffer_len]Node.Parent = undefined;
var node_storage_buffer: [node_storage_buffer_len]Node.Storage = undefined;
-var node_freelist_buffer: [node_storage_buffer_len]Node.OptionalIndex = undefined;
+var node_freelist_next_buffer: [node_storage_buffer_len]Node.OptionalIndex = undefined;
var default_draw_buffer: [4096]u8 = undefined;
@@ -456,7 +490,7 @@ fn updateThreadRun() void {
{
const resize_flag = wait(global_progress.initial_delay_ns);
- if (@atomicLoad(bool, &global_progress.done, .seq_cst)) return;
+ if (@atomicLoad(bool, &global_progress.done, .monotonic)) return;
maybeUpdateSize(resize_flag);
const buffer, _ = computeRedraw(&serialized_buffer);
@@ -470,7 +504,7 @@ fn updateThreadRun() void {
while (true) {
const resize_flag = wait(global_progress.refresh_rate_ns);
- if (@atomicLoad(bool, &global_progress.done, .seq_cst)) {
+ if (@atomicLoad(bool, &global_progress.done, .monotonic)) {
stderr_mutex.lock();
defer stderr_mutex.unlock();
return clearWrittenWithEscapeCodes() catch {};
@@ -500,7 +534,7 @@ fn windowsApiUpdateThreadRun() void {
{
const resize_flag = wait(global_progress.initial_delay_ns);
- if (@atomicLoad(bool, &global_progress.done, .seq_cst)) return;
+ if (@atomicLoad(bool, &global_progress.done, .monotonic)) return;
maybeUpdateSize(resize_flag);
const buffer, const nl_n = computeRedraw(&serialized_buffer);
@@ -516,7 +550,7 @@ fn windowsApiUpdateThreadRun() void {
while (true) {
const resize_flag = wait(global_progress.refresh_rate_ns);
- if (@atomicLoad(bool, &global_progress.done, .seq_cst)) {
+ if (@atomicLoad(bool, &global_progress.done, .monotonic)) {
stderr_mutex.lock();
defer stderr_mutex.unlock();
return clearWrittenWindowsApi() catch {};
@@ -558,7 +592,7 @@ fn ipcThreadRun(fd: posix.fd_t) anyerror!void {
{
_ = wait(global_progress.initial_delay_ns);
- if (@atomicLoad(bool, &global_progress.done, .seq_cst))
+ if (@atomicLoad(bool, &global_progress.done, .monotonic))
return;
const serialized = serialize(&serialized_buffer);
@@ -570,7 +604,7 @@ fn ipcThreadRun(fd: posix.fd_t) anyerror!void {
while (true) {
_ = wait(global_progress.refresh_rate_ns);
- if (@atomicLoad(bool, &global_progress.done, .seq_cst))
+ if (@atomicLoad(bool, &global_progress.done, .monotonic))
return;
const serialized = serialize(&serialized_buffer);
@@ -765,37 +799,39 @@ fn serialize(serialized_buffer: *Serialized.Buffer) Serialized {
var any_ipc = false;
// Iterate all of the nodes and construct a serializable copy of the state that can be examined
- // without atomics.
- const end_index = @atomicLoad(u32, &global_progress.node_end_index, .monotonic);
+ // without atomics. The `@min` call is here because `node_end_index` may briefly exceed the
+ // node count.
+ const end_index = @min(@atomicLoad(u32, &global_progress.node_end_index, .monotonic), global_progress.node_storage.len);
for (
global_progress.node_parents[0..end_index],
global_progress.node_storage[0..end_index],
serialized_buffer.map[0..end_index],
) |*parent_ptr, *storage_ptr, *map| {
- var begin_parent = @atomicLoad(Node.Parent, parent_ptr, .acquire);
- while (begin_parent != .unused) {
- const dest_storage = &serialized_buffer.storage[serialized_len];
- copyAtomicLoad(&dest_storage.name, &storage_ptr.name);
- dest_storage.estimated_total_count = @atomicLoad(u32, &storage_ptr.estimated_total_count, .acquire);
- dest_storage.completed_count = @atomicLoad(u32, &storage_ptr.completed_count, .monotonic);
- const end_parent = @atomicLoad(Node.Parent, parent_ptr, .acquire);
- if (begin_parent == end_parent) {
- any_ipc = any_ipc or (dest_storage.getIpcFd() != null);
- serialized_buffer.parents[serialized_len] = begin_parent;
- map.* = @enumFromInt(serialized_len);
- serialized_len += 1;
- break;
- }
-
- begin_parent = end_parent;
- } else {
- // A node may be freed during the execution of this loop, causing
- // there to be a parent reference to a nonexistent node. Without
- // this assignment, this would lead to the map entry containing
- // stale data. By assigning none, the child node with the bad
- // parent pointer will be harmlessly omitted from the tree.
+ const parent = @atomicLoad(Node.Parent, parent_ptr, .monotonic);
+ if (parent == .unused) {
+ // We might read "mixed" node data in this loop, due to unsynchronized atomic loads
+ // or just a node actually being freed while this loop runs. That could cause
+ // there to be a parent reference to a nonexistent node. Without this assignment,
+ // this would lead to the map entry containing stale data. By assigning none, the
+ // child node with the bad parent pointer will be harmlessly omitted from the tree.
+ //
+ // Note that there's no concern of potentially creating "looping" data if we read
+ // "mixed" node data like this, because if a node is (directly or indirectly) its own
+ // parent, it will just not be printed at all. The general idea here is that performance
+ // is more important than 100% correct output every frame, given that this API is likely
+ // to be used in hot paths!
map.* = .none;
+ continue;
}
+ const dest_storage = &serialized_buffer.storage[serialized_len];
+ copyAtomicLoad(&dest_storage.name, &storage_ptr.name);
+ dest_storage.estimated_total_count = @atomicLoad(u32, &storage_ptr.estimated_total_count, .acquire); // synchronizes with release in `setIpcFd`
+ dest_storage.completed_count = @atomicLoad(u32, &storage_ptr.completed_count, .monotonic);
+
+ any_ipc = any_ipc or (dest_storage.getIpcFd() != null);
+ serialized_buffer.parents[serialized_len] = parent;
+ map.* = @enumFromInt(serialized_len);
+ serialized_len += 1;
}
// Remap parents to point inside serialized arrays.
diff --git a/lib/std/Target.zig b/lib/std/Target.zig
index 3d3cf11484..88477354f6 100644
--- a/lib/std/Target.zig
+++ b/lib/std/Target.zig
@@ -1079,6 +1079,7 @@ pub fn toElfMachine(target: Target) std.elf.EM {
.m68k => .@"68K",
.mips, .mips64, .mipsel, .mips64el => .MIPS,
.msp430 => .MSP430,
+ .or1k => .OR1K,
.powerpc, .powerpcle => .PPC,
.powerpc64, .powerpc64le => .PPC64,
.propeller => .PROPELLER,
@@ -1133,6 +1134,7 @@ pub fn toCoffMachine(target: Target) std.coff.MachineType {
.mips64,
.mips64el,
.msp430,
+ .or1k,
.nvptx,
.nvptx64,
.powerpc,
@@ -1357,6 +1359,7 @@ pub const Cpu = struct {
mips64,
mips64el,
msp430,
+ or1k,
nvptx,
nvptx64,
powerpc,
@@ -1565,6 +1568,7 @@ pub const Cpu = struct {
.m68k,
.mips,
.mips64,
+ .or1k,
.powerpc,
.powerpc64,
.thumbeb,
@@ -1815,6 +1819,9 @@ pub const Cpu = struct {
.msp430_eabi,
=> &.{.msp430},
+ .or1k_sysv,
+ => &.{.or1k},
+
.propeller_sysv,
=> &.{.propeller},
@@ -1911,6 +1918,7 @@ pub const Cpu = struct {
.xtensa => &xtensa.cpu.generic,
.kalimba,
+ .or1k,
=> &S.generic_model,
};
}
@@ -2598,6 +2606,7 @@ pub fn ptrBitWidth_cpu_abi(cpu: Cpu, abi: Abi) u16 {
.m68k,
.mips,
.mipsel,
+ .or1k,
.powerpc,
.powerpcle,
.riscv32,
@@ -3114,6 +3123,7 @@ pub fn cTypeAlignment(target: Target, c_type: CType) u16 {
.csky,
.x86,
.xcore,
+ .or1k,
.kalimba,
.xtensa,
.propeller,
@@ -3204,6 +3214,7 @@ pub fn cTypePreferredAlignment(target: Target, c_type: CType) u16 {
.csky,
.xcore,
+ .or1k,
.kalimba,
.xtensa,
.propeller,
@@ -3276,6 +3287,7 @@ pub fn cMaxIntAlignment(target: std.Target) u16 {
.hexagon,
.mips,
.mipsel,
+ .or1k,
.powerpc,
.powerpcle,
.riscv32,
@@ -3372,6 +3384,7 @@ pub fn cCallingConvention(target: Target) ?std.builtin.CallingConvention {
else
.{ .m68k_sysv = .{} },
.msp430 => .{ .msp430_eabi = .{} },
+ .or1k => .{ .or1k_sysv = .{} },
.propeller => .{ .propeller_sysv = .{} },
.s390x => .{ .s390x_sysv = .{} },
.ve => .{ .ve_sysv = .{} },
diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index 5e95474569..de2d8b2156 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -370,6 +370,9 @@ pub const CallingConvention = union(enum(u8)) {
/// The standard `msp430` calling convention.
msp430_eabi: CommonOptions,
+ /// The standard `or1k` calling convention.
+ or1k_sysv: CommonOptions,
+
/// The standard `propeller` calling convention.
propeller_sysv: CommonOptions,
diff --git a/src/Sema.zig b/src/Sema.zig
index 644410dc20..8889b475fd 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -27997,12 +27997,17 @@ fn structFieldPtrByIndex(
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
- if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| {
- const val = try struct_ptr_val.ptrField(field_index, pt);
- return Air.internedToRef(val.toIntern());
+ const struct_type = zcu.typeToStruct(struct_ty).?;
+ const field_is_comptime = struct_type.fieldIsComptime(ip, field_index);
+
+ // Comptime fields are handled later
+ if (!field_is_comptime) {
+ if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| {
+ const val = try struct_ptr_val.ptrField(field_index, pt);
+ return Air.internedToRef(val.toIntern());
+ }
}
- const struct_type = zcu.typeToStruct(struct_ty).?;
const field_ty = struct_type.field_types.get(ip)[field_index];
const struct_ptr_ty = sema.typeOf(struct_ptr);
const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(zcu);
@@ -28022,6 +28027,7 @@ fn structFieldPtrByIndex(
try Type.fromInterned(struct_ptr_ty_info.child).abiAlignmentSema(pt);
if (struct_type.layout == .@"packed") {
+ assert(!field_is_comptime);
switch (struct_ty.packedStructFieldPtrInfo(struct_ptr_ty, field_index, pt)) {
.bit_ptr => |packed_offset| {
ptr_ty_data.flags.alignment = parent_align;
@@ -28032,6 +28038,7 @@ fn structFieldPtrByIndex(
},
}
} else if (struct_type.layout == .@"extern") {
+ assert(!field_is_comptime);
// For extern structs, field alignment might be bigger than type's
// natural alignment. Eg, in `extern struct { x: u32, y: u16 }` the
// second field is aligned as u32.
@@ -28055,7 +28062,7 @@ fn structFieldPtrByIndex(
const ptr_field_ty = try pt.ptrTypeSema(ptr_ty_data);
- if (struct_type.fieldIsComptime(ip, field_index)) {
+ if (field_is_comptime) {
try struct_ty.resolveStructFieldInits(pt);
const val = try pt.intern(.{ .ptr = .{
.ty = ptr_field_ty.toIntern(),
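For context, a user-level sketch of what the reordering above protects, hypothetical code rather than anything in this patch: a pointer to a comptime field is itself comptime-known (the `field_is_comptime` branch interns it), even when the struct pointer is runtime-known, so it must not be produced by the generic `resolveDefinedValue` path:

```zig
const std = @import("std");

test "pointer to a comptime field is comptime-known" {
    const S = struct { comptime x: u32 = 42, y: u32 };
    var s: S = .{ .y = 1 };
    s.y += 1; // keep `s` runtime-known
    const px = &s.x; // interned comptime-field pointer, not an offset into `s`
    comptime std.debug.assert(px.* == 42);
}
```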
@@ -28602,7 +28609,8 @@ fn tupleFieldPtr(
const pt = sema.pt;
const zcu = pt.zcu;
const tuple_ptr_ty = sema.typeOf(tuple_ptr);
- const tuple_ty = tuple_ptr_ty.childType(zcu);
+ const tuple_ptr_info = tuple_ptr_ty.ptrInfo(zcu);
+ const tuple_ty: Type = .fromInterned(tuple_ptr_info.child);
try tuple_ty.resolveFields(pt);
const field_count = tuple_ty.structFieldCount(zcu);
@@ -28620,9 +28628,16 @@ fn tupleFieldPtr(
const ptr_field_ty = try pt.ptrTypeSema(.{
.child = field_ty.toIntern(),
.flags = .{
- .is_const = !tuple_ptr_ty.ptrIsMutable(zcu),
- .is_volatile = tuple_ptr_ty.isVolatilePtr(zcu),
- .address_space = tuple_ptr_ty.ptrAddressSpace(zcu),
+ .is_const = tuple_ptr_info.flags.is_const,
+ .is_volatile = tuple_ptr_info.flags.is_volatile,
+ .address_space = tuple_ptr_info.flags.address_space,
+ .alignment = a: {
+ if (tuple_ptr_info.flags.alignment == .none) break :a .none;
+ // The tuple pointer isn't naturally aligned, so the field pointer might be underaligned.
+ const tuple_align = tuple_ptr_info.flags.alignment;
+ const field_align = try field_ty.abiAlignmentSema(pt);
+ break :a tuple_align.min(field_align);
+ },
},
});
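A worked instance of the `min` rule in the alignment computation above (the new behavior test in test/behavior/tuple.zig exercises the same case): a tuple pointer aligned to 2 cannot promise more than 2-byte alignment for its `u32` field, whose natural alignment is 4, so the field pointer comes out underaligned. A minimal sketch:

```zig
const std = @import("std");

const T = struct { u8, u32 };
var val: T align(2) = .{ 1, 2 };

comptime {
    // min(tuple pointer alignment 2, natural u32 alignment 4) == 2
    std.debug.assert(@TypeOf(&val[1]) == *align(2) u32);
}
```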
diff --git a/src/Sema/comptime_ptr_access.zig b/src/Sema/comptime_ptr_access.zig
index ceddb9457d..2e21c31f2b 100644
--- a/src/Sema/comptime_ptr_access.zig
+++ b/src/Sema/comptime_ptr_access.zig
@@ -65,6 +65,15 @@ pub fn storeComptimePtr(
const zcu = pt.zcu;
const ptr_info = ptr.typeOf(zcu).ptrInfo(zcu);
assert(store_val.typeOf(zcu).toIntern() == ptr_info.child);
+
+ {
+ const store_ty: Type = .fromInterned(ptr_info.child);
+ if (!try store_ty.comptimeOnlySema(pt) and !try store_ty.hasRuntimeBitsIgnoreComptimeSema(pt)) {
+ // zero-bit store; nothing to do
+ return .success;
+ }
+ }
+
// TODO: host size for vectors is terrible
const host_bits = switch (ptr_info.flags.vector_index) {
.none => ptr_info.packed_offset.host_size * 8,
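The early return's effect is mirrored by the new behavior tests in test/behavior/comptime_memory.zig: a store whose type has no runtime bits (and is not comptime-only) has nothing to write, so it now succeeds even through a reinterpreting cast that the bit-level store machinery could not handle. A minimal sketch:

```zig
comptime {
    var buf: u32 = undefined;
    const ptr: *void = @ptrCast(&buf);
    ptr.* = {}; // nothing to write; `storeComptimePtr` bails out with `.success`
}
```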
diff --git a/src/Type.zig b/src/Type.zig
index a789edefe1..f3e33abbec 100644
--- a/src/Type.zig
+++ b/src/Type.zig
@@ -1637,10 +1637,7 @@ pub fn bitSizeInner(
const len = array_type.lenIncludingSentinel();
if (len == 0) return 0;
const elem_ty = Type.fromInterned(array_type.child);
- const elem_size = @max(
- (try elem_ty.abiAlignmentInner(strat_lazy, zcu, tid)).scalar.toByteUnits() orelse 0,
- (try elem_ty.abiSizeInner(strat_lazy, zcu, tid)).scalar,
- );
+ const elem_size = (try elem_ty.abiSizeInner(strat_lazy, zcu, tid)).scalar;
if (elem_size == 0) return 0;
const elem_bit_size = try elem_ty.bitSizeInner(strat, zcu, tid);
return (len - 1) * 8 * elem_size + elem_bit_size;
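With the `@max` removed (the ABI size of a sized type is already rounded up to its ABI alignment, so the alignment term could never win), the rule reads directly off the return expression: every element but the last contributes its full ABI size in bits, and the last contributes only its bit size. A small check, assuming `@sizeOf(u24) == 4` on the target:

```zig
const std = @import("std");

test "array bit size" {
    // (len - 1) * 8 * abiSize(elem) + bitSize(elem)
    // = (4 - 1) * 8 * 4 + 24 = 120
    try std.testing.expectEqual(120, @bitSizeOf([4]u24));
}
```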
diff --git a/src/Zcu.zig b/src/Zcu.zig
index 3e2640fa1a..fab40763e8 100644
--- a/src/Zcu.zig
+++ b/src/Zcu.zig
@@ -3687,6 +3687,7 @@ pub fn atomicPtrAlignment(
.mips,
.mipsel,
.nvptx,
+ .or1k,
.powerpc,
.powerpcle,
.riscv32,
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index e124355bc7..91a54db8b1 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -611,7 +611,7 @@ pub const Function = struct {
const a = try Assignment.start(f, writer, ctype);
try f.writeCValue(writer, dst, .Other);
try a.assign(f, writer);
- try f.writeCValue(writer, src, .Initializer);
+ try f.writeCValue(writer, src, .Other);
try a.end(f, writer);
}
@@ -2826,7 +2826,7 @@ pub fn genLazyFn(o: *Object, lazy_ctype_pool: *const CType.Pool, lazy_fn: LazyFn
});
try o.dg.renderTypeAndName(w, name_ty, .{ .identifier = "name" }, Const, .none, .complete);
try w.writeAll(" = ");
- try o.dg.renderValue(w, Value.fromInterned(name_val), .Initializer);
+ try o.dg.renderValue(w, Value.fromInterned(name_val), .StaticInitializer);
try w.writeAll(";\n return (");
try o.dg.renderType(w, name_slice_ty);
try w.print("){{{}, {}}};\n", .{
@@ -4045,7 +4045,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
const new_local = try f.allocLocal(inst, src_ty);
try f.writeCValue(writer, new_local, .Other);
try writer.writeAll(" = ");
- try f.writeCValue(writer, src_val, .Initializer);
+ try f.writeCValue(writer, src_val, .Other);
try writer.writeAll(";\n");
break :blk new_local;
@@ -4516,7 +4516,7 @@ fn airSlice(f: *Function, inst: Air.Inst.Index) !CValue {
const a = try Assignment.start(f, writer, .usize);
try f.writeCValueMember(writer, local, .{ .identifier = "len" });
try a.assign(f, writer);
- try f.writeCValue(writer, len, .Initializer);
+ try f.writeCValue(writer, len, .Other);
try a.end(f, writer);
}
return local;
@@ -4934,7 +4934,7 @@ fn airSwitchDispatch(f: *Function, inst: Air.Inst.Index) !void {
const cond_local = f.loop_switch_conds.get(br.block_inst).?;
try f.writeCValue(writer, .{ .local = cond_local }, .Other);
try writer.writeAll(" = ");
- try f.writeCValue(writer, cond, .Initializer);
+ try f.writeCValue(writer, cond, .Other);
try writer.writeAll(";\n");
try writer.print("goto zig_switch_{d}_loop;", .{@intFromEnum(br.block_inst)});
}
@@ -4979,14 +4979,8 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !CVal
const operand_lval = if (operand == .constant) blk: {
const operand_local = try f.allocLocal(null, operand_ty);
try f.writeCValue(writer, operand_local, .Other);
- if (operand_ty.isAbiInt(zcu)) {
- try writer.writeAll(" = ");
- } else {
- try writer.writeAll(" = (");
- try f.renderType(writer, operand_ty);
- try writer.writeByte(')');
- }
- try f.writeCValue(writer, operand, .Initializer);
+ try writer.writeAll(" = ");
+ try f.writeCValue(writer, operand, .Other);
try writer.writeAll(";\n");
break :blk operand_local;
} else operand;
@@ -5698,7 +5692,7 @@ fn airOptionalPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue {
const a = try Assignment.start(f, writer, opt_ctype);
try f.writeCValueDeref(writer, operand);
try a.assign(f, writer);
- try f.object.dg.renderValue(writer, Value.false, .Initializer);
+ try f.object.dg.renderValue(writer, Value.false, .Other);
try a.end(f, writer);
return .none;
},
@@ -5718,7 +5712,7 @@ fn airOptionalPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue {
const a = try Assignment.start(f, writer, opt_ctype);
try f.writeCValueDerefMember(writer, operand, .{ .identifier = "is_null" });
try a.assign(f, writer);
- try f.object.dg.renderValue(writer, Value.false, .Initializer);
+ try f.object.dg.renderValue(writer, Value.false, .Other);
try a.end(f, writer);
}
if (f.liveness.isUnused(inst)) return .none;
@@ -5844,7 +5838,7 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeByte(')');
switch (fieldLocation(container_ptr_ty, field_ptr_ty, extra.field_index, pt)) {
- .begin => try f.writeCValue(writer, field_ptr_val, .Initializer),
+ .begin => try f.writeCValue(writer, field_ptr_val, .Other),
.field => |field| {
const u8_ptr_ty = try pt.adjustPtrTypeChild(field_ptr_ty, .u8);
@@ -5898,7 +5892,7 @@ fn fieldPtr(
try writer.writeByte(')');
switch (fieldLocation(container_ptr_ty, field_ptr_ty, field_index, pt)) {
- .begin => try f.writeCValue(writer, container_ptr_val, .Initializer),
+ .begin => try f.writeCValue(writer, container_ptr_val, .Other),
.field => |field| {
try writer.writeByte('&');
try f.writeCValueDerefMember(writer, container_ptr_val, field);
@@ -6021,7 +6015,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
const operand_local = try f.allocLocal(inst, struct_ty);
try f.writeCValue(writer, operand_local, .Other);
try writer.writeAll(" = ");
- try f.writeCValue(writer, struct_byval, .Initializer);
+ try f.writeCValue(writer, struct_byval, .Other);
try writer.writeAll(";\n");
break :blk operand_local;
} else struct_byval;
@@ -6119,7 +6113,7 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu
try writer.writeAll(" = (");
try f.renderType(writer, inst_ty);
try writer.writeByte(')');
- try f.writeCValue(writer, operand, .Initializer);
+ try f.writeCValue(writer, operand, .Other);
try writer.writeAll(";\n");
return local;
}
@@ -6164,7 +6158,7 @@ fn airWrapOptional(f: *Function, inst: Air.Inst.Index) !CValue {
const a = try Assignment.start(f, writer, operand_ctype);
try f.writeCValueMember(writer, local, .{ .identifier = "payload" });
try a.assign(f, writer);
- try f.writeCValue(writer, operand, .Initializer);
+ try f.writeCValue(writer, operand, .Other);
try a.end(f, writer);
}
return local;
@@ -6365,7 +6359,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue {
try f.writeCValueMember(writer, local, .{ .identifier = "ptr" });
try a.assign(f, writer);
if (operand == .undef) {
- try f.writeCValue(writer, .{ .undef = inst_ty.slicePtrFieldType(zcu) }, .Initializer);
+ try f.writeCValue(writer, .{ .undef = inst_ty.slicePtrFieldType(zcu) }, .Other);
} else {
const ptr_ctype = try f.ctypeFromType(ptr_ty, .complete);
const ptr_child_ctype = ptr_ctype.info(ctype_pool).pointer.elem_ctype;
@@ -6382,7 +6376,7 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeByte('&');
try f.writeCValueDeref(writer, operand);
try writer.print("[{}]", .{try f.fmtIntLiteral(try pt.intValue(.usize, 0))});
- } else try f.writeCValue(writer, operand, .Initializer);
+ } else try f.writeCValue(writer, operand, .Other);
}
try a.end(f, writer);
}
@@ -6912,7 +6906,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
try writer.writeAll("for (");
try f.writeCValue(writer, index, .Other);
try writer.writeAll(" = ");
- try f.object.dg.renderValue(writer, try pt.intValue(.usize, 0), .Initializer);
+ try f.object.dg.renderValue(writer, try pt.intValue(.usize, 0), .Other);
try writer.writeAll("; ");
try f.writeCValue(writer, index, .Other);
try writer.writeAll(" != ");
@@ -7282,7 +7276,7 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
.float => try pt.floatValue(scalar_ty, std.math.nan(f128)),
else => unreachable,
},
- }, .Initializer);
+ }, .Other);
try writer.writeAll(";\n");
const v = try Vectorize.start(f, inst, writer, operand_ty);
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 67dc468c35..891cc0dc52 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -98,6 +98,7 @@ pub fn targetTriple(allocator: Allocator, target: std.Target) ![]const u8 {
.ve => "ve",
.kalimba,
+ .or1k,
.propeller,
=> unreachable, // Gated by hasLlvmSupport().
};
@@ -454,6 +455,7 @@ pub fn dataLayout(target: std.Target) []const u8 {
.xtensa => "e-m:e-p:32:32-i8:8:32-i16:16:32-i64:64-n32",
.kalimba,
+ .or1k,
.propeller,
=> unreachable, // Gated by hasLlvmSupport().
};
@@ -11563,6 +11565,7 @@ fn toLlvmCallConvTag(cc_tag: std.builtin.CallingConvention.Tag, target: std.Targ
.m68k_sysv,
.m68k_gnu,
.msp430_eabi,
+ .or1k_sysv,
.propeller_sysv,
.s390x_sysv,
.s390x_sysv_vx,
@@ -12762,6 +12765,7 @@ pub fn initializeLLVMTarget(arch: std.Target.Cpu.Arch) void {
// LLVM does not have a backend for these.
.kalimba,
+ .or1k,
.propeller,
=> unreachable,
}
diff --git a/src/target.zig b/src/target.zig
index 21b701fc37..6b67de4562 100644
--- a/src/target.zig
+++ b/src/target.zig
@@ -199,6 +199,7 @@ pub fn hasLlvmSupport(target: std.Target, ofmt: std.Target.ObjectFormat) bool {
// No LLVM backend exists.
.kalimba,
+ .or1k,
.propeller,
=> false,
};
diff --git a/test/behavior/comptime_memory.zig b/test/behavior/comptime_memory.zig
index baca3da72d..64b494b23e 100644
--- a/test/behavior/comptime_memory.zig
+++ b/test/behavior/comptime_memory.zig
@@ -515,3 +515,66 @@ fn fieldPtrTest() u32 {
test "pointer in aggregate field can mutate comptime state" {
try comptime std.testing.expect(fieldPtrTest() == 2);
}
+
+test "comptime store of extern struct with void field" {
+ comptime {
+ var x: extern struct { a: u8, b: void } = undefined;
+ x = .{ .a = 123, .b = {} };
+ std.debug.assert(x.a == 123);
+ }
+}
+
+test "comptime store of extern struct with void field into array" {
+ comptime {
+ var x: [3]extern struct { a: u8, b: void } = undefined;
+ x[1] = .{ .a = 123, .b = {} };
+ std.debug.assert(x[1].a == 123);
+ }
+}
+
+test "comptime store of packed struct with void field" {
+ comptime {
+ var x: packed struct { a: u8, b: void } = undefined;
+ x = .{ .a = 123, .b = {} };
+ std.debug.assert(x.a == 123);
+ }
+}
+
+test "comptime store of packed struct with void field into array" {
+ comptime {
+ var x: [3]packed struct { a: u8, b: void } = undefined;
+ x[1] = .{ .a = 123, .b = {} };
+ std.debug.assert(x[1].a == 123);
+ }
+}
+
+test "comptime store of reinterpreted zero-bit type" {
+ const S = struct {
+ fn doTheTest(comptime T: type) void {
+ comptime var buf: T = undefined;
+ const ptr: *void = @ptrCast(&buf);
+ ptr.* = {};
+ }
+ };
+ S.doTheTest(void);
+ S.doTheTest(u0);
+ S.doTheTest([0]u8);
+ S.doTheTest([1]u0);
+ S.doTheTest([5]u0);
+ S.doTheTest([5]void);
+ S.doTheTest(packed struct(u0) {});
+}
+
+test "comptime store to extern struct reinterpreted as byte array" {
+ const T = extern struct {
+ x: u32,
+ y: f32,
+ z: [2]void,
+ };
+ comptime var val: T = undefined;
+
+ const bytes: *[@sizeOf(T)]u8 = @ptrCast(&val);
+ @memset(bytes, 0);
+
+ comptime std.debug.assert(val.x == 0);
+}
diff --git a/test/behavior/tuple.zig b/test/behavior/tuple.zig
index 492730df61..c9d3fb4b38 100644
--- a/test/behavior/tuple.zig
+++ b/test/behavior/tuple.zig
@@ -602,3 +602,21 @@ test "empty union in tuple" {
try std.testing.expectEqualStrings("0", info.@"struct".fields[0].name);
try std.testing.expect(@typeInfo(info.@"struct".fields[0].type) == .@"union");
}
+
+test "field pointer of underaligned tuple" {
+ if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ const S = struct {
+ fn doTheTest() !void {
+ const T = struct { u8, u32 };
+ var val: T align(2) = .{ 1, 2 };
+
+ comptime assert(@TypeOf(&val[0]) == *u8); // `u8` field pointer isn't overaligned
+ comptime assert(@TypeOf(&val[1]) == *align(2) u32); // `u32` field pointer is correctly underaligned
+
+ try expect(val[0] == 1);
+ try expect(val[1] == 2);
+ }
+ };
+ try S.doTheTest();
+ try comptime S.doTheTest();
+}
diff --git a/test/cases/compile_errors/@import_zon_bad_type.zig b/test/cases/compile_errors/@import_zon_bad_type.zig
index edbacf7cb1..80d05c5254 100644
--- a/test/cases/compile_errors/@import_zon_bad_type.zig
+++ b/test/cases/compile_errors/@import_zon_bad_type.zig
@@ -117,9 +117,9 @@ export fn testMutablePointer() void {
// tmp.zig:37:38: note: imported here
// neg_inf.zon:1:1: error: expected type '?u8'
// tmp.zig:57:28: note: imported here
-// neg_inf.zon:1:1: error: expected type 'tmp.testNonExhaustiveEnum__enum_491'
+// neg_inf.zon:1:1: error: expected type 'tmp.testNonExhaustiveEnum__enum_492'
// tmp.zig:62:39: note: imported here
-// neg_inf.zon:1:1: error: expected type 'tmp.testUntaggedUnion__union_493'
+// neg_inf.zon:1:1: error: expected type 'tmp.testUntaggedUnion__union_494'
// tmp.zig:67:44: note: imported here
-// neg_inf.zon:1:1: error: expected type 'tmp.testTaggedUnionVoid__union_496'
+// neg_inf.zon:1:1: error: expected type 'tmp.testTaggedUnionVoid__union_497'
// tmp.zig:72:50: note: imported here
diff --git a/test/cases/compile_errors/anytype_param_requires_comptime.zig b/test/cases/compile_errors/anytype_param_requires_comptime.zig
index 637bdb9be2..3ab545d0dd 100644
--- a/test/cases/compile_errors/anytype_param_requires_comptime.zig
+++ b/test/cases/compile_errors/anytype_param_requires_comptime.zig
@@ -15,6 +15,6 @@ pub export fn entry() void {
// error
//
// :7:25: error: unable to resolve comptime value
-// :7:25: note: initializer of comptime-only struct 'tmp.S.foo__anon_465.C' must be comptime-known
+// :7:25: note: initializer of comptime-only struct 'tmp.S.foo__anon_466.C' must be comptime-known
// :4:16: note: struct requires comptime because of this field
// :4:16: note: types are not available at runtime
diff --git a/test/cases/compile_errors/bogus_method_call_on_slice.zig b/test/cases/compile_errors/bogus_method_call_on_slice.zig
index ee758f2755..9ad88c0ba9 100644
--- a/test/cases/compile_errors/bogus_method_call_on_slice.zig
+++ b/test/cases/compile_errors/bogus_method_call_on_slice.zig
@@ -16,5 +16,5 @@ pub export fn entry2() void {
//
// :3:6: error: no field or member function named 'copy' in '[]const u8'
// :9:8: error: no field or member function named 'bar' in '@TypeOf(.{})'
-// :12:18: error: no field or member function named 'bar' in 'tmp.entry2__struct_469'
+// :12:18: error: no field or member function named 'bar' in 'tmp.entry2__struct_470'
// :12:6: note: struct declared here
diff --git a/test/cases/compile_errors/coerce_anon_struct.zig b/test/cases/compile_errors/coerce_anon_struct.zig
index af8d949500..43c4c80bb7 100644
--- a/test/cases/compile_errors/coerce_anon_struct.zig
+++ b/test/cases/compile_errors/coerce_anon_struct.zig
@@ -6,6 +6,6 @@ export fn foo() void {
// error
//
-// :4:16: error: expected type 'tmp.T', found 'tmp.foo__struct_458'
+// :4:16: error: expected type 'tmp.T', found 'tmp.foo__struct_459'
// :3:16: note: struct declared here
// :1:11: note: struct declared here
diff --git a/test/cases/compile_errors/redundant_try.zig b/test/cases/compile_errors/redundant_try.zig
index 4127a36a1d..a6f8e312ed 100644
--- a/test/cases/compile_errors/redundant_try.zig
+++ b/test/cases/compile_errors/redundant_try.zig
@@ -44,9 +44,9 @@ comptime {
//
// :5:23: error: expected error union type, found 'comptime_int'
// :10:23: error: expected error union type, found '@TypeOf(.{})'
-// :15:23: error: expected error union type, found 'tmp.test2__struct_495'
+// :15:23: error: expected error union type, found 'tmp.test2__struct_496'
// :15:23: note: struct declared here
-// :20:27: error: expected error union type, found 'tmp.test3__struct_497'
+// :20:27: error: expected error union type, found 'tmp.test3__struct_498'
// :20:27: note: struct declared here
// :25:23: error: expected error union type, found 'struct { comptime *const [5:0]u8 = "hello" }'
// :31:13: error: expected error union type, found 'u32'
diff --git a/test/cases/compile_errors/runtime_store_to_comptime_field.zig b/test/cases/compile_errors/runtime_store_to_comptime_field.zig
new file mode 100644
index 0000000000..0c5d6a7ad3
--- /dev/null
+++ b/test/cases/compile_errors/runtime_store_to_comptime_field.zig
@@ -0,0 +1,19 @@
+const init: u32 = 1;
+fn rt() u32 {
+ return 3;
+}
+
+var tuple_val = .{init};
+export fn tuple_field() void {
+ tuple_val[0] = rt();
+}
+
+var struct_val = .{ .x = init };
+export fn struct_field() void {
+ struct_val.x = rt();
+}
+
+// error
+//
+// :8:14: error: cannot store runtime value in compile time variable
+// :13:15: error: cannot store runtime value in compile time variable