aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--build.zig3
-rwxr-xr-xci/azure/macos_arm64_script3
-rwxr-xr-xci/azure/macos_script3
-rw-r--r--doc/langref.html.in7
-rw-r--r--lib/std/build/InstallRawStep.zig6
-rw-r--r--lib/std/build/RunStep.zig2
-rw-r--r--lib/std/child_process.zig2
-rw-r--r--lib/std/fmt.zig30
-rw-r--r--lib/std/heap.zig4
-rw-r--r--lib/std/heap/logging_allocator.zig8
-rw-r--r--lib/std/log.zig132
-rw-r--r--lib/std/math/big/int.zig471
-rw-r--r--lib/std/math/big/int_test.zig107
-rw-r--r--lib/std/math/ceil.zig4
-rw-r--r--lib/std/math/floor.zig4
-rw-r--r--lib/std/math/trunc.zig4
-rw-r--r--lib/std/os/linux/arm-eabi.zig4
-rw-r--r--lib/std/os/linux/arm64.zig4
-rw-r--r--lib/std/os/linux/i386.zig5
-rw-r--r--lib/std/os/linux/mips.zig4
-rw-r--r--lib/std/os/linux/powerpc.zig5
-rw-r--r--lib/std/os/linux/powerpc64.zig4
-rw-r--r--lib/std/os/linux/riscv64.zig4
-rw-r--r--lib/std/os/linux/sparc64.zig4
-rw-r--r--lib/std/os/linux/tls.zig6
-rw-r--r--lib/std/os/linux/x86_64.zig5
-rw-r--r--lib/std/os/uefi/tables/boot_services.zig1
-rw-r--r--lib/std/priority_dequeue.zig108
-rw-r--r--lib/std/priority_queue.zig79
-rw-r--r--lib/std/rand.zig2
-rw-r--r--lib/std/special/c_stage1.zig38
-rw-r--r--lib/std/special/compiler_rt.zig159
-rw-r--r--lib/std/zig/parser_test.zig2
-rw-r--r--src/Air.zig56
-rw-r--r--src/AstGen.zig297
-rw-r--r--src/BuiltinFn.zig16
-rw-r--r--src/Compilation.zig103
-rw-r--r--src/Liveness.zig11
-rw-r--r--src/Module.zig169
-rw-r--r--src/Sema.zig2719
-rw-r--r--src/Zir.zig294
-rw-r--r--src/arch/aarch64/CodeGen.zig50
-rw-r--r--src/clang.zig3
-rw-r--r--src/codegen.zig60
-rw-r--r--src/codegen/c.zig68
-rw-r--r--src/codegen/llvm.zig694
-rw-r--r--src/codegen/llvm/bindings.zig30
-rw-r--r--src/codegen/spirv.zig1
-rw-r--r--src/codegen/wasm.zig12
-rw-r--r--src/config.zig.in1
-rw-r--r--src/link.zig19
-rw-r--r--src/link/C/zig.h2
-rw-r--r--src/link/Coff.zig4
-rw-r--r--src/link/MachO.zig431
-rw-r--r--src/link/MachO/Atom.zig8
-rw-r--r--src/link/MachO/Object.zig16
-rw-r--r--src/main.zig22
-rw-r--r--src/print_air.zig66
-rw-r--r--src/print_zir.zig127
-rw-r--r--src/stage1/codegen.cpp28
-rw-r--r--src/translate_c.zig49
-rw-r--r--src/translate_c/ast.zig2
-rw-r--r--src/type.zig323
-rw-r--r--src/value.zig453
-rw-r--r--src/zig_clang.cpp5
-rw-r--r--src/zig_clang.h1
-rw-r--r--test/behavior.zig38
-rw-r--r--test/behavior/align.zig249
-rw-r--r--test/behavior/align_stage1.zig225
-rw-r--r--test/behavior/array.zig52
-rw-r--r--test/behavior/array_stage1.zig52
-rw-r--r--test/behavior/basic.zig13
-rw-r--r--test/behavior/bitcast.zig22
-rw-r--r--test/behavior/bitcast_stage1.zig22
-rw-r--r--test/behavior/bugs/9967.zig8
-rw-r--r--test/behavior/cast.zig55
-rw-r--r--test/behavior/cast_stage1.zig55
-rw-r--r--test/behavior/error.zig84
-rw-r--r--test/behavior/error_stage1.zig89
-rw-r--r--test/behavior/eval.zig83
-rw-r--r--test/behavior/eval_stage1.zig89
-rw-r--r--test/behavior/floatop.zig397
-rw-r--r--test/behavior/floatop_stage1.zig405
-rw-r--r--test/behavior/fn.zig201
-rw-r--r--test/behavior/fn_stage1.zig222
-rw-r--r--test/behavior/for.zig58
-rw-r--r--test/behavior/for_stage1.zig58
-rw-r--r--test/behavior/math.zig209
-rw-r--r--test/behavior/math_stage1.zig201
-rw-r--r--test/behavior/misc.zig12
-rw-r--r--test/behavior/null.zig39
-rw-r--r--test/behavior/null_stage1.zig37
-rw-r--r--test/behavior/pointers.zig49
-rw-r--r--test/behavior/pointers_stage1.zig49
-rw-r--r--test/behavior/ptrcast.zig69
-rw-r--r--test/behavior/ptrcast_stage1.zig73
-rw-r--r--test/behavior/saturating_arithmetic.zig4
-rw-r--r--test/behavior/slice.zig85
-rw-r--r--test/behavior/slice_stage1.zig85
-rw-r--r--test/behavior/switch.zig297
-rw-r--r--test/behavior/switch_stage1.zig303
-rw-r--r--test/behavior/union.zig31
-rw-r--r--test/behavior/union_stage1.zig31
-rw-r--r--test/compare_output.zig42
-rw-r--r--test/run_translated_c.zig17
-rw-r--r--test/stage2/cbe.zig15
-rw-r--r--test/standalone/link_common_symbols/b.c1
-rw-r--r--test/standalone/link_common_symbols/build.zig2
-rw-r--r--test/standalone/link_common_symbols/c.c5
-rw-r--r--test/standalone/link_common_symbols/main.zig5
-rw-r--r--test/translate_c.zig10
111 files changed, 7028 insertions, 4484 deletions
diff --git a/build.zig b/build.zig
index 1bb4a159e9..5df7f035fd 100644
--- a/build.zig
+++ b/build.zig
@@ -205,6 +205,7 @@ pub fn build(b: *Builder) !void {
}
const enable_logging = b.option(bool, "log", "Whether to enable logging") orelse false;
+ const enable_link_snapshots = b.option(bool, "link-snapshot", "Whether to enable linker state snapshots") orelse false;
const opt_version_string = b.option([]const u8, "version-string", "Override Zig version string. Default is to find out with git.");
const version = if (opt_version_string) |version| version else v: {
@@ -261,6 +262,7 @@ pub fn build(b: *Builder) !void {
exe_options.addOption(std.SemanticVersion, "semver", semver);
exe_options.addOption(bool, "enable_logging", enable_logging);
+ exe_options.addOption(bool, "enable_link_snapshots", enable_link_snapshots);
exe_options.addOption(bool, "enable_tracy", tracy != null);
exe_options.addOption(bool, "is_stage1", is_stage1);
exe_options.addOption(bool, "omit_stage2", omit_stage2);
@@ -301,6 +303,7 @@ pub fn build(b: *Builder) !void {
test_stage2.addOptions("build_options", test_stage2_options);
test_stage2_options.addOption(bool, "enable_logging", enable_logging);
+ test_stage2_options.addOption(bool, "enable_link_snapshots", enable_link_snapshots);
test_stage2_options.addOption(bool, "skip_non_native", skip_non_native);
test_stage2_options.addOption(bool, "skip_compile_errors", skip_compile_errors);
test_stage2_options.addOption(bool, "is_stage1", is_stage1);
diff --git a/ci/azure/macos_arm64_script b/ci/azure/macos_arm64_script
index 138e365404..fc9fc2f954 100755
--- a/ci/azure/macos_arm64_script
+++ b/ci/azure/macos_arm64_script
@@ -53,7 +53,8 @@ cmake .. \
-DCMAKE_BUILD_TYPE=Release \
-DZIG_TARGET_TRIPLE="$HOST_TARGET" \
-DZIG_TARGET_MCPU="$HOST_MCPU" \
- -DZIG_STATIC=ON
+ -DZIG_STATIC=ON \
+ -DZIG_OMIT_STAGE2=ON
unset CC
unset CXX
diff --git a/ci/azure/macos_script b/ci/azure/macos_script
index ec384768d6..13e064f0e6 100755
--- a/ci/azure/macos_script
+++ b/ci/azure/macos_script
@@ -39,7 +39,8 @@ cmake .. \
-DCMAKE_BUILD_TYPE=Release \
-DZIG_TARGET_TRIPLE="$TARGET" \
-DZIG_TARGET_MCPU="$MCPU" \
- -DZIG_STATIC=ON
+ -DZIG_STATIC=ON \
+ -DZIG_OMIT_STAGE2=ON
# Now cmake will use zig as the C/C++ compiler. We reset the environment variables
# so that installation and testing do not get affected by them.
diff --git a/doc/langref.html.in b/doc/langref.html.in
index 4fbc4ef302..9e5edee9f8 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -3872,7 +3872,7 @@ test "labeled break from labeled block expression" {
{#see_also|Labeled while|Labeled for#}
{#header_open|Shadowing#}
- <p>It is never allowed for an identifier to "hide" another one by using the same name:</p>
+ <p>Identifiers are never allowed to "hide" other identifiers by using the same name:</p>
{#code_begin|test_err|local shadows declaration#}
const pi = 3.14;
@@ -3884,8 +3884,9 @@ test "inside test block" {
}
{#code_end#}
<p>
- Because of this, when you read Zig code you can rely on an identifier always meaning the same thing,
- within the scope it is defined. Note that you can, however use the same name if the scopes are separate:
+ Because of this, when you read Zig code you can always rely on an identifier to consistently mean
+ the same thing within the scope it is defined. Note that you can, however, use the same name if
+ the scopes are separate:
</p>
{#code_begin|test|test_scopes#}
test "separate scopes" {
diff --git a/lib/std/build/InstallRawStep.zig b/lib/std/build/InstallRawStep.zig
index ed01a6ea6e..2802f1ce90 100644
--- a/lib/std/build/InstallRawStep.zig
+++ b/lib/std/build/InstallRawStep.zig
@@ -96,8 +96,7 @@ const BinaryElfOutput = struct {
sort.sort(*BinaryElfSegment, self.segments.items, {}, segmentSortCompare);
- if (self.segments.items.len > 0) {
- const firstSegment = self.segments.items[0];
+ for (self.segments.items) |firstSegment, i| {
if (firstSegment.firstSection) |firstSection| {
const diff = firstSection.elfOffset - firstSegment.elfOffset;
@@ -107,9 +106,10 @@ const BinaryElfOutput = struct {
const basePhysicalAddress = firstSegment.physicalAddress;
- for (self.segments.items) |segment| {
+ for (self.segments.items[i + 1 ..]) |segment| {
segment.binaryOffset = segment.physicalAddress - basePhysicalAddress;
}
+ break;
}
}
diff --git a/lib/std/build/RunStep.zig b/lib/std/build/RunStep.zig
index 8a21c87f8e..4d366c3cdb 100644
--- a/lib/std/build/RunStep.zig
+++ b/lib/std/build/RunStep.zig
@@ -229,7 +229,7 @@ fn make(step: *Step) !void {
printCmd(cwd, argv);
}
- return error.UncleanExit;
+ return error.UnexpectedExitCode;
}
},
else => {
diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig
index 978a45a2cc..adc7b9e7c0 100644
--- a/lib/std/child_process.zig
+++ b/lib/std/child_process.zig
@@ -354,7 +354,7 @@ pub const ChildProcess = struct {
// TODO collect output in a deadlock-avoiding way on Windows.
// https://github.com/ziglang/zig/issues/6343
- if (builtin.os.tag == .windows or builtin.os.tag == .haiku) {
+ if (builtin.os.tag == .haiku) {
const stdout_in = child.stdout.?.reader();
const stderr_in = child.stderr.?.reader();
diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig
index 79e180f16b..084376db49 100644
--- a/lib/std/fmt.zig
+++ b/lib/std/fmt.zig
@@ -295,21 +295,21 @@ pub fn format(
}
// Parse the width parameter
- options.width = init: {
- if (comptime parser.maybe('[')) {
- const arg_name = comptime parser.until(']');
+ options.width = comptime init: {
+ if (parser.maybe('[')) {
+ const arg_name = parser.until(']');
- if (!comptime parser.maybe(']')) {
+ if (!parser.maybe(']')) {
@compileError("Expected closing ]");
}
- const index = comptime meta.fieldIndex(ArgsType, arg_name) orelse
+ const index = meta.fieldIndex(ArgsType, arg_name) orelse
@compileError("No argument with name '" ++ arg_name ++ "'");
- const arg_index = comptime arg_state.nextArg(index);
+ const arg_index = arg_state.nextArg(index);
break :init @field(args, fields_info[arg_index].name);
} else {
- break :init comptime parser.number();
+ break :init parser.number();
}
};
@@ -321,21 +321,21 @@ pub fn format(
}
// Parse the precision parameter
- options.precision = init: {
- if (comptime parser.maybe('[')) {
- const arg_name = comptime parser.until(']');
+ options.precision = comptime init: {
+ if (parser.maybe('[')) {
+ const arg_name = parser.until(']');
- if (!comptime parser.maybe(']')) {
+ if (!parser.maybe(']')) {
@compileError("Expected closing ]");
}
- const arg_i = comptime meta.fieldIndex(ArgsType, arg_name) orelse
+ const arg_i = meta.fieldIndex(ArgsType, arg_name) orelse
@compileError("No argument with name '" ++ arg_name ++ "'");
- const arg_to_use = comptime arg_state.nextArg(arg_i);
+ const arg_to_use = arg_state.nextArg(arg_i);
break :init @field(args, fields_info[arg_to_use].name);
} else {
- break :init comptime parser.number();
+ break :init parser.number();
}
};
@@ -2576,6 +2576,8 @@ test "runtime width specifier" {
var width: usize = 9;
try expectFmt("~~hello~~", "{s:~^[1]}", .{ "hello", width });
try expectFmt("~~hello~~", "{s:~^[width]}", .{ .string = "hello", .width = width });
+ try expectFmt(" hello", "{s:[1]}", .{ "hello", width });
+ try expectFmt("42 hello", "{d} {s:[2]}", .{ 42, "hello", width });
}
test "runtime precision specifier" {
diff --git a/lib/std/heap.zig b/lib/std/heap.zig
index 47d964c239..fcea90d751 100644
--- a/lib/std/heap.zig
+++ b/lib/std/heap.zig
@@ -295,7 +295,7 @@ const PageAllocator = struct {
}
}
- const max_drop_len = alignment - std.math.min(alignment, mem.page_size);
+ const max_drop_len = alignment - @minimum(alignment, mem.page_size);
const alloc_len = if (max_drop_len <= aligned_len - n)
aligned_len
else
@@ -529,7 +529,7 @@ const WasmPageAllocator = struct {
fn freePages(start: usize, end: usize) void {
if (start < extendedOffset()) {
- conventional.recycle(start, std.math.min(extendedOffset(), end) - start);
+ conventional.recycle(start, @minimum(extendedOffset(), end) - start);
}
if (end > extendedOffset()) {
var new_end = end;
diff --git a/lib/std/heap/logging_allocator.zig b/lib/std/heap/logging_allocator.zig
index 5bd967b696..0c6224b7ce 100644
--- a/lib/std/heap/logging_allocator.zig
+++ b/lib/std/heap/logging_allocator.zig
@@ -40,12 +40,8 @@ pub fn ScopedLoggingAllocator(
// This function is required as the `std.log.log` function is not public
inline fn logHelper(comptime log_level: std.log.Level, comptime format: []const u8, args: anytype) void {
switch (log_level) {
- .emerg => log.emerg(format, args),
- .alert => log.alert(format, args),
- .crit => log.crit(format, args),
.err => log.err(format, args),
.warn => log.warn(format, args),
- .notice => log.notice(format, args),
.info => log.info(format, args),
.debug => log.debug(format, args),
}
@@ -120,6 +116,6 @@ pub fn ScopedLoggingAllocator(
/// This allocator is used in front of another allocator and logs to `std.log`
/// on every call to the allocator.
/// For logging to a `std.io.Writer` see `std.heap.LogToWriterAllocator`
-pub fn loggingAllocator(parent_allocator: *Allocator) LoggingAllocator(.debug, .crit) {
- return LoggingAllocator(.debug, .crit).init(parent_allocator);
+pub fn loggingAllocator(parent_allocator: *Allocator) LoggingAllocator(.debug, .err) {
+ return LoggingAllocator(.debug, .err).init(parent_allocator);
}
diff --git a/lib/std/log.zig b/lib/std/log.zig
index 51eaaae695..96281ba72e 100644
--- a/lib/std/log.zig
+++ b/lib/std/log.zig
@@ -18,8 +18,8 @@
//! ```
//! const std = @import("std");
//!
-//! // Set the log level to warning
-//! pub const log_level: std.log.Level = .warn;
+//! // Set the log level to info
+//! pub const log_level: std.log.Level = .info;
//!
//! // Define root.log to override the std implementation
//! pub fn log(
@@ -28,17 +28,17 @@
//! comptime format: []const u8,
//! args: anytype,
//! ) void {
-//! // Ignore all non-critical logging from sources other than
+//! // Ignore all non-error logging from sources other than
//! // .my_project, .nice_library and .default
//! const scope_prefix = "(" ++ switch (scope) {
//! .my_project, .nice_library, .default => @tagName(scope),
-//! else => if (@enumToInt(level) <= @enumToInt(std.log.Level.crit))
+//! else => if (@enumToInt(level) <= @enumToInt(std.log.Level.err))
//! @tagName(scope)
//! else
//! return,
//! } ++ "): ";
//!
-//! const prefix = "[" ++ @tagName(level) ++ "] " ++ scope_prefix;
+//! const prefix = "[" ++ level.asText() ++ "] " ++ scope_prefix;
//!
//! // Print the message to stderr, silently ignoring any errors
//! const held = std.debug.getStderrMutex().acquire();
@@ -49,23 +49,23 @@
//!
//! pub fn main() void {
//! // Using the default scope:
-//! std.log.info("Just a simple informational log message", .{}); // Won't be printed as log_level is .warn
-//! std.log.warn("Flux capacitor is starting to overheat", .{});
+//! std.log.debug("A borderline useless debug log message", .{}); // Won't be printed as log_level is .info
+//! std.log.info("Flux capacitor is starting to overheat", .{});
//!
//! // Using scoped logging:
//! const my_project_log = std.log.scoped(.my_project);
//! const nice_library_log = std.log.scoped(.nice_library);
//! const verbose_lib_log = std.log.scoped(.verbose_lib);
//!
-//! my_project_log.info("Starting up", .{}); // Won't be printed as log_level is .warn
-//! nice_library_log.err("Something went very wrong, sorry", .{});
-//! verbose_lib_log.err("Added 1 + 1: {}", .{1 + 1}); // Won't be printed as it gets filtered out by our log function
+//! my_project_log.debug("Starting up", .{}); // Won't be printed as log_level is .info
+//! nice_library_log.warn("Something went very wrong, sorry", .{});
+//! verbose_lib_log.warn("Added 1 + 1: {}", .{1 + 1}); // Won't be printed as it gets filtered out by our log function
//! }
//! ```
//! Which produces the following output:
//! ```
-//! [warn] (default): Flux capacitor is starting to overheat
-//! [err] (nice_library): Something went very wrong, sorry
+//! [info] (default): Flux capacitor is starting to overheat
+//! [warning] (nice_library): Something went very wrong, sorry
//! ```
const std = @import("std.zig");
@@ -73,42 +73,29 @@ const builtin = @import("builtin");
const root = @import("root");
pub const Level = enum {
- /// Emergency: a condition that cannot be handled, usually followed by a
- /// panic.
- emerg,
- /// Alert: a condition that should be corrected immediately (e.g. database
- /// corruption).
- alert,
- /// Critical: A bug has been detected or something has gone wrong and it
- /// will have an effect on the operation of the program.
- crit,
- /// Error: A bug has been detected or something has gone wrong but it is
- /// recoverable.
+ /// Error: something has gone wrong. This might be recoverable or might
+ /// be followed by the program exiting.
err,
/// Warning: it is uncertain if something has gone wrong or not, but the
/// circumstances would be worth investigating.
warn,
- /// Notice: non-error but significant conditions.
- notice,
- /// Informational: general messages about the state of the program.
+ /// Info: general messages about the state of the program.
info,
/// Debug: messages only useful for debugging.
debug,
/// Returns a string literal of the given level in full text form.
pub fn asText(comptime self: Level) switch (self) {
- .emerg => @TypeOf("emergency"),
- .crit => @TypeOf("critical"),
.err => @TypeOf("error"),
.warn => @TypeOf("warning"),
- else => @TypeOf(@tagName(self)),
+ .info => @TypeOf("info"),
+ .debug => @TypeOf("debug"),
} {
return switch (self) {
- .emerg => "emergency",
- .crit => "critical",
.err => "error",
.warn => "warning",
- else => @tagName(self),
+ .info => "info",
+ .debug => "debug",
};
}
};
@@ -116,9 +103,8 @@ pub const Level = enum {
/// The default log level is based on build mode.
pub const default_level: Level = switch (builtin.mode) {
.Debug => .debug,
- .ReleaseSafe => .notice,
- .ReleaseFast => .err,
- .ReleaseSmall => .err,
+ .ReleaseSafe => .info,
+ .ReleaseFast, .ReleaseSmall => .err,
};
/// The current log level. This is set to root.log_level if present, otherwise
@@ -188,39 +174,18 @@ pub fn defaultLog(
/// provided here.
pub fn scoped(comptime scope: @Type(.EnumLiteral)) type {
return struct {
- /// Log an emergency message. This log level is intended to be used
- /// for conditions that cannot be handled and is usually followed by a panic.
- pub fn emerg(
- comptime format: []const u8,
- args: anytype,
- ) void {
- @setCold(true);
- log(.emerg, scope, format, args);
- }
+ /// Deprecated. TODO: replace with @compileError() after 0.9.0 is released
+ pub const emerg = @This().err;
- /// Log an alert message. This log level is intended to be used for
- /// conditions that should be corrected immediately (e.g. database corruption).
- pub fn alert(
- comptime format: []const u8,
- args: anytype,
- ) void {
- @setCold(true);
- log(.alert, scope, format, args);
- }
+ /// Deprecated. TODO: replace with @compileError() after 0.9.0 is released
+ pub const alert = @This().err;
- /// Log a critical message. This log level is intended to be used
- /// when a bug has been detected or something has gone wrong and it will have
- /// an effect on the operation of the program.
- pub fn crit(
- comptime format: []const u8,
- args: anytype,
- ) void {
- @setCold(true);
- log(.crit, scope, format, args);
- }
+ /// Deprecated. TODO: replace with @compileError() after 0.9.0 is released
+ pub const crit = @This().err;
- /// Log an error message. This log level is intended to be used when
- /// a bug has been detected or something has gone wrong but it is recoverable.
+ /// Log an error message. This log level is intended to be used
+ /// when something has gone wrong. This might be recoverable or might
+ /// be followed by the program exiting.
pub fn err(
comptime format: []const u8,
args: anytype,
@@ -239,14 +204,8 @@ pub fn scoped(comptime scope: @Type(.EnumLiteral)) type {
log(.warn, scope, format, args);
}
- /// Log a notice message. This log level is intended to be used for
- /// non-error but significant conditions.
- pub fn notice(
- comptime format: []const u8,
- args: anytype,
- ) void {
- log(.notice, scope, format, args);
- }
+ /// Deprecated. TODO: replace with @compileError() after 0.9.0 is released
+ pub const notice = @This().info;
/// Log an info message. This log level is intended to be used for
/// general messages about the state of the program.
@@ -271,24 +230,18 @@ pub fn scoped(comptime scope: @Type(.EnumLiteral)) type {
/// The default scoped logging namespace.
pub const default = scoped(.default);
-/// Log an emergency message using the default scope. This log level is
-/// intended to be used for conditions that cannot be handled and is usually
-/// followed by a panic.
-pub const emerg = default.emerg;
+/// Deprecated. TODO: replace with @compileError() after 0.9.0 is released
+pub const emerg = default.err;
-/// Log an alert message using the default scope. This log level is intended to
-/// be used for conditions that should be corrected immediately (e.g. database
-/// corruption).
-pub const alert = default.alert;
+/// Deprecated. TODO: replace with @compileError() after 0.9.0 is released
+pub const alert = default.err;
-/// Log a critical message using the default scope. This log level is intended
-/// to be used when a bug has been detected or something has gone wrong and it
-/// will have an effect on the operation of the program.
-pub const crit = default.crit;
+/// Deprecated. TODO: replace with @compileError() after 0.9.0 is released
+pub const crit = default.err;
/// Log an error message using the default scope. This log level is intended to
-/// be used when a bug has been detected or something has gone wrong but it is
-/// recoverable.
+/// be used when something has gone wrong. This might be recoverable or might
+/// be followed by the program exiting.
pub const err = default.err;
/// Log a warning message using the default scope. This log level is intended
@@ -296,9 +249,8 @@ pub const err = default.err;
/// the circumstances would be worth investigating.
pub const warn = default.warn;
-/// Log a notice message using the default scope. This log level is intended to
-/// be used for non-error but significant conditions.
-pub const notice = default.notice;
+/// Deprecated. TODO: replace with @compileError() after 0.9.0 is released
+pub const notice = default.info;
/// Log an info message using the default scope. This log level is intended to
/// be used for general messages about the state of the program.
diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig
index 8082e656e1..5edf5b3ce5 100644
--- a/lib/std/math/big/int.zig
+++ b/lib/std/math/big/int.zig
@@ -33,7 +33,7 @@ pub fn calcToStringLimbsBufferLen(a_len: usize, base: u8) usize {
}
pub fn calcDivLimbsBufferLen(a_len: usize, b_len: usize) usize {
- return calcMulLimbsBufferLen(a_len, b_len, 2) * 4;
+ return a_len + b_len + 4;
}
pub fn calcMulLimbsBufferLen(a_len: usize, b_len: usize, aliases: usize) usize {
@@ -135,6 +135,11 @@ pub const Mutable = struct {
};
}
+ /// Returns true if `a == 0`.
+ pub fn eqZero(self: Mutable) bool {
+ return self.toConst().eqZero();
+ }
+
/// Asserts that the allocator owns the limbs memory. If this is not the case,
/// use `toConst().toManaged()`.
pub fn toManaged(self: Mutable, allocator: *Allocator) Managed {
@@ -755,8 +760,8 @@ pub const Mutable = struct {
/// q may alias with a or b.
///
/// Asserts there is enough memory to store q and r.
- /// The upper bound for r limb count is a.limbs.len.
- /// The upper bound for q limb count is given by `a.limbs.len + b.limbs.len + 1`.
+ /// The upper bound for r limb count is `b.limbs.len`.
+ /// The upper bound for q limb count is given by `a.limbs.len`.
///
/// If `allocator` is provided, it will be used for temporary storage to improve
/// multiplication performance. `error.OutOfMemory` is handled with a fallback algorithm.
@@ -768,17 +773,115 @@ pub const Mutable = struct {
a: Const,
b: Const,
limbs_buffer: []Limb,
- allocator: ?*Allocator,
) void {
- div(q, r, a, b, limbs_buffer, allocator);
+ const sep = a.limbs.len + 2;
+ var x = a.toMutable(limbs_buffer[0..sep]);
+ var y = b.toMutable(limbs_buffer[sep..]);
+
+ div(q, r, &x, &y);
+
+ // Note, `div` performs truncating division, which satisfies
+ // @divTrunc(a, b) * b + @rem(a, b) = a
+ // so r = a - @divTrunc(a, b) * b
+ // Note, @rem(a, -b) = @rem(-b, a) = -@rem(a, b) = -@rem(-a, -b)
+ // For divTrunc, we want to perform
+ // @divFloor(a, b) * b + @mod(a, b) = a
+ // Note:
+ // @divFloor(-a, b)
+ // = @divFloor(a, -b)
+ // = -@divCeil(a, b)
+ // = -@divFloor(a + b - 1, b)
+ // = -@divTrunc(a + b - 1, b)
+
+ // Note (1):
+ // @divTrunc(a + b - 1, b) * b + @rem(a + b - 1, b) = a + b - 1
+ // = @divTrunc(a + b - 1, b) * b + @rem(a - 1, b) = a + b - 1
+ // = @divTrunc(a + b - 1, b) * b + @rem(a - 1, b) - b + 1 = a
+
+ if (a.positive and b.positive) {
+ // Positive-positive case, don't need to do anything.
+ } else if (a.positive and !b.positive) {
+ // a/-b -> q is negative, and so we need to fix flooring.
+ // Subtract one to make the division flooring.
+
+ // @divFloor(a, -b) * -b + @mod(a, -b) = a
+ // If b divides a exactly, we have @divFloor(a, -b) * -b = a
+ // Else, we have @divFloor(a, -b) * -b > a, so @mod(a, -b) becomes negative
+
+ // We have:
+ // @divFloor(a, -b) * -b + @mod(a, -b) = a
+ // = -@divTrunc(a + b - 1, b) * -b + @mod(a, -b) = a
+ // = @divTrunc(a + b - 1, b) * b + @mod(a, -b) = a
+
+ // Substitute a for (1):
+ // @divTrunc(a + b - 1, b) * b + @rem(a - 1, b) - b + 1 = @divTrunc(a + b - 1, b) * b + @mod(a, -b)
+ // Yields:
+ // @mod(a, -b) = @rem(a - 1, b) - b + 1
+ // Note that `r` holds @rem(a, b) at this point.
+ //
+ // If @rem(a, b) is not 0:
+ // @rem(a - 1, b) = @rem(a, b) - 1
+ // => @mod(a, -b) = @rem(a, b) - 1 - b + 1 = @rem(a, b) - b
+ // Else:
+ // @rem(a - 1, b) = @rem(a + b - 1, b) = @rem(b - 1, b) = b - 1
+ // => @mod(a, -b) = b - 1 - b + 1 = 0
+ if (!r.eqZero()) {
+ q.addScalar(q.toConst(), -1);
+ r.positive = true;
+ r.sub(r.toConst(), y.toConst().abs());
+ }
+ } else if (!a.positive and b.positive) {
+ // -a/b -> q is negative, and so we need to fix flooring.
+ // Subtract one to make the division flooring.
+
+ // @divFloor(-a, b) * b + @mod(-a, b) = a
+ // If b divides a exactly, we have @divFloor(-a, b) * b = -a
+ // Else, we have @divFloor(-a, b) * b < -a, so @mod(-a, b) becomes positive
+
+ // We have:
+ // @divFloor(-a, b) * b + @mod(-a, b) = -a
+ // = -@divTrunc(a + b - 1, b) * b + @mod(-a, b) = -a
+ // = @divTrunc(a + b - 1, b) * b - @mod(-a, b) = a
+
+ // Substitute a for (1):
+ // @divTrunc(a + b - 1, b) * b + @rem(a - 1, b) - b + 1 = @divTrunc(a + b - 1, b) * b - @mod(-a, b)
+ // Yields:
+ // @rem(a - 1, b) - b + 1 = -@mod(-a, b)
+ // => -@mod(-a, b) = @rem(a - 1, b) - b + 1
+ // => @mod(-a, b) = -(@rem(a - 1, b) - b + 1) = -@rem(a - 1, b) + b - 1
+ //
+ // If @rem(a, b) is not 0:
+ // @rem(a - 1, b) = @rem(a, b) - 1
+ // => @mod(-a, b) = -(@rem(a, b) - 1) + b - 1 = -@rem(a, b) + 1 + b - 1 = -@rem(a, b) + b
+ // Else :
+ // @rem(a - 1, b) = b - 1
+ // => @mod(-a, b) = -(b - 1) + b - 1 = 0
+ if (!r.eqZero()) {
+ q.addScalar(q.toConst(), -1);
+ r.positive = false;
+ r.add(r.toConst(), y.toConst().abs());
+ }
+ } else if (!a.positive and !b.positive) {
+ // a/b -> q is positive, don't need to do anything to fix flooring.
- // Trunc -> Floor.
- if (!q.positive) {
- const one: Const = .{ .limbs = &[_]Limb{1}, .positive = true };
- q.sub(q.toConst(), one);
- r.add(q.toConst(), one);
+ // @divFloor(-a, -b) * -b + @mod(-a, -b) = -a
+ // If b divides a exactly, we have @divFloor(-a, -b) * -b = -a
+ // Else, we have @divFloor(-a, -b) * -b > -a, so @mod(-a, -b) becomes negative
+
+ // We have:
+ // @divFloor(-a, -b) * -b + @mod(-a, -b) = -a
+ // = @divTrunc(a, b) * -b + @mod(-a, -b) = -a
+ // = @divTrunc(a, b) * b - @mod(-a, -b) = a
+
+ // We also have:
+ // @divTrunc(a, b) * b + @rem(a, b) = a
+
+ // Substitute a:
+ // @divTrunc(a, b) * b + @rem(a, b) = @divTrunc(a, b) * b - @mod(-a, -b)
+ // => @rem(a, b) = -@mod(-a, -b)
+ // => @mod(-a, -b) = -@rem(a, b)
+ r.positive = false;
}
- r.positive = b.positive;
}
/// q = a / b (rem r)
@@ -787,9 +890,8 @@ pub const Mutable = struct {
/// q may alias with a or b.
///
/// Asserts there is enough memory to store q and r.
- /// The upper bound for r limb count is a.limbs.len.
- /// The upper bound for q limb count is given by `calcQuotientLimbLen`. This accounts
- /// for temporary space used by the division algorithm.
+ /// The upper bound for r limb count is `b.limbs.len`.
+ /// The upper bound for q limb count is given by `a.limbs.len`.
///
/// If `allocator` is provided, it will be used for temporary storage to improve
/// multiplication performance. `error.OutOfMemory` is handled with a fallback algorithm.
@@ -801,10 +903,12 @@ pub const Mutable = struct {
a: Const,
b: Const,
limbs_buffer: []Limb,
- allocator: ?*Allocator,
) void {
- div(q, r, a, b, limbs_buffer, allocator);
- r.positive = a.positive;
+ const sep = a.limbs.len + 2;
+ var x = a.toMutable(limbs_buffer[0..sep]);
+ var y = b.toMutable(limbs_buffer[sep..]);
+
+ div(q, r, &x, &y);
}
/// r = a << shift, in other words, r = a * 2^shift
@@ -825,7 +929,7 @@ pub const Mutable = struct {
///
/// Asserts there is enough memory to fit the result. The upper bound Limb count is
/// r is `calcTwosCompLimbCount(bit_count)`.
- pub fn shiftLeftSat(r: *Mutable, a: Const, shift: usize, signedness: std.builtin.Signedness, bit_count: usize) void {
+ pub fn shiftLeftSat(r: *Mutable, a: Const, shift: usize, signedness: Signedness, bit_count: usize) void {
// Special case: When the argument is negative, but the result is supposed to be unsigned,
// return 0 in all cases.
if (!a.positive and signedness == .unsigned) {
@@ -906,6 +1010,17 @@ pub const Mutable = struct {
r.positive = a.positive;
}
+ /// r = ~a under 2s complement wrapping semantics.
+ /// r may alias with a.
+ ///
+ /// Asserts that r has enough limbs to store the result. The upper bound Limb count for
+ /// r is `calcTwosCompLimbCount(bit_count)`.
+ pub fn bitNotWrap(r: *Mutable, a: Const, signedness: Signedness, bit_count: usize) void {
+ r.copy(a.negate());
+ const negative_one = Const{ .limbs = &.{1}, .positive = false };
+ r.addWrap(r.toConst(), negative_one, signedness, bit_count);
+ }
+
/// r = a | b under 2s complement semantics.
/// r may alias with a or b.
///
@@ -1157,181 +1272,214 @@ pub const Mutable = struct {
result.copy(x.toConst());
}
- /// Truncates by default.
- fn div(quo: *Mutable, rem: *Mutable, a: Const, b: Const, limbs_buffer: []Limb, allocator: ?*Allocator) void {
- assert(!b.eqZero()); // division by zero
- assert(quo != rem); // illegal aliasing
+ // Truncates by default.
+ fn div(q: *Mutable, r: *Mutable, x: *Mutable, y: *Mutable) void {
+ assert(!y.eqZero()); // division by zero
+ assert(q != r); // illegal aliasing
- if (a.orderAbs(b) == .lt) {
- // quo may alias a so handle rem first
- rem.copy(a);
- rem.positive = a.positive == b.positive;
+ const q_positive = (x.positive == y.positive);
+ const r_positive = x.positive;
- quo.positive = true;
- quo.len = 1;
- quo.limbs[0] = 0;
+ if (x.toConst().orderAbs(y.toConst()) == .lt) {
+ // q may alias x so handle r first.
+ r.copy(x.toConst());
+ r.positive = r_positive;
+
+ q.set(0);
return;
}
// Handle trailing zero-words of divisor/dividend. These are not handled in the following
// algorithms.
- const a_zero_limb_count = blk: {
- var i: usize = 0;
- while (i < a.limbs.len) : (i += 1) {
- if (a.limbs[i] != 0) break;
- }
- break :blk i;
- };
- const b_zero_limb_count = blk: {
- var i: usize = 0;
- while (i < b.limbs.len) : (i += 1) {
- if (b.limbs[i] != 0) break;
- }
- break :blk i;
- };
+ // Note, there must be a non-zero limb for either.
+ // const x_trailing = std.mem.indexOfScalar(Limb, x.limbs[0..x.len], 0).?;
+ // const y_trailing = std.mem.indexOfScalar(Limb, y.limbs[0..y.len], 0).?;
+
+ const x_trailing = for (x.limbs[0..x.len]) |xi, i| {
+ if (xi != 0) break i;
+ } else unreachable;
- const ab_zero_limb_count = math.min(a_zero_limb_count, b_zero_limb_count);
+ const y_trailing = for (y.limbs[0..y.len]) |yi, i| {
+ if (yi != 0) break i;
+ } else unreachable;
- if (b.limbs.len - ab_zero_limb_count == 1) {
- lldiv1(quo.limbs[0..], &rem.limbs[0], a.limbs[ab_zero_limb_count..a.limbs.len], b.limbs[b.limbs.len - 1]);
- quo.normalize(a.limbs.len - ab_zero_limb_count);
- quo.positive = (a.positive == b.positive);
+ const xy_trailing = math.min(x_trailing, y_trailing);
- rem.len = 1;
- rem.positive = true;
+ if (y.len - xy_trailing == 1) {
+ lldiv1(q.limbs, &r.limbs[0], x.limbs[xy_trailing..x.len], y.limbs[y.len - 1]);
+ q.normalize(x.len - xy_trailing);
+ q.positive = q_positive;
+
+ r.len = 1;
+ r.positive = r_positive;
} else {
- // x and y are modified during division
- const sep_len = calcMulLimbsBufferLen(a.limbs.len, b.limbs.len, 2);
- const x_limbs = limbs_buffer[0 * sep_len ..][0..sep_len];
- const y_limbs = limbs_buffer[1 * sep_len ..][0..sep_len];
- const t_limbs = limbs_buffer[2 * sep_len ..][0..sep_len];
- const mul_limbs_buf = limbs_buffer[3 * sep_len ..][0..sep_len];
-
- var x: Mutable = .{
- .limbs = x_limbs,
- .positive = a.positive,
- .len = a.limbs.len - ab_zero_limb_count,
+ // Shrink x, y such that the trailing zero limbs shared between are removed.
+ var x0 = Mutable{
+ .limbs = x.limbs[xy_trailing..],
+ .len = x.len - xy_trailing,
+ .positive = true,
};
- var y: Mutable = .{
- .limbs = y_limbs,
- .positive = b.positive,
- .len = b.limbs.len - ab_zero_limb_count,
+
+ var y0 = Mutable{
+ .limbs = y.limbs[xy_trailing..],
+ .len = y.len - xy_trailing,
+ .positive = true,
};
- // Shrink x, y such that the trailing zero limbs shared between are removed.
- mem.copy(Limb, x.limbs, a.limbs[ab_zero_limb_count..a.limbs.len]);
- mem.copy(Limb, y.limbs, b.limbs[ab_zero_limb_count..b.limbs.len]);
+ divmod(q, r, &x0, &y0);
+ q.positive = q_positive;
- divN(quo, rem, &x, &y, t_limbs, mul_limbs_buf, allocator);
- quo.positive = (a.positive == b.positive);
+ r.positive = r_positive;
}
- if (ab_zero_limb_count != 0) {
- rem.shiftLeft(rem.toConst(), ab_zero_limb_count * limb_bits);
+ if (xy_trailing != 0) {
+ // Manually shift here since we know it's limb-aligned.
+ mem.copyBackwards(Limb, r.limbs[xy_trailing..], r.limbs[0..r.len]);
+ mem.set(Limb, r.limbs[0..xy_trailing], 0);
+ r.len += xy_trailing;
}
}
/// Handbook of Applied Cryptography, 14.20
///
/// x = qy + r where 0 <= r < y
- fn divN(
+ /// y is modified but returned intact.
+ fn divmod(
q: *Mutable,
r: *Mutable,
x: *Mutable,
y: *Mutable,
- tmp_limbs: []Limb,
- mul_limb_buf: []Limb,
- allocator: ?*Allocator,
) void {
- assert(y.len >= 2);
- assert(x.len >= y.len);
- assert(q.limbs.len >= x.len + y.len - 1);
-
- // See 3.2
- var backup_tmp_limbs: [3]Limb = undefined;
- const t_limbs = if (tmp_limbs.len < 3) &backup_tmp_limbs else tmp_limbs;
-
- var tmp: Mutable = .{
- .limbs = t_limbs,
- .len = 1,
- .positive = true,
- };
- tmp.limbs[0] = 0;
+ // 0.
+ // Normalize so that y[t] > b/2
+ const lz = @clz(Limb, y.limbs[y.len - 1]);
+ const norm_shift = if (lz == 0 and y.toConst().isOdd())
+ limb_bits // Force an extra limb so that y is even.
+ else
+ lz;
- // Normalize so y > limb_bits / 2 (i.e. leading bit is set) and even
- var norm_shift = @clz(Limb, y.limbs[y.len - 1]);
- if (norm_shift == 0 and y.toConst().isOdd()) {
- norm_shift = limb_bits;
- }
x.shiftLeft(x.toConst(), norm_shift);
y.shiftLeft(y.toConst(), norm_shift);
const n = x.len - 1;
const t = y.len - 1;
+ const shift = n - t;
// 1.
- q.len = n - t + 1;
+ // for 0 <= j <= n - t, set q[j] to 0
+ q.len = shift + 1;
q.positive = true;
mem.set(Limb, q.limbs[0..q.len], 0);
// 2.
- tmp.shiftLeft(y.toConst(), limb_bits * (n - t));
- while (x.toConst().order(tmp.toConst()) != .lt) {
- q.limbs[n - t] += 1;
- x.sub(x.toConst(), tmp.toConst());
+ // while x >= y * b^(n - t):
+ // x -= y * b^(n - t)
+ // q[n - t] += 1
+ // Note, this algorithm is performed only once if y[t] > radix/2 and y is even, which we
+ // enforced in step 0. This means we can replace the while with an if.
+ // Note, multiplication by b^(n - t) comes down to shifting to the right by n - t limbs.
+ // We can also replace x >= y * b^(n - t) by x/b^(n - t) >= y, and use shifts for that.
+ {
+ // x >= y * b^(n - t) can be replaced by x/b^(n - t) >= y.
+
+ // 'divide' x by b^(n - t)
+ var tmp = Mutable{
+ .limbs = x.limbs[shift..],
+ .len = x.len - shift,
+ .positive = true,
+ };
+
+ if (tmp.toConst().order(y.toConst()) != .lt) {
+ // Perform x -= y * b^(n - t)
+ // Note, we can subtract y from x[n - t..] and get the result without shifting.
+ // We can also re-use tmp which already contains the relevant part of x. Note that
+ // this also edits x.
+ // Due to the check above, this cannot underflow.
+ tmp.sub(tmp.toConst(), y.toConst());
+
+ // tmp.sub normalized tmp, but we need to normalize x now.
+ x.limbs.len = tmp.limbs.len + shift;
+
+ q.limbs[shift] += 1;
+ }
}
// 3.
+ // for i from n down to t + 1, do
var i = n;
- while (i > t) : (i -= 1) {
- // 3.1
+ while (i >= t + 1) : (i -= 1) {
+ const k = i - t - 1;
+ // 3.1.
+ // if x_i == y_t:
+ // q[i - t - 1] = b - 1
+ // else:
+ // q[i - t - 1] = (x[i] * b + x[i - 1]) / y[t]
if (x.limbs[i] == y.limbs[t]) {
- q.limbs[i - t - 1] = maxInt(Limb);
+ q.limbs[k] = maxInt(Limb);
} else {
- const num = (@as(DoubleLimb, x.limbs[i]) << limb_bits) | @as(DoubleLimb, x.limbs[i - 1]);
- const z = @intCast(Limb, num / @as(DoubleLimb, y.limbs[t]));
- q.limbs[i - t - 1] = if (z > maxInt(Limb)) maxInt(Limb) else @as(Limb, z);
+ const q0 = (@as(DoubleLimb, x.limbs[i]) << limb_bits) | @as(DoubleLimb, x.limbs[i - 1]);
+ const n0 = @as(DoubleLimb, y.limbs[t]);
+ q.limbs[k] = @intCast(Limb, q0 / n0);
}
// 3.2
- tmp.limbs[0] = if (i >= 2) x.limbs[i - 2] else 0;
- tmp.limbs[1] = if (i >= 1) x.limbs[i - 1] else 0;
- tmp.limbs[2] = x.limbs[i];
- tmp.normalize(3);
+ // while q[i - t - 1] * (y[t] * b + y[t - 1]) > x[i] * b * b + x[i - 1] * b + x[i - 2]:
+ // q[i - t - 1] -= 1
+ // Note, if y[t] > b / 2 this part is repeated no more than twice.
+
+ // Extract from y.
+ const y0 = if (t > 0) y.limbs[t - 1] else 0;
+ const y1 = y.limbs[t];
+
+ // Extract from x.
+ // Note, big endian.
+ const tmp0 = [_]Limb{
+ x.limbs[i],
+ if (i >= 1) x.limbs[i - 1] else 0,
+ if (i >= 2) x.limbs[i - 2] else 0,
+ };
while (true) {
- // 2x1 limb multiplication unrolled against single-limb q[i-t-1]
- var carry: Limb = 0;
- r.limbs[0] = addMulLimbWithCarry(0, if (t >= 1) y.limbs[t - 1] else 0, q.limbs[i - t - 1], &carry);
- r.limbs[1] = addMulLimbWithCarry(0, y.limbs[t], q.limbs[i - t - 1], &carry);
- r.limbs[2] = carry;
- r.normalize(3);
-
- if (r.toConst().orderAbs(tmp.toConst()) != .gt) {
+ // Ad-hoc 2x1 multiplication with q[i - t - 1].
+ // Note, big endian.
+ var tmp1 = [_]Limb{ 0, undefined, undefined };
+ tmp1[2] = addMulLimbWithCarry(0, y0, q.limbs[k], &tmp1[0]);
+ tmp1[1] = addMulLimbWithCarry(0, y1, q.limbs[k], &tmp1[0]);
+
+ // Big-endian compare
+ if (mem.order(Limb, &tmp1, &tmp0) != .gt)
break;
- }
- q.limbs[i - t - 1] -= 1;
+ q.limbs[k] -= 1;
}
- // 3.3
- tmp.set(q.limbs[i - t - 1]);
- tmp.mul(tmp.toConst(), y.toConst(), mul_limb_buf, allocator);
- tmp.shiftLeft(tmp.toConst(), limb_bits * (i - t - 1));
- x.sub(x.toConst(), tmp.toConst());
-
- if (!x.positive) {
- tmp.shiftLeft(y.toConst(), limb_bits * (i - t - 1));
- x.add(x.toConst(), tmp.toConst());
- q.limbs[i - t - 1] -= 1;
+ // 3.3.
+ // x -= q[i - t - 1] * y * b^(i - t - 1)
+ // Note, we multiply by a single limb here.
+ // The shift doesn't need to be performed if we add the result of the first multiplication
+ // to x[i - t - 1].
+ // mem.set(Limb, x.limbs, 0);
+ const underflow = llmulLimb(.sub, x.limbs[k..x.len], y.limbs[0..y.len], q.limbs[k]);
+
+ // 3.4.
+ // if x < 0:
+ // x += y * b^(i - t - 1)
+ // q[i - t - 1] -= 1
+ // Note, we check for x < 0 using the underflow flag from the previous operation.
+ if (underflow) {
+ // While we didn't properly set the signedness of x, this operation should 'flow' it back to positive.
+ llaccum(.add, x.limbs[k..x.len], y.limbs[0..y.len]);
+ q.limbs[k] -= 1;
}
+
+ x.normalize(x.len);
}
- // Denormalize
q.normalize(q.len);
+ // De-normalize r and y.
r.shiftRight(x.toConst(), norm_shift);
- r.normalize(r.len);
+ y.shiftRight(y.toConst(), norm_shift);
}
/// Truncate an integer to a number of bits, following 2s-complement semantics.
@@ -1394,13 +1542,16 @@ pub const Mutable = struct {
r.normalize(r.len);
}
} else {
- r.copy(a);
- if (r.len < req_limbs) {
+ if (a.limbs.len < req_limbs) {
// Integer fits within target bits, no wrapping required.
+ r.copy(a);
return;
}
- r.len = req_limbs;
+ r.copy(.{
+ .positive = a.positive,
+ .limbs = a.limbs[0..req_limbs],
+ });
r.limbs[r.len - 1] &= mask;
r.normalize(r.len);
@@ -1786,7 +1937,7 @@ pub const Const = struct {
while (q.len >= 2) {
// Passing an allocator here would not be helpful since this division is destroying
// information, not creating it. [TODO citation needed]
- q.divTrunc(&r, q.toConst(), b, rest_of_the_limbs_buf, null);
+ q.divTrunc(&r, q.toConst(), b, rest_of_the_limbs_buf);
var r_word = r.limbs[0];
var i: usize = 0;
@@ -2413,16 +2564,14 @@ pub const Managed = struct {
/// a / b are floored (rounded towards 0).
///
/// Returns an error if memory could not be allocated.
- ///
- /// q's allocator is used for temporary storage to speed up the multiplication.
pub fn divFloor(q: *Managed, r: *Managed, a: Const, b: Const) !void {
- try q.ensureCapacity(a.limbs.len + b.limbs.len + 1);
- try r.ensureCapacity(a.limbs.len);
+ try q.ensureCapacity(a.limbs.len);
+ try r.ensureCapacity(b.limbs.len);
var mq = q.toMutable();
var mr = r.toMutable();
const limbs_buffer = try q.allocator.alloc(Limb, calcDivLimbsBufferLen(a.limbs.len, b.limbs.len));
defer q.allocator.free(limbs_buffer);
- mq.divFloor(&mr, a, b, limbs_buffer, q.allocator);
+ mq.divFloor(&mr, a, b, limbs_buffer);
q.setMetadata(mq.positive, mq.len);
r.setMetadata(mr.positive, mr.len);
}
@@ -2432,16 +2581,14 @@ pub const Managed = struct {
/// a / b are truncated (rounded towards -inf).
///
/// Returns an error if memory could not be allocated.
- ///
- /// q's allocator is used for temporary storage to speed up the multiplication.
pub fn divTrunc(q: *Managed, r: *Managed, a: Const, b: Const) !void {
- try q.ensureCapacity(a.limbs.len + b.limbs.len + 1);
- try r.ensureCapacity(a.limbs.len);
+ try q.ensureCapacity(a.limbs.len);
+ try r.ensureCapacity(b.limbs.len);
var mq = q.toMutable();
var mr = r.toMutable();
const limbs_buffer = try q.allocator.alloc(Limb, calcDivLimbsBufferLen(a.limbs.len, b.limbs.len));
defer q.allocator.free(limbs_buffer);
- mq.divTrunc(&mr, a, b, limbs_buffer, q.allocator);
+ mq.divTrunc(&mr, a, b, limbs_buffer);
q.setMetadata(mq.positive, mq.len);
r.setMetadata(mr.positive, mr.len);
}
@@ -2455,7 +2602,7 @@ pub const Managed = struct {
}
/// r = a <<| shift with 2s-complement saturating semantics.
- pub fn shiftLeftSat(r: *Managed, a: Managed, shift: usize, signedness: std.builtin.Signedness, bit_count: usize) !void {
+ pub fn shiftLeftSat(r: *Managed, a: Managed, shift: usize, signedness: Signedness, bit_count: usize) !void {
try r.ensureTwosCompCapacity(bit_count);
var m = r.toMutable();
m.shiftLeftSat(a.toConst(), shift, signedness, bit_count);
@@ -2476,6 +2623,14 @@ pub const Managed = struct {
r.setMetadata(m.positive, m.len);
}
+ /// r = ~a under 2s-complement wrapping semantics.
+ pub fn bitNotWrap(r: *Managed, a: Managed, signedness: Signedness, bit_count: usize) !void {
+ try r.ensureTwosCompCapacity(bit_count);
+ var m = r.toMutable();
+ m.bitNotWrap(a.toConst(), signedness, bit_count);
+ r.setMetadata(m.positive, m.len);
+ }
+
/// r = a | b
///
/// a and b are zero-extended to the longer of a or b.
@@ -2863,20 +3018,22 @@ fn llmulaccLong(comptime op: AccOp, r: []Limb, a: []const Limb, b: []const Limb)
var i: usize = 0;
while (i < b.len) : (i += 1) {
- llmulLimb(op, r[i..], a, b[i]);
+ _ = llmulLimb(op, r[i..], a, b[i]);
}
}
/// r = r (op) y * xi
/// The result is computed modulo `r.len`.
-fn llmulLimb(comptime op: AccOp, acc: []Limb, y: []const Limb, xi: Limb) void {
+/// Returns whether the operation overflowed.
+fn llmulLimb(comptime op: AccOp, acc: []Limb, y: []const Limb, xi: Limb) bool {
@setRuntimeSafety(debug_safety);
if (xi == 0) {
- return;
+ return false;
}
- var a_lo = acc[0..y.len];
- var a_hi = acc[y.len..];
+ const split = std.math.min(y.len, acc.len);
+ var a_lo = acc[0..split];
+ var a_hi = acc[split..];
switch (op) {
.add => {
@@ -2890,6 +3047,8 @@ fn llmulLimb(comptime op: AccOp, acc: []Limb, y: []const Limb, xi: Limb) void {
while ((carry != 0) and (j < a_hi.len)) : (j += 1) {
carry = @boolToInt(@addWithOverflow(Limb, a_hi[j], carry, &a_hi[j]));
}
+
+ return carry != 0;
},
.sub => {
var borrow: Limb = 0;
@@ -2902,6 +3061,8 @@ fn llmulLimb(comptime op: AccOp, acc: []Limb, y: []const Limb, xi: Limb) void {
while ((borrow != 0) and (j < a_hi.len)) : (j += 1) {
borrow = @boolToInt(@subWithOverflow(Limb, a_hi[j], borrow, &a_hi[j]));
}
+
+ return borrow != 0;
},
}
}
@@ -3394,7 +3555,8 @@ fn llsquareBasecase(r: []Limb, x: []const Limb) void {
for (x_norm) |v, i| {
// Accumulate all the x[i]*x[j] (with x!=j) products
- llmulLimb(.add, r[2 * i + 1 ..], x_norm[i + 1 ..], v);
+ const overflow = llmulLimb(.add, r[2 * i + 1 ..], x_norm[i + 1 ..], v);
+ assert(!overflow);
}
// Each product appears twice, multiply by 2
@@ -3402,7 +3564,8 @@ fn llsquareBasecase(r: []Limb, x: []const Limb) void {
for (x_norm) |v, i| {
// Compute and add the squares
- llmulLimb(.add, r[2 * i ..], x[i .. i + 1], v);
+ const overflow = llmulLimb(.add, r[2 * i ..], x[i .. i + 1], v);
+ assert(!overflow);
}
}
diff --git a/lib/std/math/big/int_test.zig b/lib/std/math/big/int_test.zig
index a7e3186632..5975cf4896 100644
--- a/lib/std/math/big/int_test.zig
+++ b/lib/std/math/big/int_test.zig
@@ -1016,7 +1016,7 @@ test "big.int mulWrap multi-multi unsigned" {
defer c.deinit();
try c.mulWrap(a.toConst(), b.toConst(), .unsigned, 65);
- try testing.expect((try c.to(u256)) == (op1 * op2) & ((1 << 65) - 1));
+ try testing.expect((try c.to(u128)) == (op1 * op2) & ((1 << 65) - 1));
}
test "big.int mulWrap multi-multi signed" {
@@ -1399,6 +1399,63 @@ test "big.int div floor single-single -/-" {
try testing.expect((try r.to(i32)) == er);
}
+test "big.int div floor no remainder negative quotient" {
+ const u: i32 = -0x80000000;
+ const v: i32 = 1;
+
+ var a = try Managed.initSet(testing.allocator, u);
+ defer a.deinit();
+ var b = try Managed.initSet(testing.allocator, v);
+ defer b.deinit();
+
+ var q = try Managed.init(testing.allocator);
+ defer q.deinit();
+ var r = try Managed.init(testing.allocator);
+ defer r.deinit();
+ try Managed.divFloor(&q, &r, a.toConst(), b.toConst());
+
+ try testing.expect((try q.to(i32)) == -0x80000000);
+ try testing.expect((try r.to(i32)) == 0);
+}
+
+test "big.int div floor negative close to zero" {
+ const u: i32 = -2;
+ const v: i32 = 12;
+
+ var a = try Managed.initSet(testing.allocator, u);
+ defer a.deinit();
+ var b = try Managed.initSet(testing.allocator, v);
+ defer b.deinit();
+
+ var q = try Managed.init(testing.allocator);
+ defer q.deinit();
+ var r = try Managed.init(testing.allocator);
+ defer r.deinit();
+ try Managed.divFloor(&q, &r, a.toConst(), b.toConst());
+
+ try testing.expect((try q.to(i32)) == -1);
+ try testing.expect((try r.to(i32)) == 10);
+}
+
+test "big.int div floor positive close to zero" {
+ const u: i32 = 10;
+ const v: i32 = 12;
+
+ var a = try Managed.initSet(testing.allocator, u);
+ defer a.deinit();
+ var b = try Managed.initSet(testing.allocator, v);
+ defer b.deinit();
+
+ var q = try Managed.init(testing.allocator);
+ defer q.deinit();
+ var r = try Managed.init(testing.allocator);
+ defer r.deinit();
+ try Managed.divFloor(&q, &r, a.toConst(), b.toConst());
+
+ try testing.expect((try q.to(i32)) == 0);
+ try testing.expect((try r.to(i32)) == 10);
+}
+
test "big.int div multi-multi with rem" {
var a = try Managed.initSet(testing.allocator, 0x8888999911110000ffffeeeeddddccccbbbbaaaa9999);
defer a.deinit();
@@ -1654,6 +1711,18 @@ test "big.int truncate negative multi to single" {
try testing.expect((try a.to(i17)) == 0);
}
+test "big.int truncate multi unsigned many" {
+ var a = try Managed.initSet(testing.allocator, 1);
+ defer a.deinit();
+ try a.shiftLeft(a, 1023);
+
+ var b = try Managed.init(testing.allocator);
+ defer b.deinit();
+ try b.truncate(a.toConst(), .signed, @bitSizeOf(i1));
+
+ try testing.expect((try b.to(i1)) == 0);
+}
+
test "big.int saturate single signed positive" {
var a = try Managed.initSet(testing.allocator, 0xBBBB_BBBB);
defer a.deinit();
@@ -1866,6 +1935,42 @@ test "big.int sat shift-left signed multi negative" {
try testing.expect((try a.to(SignedDoubleLimb)) == @as(SignedDoubleLimb, x) <<| shift);
}
+test "big.int bitNotWrap unsigned simple" {
+ var a = try Managed.initSet(testing.allocator, 123);
+ defer a.deinit();
+
+ try a.bitNotWrap(a, .unsigned, 10);
+
+ try testing.expect((try a.to(u10)) == ~@as(u10, 123));
+}
+
+test "big.int bitNotWrap unsigned multi" {
+ var a = try Managed.initSet(testing.allocator, 0);
+ defer a.deinit();
+
+ try a.bitNotWrap(a, .unsigned, @bitSizeOf(DoubleLimb));
+
+ try testing.expect((try a.to(DoubleLimb)) == maxInt(DoubleLimb));
+}
+
+test "big.int bitNotWrap signed simple" {
+ var a = try Managed.initSet(testing.allocator, -456);
+ defer a.deinit();
+
+ try a.bitNotWrap(a, .signed, 11);
+
+ try testing.expect((try a.to(i11)) == ~@as(i11, -456));
+}
+
+test "big.int bitNotWrap signed multi" {
+ var a = try Managed.initSet(testing.allocator, 0);
+ defer a.deinit();
+
+ try a.bitNotWrap(a, .signed, @bitSizeOf(SignedDoubleLimb));
+
+ try testing.expect((try a.to(SignedDoubleLimb)) == -1);
+}
+
test "big.int bitwise and simple" {
var a = try Managed.initSet(testing.allocator, 0xffffffff11111111);
defer a.deinit();
diff --git a/lib/std/math/ceil.zig b/lib/std/math/ceil.zig
index fbac256166..cf3adcf5b5 100644
--- a/lib/std/math/ceil.zig
+++ b/lib/std/math/ceil.zig
@@ -20,6 +20,10 @@ pub fn ceil(x: anytype) @TypeOf(x) {
f32 => ceil32(x),
f64 => ceil64(x),
f128 => ceil128(x),
+
+ // TODO this is not correct for some targets
+ c_longdouble => @floatCast(c_longdouble, ceil128(x)),
+
else => @compileError("ceil not implemented for " ++ @typeName(T)),
};
}
diff --git a/lib/std/math/floor.zig b/lib/std/math/floor.zig
index c5ddb9e144..d6761ba77e 100644
--- a/lib/std/math/floor.zig
+++ b/lib/std/math/floor.zig
@@ -21,6 +21,10 @@ pub fn floor(x: anytype) @TypeOf(x) {
f32 => floor32(x),
f64 => floor64(x),
f128 => floor128(x),
+
+ // TODO this is not correct for some targets
+ c_longdouble => @floatCast(c_longdouble, floor128(x)),
+
else => @compileError("floor not implemented for " ++ @typeName(T)),
};
}
diff --git a/lib/std/math/trunc.zig b/lib/std/math/trunc.zig
index eab9a8b0c7..32bd7fb0aa 100644
--- a/lib/std/math/trunc.zig
+++ b/lib/std/math/trunc.zig
@@ -21,6 +21,10 @@ pub fn trunc(x: anytype) @TypeOf(x) {
f32 => trunc32(x),
f64 => trunc64(x),
f128 => trunc128(x),
+
+ // TODO this is not correct for some targets
+ c_longdouble => @floatCast(c_longdouble, trunc128(x)),
+
else => @compileError("trunc not implemented for " ++ @typeName(T)),
};
}
diff --git a/lib/std/os/linux/arm-eabi.zig b/lib/std/os/linux/arm-eabi.zig
index 4bb60a0005..39f7118338 100644
--- a/lib/std/os/linux/arm-eabi.zig
+++ b/lib/std/os/linux/arm-eabi.zig
@@ -520,6 +520,10 @@ pub const SYS = enum(usize) {
faccessat2 = 439,
process_madvise = 440,
epoll_pwait2 = 441,
+ mount_setattr = 442,
+ landlock_create_ruleset = 444,
+ landlock_add_rule = 445,
+ landlock_restrict_self = 446,
breakpoint = 0x0f0001,
cacheflush = 0x0f0002,
diff --git a/lib/std/os/linux/arm64.zig b/lib/std/os/linux/arm64.zig
index 58895843e0..506221b41a 100644
--- a/lib/std/os/linux/arm64.zig
+++ b/lib/std/os/linux/arm64.zig
@@ -407,6 +407,10 @@ pub const SYS = enum(usize) {
faccessat2 = 439,
process_madvise = 440,
epoll_pwait2 = 441,
+ mount_setattr = 442,
+ landlock_create_ruleset = 444,
+ landlock_add_rule = 445,
+ landlock_restrict_self = 446,
_,
};
diff --git a/lib/std/os/linux/i386.zig b/lib/std/os/linux/i386.zig
index 4d1af88e0a..8dc6e3d50e 100644
--- a/lib/std/os/linux/i386.zig
+++ b/lib/std/os/linux/i386.zig
@@ -568,6 +568,11 @@ pub const SYS = enum(usize) {
faccessat2 = 439,
process_madvise = 440,
epoll_pwait2 = 441,
+ mount_setattr = 442,
+ landlock_create_ruleset = 444,
+ landlock_add_rule = 445,
+ landlock_restrict_self = 446,
+ memfd_secret = 447,
_,
};
diff --git a/lib/std/os/linux/mips.zig b/lib/std/os/linux/mips.zig
index b3697405f7..27d999987b 100644
--- a/lib/std/os/linux/mips.zig
+++ b/lib/std/os/linux/mips.zig
@@ -626,6 +626,10 @@ pub const SYS = enum(usize) {
faccessat2 = Linux + 439,
process_madvise = Linux + 440,
epoll_pwait2 = Linux + 441,
+ mount_setattr = Linux + 442,
+ landlock_create_ruleset = Linux + 444,
+ landlock_add_rule = Linux + 445,
+ landlock_restrict_self = Linux + 446,
_,
};
diff --git a/lib/std/os/linux/powerpc.zig b/lib/std/os/linux/powerpc.zig
index 30001c48c6..f58ae4cd2f 100644
--- a/lib/std/os/linux/powerpc.zig
+++ b/lib/std/os/linux/powerpc.zig
@@ -561,6 +561,11 @@ pub const SYS = enum(usize) {
pidfd_getfd = 438,
faccessat2 = 439,
process_madvise = 440,
+ epoll_pwait2 = 441,
+ mount_setattr = 442,
+ landlock_create_ruleset = 444,
+ landlock_add_rule = 445,
+ landlock_restrict_self = 446,
};
pub const O = struct {
diff --git a/lib/std/os/linux/powerpc64.zig b/lib/std/os/linux/powerpc64.zig
index 2903df15ef..094fc1a8f5 100644
--- a/lib/std/os/linux/powerpc64.zig
+++ b/lib/std/os/linux/powerpc64.zig
@@ -534,6 +534,10 @@ pub const SYS = enum(usize) {
faccessat2 = 439,
process_madvise = 440,
epoll_pwait2 = 441,
+ mount_setattr = 442,
+ landlock_create_ruleset = 444,
+ landlock_add_rule = 445,
+ landlock_restrict_self = 446,
_,
};
diff --git a/lib/std/os/linux/riscv64.zig b/lib/std/os/linux/riscv64.zig
index be78a1bf4e..75505442c0 100644
--- a/lib/std/os/linux/riscv64.zig
+++ b/lib/std/os/linux/riscv64.zig
@@ -403,6 +403,10 @@ pub const SYS = enum(usize) {
faccessat2 = 439,
process_madvise = 440,
epoll_pwait2 = 441,
+ mount_setattr = 442,
+ landlock_create_ruleset = 444,
+ landlock_add_rule = 445,
+ landlock_restrict_self = 446,
_,
};
diff --git a/lib/std/os/linux/sparc64.zig b/lib/std/os/linux/sparc64.zig
index b1f96f144c..92aa68d55d 100644
--- a/lib/std/os/linux/sparc64.zig
+++ b/lib/std/os/linux/sparc64.zig
@@ -568,6 +568,10 @@ pub const SYS = enum(usize) {
faccessat2 = 439,
process_madvise = 440,
epoll_pwait2 = 441,
+ mount_setattr = 442,
+ landlock_create_ruleset = 444,
+ landlock_add_rule = 445,
+ landlock_restrict_self = 446,
_,
};
diff --git a/lib/std/os/linux/tls.zig b/lib/std/os/linux/tls.zig
index 37f6abfefc..770ab9b92c 100644
--- a/lib/std/os/linux/tls.zig
+++ b/lib/std/os/linux/tls.zig
@@ -289,8 +289,10 @@ pub fn prepareTLS(area: []u8) usize {
// Copy the data
mem.copy(u8, area[tls_image.data_offset..], tls_image.init_data);
- // Return the corrected (if needed) value for the tp register
- return @ptrToInt(area.ptr) + tls_tp_offset +
+ // Return the corrected value (if needed) for the tp register.
+ // Overflow here is not a problem, the pointer arithmetic involving the tp
+ // is done with wrapping semantics.
+ return @ptrToInt(area.ptr) +% tls_tp_offset +%
if (tls_tp_points_past_tcb) tls_image.data_offset else tls_image.tcb_offset;
}
diff --git a/lib/std/os/linux/x86_64.zig b/lib/std/os/linux/x86_64.zig
index b6a2d2763f..022942685c 100644
--- a/lib/std/os/linux/x86_64.zig
+++ b/lib/std/os/linux/x86_64.zig
@@ -472,6 +472,11 @@ pub const SYS = enum(usize) {
faccessat2 = 439,
process_madvise = 440,
epoll_pwait2 = 441,
+ mount_setattr = 442,
+ landlock_create_ruleset = 444,
+ landlock_add_rule = 445,
+ landlock_restrict_self = 446,
+ memfd_secret = 447,
_,
};
diff --git a/lib/std/os/uefi/tables/boot_services.zig b/lib/std/os/uefi/tables/boot_services.zig
index 31ac352089..d5c3f12816 100644
--- a/lib/std/os/uefi/tables/boot_services.zig
+++ b/lib/std/os/uefi/tables/boot_services.zig
@@ -179,6 +179,7 @@ pub const MemoryType = enum(u32) {
pub const MemoryDescriptor = extern struct {
type: MemoryType,
+ padding: u32,
physical_start: u64,
virtual_start: u64,
number_of_pages: usize,
diff --git a/lib/std/priority_dequeue.zig b/lib/std/priority_dequeue.zig
index 5bde0a36d0..59f3e15ff6 100644
--- a/lib/std/priority_dequeue.zig
+++ b/lib/std/priority_dequeue.zig
@@ -9,30 +9,27 @@ const expectEqual = testing.expectEqual;
const expectError = testing.expectError;
/// Priority Dequeue for storing generic data. Initialize with `init`.
-pub fn PriorityDequeue(comptime T: type) type {
+/// Provide `compareFn` that returns `Order.lt` when its first
+/// argument should get min-popped before its second argument,
+/// `Order.eq` if the arguments are of equal priority, or `Order.gt`
+/// if the second argument should be min-popped first.
+/// Popping the max element works in reverse. For example,
+/// to make `popMin` return the smallest number, provide
+/// `fn lessThan(a: T, b: T) Order { return std.math.order(a, b); }`
+pub fn PriorityDequeue(comptime T: type, comptime compareFn: fn (T, T) Order) type {
return struct {
const Self = @This();
items: []T,
len: usize,
allocator: *Allocator,
- compareFn: fn (a: T, b: T) Order,
-
- /// Initialize and return a new priority dequeue. Provide `compareFn`
- /// that returns `Order.lt` when its first argument should
- /// get min-popped before its second argument, `Order.eq` if the
- /// arguments are of equal priority, or `Order.gt` if the second
- /// argument should be min-popped first. Popping the max element works
- /// in reverse. For example, to make `popMin` return the smallest
- /// number, provide
- ///
- /// `fn lessThan(a: T, b: T) Order { return std.math.order(a, b); }`
- pub fn init(allocator: *Allocator, compareFn: fn (T, T) Order) Self {
+
+ /// Initialize and return a new priority dequeue.
+ pub fn init(allocator: *Allocator) Self {
return Self{
.items = &[_]T{},
.len = 0,
.allocator = allocator,
- .compareFn = compareFn,
};
}
@@ -91,7 +88,7 @@ pub fn PriorityDequeue(comptime T: type) type {
const parent = self.items[parent_index];
const min_layer = self.nextIsMinLayer();
- const order = self.compareFn(child, parent);
+ const order = compareFn(child, parent);
if ((min_layer and order == .gt) or (!min_layer and order == .lt)) {
// We must swap the item with it's parent if it is on the "wrong" layer
self.items[parent_index] = child;
@@ -124,7 +121,7 @@ pub fn PriorityDequeue(comptime T: type) type {
const grandparent = self.items[grandparent_index];
// If the grandparent is already better or equal, we have gone as far as we need to
- if (self.compareFn(child, grandparent) != target_order) break;
+ if (compareFn(child, grandparent) != target_order) break;
// Otherwise swap the item with it's grandparent
self.items[grandparent_index] = child;
@@ -219,10 +216,10 @@ pub fn PriorityDequeue(comptime T: type) type {
// Find the best grandchild
const best_left = self.bestItemAtIndices(first_grandchild_index, index2, target_order);
const best_right = self.bestItemAtIndices(index3, last_grandchild_index, target_order);
- const best_grandchild = self.bestItem(best_left, best_right, target_order);
+ const best_grandchild = Self.bestItem(best_left, best_right, target_order);
// If the item is better than or equal to its best grandchild, we are done
- if (self.compareFn(best_grandchild.item, elem) != target_order) return;
+ if (compareFn(best_grandchild.item, elem) != target_order) return;
// Otherwise, swap them
self.items[best_grandchild.index] = elem;
@@ -239,7 +236,7 @@ pub fn PriorityDequeue(comptime T: type) type {
const best_descendent = self.bestDescendent(first_child_index, first_grandchild_index, target_order);
// If the item is better than or equal to its best descendant, we are done
- if (self.compareFn(best_descendent.item, elem) != target_order) return;
+ if (compareFn(best_descendent.item, elem) != target_order) return;
// Otherwise swap them
self.items[best_descendent.index] = elem;
@@ -263,7 +260,7 @@ pub fn PriorityDequeue(comptime T: type) type {
const parent_index = parentIndex(child_index);
const parent = self.items[parent_index];
- if (self.compareFn(parent, child) == target_order) {
+ if (compareFn(parent, child) == target_order) {
self.items[parent_index] = child;
self.items[child_index] = parent;
}
@@ -281,8 +278,8 @@ pub fn PriorityDequeue(comptime T: type) type {
};
}
- fn bestItem(self: Self, item1: ItemAndIndex, item2: ItemAndIndex, target_order: Order) ItemAndIndex {
- if (self.compareFn(item1.item, item2.item) == target_order) {
+ fn bestItem(item1: ItemAndIndex, item2: ItemAndIndex, target_order: Order) ItemAndIndex {
+ if (compareFn(item1.item, item2.item) == target_order) {
return item1;
} else {
return item2;
@@ -292,7 +289,7 @@ pub fn PriorityDequeue(comptime T: type) type {
fn bestItemAtIndices(self: Self, index1: usize, index2: usize, target_order: Order) ItemAndIndex {
var item1 = self.getItem(index1);
var item2 = self.getItem(index2);
- return self.bestItem(item1, item2, target_order);
+ return Self.bestItem(item1, item2, target_order);
}
fn bestDescendent(self: Self, first_child_index: usize, first_grandchild_index: usize, target_order: Order) ItemAndIndex {
@@ -340,12 +337,11 @@ pub fn PriorityDequeue(comptime T: type) type {
/// Dequeue takes ownership of the passed in slice. The slice must have been
/// allocated with `allocator`.
/// De-initialize with `deinit`.
- pub fn fromOwnedSlice(allocator: *Allocator, compareFn: fn (T, T) Order, items: []T) Self {
+ pub fn fromOwnedSlice(allocator: *Allocator, items: []T) Self {
var queue = Self{
.items = items,
.len = items.len,
.allocator = allocator,
- .compareFn = compareFn,
};
if (queue.len <= 1) return queue;
@@ -400,7 +396,7 @@ pub fn PriorityDequeue(comptime T: type) type {
}
pub const Iterator = struct {
- queue: *PriorityDequeue(T),
+ queue: *PriorityDequeue(T, compareFn),
count: usize,
pub fn next(it: *Iterator) ?T {
@@ -462,10 +458,10 @@ fn lessThanComparison(a: u32, b: u32) Order {
return std.math.order(a, b);
}
-const PDQ = PriorityDequeue(u32);
+const PDQ = PriorityDequeue(u32, lessThanComparison);
test "std.PriorityDequeue: add and remove min" {
- var queue = PDQ.init(testing.allocator, lessThanComparison);
+ var queue = PDQ.init(testing.allocator);
defer queue.deinit();
try queue.add(54);
@@ -487,11 +483,11 @@ test "std.PriorityDequeue: add and remove min structs" {
const S = struct {
size: u32,
};
- var queue = PriorityDequeue(S).init(testing.allocator, struct {
+ var queue = PriorityDequeue(S, struct {
fn order(a: S, b: S) Order {
return std.math.order(a.size, b.size);
}
- }.order);
+ }.order).init(testing.allocator);
defer queue.deinit();
try queue.add(.{ .size = 54 });
@@ -510,7 +506,7 @@ test "std.PriorityDequeue: add and remove min structs" {
}
test "std.PriorityDequeue: add and remove max" {
- var queue = PDQ.init(testing.allocator, lessThanComparison);
+ var queue = PDQ.init(testing.allocator);
defer queue.deinit();
try queue.add(54);
@@ -529,7 +525,7 @@ test "std.PriorityDequeue: add and remove max" {
}
test "std.PriorityDequeue: add and remove same min" {
- var queue = PDQ.init(testing.allocator, lessThanComparison);
+ var queue = PDQ.init(testing.allocator);
defer queue.deinit();
try queue.add(1);
@@ -548,7 +544,7 @@ test "std.PriorityDequeue: add and remove same min" {
}
test "std.PriorityDequeue: add and remove same max" {
- var queue = PDQ.init(testing.allocator, lessThanComparison);
+ var queue = PDQ.init(testing.allocator);
defer queue.deinit();
try queue.add(1);
@@ -567,7 +563,7 @@ test "std.PriorityDequeue: add and remove same max" {
}
test "std.PriorityDequeue: removeOrNull empty" {
- var queue = PDQ.init(testing.allocator, lessThanComparison);
+ var queue = PDQ.init(testing.allocator);
defer queue.deinit();
try expect(queue.removeMinOrNull() == null);
@@ -575,7 +571,7 @@ test "std.PriorityDequeue: removeOrNull empty" {
}
test "std.PriorityDequeue: edge case 3 elements" {
- var queue = PDQ.init(testing.allocator, lessThanComparison);
+ var queue = PDQ.init(testing.allocator);
defer queue.deinit();
try queue.add(9);
@@ -588,7 +584,7 @@ test "std.PriorityDequeue: edge case 3 elements" {
}
test "std.PriorityDequeue: edge case 3 elements max" {
- var queue = PDQ.init(testing.allocator, lessThanComparison);
+ var queue = PDQ.init(testing.allocator);
defer queue.deinit();
try queue.add(9);
@@ -601,7 +597,7 @@ test "std.PriorityDequeue: edge case 3 elements max" {
}
test "std.PriorityDequeue: peekMin" {
- var queue = PDQ.init(testing.allocator, lessThanComparison);
+ var queue = PDQ.init(testing.allocator);
defer queue.deinit();
try expect(queue.peekMin() == null);
@@ -615,7 +611,7 @@ test "std.PriorityDequeue: peekMin" {
}
test "std.PriorityDequeue: peekMax" {
- var queue = PDQ.init(testing.allocator, lessThanComparison);
+ var queue = PDQ.init(testing.allocator);
defer queue.deinit();
try expect(queue.peekMin() == null);
@@ -629,7 +625,7 @@ test "std.PriorityDequeue: peekMax" {
}
test "std.PriorityDequeue: sift up with odd indices" {
- var queue = PDQ.init(testing.allocator, lessThanComparison);
+ var queue = PDQ.init(testing.allocator);
defer queue.deinit();
const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 };
for (items) |e| {
@@ -643,7 +639,7 @@ test "std.PriorityDequeue: sift up with odd indices" {
}
test "std.PriorityDequeue: sift up with odd indices" {
- var queue = PDQ.init(testing.allocator, lessThanComparison);
+ var queue = PDQ.init(testing.allocator);
defer queue.deinit();
const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 };
for (items) |e| {
@@ -657,7 +653,7 @@ test "std.PriorityDequeue: sift up with odd indices" {
}
test "std.PriorityDequeue: addSlice min" {
- var queue = PDQ.init(testing.allocator, lessThanComparison);
+ var queue = PDQ.init(testing.allocator);
defer queue.deinit();
const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 };
try queue.addSlice(items[0..]);
@@ -669,7 +665,7 @@ test "std.PriorityDequeue: addSlice min" {
}
test "std.PriorityDequeue: addSlice max" {
- var queue = PDQ.init(testing.allocator, lessThanComparison);
+ var queue = PDQ.init(testing.allocator);
defer queue.deinit();
const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 };
try queue.addSlice(items[0..]);
@@ -683,7 +679,7 @@ test "std.PriorityDequeue: addSlice max" {
test "std.PriorityDequeue: fromOwnedSlice trivial case 0" {
const items = [0]u32{};
const queue_items = try testing.allocator.dupe(u32, &items);
- var queue = PDQ.fromOwnedSlice(testing.allocator, lessThanComparison, queue_items[0..]);
+ var queue = PDQ.fromOwnedSlice(testing.allocator, queue_items[0..]);
defer queue.deinit();
try expectEqual(@as(usize, 0), queue.len);
try expect(queue.removeMinOrNull() == null);
@@ -692,7 +688,7 @@ test "std.PriorityDequeue: fromOwnedSlice trivial case 0" {
test "std.PriorityDequeue: fromOwnedSlice trivial case 1" {
const items = [1]u32{1};
const queue_items = try testing.allocator.dupe(u32, &items);
- var queue = PDQ.fromOwnedSlice(testing.allocator, lessThanComparison, queue_items[0..]);
+ var queue = PDQ.fromOwnedSlice(testing.allocator, queue_items[0..]);
defer queue.deinit();
try expectEqual(@as(usize, 1), queue.len);
@@ -703,7 +699,7 @@ test "std.PriorityDequeue: fromOwnedSlice trivial case 1" {
test "std.PriorityDequeue: fromOwnedSlice" {
const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 };
const queue_items = try testing.allocator.dupe(u32, items[0..]);
- var queue = PDQ.fromOwnedSlice(testing.allocator, lessThanComparison, queue_items[0..]);
+ var queue = PDQ.fromOwnedSlice(testing.allocator, queue_items[0..]);
defer queue.deinit();
const sorted_items = [_]u32{ 1, 2, 5, 6, 7, 7, 11, 12, 13, 14, 15, 15, 16, 21, 22, 24, 24, 25 };
@@ -713,7 +709,7 @@ test "std.PriorityDequeue: fromOwnedSlice" {
}
test "std.PriorityDequeue: update min queue" {
- var queue = PDQ.init(testing.allocator, lessThanComparison);
+ var queue = PDQ.init(testing.allocator);
defer queue.deinit();
try queue.add(55);
@@ -728,7 +724,7 @@ test "std.PriorityDequeue: update min queue" {
}
test "std.PriorityDequeue: update same min queue" {
- var queue = PDQ.init(testing.allocator, lessThanComparison);
+ var queue = PDQ.init(testing.allocator);
defer queue.deinit();
try queue.add(1);
@@ -744,7 +740,7 @@ test "std.PriorityDequeue: update same min queue" {
}
test "std.PriorityDequeue: update max queue" {
- var queue = PDQ.init(testing.allocator, lessThanComparison);
+ var queue = PDQ.init(testing.allocator);
defer queue.deinit();
try queue.add(55);
@@ -760,7 +756,7 @@ test "std.PriorityDequeue: update max queue" {
}
test "std.PriorityDequeue: update same max queue" {
- var queue = PDQ.init(testing.allocator, lessThanComparison);
+ var queue = PDQ.init(testing.allocator);
defer queue.deinit();
try queue.add(1);
@@ -776,7 +772,7 @@ test "std.PriorityDequeue: update same max queue" {
}
test "std.PriorityDequeue: iterator" {
- var queue = PDQ.init(testing.allocator, lessThanComparison);
+ var queue = PDQ.init(testing.allocator);
var map = std.AutoHashMap(u32, void).init(testing.allocator);
defer {
queue.deinit();
@@ -798,7 +794,7 @@ test "std.PriorityDequeue: iterator" {
}
test "std.PriorityDequeue: remove at index" {
- var queue = PDQ.init(testing.allocator, lessThanComparison);
+ var queue = PDQ.init(testing.allocator);
defer queue.deinit();
try queue.add(3);
@@ -821,7 +817,7 @@ test "std.PriorityDequeue: remove at index" {
}
test "std.PriorityDequeue: iterator while empty" {
- var queue = PDQ.init(testing.allocator, lessThanComparison);
+ var queue = PDQ.init(testing.allocator);
defer queue.deinit();
var it = queue.iterator();
@@ -830,7 +826,7 @@ test "std.PriorityDequeue: iterator while empty" {
}
test "std.PriorityDequeue: shrinkAndFree" {
- var queue = PDQ.init(testing.allocator, lessThanComparison);
+ var queue = PDQ.init(testing.allocator);
defer queue.deinit();
try queue.ensureTotalCapacity(4);
@@ -868,7 +864,7 @@ fn fuzzTestMin(rng: *std.rand.Random, comptime queue_size: usize) !void {
const allocator = testing.allocator;
const items = try generateRandomSlice(allocator, rng, queue_size);
- var queue = PDQ.fromOwnedSlice(allocator, lessThanComparison, items);
+ var queue = PDQ.fromOwnedSlice(allocator, items);
defer queue.deinit();
var last_removed: ?u32 = null;
@@ -896,7 +892,7 @@ fn fuzzTestMax(rng: *std.rand.Random, queue_size: usize) !void {
const allocator = testing.allocator;
const items = try generateRandomSlice(allocator, rng, queue_size);
- var queue = PDQ.fromOwnedSlice(testing.allocator, lessThanComparison, items);
+ var queue = PDQ.fromOwnedSlice(testing.allocator, items);
defer queue.deinit();
var last_removed: ?u32 = null;
@@ -924,7 +920,7 @@ fn fuzzTestMinMax(rng: *std.rand.Random, queue_size: usize) !void {
const allocator = testing.allocator;
const items = try generateRandomSlice(allocator, rng, queue_size);
- var queue = PDQ.fromOwnedSlice(allocator, lessThanComparison, items);
+ var queue = PDQ.fromOwnedSlice(allocator, items);
defer queue.deinit();
var last_min: ?u32 = null;
diff --git a/lib/std/priority_queue.zig b/lib/std/priority_queue.zig
index fcdd81b1dd..96113c2828 100644
--- a/lib/std/priority_queue.zig
+++ b/lib/std/priority_queue.zig
@@ -9,29 +9,26 @@ const expectEqual = testing.expectEqual;
const expectError = testing.expectError;
/// Priority queue for storing generic data. Initialize with `init`.
-pub fn PriorityQueue(comptime T: type) type {
+/// Provide `compareFn` that returns `Order.lt` when its first
+/// argument should get popped before its second argument,
+/// `Order.eq` if the arguments are of equal priority, or `Order.gt`
+/// if the second argument should be popped first.
+/// For example, to make `pop` return the smallest number, provide
+/// `fn lessThan(a: T, b: T) Order { return std.math.order(a, b); }`
+pub fn PriorityQueue(comptime T: type, comptime compareFn: fn (a: T, b: T) Order) type {
return struct {
const Self = @This();
items: []T,
len: usize,
allocator: *Allocator,
- compareFn: fn (a: T, b: T) Order,
-
- /// Initialize and return a priority queue. Provide `compareFn`
- /// that returns `Order.lt` when its first argument should
- /// get popped before its second argument, `Order.eq` if the
- /// arguments are of equal priority, or `Order.gt` if the second
- /// argument should be popped first. For example, to make `pop`
- /// return the smallest number, provide
- ///
- /// `fn lessThan(a: T, b: T) Order { return std.math.order(a, b); }`
- pub fn init(allocator: *Allocator, compareFn: fn (a: T, b: T) Order) Self {
+
+ /// Initialize and return a priority queue.
+ pub fn init(allocator: *Allocator) Self {
return Self{
.items = &[_]T{},
.len = 0,
.allocator = allocator,
- .compareFn = compareFn,
};
}
@@ -59,7 +56,7 @@ pub fn PriorityQueue(comptime T: type) type {
const child = self.items[child_index];
const parent = self.items[parent_index];
- if (self.compareFn(child, parent) != .lt) break;
+ if (compareFn(child, parent) != .lt) break;
self.items[parent_index] = child;
self.items[child_index] = parent;
@@ -131,14 +128,14 @@ pub fn PriorityQueue(comptime T: type) type {
var smallest = self.items[index];
if (left) |e| {
- if (self.compareFn(e, smallest) == .lt) {
+ if (compareFn(e, smallest) == .lt) {
smallest_index = left_index;
smallest = e;
}
}
if (right) |e| {
- if (self.compareFn(e, smallest) == .lt) {
+ if (compareFn(e, smallest) == .lt) {
smallest_index = right_index;
smallest = e;
}
@@ -157,12 +154,11 @@ pub fn PriorityQueue(comptime T: type) type {
/// PriorityQueue takes ownership of the passed in slice. The slice must have been
/// allocated with `allocator`.
/// Deinitialize with `deinit`.
- pub fn fromOwnedSlice(allocator: *Allocator, compareFn: fn (a: T, b: T) Order, items: []T) Self {
+ pub fn fromOwnedSlice(allocator: *Allocator, items: []T) Self {
var queue = Self{
.items = items,
.len = items.len,
.allocator = allocator,
- .compareFn = compareFn,
};
if (queue.len <= 1) return queue;
@@ -213,7 +209,7 @@ pub fn PriorityQueue(comptime T: type) type {
var update_index: usize = std.mem.indexOfScalar(T, self.items[0..self.len], elem) orelse return error.ElementNotFound;
const old_elem: T = self.items[update_index];
self.items[update_index] = new_elem;
- switch (self.compareFn(new_elem, old_elem)) {
+ switch (compareFn(new_elem, old_elem)) {
.lt => siftUp(self, update_index),
.gt => siftDown(self, update_index),
.eq => {}, // Nothing to do as the items have equal priority
@@ -221,7 +217,7 @@ pub fn PriorityQueue(comptime T: type) type {
}
pub const Iterator = struct {
- queue: *PriorityQueue(T),
+ queue: *PriorityQueue(T, compareFn),
count: usize,
pub fn next(it: *Iterator) ?T {
@@ -271,10 +267,11 @@ fn greaterThan(a: u32, b: u32) Order {
return lessThan(a, b).invert();
}
-const PQ = PriorityQueue(u32);
+const PQlt = PriorityQueue(u32, lessThan);
+const PQgt = PriorityQueue(u32, greaterThan);
test "std.PriorityQueue: add and remove min heap" {
- var queue = PQ.init(testing.allocator, lessThan);
+ var queue = PQlt.init(testing.allocator);
defer queue.deinit();
try queue.add(54);
@@ -292,7 +289,7 @@ test "std.PriorityQueue: add and remove min heap" {
}
test "std.PriorityQueue: add and remove same min heap" {
- var queue = PQ.init(testing.allocator, lessThan);
+ var queue = PQlt.init(testing.allocator);
defer queue.deinit();
try queue.add(1);
@@ -310,14 +307,14 @@ test "std.PriorityQueue: add and remove same min heap" {
}
test "std.PriorityQueue: removeOrNull on empty" {
- var queue = PQ.init(testing.allocator, lessThan);
+ var queue = PQlt.init(testing.allocator);
defer queue.deinit();
try expect(queue.removeOrNull() == null);
}
test "std.PriorityQueue: edge case 3 elements" {
- var queue = PQ.init(testing.allocator, lessThan);
+ var queue = PQlt.init(testing.allocator);
defer queue.deinit();
try queue.add(9);
@@ -329,7 +326,7 @@ test "std.PriorityQueue: edge case 3 elements" {
}
test "std.PriorityQueue: peek" {
- var queue = PQ.init(testing.allocator, lessThan);
+ var queue = PQlt.init(testing.allocator);
defer queue.deinit();
try expect(queue.peek() == null);
@@ -341,7 +338,7 @@ test "std.PriorityQueue: peek" {
}
test "std.PriorityQueue: sift up with odd indices" {
- var queue = PQ.init(testing.allocator, lessThan);
+ var queue = PQlt.init(testing.allocator);
defer queue.deinit();
const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 };
for (items) |e| {
@@ -355,7 +352,7 @@ test "std.PriorityQueue: sift up with odd indices" {
}
test "std.PriorityQueue: addSlice" {
- var queue = PQ.init(testing.allocator, lessThan);
+ var queue = PQlt.init(testing.allocator);
defer queue.deinit();
const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 };
try queue.addSlice(items[0..]);
@@ -369,7 +366,7 @@ test "std.PriorityQueue: addSlice" {
test "std.PriorityQueue: fromOwnedSlice trivial case 0" {
const items = [0]u32{};
const queue_items = try testing.allocator.dupe(u32, &items);
- var queue = PQ.fromOwnedSlice(testing.allocator, lessThan, queue_items[0..]);
+ var queue = PQlt.fromOwnedSlice(testing.allocator, queue_items[0..]);
defer queue.deinit();
try expectEqual(@as(usize, 0), queue.len);
try expect(queue.removeOrNull() == null);
@@ -378,7 +375,7 @@ test "std.PriorityQueue: fromOwnedSlice trivial case 0" {
test "std.PriorityQueue: fromOwnedSlice trivial case 1" {
const items = [1]u32{1};
const queue_items = try testing.allocator.dupe(u32, &items);
- var queue = PQ.fromOwnedSlice(testing.allocator, lessThan, queue_items[0..]);
+ var queue = PQlt.fromOwnedSlice(testing.allocator, queue_items[0..]);
defer queue.deinit();
try expectEqual(@as(usize, 1), queue.len);
@@ -389,7 +386,7 @@ test "std.PriorityQueue: fromOwnedSlice trivial case 1" {
test "std.PriorityQueue: fromOwnedSlice" {
const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 };
const heap_items = try testing.allocator.dupe(u32, items[0..]);
- var queue = PQ.fromOwnedSlice(testing.allocator, lessThan, heap_items[0..]);
+ var queue = PQlt.fromOwnedSlice(testing.allocator, heap_items[0..]);
defer queue.deinit();
const sorted_items = [_]u32{ 1, 2, 5, 6, 7, 7, 11, 12, 13, 14, 15, 15, 16, 21, 22, 24, 24, 25 };
@@ -399,7 +396,7 @@ test "std.PriorityQueue: fromOwnedSlice" {
}
test "std.PriorityQueue: add and remove max heap" {
- var queue = PQ.init(testing.allocator, greaterThan);
+ var queue = PQgt.init(testing.allocator);
defer queue.deinit();
try queue.add(54);
@@ -417,7 +414,7 @@ test "std.PriorityQueue: add and remove max heap" {
}
test "std.PriorityQueue: add and remove same max heap" {
- var queue = PQ.init(testing.allocator, greaterThan);
+ var queue = PQgt.init(testing.allocator);
defer queue.deinit();
try queue.add(1);
@@ -435,7 +432,7 @@ test "std.PriorityQueue: add and remove same max heap" {
}
test "std.PriorityQueue: iterator" {
- var queue = PQ.init(testing.allocator, lessThan);
+ var queue = PQlt.init(testing.allocator);
var map = std.AutoHashMap(u32, void).init(testing.allocator);
defer {
queue.deinit();
@@ -457,7 +454,7 @@ test "std.PriorityQueue: iterator" {
}
test "std.PriorityQueue: remove at index" {
- var queue = PQ.init(testing.allocator, lessThan);
+ var queue = PQlt.init(testing.allocator);
defer queue.deinit();
try queue.add(3);
@@ -480,7 +477,7 @@ test "std.PriorityQueue: remove at index" {
}
test "std.PriorityQueue: iterator while empty" {
- var queue = PQ.init(testing.allocator, lessThan);
+ var queue = PQlt.init(testing.allocator);
defer queue.deinit();
var it = queue.iterator();
@@ -489,7 +486,7 @@ test "std.PriorityQueue: iterator while empty" {
}
test "std.PriorityQueue: shrinkAndFree" {
- var queue = PQ.init(testing.allocator, lessThan);
+ var queue = PQlt.init(testing.allocator);
defer queue.deinit();
try queue.ensureTotalCapacity(4);
@@ -512,7 +509,7 @@ test "std.PriorityQueue: shrinkAndFree" {
}
test "std.PriorityQueue: update min heap" {
- var queue = PQ.init(testing.allocator, lessThan);
+ var queue = PQlt.init(testing.allocator);
defer queue.deinit();
try queue.add(55);
@@ -527,7 +524,7 @@ test "std.PriorityQueue: update min heap" {
}
test "std.PriorityQueue: update same min heap" {
- var queue = PQ.init(testing.allocator, lessThan);
+ var queue = PQlt.init(testing.allocator);
defer queue.deinit();
try queue.add(1);
@@ -543,7 +540,7 @@ test "std.PriorityQueue: update same min heap" {
}
test "std.PriorityQueue: update max heap" {
- var queue = PQ.init(testing.allocator, greaterThan);
+ var queue = PQgt.init(testing.allocator);
defer queue.deinit();
try queue.add(55);
@@ -558,7 +555,7 @@ test "std.PriorityQueue: update max heap" {
}
test "std.PriorityQueue: update same max heap" {
- var queue = PQ.init(testing.allocator, greaterThan);
+ var queue = PQgt.init(testing.allocator);
defer queue.deinit();
try queue.add(1);
diff --git a/lib/std/rand.zig b/lib/std/rand.zig
index 77fab7094d..bafc324b91 100644
--- a/lib/std/rand.zig
+++ b/lib/std/rand.zig
@@ -53,7 +53,7 @@ pub const Random = struct {
return values[index];
}
- /// Returns a random int `i` such that `0 <= i <= maxInt(T)`.
+ /// Returns a random int `i` such that `minInt(T) <= i <= maxInt(T)`.
/// `i` is evenly distributed.
pub fn int(r: *Random, comptime T: type) T {
const bits = @typeInfo(T).Int.bits;
diff --git a/lib/std/special/c_stage1.zig b/lib/std/special/c_stage1.zig
index cd2d833c2d..1bb28e79df 100644
--- a/lib/std/special/c_stage1.zig
+++ b/lib/std/special/c_stage1.zig
@@ -644,32 +644,41 @@ export fn fmod(x: f64, y: f64) f64 {
export fn floorf(x: f32) f32 {
return math.floor(x);
}
-
-export fn ceilf(x: f32) f32 {
- return math.ceil(x);
-}
-
export fn floor(x: f64) f64 {
return math.floor(x);
}
+export fn floorl(x: c_longdouble) c_longdouble {
+ if (!long_double_is_f128) {
+ @panic("TODO implement this");
+ }
+ return math.floor(x);
+}
+export fn ceilf(x: f32) f32 {
+ return math.ceil(x);
+}
export fn ceil(x: f64) f64 {
return math.ceil(x);
}
-
-export fn fmal(a: c_longdouble, b: c_longdouble, c: c_longdouble) c_longdouble {
+export fn ceill(x: c_longdouble) c_longdouble {
if (!long_double_is_f128) {
@panic("TODO implement this");
}
- return math.fma(c_longdouble, a, b, c);
+ return math.ceil(x);
+}
+
+export fn fmaf(a: f32, b: f32, c: f32) f32 {
+ return math.fma(f32, a, b, c);
}
export fn fma(a: f64, b: f64, c: f64) f64 {
return math.fma(f64, a, b, c);
}
-
-export fn fmaf(a: f32, b: f32, c: f32) f32 {
- return math.fma(f32, a, b, c);
+export fn fmal(a: c_longdouble, b: c_longdouble, c: c_longdouble) c_longdouble {
+ if (!long_double_is_f128) {
+ @panic("TODO implement this");
+ }
+ return math.fma(c_longdouble, a, b, c);
}
export fn sin(a: f64) f64 {
@@ -754,6 +763,13 @@ export fn truncf(a: f32) f32 {
return math.trunc(a);
}
+export fn truncl(a: c_longdouble) c_longdouble {
+ if (!long_double_is_f128) {
+ @panic("TODO implement this");
+ }
+ return math.trunc(a);
+}
+
export fn round(a: f64) f64 {
return math.round(a);
}
diff --git a/lib/std/special/compiler_rt.zig b/lib/std/special/compiler_rt.zig
index 3582b93070..4a2250bac4 100644
--- a/lib/std/special/compiler_rt.zig
+++ b/lib/std/special/compiler_rt.zig
@@ -74,6 +74,86 @@ comptime {
@export(__getf2, .{ .name = "__gttf2", .linkage = linkage });
@export(__extendhfsf2, .{ .name = "__gnu_h2f_ieee", .linkage = linkage });
+
+ const __muloti4 = @import("compiler_rt/muloti4.zig").__muloti4;
+ @export(__muloti4, .{ .name = "__muloti4", .linkage = linkage });
+ const __mulodi4 = @import("compiler_rt/mulodi4.zig").__mulodi4;
+ @export(__mulodi4, .{ .name = "__mulodi4", .linkage = linkage });
+ }
+
+ if (builtin.os.tag == .windows) {
+ // Default stack-probe functions emitted by LLVM
+ if (is_mingw) {
+ const _chkstk = @import("compiler_rt/stack_probe.zig")._chkstk;
+ @export(_chkstk, .{ .name = "_alloca", .linkage = strong_linkage });
+ const ___chkstk_ms = @import("compiler_rt/stack_probe.zig").___chkstk_ms;
+ @export(___chkstk_ms, .{ .name = "___chkstk_ms", .linkage = strong_linkage });
+ } else if (!builtin.link_libc) {
+ // This symbols are otherwise exported by MSVCRT.lib
+ const _chkstk = @import("compiler_rt/stack_probe.zig")._chkstk;
+ @export(_chkstk, .{ .name = "_chkstk", .linkage = strong_linkage });
+ const __chkstk = @import("compiler_rt/stack_probe.zig").__chkstk;
+ @export(__chkstk, .{ .name = "__chkstk", .linkage = strong_linkage });
+ }
+
+ switch (arch) {
+ .i386 => {
+ const __divti3 = @import("compiler_rt/divti3.zig").__divti3;
+ @export(__divti3, .{ .name = "__divti3", .linkage = linkage });
+ const __modti3 = @import("compiler_rt/modti3.zig").__modti3;
+ @export(__modti3, .{ .name = "__modti3", .linkage = linkage });
+ const __multi3 = @import("compiler_rt/multi3.zig").__multi3;
+ @export(__multi3, .{ .name = "__multi3", .linkage = linkage });
+ const __udivti3 = @import("compiler_rt/udivti3.zig").__udivti3;
+ @export(__udivti3, .{ .name = "__udivti3", .linkage = linkage });
+ const __udivmodti4 = @import("compiler_rt/udivmodti4.zig").__udivmodti4;
+ @export(__udivmodti4, .{ .name = "__udivmodti4", .linkage = linkage });
+ const __umodti3 = @import("compiler_rt/umodti3.zig").__umodti3;
+ @export(__umodti3, .{ .name = "__umodti3", .linkage = linkage });
+ },
+ .x86_64 => {
+ // The "ti" functions must use Vector(2, u64) parameter types to adhere to the ABI
+ // that LLVM expects compiler-rt to have.
+ const __divti3_windows_x86_64 = @import("compiler_rt/divti3.zig").__divti3_windows_x86_64;
+ @export(__divti3_windows_x86_64, .{ .name = "__divti3", .linkage = linkage });
+ const __modti3_windows_x86_64 = @import("compiler_rt/modti3.zig").__modti3_windows_x86_64;
+ @export(__modti3_windows_x86_64, .{ .name = "__modti3", .linkage = linkage });
+ const __multi3_windows_x86_64 = @import("compiler_rt/multi3.zig").__multi3_windows_x86_64;
+ @export(__multi3_windows_x86_64, .{ .name = "__multi3", .linkage = linkage });
+ const __udivti3_windows_x86_64 = @import("compiler_rt/udivti3.zig").__udivti3_windows_x86_64;
+ @export(__udivti3_windows_x86_64, .{ .name = "__udivti3", .linkage = linkage });
+ const __udivmodti4_windows_x86_64 = @import("compiler_rt/udivmodti4.zig").__udivmodti4_windows_x86_64;
+ @export(__udivmodti4_windows_x86_64, .{ .name = "__udivmodti4", .linkage = linkage });
+ const __umodti3_windows_x86_64 = @import("compiler_rt/umodti3.zig").__umodti3_windows_x86_64;
+ @export(__umodti3_windows_x86_64, .{ .name = "__umodti3", .linkage = linkage });
+ },
+ else => {},
+ }
+ if (arch.isAARCH64()) {
+ const __chkstk = @import("compiler_rt/stack_probe.zig").__chkstk;
+ @export(__chkstk, .{ .name = "__chkstk", .linkage = strong_linkage });
+ const __divti3_windows = @import("compiler_rt/divti3.zig").__divti3;
+ @export(__divti3_windows, .{ .name = "__divti3", .linkage = linkage });
+ const __modti3 = @import("compiler_rt/modti3.zig").__modti3;
+ @export(__modti3, .{ .name = "__modti3", .linkage = linkage });
+ const __udivti3_windows = @import("compiler_rt/udivti3.zig").__udivti3;
+ @export(__udivti3_windows, .{ .name = "__udivti3", .linkage = linkage });
+ const __umodti3 = @import("compiler_rt/umodti3.zig").__umodti3;
+ @export(__umodti3, .{ .name = "__umodti3", .linkage = linkage });
+ }
+ } else {
+ const __divti3 = @import("compiler_rt/divti3.zig").__divti3;
+ @export(__divti3, .{ .name = "__divti3", .linkage = linkage });
+ const __modti3 = @import("compiler_rt/modti3.zig").__modti3;
+ @export(__modti3, .{ .name = "__modti3", .linkage = linkage });
+ const __multi3 = @import("compiler_rt/multi3.zig").__multi3;
+ @export(__multi3, .{ .name = "__multi3", .linkage = linkage });
+ const __udivti3 = @import("compiler_rt/udivti3.zig").__udivti3;
+ @export(__udivti3, .{ .name = "__udivti3", .linkage = linkage });
+ const __udivmodti4 = @import("compiler_rt/udivmodti4.zig").__udivmodti4;
+ @export(__udivmodti4, .{ .name = "__udivmodti4", .linkage = linkage });
+ const __umodti3 = @import("compiler_rt/umodti3.zig").__umodti3;
+ @export(__umodti3, .{ .name = "__umodti3", .linkage = linkage });
}
if (!builtin.zig_is_stage2) {
@@ -547,85 +627,6 @@ comptime {
@export(__unordtf2, .{ .name = "__unordkf2", .linkage = linkage });
}
- if (builtin.os.tag == .windows) {
- // Default stack-probe functions emitted by LLVM
- if (is_mingw) {
- const _chkstk = @import("compiler_rt/stack_probe.zig")._chkstk;
- @export(_chkstk, .{ .name = "_alloca", .linkage = strong_linkage });
- const ___chkstk_ms = @import("compiler_rt/stack_probe.zig").___chkstk_ms;
- @export(___chkstk_ms, .{ .name = "___chkstk_ms", .linkage = strong_linkage });
- } else if (!builtin.link_libc) {
- // This symbols are otherwise exported by MSVCRT.lib
- const _chkstk = @import("compiler_rt/stack_probe.zig")._chkstk;
- @export(_chkstk, .{ .name = "_chkstk", .linkage = strong_linkage });
- const __chkstk = @import("compiler_rt/stack_probe.zig").__chkstk;
- @export(__chkstk, .{ .name = "__chkstk", .linkage = strong_linkage });
- }
-
- switch (arch) {
- .i386 => {
- const __divti3 = @import("compiler_rt/divti3.zig").__divti3;
- @export(__divti3, .{ .name = "__divti3", .linkage = linkage });
- const __modti3 = @import("compiler_rt/modti3.zig").__modti3;
- @export(__modti3, .{ .name = "__modti3", .linkage = linkage });
- const __multi3 = @import("compiler_rt/multi3.zig").__multi3;
- @export(__multi3, .{ .name = "__multi3", .linkage = linkage });
- const __udivti3 = @import("compiler_rt/udivti3.zig").__udivti3;
- @export(__udivti3, .{ .name = "__udivti3", .linkage = linkage });
- const __udivmodti4 = @import("compiler_rt/udivmodti4.zig").__udivmodti4;
- @export(__udivmodti4, .{ .name = "__udivmodti4", .linkage = linkage });
- const __umodti3 = @import("compiler_rt/umodti3.zig").__umodti3;
- @export(__umodti3, .{ .name = "__umodti3", .linkage = linkage });
- },
- .x86_64 => {
- // The "ti" functions must use Vector(2, u64) parameter types to adhere to the ABI
- // that LLVM expects compiler-rt to have.
- const __divti3_windows_x86_64 = @import("compiler_rt/divti3.zig").__divti3_windows_x86_64;
- @export(__divti3_windows_x86_64, .{ .name = "__divti3", .linkage = linkage });
- const __modti3_windows_x86_64 = @import("compiler_rt/modti3.zig").__modti3_windows_x86_64;
- @export(__modti3_windows_x86_64, .{ .name = "__modti3", .linkage = linkage });
- const __multi3_windows_x86_64 = @import("compiler_rt/multi3.zig").__multi3_windows_x86_64;
- @export(__multi3_windows_x86_64, .{ .name = "__multi3", .linkage = linkage });
- const __udivti3_windows_x86_64 = @import("compiler_rt/udivti3.zig").__udivti3_windows_x86_64;
- @export(__udivti3_windows_x86_64, .{ .name = "__udivti3", .linkage = linkage });
- const __udivmodti4_windows_x86_64 = @import("compiler_rt/udivmodti4.zig").__udivmodti4_windows_x86_64;
- @export(__udivmodti4_windows_x86_64, .{ .name = "__udivmodti4", .linkage = linkage });
- const __umodti3_windows_x86_64 = @import("compiler_rt/umodti3.zig").__umodti3_windows_x86_64;
- @export(__umodti3_windows_x86_64, .{ .name = "__umodti3", .linkage = linkage });
- },
- else => {},
- }
- if (arch.isAARCH64()) {
- const __chkstk = @import("compiler_rt/stack_probe.zig").__chkstk;
- @export(__chkstk, .{ .name = "__chkstk", .linkage = strong_linkage });
- const __divti3_windows = @import("compiler_rt/divti3.zig").__divti3;
- @export(__divti3_windows, .{ .name = "__divti3", .linkage = linkage });
- const __modti3 = @import("compiler_rt/modti3.zig").__modti3;
- @export(__modti3, .{ .name = "__modti3", .linkage = linkage });
- const __udivti3_windows = @import("compiler_rt/udivti3.zig").__udivti3;
- @export(__udivti3_windows, .{ .name = "__udivti3", .linkage = linkage });
- const __umodti3 = @import("compiler_rt/umodti3.zig").__umodti3;
- @export(__umodti3, .{ .name = "__umodti3", .linkage = linkage });
- }
- } else {
- const __divti3 = @import("compiler_rt/divti3.zig").__divti3;
- @export(__divti3, .{ .name = "__divti3", .linkage = linkage });
- const __modti3 = @import("compiler_rt/modti3.zig").__modti3;
- @export(__modti3, .{ .name = "__modti3", .linkage = linkage });
- const __multi3 = @import("compiler_rt/multi3.zig").__multi3;
- @export(__multi3, .{ .name = "__multi3", .linkage = linkage });
- const __udivti3 = @import("compiler_rt/udivti3.zig").__udivti3;
- @export(__udivti3, .{ .name = "__udivti3", .linkage = linkage });
- const __udivmodti4 = @import("compiler_rt/udivmodti4.zig").__udivmodti4;
- @export(__udivmodti4, .{ .name = "__udivmodti4", .linkage = linkage });
- const __umodti3 = @import("compiler_rt/umodti3.zig").__umodti3;
- @export(__umodti3, .{ .name = "__umodti3", .linkage = linkage });
- }
- const __muloti4 = @import("compiler_rt/muloti4.zig").__muloti4;
- @export(__muloti4, .{ .name = "__muloti4", .linkage = linkage });
- const __mulodi4 = @import("compiler_rt/mulodi4.zig").__mulodi4;
- @export(__mulodi4, .{ .name = "__mulodi4", .linkage = linkage });
-
_ = @import("compiler_rt/atomics.zig");
@export(fmaq, .{ .name = "fmaq", .linkage = linkage });
diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig
index f69f0598dd..e9c7cc39e0 100644
--- a/lib/std/zig/parser_test.zig
+++ b/lib/std/zig/parser_test.zig
@@ -4398,7 +4398,7 @@ test "zig fmt: regression test for #5722" {
\\ while (it.next()) |node|
\\ view_tags.append(node.view.current_tags) catch {
\\ c.wl_resource_post_no_memory(self.wl_resource);
- \\ log.crit(.river_status, "out of memory", .{});
+ \\ log.err(.river_status, "out of memory", .{});
\\ return;
\\ };
\\}
diff --git a/src/Air.zig b/src/Air.zig
index 86e16487bb..d39a78f1ad 100644
--- a/src/Air.zig
+++ b/src/Air.zig
@@ -80,11 +80,27 @@ pub const Inst = struct {
/// is the same as both operands.
/// Uses the `bin_op` field.
mul_sat,
- /// Integer or float division. For integers, wrapping is undefined behavior.
+ /// Float division.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
/// Uses the `bin_op` field.
- div,
+ div_float,
+ /// Truncating integer or float division. For integers, wrapping is undefined behavior.
+ /// Both operands are guaranteed to be the same type, and the result type
+ /// is the same as both operands.
+ /// Uses the `bin_op` field.
+ div_trunc,
+ /// Flooring integer or float division. For integers, wrapping is undefined behavior.
+ /// Both operands are guaranteed to be the same type, and the result type
+ /// is the same as both operands.
+ /// Uses the `bin_op` field.
+ div_floor,
+ /// Integer or float division. Guaranteed no remainder.
+ /// For integers, wrapping is undefined behavior.
+ /// Both operands are guaranteed to be the same type, and the result type
+ /// is the same as both operands.
+ /// Uses the `bin_op` field.
+ div_exact,
/// Integer or float remainder division.
/// Both operands are guaranteed to be the same type, and the result type
/// is the same as both operands.
@@ -360,6 +376,9 @@ pub const Inst = struct {
/// Given a tagged union value, get its tag value.
/// Uses the `ty_op` field.
get_union_tag,
+ /// Constructs a slice from a pointer and a length.
+ /// Uses the `ty_pl` field, payload is `Bin`. lhs is ptr, rhs is len.
+ slice,
/// Given a slice value, return the length.
/// Result type is always usize.
/// Uses the `ty_op` field.
@@ -367,6 +386,12 @@ pub const Inst = struct {
/// Given a slice value, return the pointer.
/// Uses the `ty_op` field.
slice_ptr,
+ /// Given a pointer to a slice, return a pointer to the length of the slice.
+ /// Uses the `ty_op` field.
+ ptr_slice_len_ptr,
+ /// Given a pointer to a slice, return a pointer to the pointer of the slice.
+ /// Uses the `ty_op` field.
+ ptr_slice_ptr_ptr,
/// Given an array value and element index, return the element value at that index.
/// Result type is the element type of the array operand.
/// Uses the `bin_op` field.
@@ -375,10 +400,10 @@ pub const Inst = struct {
/// Result type is the element type of the slice operand.
/// Uses the `bin_op` field.
slice_elem_val,
- /// Given a pointer to a slice, and element index, return the element value at that index.
- /// Result type is the element type of the slice operand (2 element type operations).
- /// Uses the `bin_op` field.
- ptr_slice_elem_val,
+ /// Given a slice value and element index, return a pointer to the element value at that index.
+ /// Result type is a pointer to the element type of the slice operand.
+ /// Uses the `ty_pl` field with payload `Bin`.
+ slice_elem_ptr,
/// Given a pointer value, and element index, return the element value at that index.
/// Result type is the element type of the pointer operand.
/// Uses the `bin_op` field.
@@ -387,11 +412,6 @@ pub const Inst = struct {
/// Result type is pointer to the element type of the pointer operand.
/// Uses the `ty_pl` field with payload `Bin`.
ptr_elem_ptr,
- /// Given a pointer to a pointer, and element index, return the element value of the inner
- /// pointer at that index.
- /// Result type is the element type of the inner pointer operand.
- /// Uses the `bin_op` field.
- ptr_ptr_elem_val,
/// Given a pointer to an array, return a slice.
/// Uses the `ty_op` field.
array_to_slice,
@@ -640,7 +660,10 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.mul,
.mulwrap,
.mul_sat,
- .div,
+ .div_float,
+ .div_trunc,
+ .div_floor,
+ .div_exact,
.rem,
.mod,
.bit_and,
@@ -685,9 +708,11 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.constant,
.struct_field_ptr,
.struct_field_val,
+ .slice_elem_ptr,
.ptr_elem_ptr,
.cmpxchg_weak,
.cmpxchg_strong,
+ .slice,
=> return air.getRefType(datas[inst].ty_pl.ty),
.not,
@@ -707,6 +732,8 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.wrap_errunion_payload,
.wrap_errunion_err,
.slice_ptr,
+ .ptr_slice_len_ptr,
+ .ptr_slice_ptr_ptr,
.struct_field_ptr_index_0,
.struct_field_ptr_index_1,
.struct_field_ptr_index_2,
@@ -760,11 +787,6 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
const ptr_ty = air.typeOf(datas[inst].bin_op.lhs);
return ptr_ty.elemType();
},
- .ptr_slice_elem_val, .ptr_ptr_elem_val => {
- const outer_ptr_ty = air.typeOf(datas[inst].bin_op.lhs);
- const inner_ptr_ty = outer_ptr_ty.elemType();
- return inner_ptr_ty.elemType();
- },
.atomic_load => {
const ptr_ty = air.typeOf(datas[inst].atomic_load.ptr);
return ptr_ty.elemType();
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 44234d41f7..59643d5279 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -11,6 +11,8 @@ const StringIndexAdapter = std.hash_map.StringIndexAdapter;
const StringIndexContext = std.hash_map.StringIndexContext;
const Zir = @import("Zir.zig");
+const refToIndex = Zir.refToIndex;
+const indexToRef = Zir.indexToRef;
const trace = @import("tracy.zig").trace;
const BuiltinFn = @import("BuiltinFn.zig");
@@ -57,6 +59,7 @@ fn addExtraAssumeCapacity(astgen: *AstGen, extra: anytype) u32 {
Zir.Inst.Ref => @enumToInt(@field(extra, field.name)),
i32 => @bitCast(u32, @field(extra, field.name)),
Zir.Inst.Call.Flags => @bitCast(u32, @field(extra, field.name)),
+ Zir.Inst.SwitchBlock.Bits => @bitCast(u32, @field(extra, field.name)),
else => @compileError("bad field type"),
});
}
@@ -193,9 +196,6 @@ pub const ResultLoc = union(enum) {
/// The expression must generate a pointer rather than a value. For example, the left hand side
/// of an assignment uses this kind of result location.
ref,
- /// The callee will accept a ref, but it is not necessary, and the `ResultLoc`
- /// may be treated as `none` instead.
- none_or_ref,
/// The expression will be coerced into this type, but it will be evaluated as an rvalue.
ty: Zir.Inst.Ref,
/// Same as `ty` but it is guaranteed that Sema will additionally perform the coercion,
@@ -231,7 +231,7 @@ pub const ResultLoc = union(enum) {
fn strategy(rl: ResultLoc, block_scope: *GenZir) Strategy {
switch (rl) {
// In this branch there will not be any store_to_block_ptr instructions.
- .discard, .none, .none_or_ref, .ty, .coerced_ty, .ref => return .{
+ .discard, .none, .ty, .coerced_ty, .ref => return .{
.tag = .break_operand,
.elide_store_to_block_ptr_instructions = false,
},
@@ -276,17 +276,30 @@ fn typeExpr(gz: *GenZir, scope: *Scope, type_node: Ast.Node.Index) InnerError!Zi
return expr(gz, scope, coerced_type_rl, type_node);
}
+fn reachableTypeExpr(
+ gz: *GenZir,
+ scope: *Scope,
+ type_node: Ast.Node.Index,
+ reachable_node: Ast.Node.Index,
+) InnerError!Zir.Inst.Ref {
+ const prev_force_comptime = gz.force_comptime;
+ gz.force_comptime = true;
+ defer gz.force_comptime = prev_force_comptime;
+
+ return reachableExpr(gz, scope, coerced_type_rl, type_node, reachable_node);
+}
+
/// Same as `expr` but fails with a compile error if the result type is `noreturn`.
fn reachableExpr(
gz: *GenZir,
scope: *Scope,
rl: ResultLoc,
node: Ast.Node.Index,
- src_node: Ast.Node.Index,
+ reachable_node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const result_inst = try expr(gz, scope, rl, node);
if (gz.refIsNoReturn(result_inst)) {
- return gz.astgen.failNodeNotes(src_node, "unreachable code", .{}, &[_]u32{
+ return gz.astgen.failNodeNotes(reachable_node, "unreachable code", .{}, &[_]u32{
try gz.astgen.errNoteNode(node, "control flow is diverted here", .{}),
});
}
@@ -634,18 +647,24 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerEr
return simpleBinOp(gz, scope, rl, node, .bit_and);
},
- .bit_or => return simpleBinOp(gz, scope, rl, node, .bit_or),
- .bit_xor => return simpleBinOp(gz, scope, rl, node, .xor),
+ .bit_or => return simpleBinOp(gz, scope, rl, node, .bit_or),
+ .bit_xor => return simpleBinOp(gz, scope, rl, node, .xor),
.bang_equal => return simpleBinOp(gz, scope, rl, node, .cmp_neq),
.equal_equal => return simpleBinOp(gz, scope, rl, node, .cmp_eq),
.greater_than => return simpleBinOp(gz, scope, rl, node, .cmp_gt),
.greater_or_equal => return simpleBinOp(gz, scope, rl, node, .cmp_gte),
.less_than => return simpleBinOp(gz, scope, rl, node, .cmp_lt),
.less_or_equal => return simpleBinOp(gz, scope, rl, node, .cmp_lte),
-
.array_cat => return simpleBinOp(gz, scope, rl, node, .array_cat),
- .array_mult => return simpleBinOp(gz, scope, rl, node, .array_mul),
+
+ .array_mult => {
+ const result = try gz.addPlNode(.array_mul, node, Zir.Inst.Bin{
+ .lhs = try expr(gz, scope, .none, node_datas[node].lhs),
+ .rhs = try comptimeExpr(gz, scope, .{ .coerced_ty = .usize_type }, node_datas[node].rhs),
+ });
+ return rvalue(gz, rl, result, node);
+ },
.error_union => return simpleBinOp(gz, scope, rl, node, .error_union_type),
.merge_error_sets => return simpleBinOp(gz, scope, rl, node, .merge_error_sets),
@@ -721,62 +740,44 @@ fn expr(gz: *GenZir, scope: *Scope, rl: ResultLoc, node: Ast.Node.Index) InnerEr
.slice_open => {
const lhs = try expr(gz, scope, .ref, node_datas[node].lhs);
- const start = try expr(gz, scope, .{ .ty = .usize_type }, node_datas[node].rhs);
+ const start = try expr(gz, scope, .{ .coerced_ty = .usize_type }, node_datas[node].rhs);
const result = try gz.addPlNode(.slice_start, node, Zir.Inst.SliceStart{
.lhs = lhs,
.start = start,
});
- switch (rl) {
- .ref, .none_or_ref => return result,
- else => {
- const dereffed = try gz.addUnNode(.load, result, node);
- return rvalue(gz, rl, dereffed, node);
- },
- }
+ return rvalue(gz, rl, result, node);
},
.slice => {
const lhs = try expr(gz, scope, .ref, node_datas[node].lhs);
const extra = tree.extraData(node_datas[node].rhs, Ast.Node.Slice);
- const start = try expr(gz, scope, .{ .ty = .usize_type }, extra.start);
- const end = try expr(gz, scope, .{ .ty = .usize_type }, extra.end);
+ const start = try expr(gz, scope, .{ .coerced_ty = .usize_type }, extra.start);
+ const end = try expr(gz, scope, .{ .coerced_ty = .usize_type }, extra.end);
const result = try gz.addPlNode(.slice_end, node, Zir.Inst.SliceEnd{
.lhs = lhs,
.start = start,
.end = end,
});
- switch (rl) {
- .ref, .none_or_ref => return result,
- else => {
- const dereffed = try gz.addUnNode(.load, result, node);
- return rvalue(gz, rl, dereffed, node);
- },
- }
+ return rvalue(gz, rl, result, node);
},
.slice_sentinel => {
const lhs = try expr(gz, scope, .ref, node_datas[node].lhs);
const extra = tree.extraData(node_datas[node].rhs, Ast.Node.SliceSentinel);
- const start = try expr(gz, scope, .{ .ty = .usize_type }, extra.start);
- const end = if (extra.end != 0) try expr(gz, scope, .{ .ty = .usize_type }, extra.end) else .none;
- const sentinel = try expr(gz, scope, .{ .ty = .usize_type }, extra.sentinel);
+ const start = try expr(gz, scope, .{ .coerced_ty = .usize_type }, extra.start);
+ const end = if (extra.end != 0) try expr(gz, scope, .{ .coerced_ty = .usize_type }, extra.end) else .none;
+ const sentinel = try expr(gz, scope, .none, extra.sentinel);
const result = try gz.addPlNode(.slice_sentinel, node, Zir.Inst.SliceSentinel{
.lhs = lhs,
.start = start,
.end = end,
.sentinel = sentinel,
});
- switch (rl) {
- .ref, .none_or_ref => return result,
- else => {
- const dereffed = try gz.addUnNode(.load, result, node);
- return rvalue(gz, rl, dereffed, node);
- },
- }
+ return rvalue(gz, rl, result, node);
},
.deref => {
const lhs = try expr(gz, scope, .none, node_datas[node].lhs);
switch (rl) {
- .ref, .none_or_ref => return lhs,
+ .ref => return lhs,
else => {
const result = try gz.addUnNode(.load, lhs, node);
return rvalue(gz, rl, result, node);
@@ -1155,16 +1156,6 @@ fn fnProtoExpr(
return astgen.failNode(fn_proto.ast.section_expr, "linksection not allowed on function prototypes", .{});
}
- const maybe_bang = tree.firstToken(fn_proto.ast.return_type) - 1;
- const is_inferred_error = token_tags[maybe_bang] == .bang;
- if (is_inferred_error) {
- return astgen.failTok(maybe_bang, "function prototype may not have inferred error set", .{});
- }
- var ret_gz = gz.makeSubBlock(scope);
- defer ret_gz.instructions.deinit(gpa);
- const ret_ty = try expr(&ret_gz, scope, coerced_type_rl, fn_proto.ast.return_type);
- const ret_br = try ret_gz.addBreak(.break_inline, 0, ret_ty);
-
const cc: Zir.Inst.Ref = if (fn_proto.ast.callconv_expr != 0)
try expr(
gz,
@@ -1175,6 +1166,16 @@ fn fnProtoExpr(
else
Zir.Inst.Ref.none;
+ const maybe_bang = tree.firstToken(fn_proto.ast.return_type) - 1;
+ const is_inferred_error = token_tags[maybe_bang] == .bang;
+ if (is_inferred_error) {
+ return astgen.failTok(maybe_bang, "function prototype may not have inferred error set", .{});
+ }
+ var ret_gz = gz.makeSubBlock(scope);
+ defer ret_gz.instructions.deinit(gpa);
+ const ret_ty = try expr(&ret_gz, scope, coerced_type_rl, fn_proto.ast.return_type);
+ const ret_br = try ret_gz.addBreak(.break_inline, 0, ret_ty);
+
const result = try gz.addFunc(.{
.src_node = fn_proto.ast.proto_node,
.param_block = 0,
@@ -1273,7 +1274,7 @@ fn arrayInitExpr(
return arrayInitExprRlNone(gz, scope, node, array_init.ast.elements, .array_init_anon_ref);
}
},
- .none, .none_or_ref => {
+ .none => {
if (types.array != .none) {
return arrayInitExprRlTy(gz, scope, node, array_init.ast.elements, types.elem, .array_init);
} else {
@@ -1475,7 +1476,7 @@ fn structInitExpr(
return structInitExprRlNone(gz, scope, node, struct_init, .struct_init_anon_ref);
}
},
- .none, .none_or_ref => {
+ .none => {
if (struct_init.ast.type_expr != 0) {
const ty_inst = try typeExpr(gz, scope, struct_init.ast.type_expr);
return structInitExprRlTy(gz, scope, node, struct_init, ty_inst, .struct_init);
@@ -1691,9 +1692,9 @@ fn breakExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) Inn
return Zir.Inst.Ref.unreachable_value;
}
block_gz.break_count += 1;
- const prev_rvalue_rl_count = block_gz.rvalue_rl_count;
const operand = try expr(parent_gz, parent_scope, block_gz.break_result_loc, rhs);
- const have_store_to_block = block_gz.rvalue_rl_count != prev_rvalue_rl_count;
+ // If the list grew as much as rvalue_rl_count, then a break inside the operand already saved the store_to_block_ptr.
+ const have_store_to_block = block_gz.rvalue_rl_count > block_gz.labeled_store_to_block_ptr_list.items.len;
const br = try parent_gz.addBreak(.@"break", block_inst, operand);
@@ -2052,7 +2053,6 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) Inner
.as_node,
.bit_and,
.bitcast,
- .bitcast_result_ptr,
.bit_or,
.block,
.block_inline,
@@ -2109,7 +2109,6 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) Inner
.negate,
.negate_wrap,
.typeof,
- .typeof_elem,
.xor,
.optional_type,
.optional_payload_safe,
@@ -2136,17 +2135,8 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: Ast.Node.Index) Inner
.slice_sentinel,
.import,
.switch_block,
- .switch_block_multi,
- .switch_block_else,
- .switch_block_else_multi,
- .switch_block_under,
- .switch_block_under_multi,
- .switch_block_ref,
- .switch_block_ref_multi,
- .switch_block_ref_else,
- .switch_block_ref_else_multi,
- .switch_block_ref_under,
- .switch_block_ref_under_multi,
+ .switch_cond,
+ .switch_cond_ref,
.switch_capture,
.switch_capture_ref,
.switch_capture_multi,
@@ -3191,11 +3181,6 @@ fn fnDecl(
break :inst try comptimeExpr(&decl_gz, params_scope, .{ .ty = .const_slice_u8_type }, fn_proto.ast.section_expr);
};
- var ret_gz = decl_gz.makeSubBlock(params_scope);
- defer ret_gz.instructions.deinit(gpa);
- const ret_ty = try expr(&ret_gz, params_scope, coerced_type_rl, fn_proto.ast.return_type);
- const ret_br = try ret_gz.addBreak(.break_inline, 0, ret_ty);
-
const cc: Zir.Inst.Ref = blk: {
if (fn_proto.ast.callconv_expr != 0) {
if (has_inline_keyword) {
@@ -3221,6 +3206,11 @@ fn fnDecl(
}
};
+ var ret_gz = decl_gz.makeSubBlock(params_scope);
+ defer ret_gz.instructions.deinit(gpa);
+ const ret_ty = try expr(&ret_gz, params_scope, coerced_type_rl, fn_proto.ast.return_type);
+ const ret_br = try ret_gz.addBreak(.break_inline, 0, ret_ty);
+
const func_inst: Zir.Inst.Ref = if (body_node == 0) func: {
if (!is_extern) {
return astgen.failTok(fn_proto.ast.fn_token, "non-extern function has no body", .{});
@@ -4912,7 +4902,7 @@ fn tryExpr(
.ref => .ref,
else => .none,
};
- const err_ops = switch (rl) {
+ const err_ops = switch (operand_rl) {
// zig fmt: off
.ref => [3]Zir.Inst.Tag{ .is_non_err_ptr, .err_union_code_ptr, .err_union_payload_unsafe_ptr },
else => [3]Zir.Inst.Tag{ .is_non_err, .err_union_code, .err_union_payload_unsafe },
@@ -5130,11 +5120,12 @@ fn fieldAccess(
rl: ResultLoc,
node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
- if (rl == .ref) {
- return addFieldAccess(.field_ptr, gz, scope, .ref, node);
- } else {
- const access = try addFieldAccess(.field_val, gz, scope, .none_or_ref, node);
- return rvalue(gz, rl, access, node);
+ switch (rl) {
+ .ref => return addFieldAccess(.field_ptr, gz, scope, .ref, node),
+ else => {
+ const access = try addFieldAccess(.field_val, gz, scope, .none, node);
+ return rvalue(gz, rl, access, node);
+ },
}
}
@@ -5178,7 +5169,7 @@ fn arrayAccess(
),
else => return rvalue(gz, rl, try gz.addBin(
.elem_val,
- try expr(gz, scope, .none_or_ref, node_datas[node].lhs),
+ try expr(gz, scope, .none, node_datas[node].lhs),
try expr(gz, scope, .{ .ty = .usize_type }, node_datas[node].rhs),
), node),
}
@@ -5743,7 +5734,8 @@ fn forExpr(
const len = try parent_gz.addUnNode(.indexable_ptr_len, array_ptr, for_full.ast.cond_expr);
const index_ptr = blk: {
- const index_ptr = try parent_gz.addUnNode(.alloc, .usize_type, node);
+ const alloc_tag: Zir.Inst.Tag = if (is_inline) .alloc_comptime else .alloc;
+ const index_ptr = try parent_gz.addUnNode(alloc_tag, .usize_type, node);
// initialize to zero
_ = try parent_gz.addBin(.store, index_ptr, .zero_usize);
break :blk index_ptr;
@@ -6031,11 +6023,12 @@ fn switchExpr(
}
const operand_rl: ResultLoc = if (any_payload_is_ref) .ref else .none;
- const operand = try expr(parent_gz, scope, operand_rl, operand_node);
+ const raw_operand = try expr(parent_gz, scope, operand_rl, operand_node);
+ const cond_tag: Zir.Inst.Tag = if (any_payload_is_ref) .switch_cond_ref else .switch_cond;
+ const cond = try parent_gz.addUnNode(cond_tag, raw_operand, operand_node);
// We need the type of the operand to use as the result location for all the prong items.
- const typeof_tag: Zir.Inst.Tag = if (any_payload_is_ref) .typeof_elem else .typeof;
- const operand_ty_inst = try parent_gz.addUnNode(typeof_tag, operand, operand_node);
- const item_rl: ResultLoc = .{ .ty = operand_ty_inst };
+ const cond_ty_inst = try parent_gz.addUnNode(.typeof, cond, operand_node);
+ const item_rl: ResultLoc = .{ .ty = cond_ty_inst };
// These contain the data that goes into the `extra` array for the SwitchBlock/SwitchBlockMulti.
// This is the optional else prong body.
@@ -6053,7 +6046,7 @@ fn switchExpr(
defer block_scope.instructions.deinit(gpa);
// This gets added to the parent block later, after the item expressions.
- const switch_block = try parent_gz.addBlock(undefined, switch_node);
+ const switch_block = try parent_gz.addBlock(.switch_block, switch_node);
// We re-use this same scope for all cases, including the special prong, if any.
var case_scope = parent_gz.makeSubBlock(&block_scope.base);
@@ -6206,44 +6199,32 @@ fn switchExpr(
// Now that the item expressions are generated we can add this.
try parent_gz.instructions.append(gpa, switch_block);
- const ref_bit: u4 = @boolToInt(any_payload_is_ref);
- const multi_bit: u4 = @boolToInt(multi_cases_len != 0);
- const special_prong_bits: u4 = @enumToInt(special_prong);
- comptime {
- assert(@enumToInt(Zir.SpecialProng.none) == 0b00);
- assert(@enumToInt(Zir.SpecialProng.@"else") == 0b01);
- assert(@enumToInt(Zir.SpecialProng.under) == 0b10);
- }
- const zir_tags = astgen.instructions.items(.tag);
- zir_tags[switch_block] = switch ((ref_bit << 3) | (special_prong_bits << 1) | multi_bit) {
- 0b0_00_0 => .switch_block,
- 0b0_00_1 => .switch_block_multi,
- 0b0_01_0 => .switch_block_else,
- 0b0_01_1 => .switch_block_else_multi,
- 0b0_10_0 => .switch_block_under,
- 0b0_10_1 => .switch_block_under_multi,
- 0b1_00_0 => .switch_block_ref,
- 0b1_00_1 => .switch_block_ref_multi,
- 0b1_01_0 => .switch_block_ref_else,
- 0b1_01_1 => .switch_block_ref_else_multi,
- 0b1_10_0 => .switch_block_ref_under,
- 0b1_10_1 => .switch_block_ref_under_multi,
- else => unreachable,
- };
- const payload_index = astgen.extra.items.len;
- const zir_datas = astgen.instructions.items(.data);
- zir_datas[switch_block].pl_node.payload_index = @intCast(u32, payload_index);
- // Documentation for this: `Zir.Inst.SwitchBlock` and `Zir.Inst.SwitchBlockMulti`.
- try astgen.extra.ensureUnusedCapacity(gpa, @as(usize, 2) + // operand, scalar_cases_len
+ try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.SwitchBlock).Struct.fields.len +
@boolToInt(multi_cases_len != 0) +
special_case_payload.items.len +
scalar_cases_payload.items.len +
multi_cases_payload.items.len);
- astgen.extra.appendAssumeCapacity(@enumToInt(operand));
- astgen.extra.appendAssumeCapacity(scalar_cases_len);
+
+ const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.SwitchBlock{
+ .operand = cond,
+ .bits = Zir.Inst.SwitchBlock.Bits{
+ .is_ref = any_payload_is_ref,
+ .has_multi_cases = multi_cases_len != 0,
+ .has_else = special_prong == .@"else",
+ .has_under = special_prong == .under,
+ .scalar_cases_len = @intCast(Zir.Inst.SwitchBlock.Bits.ScalarCasesLen, scalar_cases_len),
+ },
+ });
+
+ const zir_datas = astgen.instructions.items(.data);
+ const zir_tags = astgen.instructions.items(.tag);
+
+ zir_datas[switch_block].pl_node.payload_index = payload_index;
+
if (multi_cases_len != 0) {
astgen.extra.appendAssumeCapacity(multi_cases_len);
}
+
const strat = rl.strategy(&block_scope);
switch (strat.tag) {
.break_operand => {
@@ -6517,7 +6498,8 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref
},
.always => {
// Value is always an error. Emit both error defers and regular defers.
- const err_code = try gz.addUnNode(.err_union_code, operand, node);
+ const result = if (rl == .ptr) try gz.addUnNode(.load, rl.ptr, node) else operand;
+ const err_code = try gz.addUnNode(.err_union_code, result, node);
try genDefers(gz, defer_outer, scope, .{ .both = err_code });
try gz.addRet(rl, operand, node);
return Zir.Inst.Ref.unreachable_value;
@@ -6532,7 +6514,8 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref
}
// Emit conditional branch for generating errdefers.
- const is_non_err = try gz.addUnNode(.is_non_err, operand, node);
+ const result = if (rl == .ptr) try gz.addUnNode(.load, rl.ptr, node) else operand;
+ const is_non_err = try gz.addUnNode(.is_non_err, result, node);
const condbr = try gz.addCondBr(.condbr, node);
var then_scope = gz.makeSubBlock(scope);
@@ -6545,7 +6528,7 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref
defer else_scope.instructions.deinit(astgen.gpa);
const which_ones: DefersToEmit = if (!defer_counts.need_err_code) .both_sans_err else .{
- .both = try else_scope.addUnNode(.err_union_code, operand, node),
+ .both = try else_scope.addUnNode(.err_union_code, result, node),
};
try genDefers(&else_scope, defer_outer, scope, which_ones);
try else_scope.addRet(rl, operand, node);
@@ -6664,7 +6647,7 @@ fn identifier(
);
switch (rl) {
- .ref, .none_or_ref => return ptr_inst,
+ .ref => return ptr_inst,
else => {
const loaded = try gz.addUnNode(.load, ptr_inst, ident);
return rvalue(gz, rl, loaded, ident);
@@ -6700,7 +6683,7 @@ fn identifier(
// Decl references happen by name rather than ZIR index so that when unrelated
// decls are modified, ZIR code containing references to them can be unmodified.
switch (rl) {
- .ref, .none_or_ref => return gz.addStrTok(.decl_ref, name_str_index, ident_token),
+ .ref => return gz.addStrTok(.decl_ref, name_str_index, ident_token),
else => {
const result = try gz.addStrTok(.decl_val, name_str_index, ident_token);
return rvalue(gz, rl, result, ident);
@@ -7105,7 +7088,7 @@ fn as(
) InnerError!Zir.Inst.Ref {
const dest_type = try typeExpr(gz, scope, lhs);
switch (rl) {
- .none, .none_or_ref, .discard, .ref, .ty, .coerced_ty => {
+ .none, .discard, .ref, .ty, .coerced_ty => {
const result = try reachableExpr(gz, scope, .{ .ty = dest_type }, rhs, node);
return rvalue(gz, rl, result, node);
},
@@ -7128,7 +7111,7 @@ fn unionInit(
const union_type = try typeExpr(gz, scope, params[0]);
const field_name = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, params[1]);
switch (rl) {
- .none, .none_or_ref, .discard, .ref, .ty, .coerced_ty, .inferred_ptr => {
+ .none, .discard, .ref, .ty, .coerced_ty, .inferred_ptr => {
_ = try gz.addPlNode(.field_type_ref, params[1], Zir.Inst.FieldTypeRef{
.container_type = union_type,
.field_name = field_name,
@@ -7189,42 +7172,13 @@ fn bitCast(
lhs: Ast.Node.Index,
rhs: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
- const astgen = gz.astgen;
- const dest_type = try typeExpr(gz, scope, lhs);
- switch (rl) {
- .none, .none_or_ref, .discard, .ty, .coerced_ty => {
- const operand = try expr(gz, scope, .none, rhs);
- const result = try gz.addPlNode(.bitcast, node, Zir.Inst.Bin{
- .lhs = dest_type,
- .rhs = operand,
- });
- return rvalue(gz, rl, result, node);
- },
- .ref => {
- return astgen.failNode(node, "cannot take address of `@bitCast` result", .{});
- },
- .ptr, .inferred_ptr => |result_ptr| {
- return bitCastRlPtr(gz, scope, node, dest_type, result_ptr, rhs);
- },
- .block_ptr => |block| {
- return bitCastRlPtr(gz, scope, node, dest_type, block.rl_ptr, rhs);
- },
- }
-}
-
-fn bitCastRlPtr(
- gz: *GenZir,
- scope: *Scope,
- node: Ast.Node.Index,
- dest_type: Zir.Inst.Ref,
- result_ptr: Zir.Inst.Ref,
- rhs: Ast.Node.Index,
-) InnerError!Zir.Inst.Ref {
- const casted_result_ptr = try gz.addPlNode(.bitcast_result_ptr, node, Zir.Inst.Bin{
+ const dest_type = try reachableTypeExpr(gz, scope, lhs, node);
+ const operand = try reachableExpr(gz, scope, .none, rhs, node);
+ const result = try gz.addPlNode(.bitcast, node, Zir.Inst.Bin{
.lhs = dest_type,
- .rhs = result_ptr,
+ .rhs = operand,
});
- return expr(gz, scope, .{ .ptr = casted_result_ptr }, rhs);
+ return rvalue(gz, rl, result, node);
}
fn typeOf(
@@ -8383,7 +8337,7 @@ fn nodeMayNeedMemoryLocation(tree: *const Ast, start_node: Ast.Node.Index) bool
}
}
-fn nodeMayEvalToError(tree: *const Ast, start_node: Ast.Node.Index) enum { never, always, maybe } {
+fn nodeMayEvalToError(tree: *const Ast, start_node: Ast.Node.Index) BuiltinFn.EvalToError {
const node_tags = tree.nodes.items(.tag);
const node_datas = tree.nodes.items(.data);
const main_tokens = tree.nodes.items(.main_token);
@@ -8560,10 +8514,10 @@ fn nodeMayEvalToError(tree: *const Ast, start_node: Ast.Node.Index) enum { never
.unwrap_optional,
=> node = node_datas[node].lhs,
- // Forward the question to the RHS sub-expression.
+ // LHS sub-expression may still be an error under the outer optional or error union
.@"catch",
.@"orelse",
- => node = node_datas[node].rhs,
+ => return .maybe,
.block_two,
.block_two_semicolon,
@@ -8590,11 +8544,7 @@ fn nodeMayEvalToError(tree: *const Ast, start_node: Ast.Node.Index) enum { never
// If the builtin is an invalid name, we don't cause an error here; instead
// let it pass, and the error will be "invalid builtin function" later.
const builtin_info = BuiltinFn.list.get(builtin_name) orelse return .maybe;
- if (builtin_info.tag == .err_set_cast) {
- return .always;
- } else {
- return .never;
- }
+ return builtin_info.eval_to_error;
},
}
}
@@ -8799,7 +8749,7 @@ fn rvalue(
) InnerError!Zir.Inst.Ref {
if (gz.endsWithNoReturn()) return result;
switch (rl) {
- .none, .none_or_ref, .coerced_ty => return result,
+ .none, .coerced_ty => return result,
.discard => {
// Emit a compile error for discarding error values.
_ = try gz.addUnNode(.ensure_result_non_error, result, src_node);
@@ -9561,9 +9511,7 @@ const GenZir = struct {
gz.rl_ty_inst = ty_inst;
gz.break_result_loc = parent_rl;
},
- .none_or_ref => {
- gz.break_result_loc = .ref;
- },
+
.discard, .none, .ptr, .ref => {
gz.break_result_loc = parent_rl;
},
@@ -10627,21 +10575,6 @@ fn advanceSourceCursor(astgen: *AstGen, source: []const u8, end: usize) void {
astgen.source_column = column;
}
-const ref_start_index: u32 = Zir.Inst.Ref.typed_value_map.len;
-
-fn indexToRef(inst: Zir.Inst.Index) Zir.Inst.Ref {
- return @intToEnum(Zir.Inst.Ref, ref_start_index + inst);
-}
-
-fn refToIndex(inst: Zir.Inst.Ref) ?Zir.Inst.Index {
- const ref_int = @enumToInt(inst);
- if (ref_int >= ref_start_index) {
- return ref_int - ref_start_index;
- } else {
- return null;
- }
-}
-
fn scanDecls(astgen: *AstGen, namespace: *Scope.Namespace, members: []const Ast.Node.Index) !void {
const gpa = astgen.gpa;
const tree = astgen.tree;
diff --git a/src/BuiltinFn.zig b/src/BuiltinFn.zig
index e1f4f5bd16..7c5dde03d1 100644
--- a/src/BuiltinFn.zig
+++ b/src/BuiltinFn.zig
@@ -119,10 +119,21 @@ pub const MemLocRequirement = enum {
forward1,
};
+pub const EvalToError = enum {
+ /// The builtin cannot possibly evaluate to an error.
+ never,
+ /// The builtin will always evaluate to an error.
+ always,
+ /// The builtin may or may not evaluate to an error depending on the parameters.
+ maybe,
+};
+
tag: Tag,
/// Info about the builtin call's ability to take advantage of a result location pointer.
needs_mem_loc: MemLocRequirement = .never,
+/// Info about the builtin call's possibility of returning an error.
+eval_to_error: EvalToError = .never,
/// `true` if the builtin call can be the left-hand side of an expression (assigned to).
allows_lvalue: bool = false,
/// The number of parameters to this builtin function. `null` means variable number
@@ -158,6 +169,7 @@ pub const list = list: {
.{
.tag = .as,
.needs_mem_loc = .forward1,
+ .eval_to_error = .maybe,
.param_count = 2,
},
},
@@ -258,6 +270,7 @@ pub const list = list: {
.{
.tag = .call,
.needs_mem_loc = .always,
+ .eval_to_error = .maybe,
.param_count = 3,
},
},
@@ -391,6 +404,7 @@ pub const list = list: {
"@errSetCast",
.{
.tag = .err_set_cast,
+ .eval_to_error = .always,
.param_count = 2,
},
},
@@ -420,6 +434,7 @@ pub const list = list: {
.{
.tag = .field,
.needs_mem_loc = .always,
+ .eval_to_error = .maybe,
.param_count = 2,
.allows_lvalue = true,
},
@@ -512,6 +527,7 @@ pub const list = list: {
"@intToError",
.{
.tag = .int_to_error,
+ .eval_to_error = .always,
.param_count = 1,
},
},
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 3432c38ab5..50d9376c58 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -55,6 +55,10 @@ c_object_work_queue: std.fifo.LinearFifo(*CObject, .Dynamic),
/// since the last compilation, as well as scan for `@import` and queue up
/// additional jobs corresponding to those new files.
astgen_work_queue: std.fifo.LinearFifo(*Module.File, .Dynamic),
+/// These jobs check the file via stat() and, if the embedded file has changed
+/// on disk, mark the corresponding Decl outdated and queue up an `analyze_decl`
+/// task for it.
+embed_file_work_queue: std.fifo.LinearFifo(*Module.EmbedFile, .Dynamic),
/// The ErrorMsg memory is owned by the `CObject`, using Compilation's general purpose allocator.
/// This data is accessed by multiple threads and is protected by `mutex`.
@@ -181,6 +185,10 @@ const Job = union(enum) {
/// It may have already been analyzed, or it may have been determined
/// to be outdated; in this case perform semantic analysis again.
analyze_decl: *Module.Decl,
+ /// The file that was loaded with `@embedFile` has changed on disk
+ /// and has been re-loaded into memory. All Decls that depend on it
+ /// need to be re-analyzed.
+ update_embed_file: *Module.EmbedFile,
/// The source file containing the Decl has been updated, and so the
/// Decl may need its line number information updated in the debug info.
update_line_number: *Module.Decl,
@@ -750,6 +758,8 @@ pub const InitOptions = struct {
subsystem: ?std.Target.SubSystem = null,
/// WASI-only. Type of WASI execution model ("command" or "reactor").
wasi_exec_model: ?std.builtin.WasiExecModel = null,
+ /// (Zig compiler development) Enable dumping linker's state as JSON.
+ enable_link_snapshots: bool = false,
};
fn addPackageTableToCacheHash(
@@ -1434,6 +1444,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
.is_test = options.is_test,
.wasi_exec_model = wasi_exec_model,
.use_stage1 = use_stage1,
+ .enable_link_snapshots = options.enable_link_snapshots,
});
errdefer bin_file.destroy();
comp.* = .{
@@ -1451,6 +1462,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
.work_queue = std.fifo.LinearFifo(Job, .Dynamic).init(gpa),
.c_object_work_queue = std.fifo.LinearFifo(*CObject, .Dynamic).init(gpa),
.astgen_work_queue = std.fifo.LinearFifo(*Module.File, .Dynamic).init(gpa),
+ .embed_file_work_queue = std.fifo.LinearFifo(*Module.EmbedFile, .Dynamic).init(gpa),
.keep_source_files_loaded = options.keep_source_files_loaded,
.use_clang = use_clang,
.clang_argv = options.clang_argv,
@@ -1636,6 +1648,7 @@ pub fn destroy(self: *Compilation) void {
self.work_queue.deinit();
self.c_object_work_queue.deinit();
self.astgen_work_queue.deinit();
+ self.embed_file_work_queue.deinit();
{
var it = self.crt_files.iterator();
@@ -1751,6 +1764,16 @@ pub fn update(self: *Compilation) !void {
}
if (!use_stage1) {
+ // Put a work item in for checking if any files used with `@embedFile` changed.
+ {
+ try self.embed_file_work_queue.ensureUnusedCapacity(module.embed_table.count());
+ var it = module.embed_table.iterator();
+ while (it.next()) |entry| {
+ const embed_file = entry.value_ptr.*;
+ self.embed_file_work_queue.writeItemAssumeCapacity(embed_file);
+ }
+ }
+
try self.work_queue.writeItem(.{ .analyze_pkg = std_pkg });
if (self.bin_file.options.is_test) {
try self.work_queue.writeItem(.{ .analyze_pkg = module.main_pkg });
@@ -1874,6 +1897,7 @@ pub fn totalErrorCount(self: *Compilation) usize {
if (self.bin_file.options.module) |module| {
total += module.failed_exports.count();
+ total += module.failed_embed_files.count();
{
var it = module.failed_files.iterator();
@@ -1971,6 +1995,13 @@ pub fn getAllErrorsAlloc(self: *Compilation) !AllErrors {
}
}
{
+ var it = module.failed_embed_files.iterator();
+ while (it.next()) |entry| {
+ const msg = entry.value_ptr.*;
+ try AllErrors.add(module, &arena, &errors, msg.*);
+ }
+ }
+ {
var it = module.failed_decls.iterator();
while (it.next()) |entry| {
// Skip errors for Decls within files that had a parse failure.
@@ -2069,6 +2100,9 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
var c_obj_prog_node = main_progress_node.start("Compile C Objects", self.c_source_files.len);
defer c_obj_prog_node.end();
+ var embed_file_prog_node = main_progress_node.start("Detect @embedFile updates", self.embed_file_work_queue.count);
+ defer embed_file_prog_node.end();
+
self.work_queue_wait_group.reset();
defer self.work_queue_wait_group.wait();
@@ -2083,6 +2117,13 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
});
}
+ while (self.embed_file_work_queue.readItem()) |embed_file| {
+ self.astgen_wait_group.start();
+ try self.thread_pool.spawn(workerCheckEmbedFile, .{
+ self, embed_file, &embed_file_prog_node, &self.astgen_wait_group,
+ });
+ }
+
while (self.c_object_work_queue.readItem()) |c_object| {
self.work_queue_wait_group.start();
try self.thread_pool.spawn(workerUpdateCObject, .{
@@ -2264,6 +2305,15 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
error.AnalysisFail => continue,
};
},
+ .update_embed_file => |embed_file| {
+ if (build_options.omit_stage2)
+ @panic("sadly stage2 is omitted from this build to save memory on the CI server");
+ const module = self.bin_file.options.module.?;
+ module.updateEmbedFile(embed_file) catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ error.AnalysisFail => continue,
+ };
+ },
.update_line_number => |decl| {
if (build_options.omit_stage2)
@panic("sadly stage2 is omitted from this build to save memory on the CI server");
@@ -2546,6 +2596,29 @@ fn workerAstGenFile(
}
}
+fn workerCheckEmbedFile(
+ comp: *Compilation,
+ embed_file: *Module.EmbedFile,
+ prog_node: *std.Progress.Node,
+ wg: *WaitGroup,
+) void {
+ defer wg.finish();
+
+ var child_prog_node = prog_node.start(embed_file.sub_file_path, 0);
+ child_prog_node.activate();
+ defer child_prog_node.end();
+
+ const mod = comp.bin_file.options.module.?;
+ mod.detectEmbedFileUpdate(embed_file) catch |err| {
+ comp.reportRetryableEmbedFileError(embed_file, err) catch |oom| switch (oom) {
+ // Swallowing this error is OK because it's implied to be OOM when
+ // there is a missing `failed_embed_files` error message.
+ error.OutOfMemory => {},
+ };
+ return;
+ };
+}
+
pub fn obtainCObjectCacheManifest(comp: *const Compilation) Cache.Manifest {
var man = comp.cache_parent.obtain();
@@ -2794,6 +2867,36 @@ fn reportRetryableAstGenError(
}
}
+fn reportRetryableEmbedFileError(
+ comp: *Compilation,
+ embed_file: *Module.EmbedFile,
+ err: anyerror,
+) error{OutOfMemory}!void {
+ const mod = comp.bin_file.options.module.?;
+ const gpa = mod.gpa;
+
+ const src_loc: Module.SrcLoc = embed_file.owner_decl.srcLoc();
+
+ const err_msg = if (embed_file.pkg.root_src_directory.path) |dir_path|
+ try Module.ErrorMsg.create(
+ gpa,
+ src_loc,
+ "unable to load '{s}" ++ std.fs.path.sep_str ++ "{s}': {s}",
+ .{ dir_path, embed_file.sub_file_path, @errorName(err) },
+ )
+ else
+ try Module.ErrorMsg.create(gpa, src_loc, "unable to load '{s}': {s}", .{
+ embed_file.sub_file_path, @errorName(err),
+ });
+ errdefer err_msg.destroy(gpa);
+
+ {
+ const lock = comp.mutex.acquire();
+ defer lock.release();
+ try mod.failed_embed_files.putNoClobber(gpa, embed_file, err_msg);
+ }
+}
+
fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.Progress.Node) !void {
if (!build_options.have_llvm) {
return comp.failCObj(c_object, "clang not available: compiler built without LLVM extensions", .{});
diff --git a/src/Liveness.zig b/src/Liveness.zig
index 6f7c938f4c..499500fddb 100644
--- a/src/Liveness.zig
+++ b/src/Liveness.zig
@@ -233,7 +233,10 @@ fn analyzeInst(
.mul,
.mulwrap,
.mul_sat,
- .div,
+ .div_float,
+ .div_trunc,
+ .div_floor,
+ .div_exact,
.rem,
.mod,
.ptr_add,
@@ -252,9 +255,7 @@ fn analyzeInst(
.store,
.array_elem_val,
.slice_elem_val,
- .ptr_slice_elem_val,
.ptr_elem_val,
- .ptr_ptr_elem_val,
.shl,
.shl_exact,
.shl_sat,
@@ -300,6 +301,8 @@ fn analyzeInst(
.wrap_errunion_err,
.slice_ptr,
.slice_len,
+ .ptr_slice_len_ptr,
+ .ptr_slice_ptr_ptr,
.struct_field_ptr_index_0,
.struct_field_ptr_index_1,
.struct_field_ptr_index_2,
@@ -359,7 +362,7 @@ fn analyzeInst(
const extra = a.air.extraData(Air.StructField, inst_datas[inst].ty_pl.payload).data;
return trackOperands(a, new_set, inst, main_tomb, .{ extra.struct_operand, .none, .none });
},
- .ptr_elem_ptr => {
+ .ptr_elem_ptr, .slice_elem_ptr, .slice => {
const extra = a.air.extraData(Air.Bin, inst_datas[inst].ty_pl.payload).data;
return trackOperands(a, new_set, inst, main_tomb, .{ extra.lhs, extra.rhs, .none });
},
diff --git a/src/Module.zig b/src/Module.zig
index a42ec3c2e1..de6770d3d7 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -55,11 +55,17 @@ decl_exports: std.AutoArrayHashMapUnmanaged(*Decl, []*Export) = .{},
/// is performing the export of another Decl.
/// This table owns the Export memory.
export_owners: std.AutoArrayHashMapUnmanaged(*Decl, []*Export) = .{},
-/// The set of all the files in the Module. We keep track of this in order to iterate
-/// over it and check which source files have been modified on the file system when
+/// The set of all the Zig source files in the Module. We keep track of this in order
+/// to iterate over it and check which source files have been modified on the file system when
/// an update is requested, as well as to cache `@import` results.
/// Keys are fully resolved file paths. This table owns the keys and values.
import_table: std.StringArrayHashMapUnmanaged(*File) = .{},
+/// The set of all the files which have been loaded with `@embedFile` in the Module.
+/// We keep track of this in order to iterate over it and check which files have been
+/// modified on the file system when an update is requested, as well as to cache
+/// `@embedFile` results.
+/// Keys are fully resolved file paths. This table owns the keys and values.
+embed_table: std.StringHashMapUnmanaged(*EmbedFile) = .{},
/// The set of all the generic function instantiations. This is used so that when a generic
/// function is called twice with the same comptime parameter arguments, both calls dispatch
@@ -87,6 +93,8 @@ compile_log_decls: std.AutoArrayHashMapUnmanaged(*Decl, i32) = .{},
/// Using a map here for consistency with the other fields here.
/// The ErrorMsg memory is owned by the `File`, using Module's general purpose allocator.
failed_files: std.AutoArrayHashMapUnmanaged(*File, ?*ErrorMsg) = .{},
+/// The ErrorMsg memory is owned by the `EmbedFile`, using Module's general purpose allocator.
+failed_embed_files: std.AutoArrayHashMapUnmanaged(*EmbedFile, *ErrorMsg) = .{},
/// Using a map here for consistency with the other fields here.
/// The ErrorMsg memory is owned by the `Export`, using Module's general purpose allocator.
failed_exports: std.AutoArrayHashMapUnmanaged(*Export, *ErrorMsg) = .{},
@@ -309,6 +317,7 @@ pub const WipCaptureScope = struct {
assert(!self.finalized);
// use a temp to avoid unintentional aliasing due to RLS
const tmp = try self.scope.captures.clone(self.perm_arena);
+ self.scope.captures.deinit(self.gpa);
self.scope.captures = tmp;
self.finalized = true;
}
@@ -763,6 +772,17 @@ pub const Decl = struct {
else => false,
};
}
+
+ pub fn getAlignment(decl: Decl, target: Target) u32 {
+ assert(decl.has_tv);
+ if (decl.align_val.tag() != .null_value) {
+ // Explicit alignment.
+ return @intCast(u32, decl.align_val.toUnsignedInt());
+ } else {
+ // Natural alignment.
+ return decl.ty.abiAlignment(target);
+ }
+ }
};
/// This state is attached to every Decl when Module emit_h is non-null.
@@ -782,6 +802,10 @@ pub const ErrorSet = struct {
/// The length is given by `names_len`.
names_ptr: [*]const []const u8,
+ pub fn names(self: ErrorSet) []const []const u8 {
+ return self.names_ptr[0..self.names_len];
+ }
+
pub fn srcLoc(self: ErrorSet) SrcLoc {
return .{
.file_scope = self.owner_decl.getFileScope(),
@@ -864,10 +888,12 @@ pub const EnumSimple = struct {
/// The Decl that corresponds to the enum itself.
owner_decl: *Decl,
/// Set of field names in declaration order.
- fields: std.StringArrayHashMapUnmanaged(void),
+ fields: NameMap,
/// Offset from `owner_decl`, points to the enum decl AST node.
node_offset: i32,
+ pub const NameMap = EnumFull.NameMap;
+
pub fn srcLoc(self: EnumSimple) SrcLoc {
return .{
.file_scope = self.owner_decl.getFileScope(),
@@ -1530,6 +1556,23 @@ pub const File = struct {
}
};
+/// Represents the contents of a file loaded with `@embedFile`.
+pub const EmbedFile = struct {
+ /// Relative to the owning package's root_src_dir.
+ /// Memory is stored in gpa, owned by EmbedFile.
+ sub_file_path: []const u8,
+ bytes: [:0]const u8,
+ stat_size: u64,
+ stat_inode: std.fs.File.INode,
+ stat_mtime: i128,
+ /// Package that this file is a part of, managed externally.
+ pkg: *Package,
+ /// The Decl that was created from the `@embedFile` to own this resource.
+ /// This is how zig knows what other Decl objects to invalidate if the file
+ /// changes on disk.
+ owner_decl: *Decl,
+};
+
/// This struct holds data necessary to construct API-facing `AllErrors.Message`.
/// Its memory is managed with the general purpose allocator so that they
/// can be created and destroyed in response to incremental updates.
@@ -2360,6 +2403,11 @@ pub fn deinit(mod: *Module) void {
}
mod.failed_files.deinit(gpa);
+ for (mod.failed_embed_files.values()) |msg| {
+ msg.destroy(gpa);
+ }
+ mod.failed_embed_files.deinit(gpa);
+
for (mod.failed_exports.values()) |value| {
value.destroy(gpa);
}
@@ -3056,6 +3104,32 @@ pub fn ensureDeclAnalyzed(mod: *Module, decl: *Decl) SemaError!void {
}
}
+pub fn updateEmbedFile(mod: *Module, embed_file: *EmbedFile) SemaError!void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ // TODO we can potentially relax this if we store some more information along
+ // with decl dependency edges
+ for (embed_file.owner_decl.dependants.keys()) |dep| {
+ switch (dep.analysis) {
+ .unreferenced => unreachable,
+ .in_progress => continue, // already doing analysis, ok
+ .outdated => continue, // already queued for update
+
+ .file_failure,
+ .dependency_failure,
+ .sema_failure,
+ .sema_failure_retryable,
+ .codegen_failure,
+ .codegen_failure_retryable,
+ .complete,
+ => if (dep.generation != mod.generation) {
+ try mod.markOutdatedDecl(dep);
+ },
+ }
+ }
+}
+
pub fn semaPkg(mod: *Module, pkg: *Package) !void {
const file = (try mod.importPkg(pkg)).file;
return mod.semaFile(file);
@@ -3547,6 +3621,84 @@ pub fn importFile(
};
}
+pub fn embedFile(mod: *Module, cur_file: *File, rel_file_path: []const u8) !*EmbedFile {
+ const gpa = mod.gpa;
+
+ // The resolved path is used as the key in the table, to detect if
+ // a file refers to the same as another, despite different relative paths.
+ const cur_pkg_dir_path = cur_file.pkg.root_src_directory.path orelse ".";
+ const resolved_path = try std.fs.path.resolve(gpa, &[_][]const u8{
+ cur_pkg_dir_path, cur_file.sub_file_path, "..", rel_file_path,
+ });
+ var keep_resolved_path = false;
+ defer if (!keep_resolved_path) gpa.free(resolved_path);
+
+ const gop = try mod.embed_table.getOrPut(gpa, resolved_path);
+ if (gop.found_existing) return gop.value_ptr.*;
+ keep_resolved_path = true; // It's now owned by embed_table.
+
+ const new_file = try gpa.create(EmbedFile);
+ errdefer gpa.destroy(new_file);
+
+ const resolved_root_path = try std.fs.path.resolve(gpa, &[_][]const u8{cur_pkg_dir_path});
+ defer gpa.free(resolved_root_path);
+
+ if (!mem.startsWith(u8, resolved_path, resolved_root_path)) {
+ return error.ImportOutsidePkgPath;
+ }
+ // +1 for the directory separator here.
+ const sub_file_path = try gpa.dupe(u8, resolved_path[resolved_root_path.len + 1 ..]);
+ errdefer gpa.free(sub_file_path);
+
+ var file = try cur_file.pkg.root_src_directory.handle.openFile(sub_file_path, .{});
+ defer file.close();
+
+ const stat = try file.stat();
+ const bytes = try file.readToEndAllocOptions(gpa, std.math.maxInt(u32), stat.size, 1, 0);
+
+ log.debug("new embedFile. resolved_root_path={s}, resolved_path={s}, sub_file_path={s}, rel_file_path={s}", .{
+ resolved_root_path, resolved_path, sub_file_path, rel_file_path,
+ });
+
+ gop.value_ptr.* = new_file;
+ new_file.* = .{
+ .sub_file_path = sub_file_path,
+ .bytes = bytes,
+ .stat_size = stat.size,
+ .stat_inode = stat.inode,
+ .stat_mtime = stat.mtime,
+ .pkg = cur_file.pkg,
+ .owner_decl = undefined, // Set by Sema immediately after this function returns.
+ };
+ return new_file;
+}
+
+pub fn detectEmbedFileUpdate(mod: *Module, embed_file: *EmbedFile) !void {
+ var file = try embed_file.pkg.root_src_directory.handle.openFile(embed_file.sub_file_path, .{});
+ defer file.close();
+
+ const stat = try file.stat();
+
+ const unchanged_metadata =
+ stat.size == embed_file.stat_size and
+ stat.mtime == embed_file.stat_mtime and
+ stat.inode == embed_file.stat_inode;
+
+ if (unchanged_metadata) return;
+
+ const gpa = mod.gpa;
+ const bytes = try file.readToEndAllocOptions(gpa, std.math.maxInt(u32), stat.size, 1, 0);
+ gpa.free(embed_file.bytes);
+ embed_file.bytes = bytes;
+ embed_file.stat_size = stat.size;
+ embed_file.stat_mtime = stat.mtime;
+ embed_file.stat_inode = stat.inode;
+
+ const lock = mod.comp.mutex.acquire();
+ defer lock.release();
+ try mod.comp.work_queue.writeItem(.{ .update_embed_file = embed_file });
+}
+
pub fn scanNamespace(
mod: *Module,
namespace: *Namespace,
@@ -4079,7 +4231,13 @@ fn markOutdatedDecl(mod: *Module, decl: *Decl) !void {
decl.analysis = .outdated;
}
-pub fn allocateNewDecl(mod: *Module, name: [:0]const u8, namespace: *Namespace, src_node: Ast.Node.Index, src_scope: ?*CaptureScope) !*Decl {
+pub fn allocateNewDecl(
+ mod: *Module,
+ name: [:0]const u8,
+ namespace: *Namespace,
+ src_node: Ast.Node.Index,
+ src_scope: ?*CaptureScope,
+) !*Decl {
// If we have emit-h then we must allocate a bigger structure to store the emit-h state.
const new_decl: *Decl = if (mod.emit_h != null) blk: {
const parent_struct = try mod.gpa.create(DeclPlusEmitH);
@@ -4101,7 +4259,7 @@ pub fn allocateNewDecl(mod: *Module, name: [:0]const u8, namespace: *Namespace,
.val = undefined,
.align_val = undefined,
.linksection_val = undefined,
- .@"addrspace" = undefined,
+ .@"addrspace" = .generic,
.analysis = .unreferenced,
.deletion_flag = false,
.zir_decl_index = 0,
@@ -4205,7 +4363,6 @@ pub fn createAnonymousDeclFromDeclNamed(
new_decl.val = typed_value.val;
new_decl.align_val = Value.initTag(.null_value);
new_decl.linksection_val = Value.initTag(.null_value);
- new_decl.@"addrspace" = .generic; // default global addrspace
new_decl.has_tv = true;
new_decl.analysis = .complete;
new_decl.generation = mod.generation;
diff --git a/src/Sema.zig b/src/Sema.zig
index 229ae054b2..e7cb40420e 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -224,6 +224,16 @@ pub const Block = struct {
});
}
+ pub fn addBitCast(block: *Block, ty: Type, operand: Air.Inst.Ref) Allocator.Error!Air.Inst.Ref {
+ return block.addInst(.{
+ .tag = .bitcast,
+ .data = .{ .ty_op = .{
+ .ty = try block.sema.addType(ty),
+ .operand = operand,
+ } },
+ });
+ }
+
pub fn addNoOp(block: *Block, tag: Air.Inst.Tag) error{OutOfMemory}!Air.Inst.Ref {
return block.addInst(.{
.tag = tag,
@@ -300,7 +310,7 @@ pub const Block = struct {
.ty = ty,
.payload = try block.sema.addExtra(Air.StructField{
.struct_operand = struct_ptr,
- .field_index = @intCast(u32, field_index),
+ .field_index = field_index,
}),
} },
});
@@ -315,6 +325,60 @@ pub const Block = struct {
});
}
+ pub fn addStructFieldVal(
+ block: *Block,
+ struct_val: Air.Inst.Ref,
+ field_index: u32,
+ field_ty: Type,
+ ) !Air.Inst.Ref {
+ return block.addInst(.{
+ .tag = .struct_field_val,
+ .data = .{ .ty_pl = .{
+ .ty = try block.sema.addType(field_ty),
+ .payload = try block.sema.addExtra(Air.StructField{
+ .struct_operand = struct_val,
+ .field_index = field_index,
+ }),
+ } },
+ });
+ }
+
+ pub fn addSliceElemPtr(
+ block: *Block,
+ slice: Air.Inst.Ref,
+ elem_index: Air.Inst.Ref,
+ elem_ptr_ty: Type,
+ ) !Air.Inst.Ref {
+ return block.addInst(.{
+ .tag = .slice_elem_ptr,
+ .data = .{ .ty_pl = .{
+ .ty = try block.sema.addType(elem_ptr_ty),
+ .payload = try block.sema.addExtra(Air.Bin{
+ .lhs = slice,
+ .rhs = elem_index,
+ }),
+ } },
+ });
+ }
+
+ pub fn addPtrElemPtr(
+ block: *Block,
+ array_ptr: Air.Inst.Ref,
+ elem_index: Air.Inst.Ref,
+ elem_ptr_ty: Type,
+ ) !Air.Inst.Ref {
+ return block.addInst(.{
+ .tag = .ptr_elem_ptr,
+ .data = .{ .ty_pl = .{
+ .ty = try block.sema.addType(elem_ptr_ty),
+ .payload = try block.sema.addExtra(Air.Bin{
+ .lhs = array_ptr,
+ .rhs = elem_index,
+ }),
+ } },
+ });
+ }
+
pub fn addInst(block: *Block, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Ref {
return Air.indexToRef(try block.addInstAsIndex(inst));
}
@@ -332,6 +396,14 @@ pub const Block = struct {
return result_index;
}
+ fn addUnreachable(block: *Block, src: LazySrcLoc, safety_check: bool) !void {
+ if (safety_check and block.wantSafety()) {
+ _ = try block.sema.safetyPanic(block, src, .unreach);
+ } else {
+ _ = try block.addNoOp(.unreach);
+ }
+ }
+
pub fn startAnonDecl(block: *Block) !WipAnonDecl {
return WipAnonDecl{
.block = block,
@@ -459,7 +531,6 @@ pub fn analyzeBody(
.bit_not => try sema.zirBitNot(block, inst),
.bit_or => try sema.zirBitwise(block, inst, .bit_or),
.bitcast => try sema.zirBitcast(block, inst),
- .bitcast_result_ptr => try sema.zirBitcastResultPtr(block, inst),
.suspend_block => try sema.zirSuspendBlock(block, inst),
.bool_not => try sema.zirBoolNot(block, inst),
.bool_br_and => try sema.zirBoolBr(block, inst, false),
@@ -532,18 +603,9 @@ pub fn analyzeBody(
.slice_sentinel => try sema.zirSliceSentinel(block, inst),
.slice_start => try sema.zirSliceStart(block, inst),
.str => try sema.zirStr(block, inst),
- .switch_block => try sema.zirSwitchBlock(block, inst, false, .none),
- .switch_block_multi => try sema.zirSwitchBlockMulti(block, inst, false, .none),
- .switch_block_else => try sema.zirSwitchBlock(block, inst, false, .@"else"),
- .switch_block_else_multi => try sema.zirSwitchBlockMulti(block, inst, false, .@"else"),
- .switch_block_under => try sema.zirSwitchBlock(block, inst, false, .under),
- .switch_block_under_multi => try sema.zirSwitchBlockMulti(block, inst, false, .under),
- .switch_block_ref => try sema.zirSwitchBlock(block, inst, true, .none),
- .switch_block_ref_multi => try sema.zirSwitchBlockMulti(block, inst, true, .none),
- .switch_block_ref_else => try sema.zirSwitchBlock(block, inst, true, .@"else"),
- .switch_block_ref_else_multi => try sema.zirSwitchBlockMulti(block, inst, true, .@"else"),
- .switch_block_ref_under => try sema.zirSwitchBlock(block, inst, true, .under),
- .switch_block_ref_under_multi => try sema.zirSwitchBlockMulti(block, inst, true, .under),
+ .switch_block => try sema.zirSwitchBlock(block, inst),
+ .switch_cond => try sema.zirSwitchCond(block, inst, false),
+ .switch_cond_ref => try sema.zirSwitchCond(block, inst, true),
.switch_capture => try sema.zirSwitchCapture(block, inst, false, false),
.switch_capture_ref => try sema.zirSwitchCapture(block, inst, false, true),
.switch_capture_multi => try sema.zirSwitchCapture(block, inst, true, false),
@@ -554,7 +616,6 @@ pub fn analyzeBody(
.size_of => try sema.zirSizeOf(block, inst),
.bit_size_of => try sema.zirBitSizeOf(block, inst),
.typeof => try sema.zirTypeof(block, inst),
- .typeof_elem => try sema.zirTypeofElem(block, inst),
.log2_int_type => try sema.zirLog2IntType(block, inst),
.typeof_log2_int_type => try sema.zirTypeofLog2IntType(block, inst),
.xor => try sema.zirBitwise(block, inst, .xor),
@@ -596,9 +657,6 @@ pub fn analyzeBody(
.pop_count => try sema.zirPopCount(block, inst),
.byte_swap => try sema.zirByteSwap(block, inst),
.bit_reverse => try sema.zirBitReverse(block, inst),
- .div_exact => try sema.zirDivExact(block, inst),
- .div_floor => try sema.zirDivFloor(block, inst),
- .div_trunc => try sema.zirDivTrunc(block, inst),
.shr_exact => try sema.zirShrExact(block, inst),
.bit_offset_of => try sema.zirBitOffsetOf(block, inst),
.offset_of => try sema.zirOffsetOf(block, inst),
@@ -638,19 +696,22 @@ pub fn analyzeBody(
.error_set_decl_anon => try sema.zirErrorSetDecl(block, inst, .anon),
.error_set_decl_func => try sema.zirErrorSetDecl(block, inst, .func),
- .add => try sema.zirArithmetic(block, inst, .add),
- .addwrap => try sema.zirArithmetic(block, inst, .addwrap),
- .add_sat => try sema.zirArithmetic(block, inst, .add_sat),
- .div => try sema.zirArithmetic(block, inst, .div),
- .mod_rem => try sema.zirArithmetic(block, inst, .mod_rem),
- .mod => try sema.zirArithmetic(block, inst, .mod),
- .rem => try sema.zirArithmetic(block, inst, .rem),
- .mul => try sema.zirArithmetic(block, inst, .mul),
- .mulwrap => try sema.zirArithmetic(block, inst, .mulwrap),
- .mul_sat => try sema.zirArithmetic(block, inst, .mul_sat),
- .sub => try sema.zirArithmetic(block, inst, .sub),
- .subwrap => try sema.zirArithmetic(block, inst, .subwrap),
- .sub_sat => try sema.zirArithmetic(block, inst, .sub_sat),
+ .add => try sema.zirArithmetic(block, inst, .add),
+ .addwrap => try sema.zirArithmetic(block, inst, .addwrap),
+ .add_sat => try sema.zirArithmetic(block, inst, .add_sat),
+ .div => try sema.zirArithmetic(block, inst, .div),
+ .div_exact => try sema.zirArithmetic(block, inst, .div_exact),
+ .div_floor => try sema.zirArithmetic(block, inst, .div_floor),
+ .div_trunc => try sema.zirArithmetic(block, inst, .div_trunc),
+ .mod_rem => try sema.zirArithmetic(block, inst, .mod_rem),
+ .mod => try sema.zirArithmetic(block, inst, .mod),
+ .rem => try sema.zirArithmetic(block, inst, .rem),
+ .mul => try sema.zirArithmetic(block, inst, .mul),
+ .mulwrap => try sema.zirArithmetic(block, inst, .mulwrap),
+ .mul_sat => try sema.zirArithmetic(block, inst, .mul_sat),
+ .sub => try sema.zirArithmetic(block, inst, .sub),
+ .subwrap => try sema.zirArithmetic(block, inst, .subwrap),
+ .sub_sat => try sema.zirArithmetic(block, inst, .sub_sat),
.maximum => try sema.zirMinMax(block, inst, .max),
.minimum => try sema.zirMinMax(block, inst, .min),
@@ -1179,6 +1240,22 @@ fn failWithModRemNegative(sema: *Sema, block: *Block, src: LazySrcLoc, lhs_ty: T
return sema.fail(block, src, "remainder division with '{}' and '{}': signed integers and floats must use @rem or @mod", .{ lhs_ty, rhs_ty });
}
+fn failWithExpectedOptionalType(sema: *Sema, block: *Block, src: LazySrcLoc, optional_ty: Type) CompileError {
+ return sema.fail(block, src, "expected optional type, found {}", .{optional_ty});
+}
+
+fn failWithErrorSetCodeMissing(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ dest_err_set_ty: Type,
+ src_err_set_ty: Type,
+) CompileError {
+ return sema.fail(block, src, "expected type '{}', found type '{}'", .{
+ dest_err_set_ty, src_err_set_ty,
+ });
+}
+
/// We don't return a pointer to the new error note because the pointer
/// becomes invalid when you add another one.
fn errNote(
@@ -1328,12 +1405,6 @@ pub fn resolveInstValue(
};
}
-fn zirBitcastResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
- const src = inst_data.src();
- return sema.fail(block, src, "TODO implement zir_sema.zirBitcastResultPtr", .{});
-}
-
fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@@ -1359,7 +1430,7 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
// for the inferred allocation.
// This instruction will not make it to codegen; it is only to participate
// in the `stored_inst_list` of the `inferred_alloc`.
- const operand = try block.addTyOp(.bitcast, pointee_ty, .void_value);
+ const operand = try block.addBitCast(pointee_ty, .void_value);
try inferred_alloc.stored_inst_list.append(sema.arena, operand);
},
.inferred_alloc_comptime => {
@@ -1386,7 +1457,7 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
}
}
try sema.requireRuntimeBlock(block, src);
- const bitcasted_ptr = try block.addTyOp(.bitcast, ptr_ty, ptr);
+ const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr);
return bitcasted_ptr;
}
@@ -1880,7 +1951,7 @@ fn zirRetPtr(
try sema.requireFunctionBlock(block, src);
if (block.is_comptime) {
- return sema.analyzeComptimeAlloc(block, sema.fn_ret_ty);
+ return sema.analyzeComptimeAlloc(block, sema.fn_ret_ty, 0);
}
const ptr_type = try Type.ptr(sema.arena, .{
@@ -1956,44 +2027,38 @@ fn zirIndexablePtrLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
- const array = sema.resolveInst(inst_data.operand);
- const array_ty = sema.typeOf(array);
+ const object = sema.resolveInst(inst_data.operand);
+ const object_ty = sema.typeOf(object);
- if (array_ty.isSlice()) {
- return sema.analyzeSliceLen(block, src, array);
- }
+ const is_pointer_to = object_ty.isSinglePointer();
- if (array_ty.isSinglePointer()) {
- const elem_ty = array_ty.elemType();
- if (elem_ty.isSlice()) {
- const slice_inst = try sema.analyzeLoad(block, src, array, src);
- return sema.analyzeSliceLen(block, src, slice_inst);
- }
- if (!elem_ty.isIndexable()) {
- const msg = msg: {
- const msg = try sema.errMsg(
- block,
- src,
- "type '{}' does not support indexing",
- .{elem_ty},
- );
- errdefer msg.destroy(sema.gpa);
- try sema.errNote(
- block,
- src,
- msg,
- "for loop operand must be an array, slice, tuple, or vector",
- .{},
- );
- break :msg msg;
- };
- return sema.failWithOwnedErrorMsg(msg);
- }
- const result_ptr = try sema.fieldPtr(block, src, array, "len", src);
- return sema.analyzeLoad(block, src, result_ptr, src);
+ const array_ty = if (is_pointer_to)
+ object_ty.childType()
+ else
+ object_ty;
+
+ if (!array_ty.isIndexable()) {
+ const msg = msg: {
+ const msg = try sema.errMsg(
+ block,
+ src,
+ "type '{}' does not support indexing",
+ .{array_ty},
+ );
+ errdefer msg.destroy(sema.gpa);
+ try sema.errNote(
+ block,
+ src,
+ msg,
+ "for loop operand must be an array, slice, tuple, or vector",
+ .{},
+ );
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
}
- return sema.fail(block, src, "TODO implement Sema.zirIndexablePtrLen", .{});
+ return sema.fieldVal(block, src, object, "len", src);
}
fn zirAllocExtended(
@@ -2013,9 +2078,7 @@ fn zirAllocExtended(
const type_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
extra_index += 1;
break :blk try sema.resolveType(block, ty_src, type_ref);
- } else {
- return sema.fail(block, src, "TODO implement Sema.zirAllocExtended inferred", .{});
- };
+ } else undefined;
const alignment: u16 = if (small.has_align) blk: {
const align_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
@@ -2024,22 +2087,47 @@ fn zirAllocExtended(
break :blk alignment;
} else 0;
+ const inferred_alloc_ty = if (small.is_const)
+ Type.initTag(.inferred_alloc_const)
+ else
+ Type.initTag(.inferred_alloc_mut);
+
if (small.is_comptime) {
- return sema.fail(block, src, "TODO implement Sema.zirAllocExtended comptime", .{});
+ if (small.has_type) {
+ return sema.analyzeComptimeAlloc(block, var_ty, alignment);
+ } else {
+ return sema.addConstant(
+ inferred_alloc_ty,
+ try Value.Tag.inferred_alloc_comptime.create(sema.arena, undefined),
+ );
+ }
}
- if (!small.is_const) {
- return sema.fail(block, src, "TODO implement Sema.zirAllocExtended var", .{});
+ if (small.has_type) {
+ if (!small.is_const) {
+ try sema.validateVarType(block, ty_src, var_ty, false);
+ }
+ const ptr_type = try Type.ptr(sema.arena, .{
+ .pointee_type = var_ty,
+ .@"align" = alignment,
+ .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
+ });
+ try sema.requireRuntimeBlock(block, src);
+ try sema.resolveTypeLayout(block, src, var_ty);
+ return block.addTy(.alloc, ptr_type);
}
- const ptr_type = try Type.ptr(sema.arena, .{
- .pointee_type = var_ty,
- .@"align" = alignment,
- .@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
- });
- try sema.requireRuntimeBlock(block, src);
- try sema.resolveTypeLayout(block, src, var_ty);
- return block.addTy(.alloc, ptr_type);
+ // `Sema.addConstant` does not add the instruction to the block because it is
+ // not needed in the case of constant values. However here, we plan to "downgrade"
+ // to a normal instruction when we hit `resolve_inferred_alloc`. So we append
+ // to the block even though it is currently a `.constant`.
+ const result = try sema.addConstant(
+ inferred_alloc_ty,
+ try Value.Tag.inferred_alloc.create(sema.arena, .{}),
+ );
+ try sema.requireFunctionBlock(block, src);
+ try block.instructions.append(sema.gpa, Air.refToIndex(result).?);
+ return result;
}
fn zirAllocComptime(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -2049,7 +2137,7 @@ fn zirAllocComptime(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node };
const var_ty = try sema.resolveType(block, ty_src, inst_data.operand);
- return sema.analyzeComptimeAlloc(block, var_ty);
+ return sema.analyzeComptimeAlloc(block, var_ty, 0);
}
fn zirAllocInferredComptime(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -2071,7 +2159,7 @@ fn zirAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
const var_decl_src = inst_data.src();
const var_ty = try sema.resolveType(block, ty_src, inst_data.operand);
if (block.is_comptime) {
- return sema.analyzeComptimeAlloc(block, var_ty);
+ return sema.analyzeComptimeAlloc(block, var_ty, 0);
}
const ptr_type = try Type.ptr(sema.arena, .{
.pointee_type = var_ty,
@@ -2091,7 +2179,7 @@ fn zirAllocMut(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node };
const var_ty = try sema.resolveType(block, ty_src, inst_data.operand);
if (block.is_comptime) {
- return sema.analyzeComptimeAlloc(block, var_ty);
+ return sema.analyzeComptimeAlloc(block, var_ty, 0);
}
try sema.validateVarType(block, ty_src, var_ty, false);
const ptr_type = try Type.ptr(sema.arena, .{
@@ -2256,11 +2344,21 @@ fn validateUnionInit(
return sema.failWithBadUnionFieldAccess(block, union_obj, field_src, field_name);
const field_index = @intCast(u32, field_index_big);
- // TODO here we need to go back and see if we need to convert the union
- // to a comptime-known value. This will involve editing the AIR code we have
- // generated so far - in particular deleting some runtime pointer bitcast
- // instructions which are not actually needed if the initialization expression
- // ends up being comptime-known.
+ // Handle the possibility of the union value being comptime-known.
+ const union_ptr_inst = Air.refToIndex(sema.resolveInst(field_ptr_extra.lhs)).?;
+ switch (sema.air_instructions.items(.tag)[union_ptr_inst]) {
+ .constant => return, // In this case the tag has already been set. No validation to do.
+ .bitcast => {
+ // TODO here we need to go back and see if we need to convert the union
+ // to a comptime-known value. In such case, we must delete all the instructions
+ // added to the current block starting with the bitcast.
+ // If the bitcast result ptr is an alloc, the alloc should be replaced with
+ // a constant decl_ref.
+ // Otherwise, the bitcast should be preserved and a store instruction should be
+ // emitted to store the constant union value through the bitcast.
+ },
+ else => unreachable,
+ }
// Otherwise, we set the new union tag now.
const new_tag = try sema.addConstant(
@@ -2292,7 +2390,7 @@ fn validateStructInit(
const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data;
const field_name = sema.code.nullTerminatedString(field_ptr_extra.field_name_start);
const field_index = struct_obj.fields.getIndex(field_name) orelse
- return sema.failWithBadFieldAccess(block, struct_obj, field_src, field_name);
+ return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name);
if (found_fields[field_index] != 0) {
const other_field_ptr = found_fields[field_index];
const other_field_ptr_data = sema.code.instructions.items(.data)[other_field_ptr].pl_node;
@@ -2354,7 +2452,32 @@ fn zirValidateArrayInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil
}
}
-fn failWithBadFieldAccess(
+fn failWithBadMemberAccess(
+ sema: *Sema,
+ block: *Block,
+ agg_ty: Type,
+ field_src: LazySrcLoc,
+ field_name: []const u8,
+) CompileError {
+ const kw_name = switch (agg_ty.zigTypeTag()) {
+ .Union => "union",
+ .Struct => "struct",
+ .Opaque => "opaque",
+ .Enum => "enum",
+ else => unreachable,
+ };
+ const msg = msg: {
+ const msg = try sema.errMsg(block, field_src, "{s} '{}' has no member named '{s}'", .{
+ kw_name, agg_ty, field_name,
+ });
+ errdefer msg.destroy(sema.gpa);
+ try sema.addDeclaredHereNote(msg, agg_ty);
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
+}
+
+fn failWithBadStructFieldAccess(
sema: *Sema,
block: *Block,
struct_obj: *Module.Struct,
@@ -2440,7 +2563,7 @@ fn zirStoreToBlockPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
// if expressions should force it when the condition is compile-time known.
const src: LazySrcLoc = .unneeded;
try sema.requireRuntimeBlock(block, src);
- const bitcasted_ptr = try block.addTyOp(.bitcast, ptr_ty, ptr);
+ const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr);
return sema.storePtr(block, src, bitcasted_ptr, value);
}
@@ -2486,7 +2609,7 @@ fn zirStoreToInferredPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compi
.pointee_type = operand_ty,
.@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
});
- const bitcasted_ptr = try block.addTyOp(.bitcast, ptr_ty, ptr);
+ const bitcasted_ptr = try block.addBitCast(ptr_ty, ptr);
return sema.storePtr(block, src, bitcasted_ptr, operand);
}
unreachable;
@@ -3985,18 +4108,20 @@ fn analyzeCall(
zir_tags,
);
} else res: {
+ try sema.requireRuntimeBlock(block, call_src);
+
const args = try sema.arena.alloc(Air.Inst.Ref, uncasted_args.len);
for (uncasted_args) |uncasted_arg, i| {
+ const arg_src = call_src; // TODO: better source location
if (i < fn_params_len) {
const param_ty = func_ty.fnParamType(i);
- const arg_src = call_src; // TODO: better source location
+ try sema.resolveTypeLayout(block, arg_src, param_ty);
args[i] = try sema.coerce(block, param_ty, uncasted_arg, arg_src);
} else {
args[i] = uncasted_arg;
}
}
- try sema.requireRuntimeBlock(block, call_src);
try sema.resolveTypeLayout(block, call_src, func_ty_info.return_type);
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Call).Struct.fields.len +
@@ -4067,6 +4192,7 @@ fn finishGenericCall(
const param_ty = new_fn_ty.fnParamType(runtime_i);
const arg_src = call_src; // TODO: better source location
const uncasted_arg = uncasted_args[total_i];
+ try sema.resolveTypeLayout(block, arg_src, param_ty);
const casted_arg = try sema.coerce(block, param_ty, uncasted_arg, arg_src);
runtime_args[runtime_i] = casted_arg;
runtime_i += 1;
@@ -4241,7 +4367,7 @@ fn zirErrorToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
}
try sema.requireRuntimeBlock(block, src);
- return block.addTyOp(.bitcast, result_ty, op_coerced);
+ return block.addBitCast(result_ty, op_coerced);
}
fn zirIntToError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -4271,7 +4397,13 @@ fn zirIntToError(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
// const is_gt_max = @panic("TODO get max errors in compilation");
// try sema.addSafetyCheck(block, is_gt_max, .invalid_error_code);
}
- return block.addTyOp(.bitcast, Type.anyerror, op);
+ return block.addInst(.{
+ .tag = .bitcast,
+ .data = .{ .ty_op = .{
+ .ty = Air.Inst.Ref.anyerror_type,
+ .operand = op,
+ } },
+ });
}
fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -4414,7 +4546,7 @@ fn zirEnumToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
}
try sema.requireRuntimeBlock(block, src);
- return block.addTyOp(.bitcast, int_tag_ty, enum_tag);
+ return block.addBitCast(int_tag_ty, enum_tag);
}
fn zirIntToEnum(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -4494,7 +4626,7 @@ fn zirOptionalPayloadPtr(
});
if (try sema.resolveDefinedValue(block, src, optional_ptr)) |pointer_val| {
- if (try pointer_val.pointerDeref(sema.arena)) |val| {
+ if (try sema.pointerDeref(block, src, pointer_val, optional_ptr_ty)) |val| {
if (val.isNull()) {
return sema.fail(block, src, "unable to unwrap null", .{});
}
@@ -4528,19 +4660,23 @@ fn zirOptionalPayload(
const src = inst_data.src();
const operand = sema.resolveInst(inst_data.operand);
const operand_ty = sema.typeOf(operand);
- const opt_type = operand_ty;
- if (opt_type.zigTypeTag() != .Optional) {
- return sema.fail(block, src, "expected optional type, found {}", .{opt_type});
- }
-
- const child_type = try opt_type.optionalChildAlloc(sema.arena);
+ const result_ty = switch (operand_ty.zigTypeTag()) {
+ .Optional => try operand_ty.optionalChildAlloc(sema.arena),
+ .Pointer => t: {
+ if (operand_ty.ptrSize() != .C) {
+ return sema.failWithExpectedOptionalType(block, src, operand_ty);
+ }
+ break :t operand_ty;
+ },
+ else => return sema.failWithExpectedOptionalType(block, src, operand_ty),
+ };
if (try sema.resolveDefinedValue(block, src, operand)) |val| {
if (val.isNull()) {
return sema.fail(block, src, "unable to unwrap null", .{});
}
const sub_val = val.castTag(.opt_payload).?.data;
- return sema.addConstant(child_type, sub_val);
+ return sema.addConstant(result_ty, sub_val);
}
try sema.requireRuntimeBlock(block, src);
@@ -4548,7 +4684,7 @@ fn zirOptionalPayload(
const is_non_null = try block.addUnOp(.is_non_null, operand);
try sema.addSafetyCheck(block, is_non_null, .unwrap_null);
}
- return block.addTyOp(.optional_payload, child_type, operand);
+ return block.addTyOp(.optional_payload, result_ty, operand);
}
/// Value in, value out
@@ -4613,7 +4749,7 @@ fn zirErrUnionPayloadPtr(
});
if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| {
- if (try pointer_val.pointerDeref(sema.arena)) |val| {
+ if (try sema.pointerDeref(block, src, pointer_val, operand_ty)) |val| {
if (val.getError()) |name| {
return sema.fail(block, src, "caught unexpected error '{s}'", .{name});
}
@@ -4672,7 +4808,7 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
const result_ty = operand_ty.elemType().errorUnionSet();
if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| {
- if (try pointer_val.pointerDeref(sema.arena)) |val| {
+ if (try sema.pointerDeref(block, src, pointer_val, operand_ty)) |val| {
assert(val.getError() != null);
return sema.addConstant(result_ty, val);
}
@@ -4731,7 +4867,7 @@ fn zirFunc(
body_inst,
ret_ty_body,
cc,
- Value.initTag(.null_value),
+ Value.@"null",
false,
inferred_error_set,
false,
@@ -4845,6 +4981,8 @@ fn funcCommon(
const error_set_ty = try Type.Tag.error_set_inferred.create(sema.arena, .{
.func = new_func,
.map = .{},
+ .functions = .{},
+ .is_anyerror = false,
});
break :blk try Type.Tag.error_union.create(sema.arena, .{
.error_set = error_set_ty,
@@ -5105,16 +5243,10 @@ fn zirFieldVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node };
- const lhs_src: LazySrcLoc = src; // TODO
const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data;
const field_name = sema.code.nullTerminatedString(extra.field_name_start);
const object = sema.resolveInst(extra.lhs);
- if (sema.typeOf(object).isSinglePointer()) {
- const result_ptr = try sema.fieldPtr(block, src, object, field_name, field_name_src);
- return sema.analyzeLoad(block, src, result_ptr, lhs_src);
- } else {
- return sema.fieldVal(block, src, object, field_name, field_name_src);
- }
+ return sema.fieldVal(block, src, object, field_name, field_name_src);
}
fn zirFieldPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -5388,11 +5520,80 @@ fn zirSwitchCapture(
const zir_datas = sema.code.instructions.items(.data);
const capture_info = zir_datas[inst].switch_capture;
const switch_info = zir_datas[capture_info.switch_inst].pl_node;
- const src = switch_info.src();
+ const switch_extra = sema.code.extraData(Zir.Inst.SwitchBlock, switch_info.payload_index);
+ const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = switch_info.src_node };
+ const switch_src = switch_info.src();
+ const operand_is_ref = switch_extra.data.bits.is_ref;
+ const cond_inst = Zir.refToIndex(switch_extra.data.operand).?;
+ const cond_info = sema.code.instructions.items(.data)[cond_inst].un_node;
+ const operand_ptr = sema.resolveInst(cond_info.operand);
+ const operand_ptr_ty = sema.typeOf(operand_ptr);
+ const operand_ty = if (operand_is_ref) operand_ptr_ty.childType() else operand_ptr_ty;
+
+ if (is_multi) {
+ return sema.fail(block, switch_src, "TODO implement Sema for switch capture multi", .{});
+ }
+ const scalar_prong = switch_extra.data.getScalarProng(sema.code, switch_extra.end, capture_info.prong_index);
+ const item = sema.resolveInst(scalar_prong.item);
+ // Previous switch validation ensured this will succeed
+ const item_val = sema.resolveConstValue(block, .unneeded, item) catch unreachable;
- _ = is_ref;
- _ = is_multi;
- return sema.fail(block, src, "TODO implement Sema for zirSwitchCapture", .{});
+ switch (operand_ty.zigTypeTag()) {
+ .Union => {
+ const union_obj = operand_ty.cast(Type.Payload.Union).?.data;
+ const enum_ty = union_obj.tag_ty;
+
+ const field_index_usize = enum_ty.enumTagFieldIndex(item_val).?;
+ const field_index = @intCast(u32, field_index_usize);
+ const field = union_obj.fields.values()[field_index];
+
+ // TODO handle multiple union tags which have compatible types
+
+ if (is_ref) {
+ assert(operand_is_ref);
+
+ const field_ty_ptr = try Type.ptr(sema.arena, .{
+ .pointee_type = field.ty,
+ .@"addrspace" = .generic,
+ .mutable = operand_ptr_ty.ptrIsMutable(),
+ });
+
+ if (try sema.resolveDefinedValue(block, operand_src, operand_ptr)) |op_ptr_val| {
+ return sema.addConstant(
+ field_ty_ptr,
+ try Value.Tag.field_ptr.create(sema.arena, .{
+ .container_ptr = op_ptr_val,
+ .field_index = field_index,
+ }),
+ );
+ }
+ try sema.requireRuntimeBlock(block, operand_src);
+ return block.addStructFieldPtr(operand_ptr, field_index, field_ty_ptr);
+ }
+
+ const operand = if (operand_is_ref)
+ try sema.analyzeLoad(block, operand_src, operand_ptr, operand_src)
+ else
+ operand_ptr;
+
+ if (try sema.resolveDefinedValue(block, operand_src, operand)) |operand_val| {
+ return sema.addConstant(
+ field.ty,
+ operand_val.castTag(.@"union").?.data.val,
+ );
+ }
+ try sema.requireRuntimeBlock(block, operand_src);
+ return block.addStructFieldVal(operand, field_index, field.ty);
+ },
+ .ErrorSet => {
+ return sema.fail(block, operand_src, "TODO implement Sema for zirSwitchCapture for error sets", .{});
+ },
+ else => {
+ return sema.fail(block, operand_src, "switch on type '{}' provides no capture value", .{
+ operand_ty,
+ });
+ },
+ }
}
fn zirSwitchCaptureElse(
@@ -5407,96 +5608,104 @@ fn zirSwitchCaptureElse(
const zir_datas = sema.code.instructions.items(.data);
const capture_info = zir_datas[inst].switch_capture;
const switch_info = zir_datas[capture_info.switch_inst].pl_node;
+ const switch_extra = sema.code.extraData(Zir.Inst.SwitchBlock, switch_info.payload_index).data;
const src = switch_info.src();
+ const operand_is_ref = switch_extra.bits.is_ref;
+ assert(!is_ref or operand_is_ref);
- _ = is_ref;
return sema.fail(block, src, "TODO implement Sema for zirSwitchCaptureElse", .{});
}
-fn zirSwitchBlock(
+fn zirSwitchCond(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
is_ref: bool,
- special_prong: Zir.SpecialProng,
) CompileError!Air.Inst.Ref {
- const tracy = trace(@src());
- defer tracy.end();
-
- const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
+ const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
- const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = inst_data.src_node };
- const extra = sema.code.extraData(Zir.Inst.SwitchBlock, inst_data.payload_index);
+ const operand_ptr = sema.resolveInst(inst_data.operand);
+ const operand = if (is_ref) try sema.analyzeLoad(block, src, operand_ptr, src) else operand_ptr;
+ const operand_ty = sema.typeOf(operand);
- const operand_ptr = sema.resolveInst(extra.data.operand);
- const operand = if (is_ref)
- try sema.analyzeLoad(block, src, operand_ptr, operand_src)
- else
- operand_ptr;
+ switch (operand_ty.zigTypeTag()) {
+ .Type,
+ .Void,
+ .Bool,
+ .Int,
+ .Float,
+ .ComptimeFloat,
+ .ComptimeInt,
+ .EnumLiteral,
+ .Pointer,
+ .Fn,
+ .ErrorSet,
+ .Enum,
+ => {
+ if ((try sema.typeHasOnePossibleValue(block, src, operand_ty))) |opv| {
+ return sema.addConstant(operand_ty, opv);
+ }
+ return operand;
+ },
- return sema.analyzeSwitch(
- block,
- operand,
- extra.end,
- special_prong,
- extra.data.cases_len,
- 0,
- inst,
- inst_data.src_node,
- );
+ .Union => {
+ const enum_ty = operand_ty.unionTagType() orelse {
+ const msg = msg: {
+ const msg = try sema.errMsg(block, src, "switch on untagged union", .{});
+ errdefer msg.destroy(sema.gpa);
+ try sema.addDeclaredHereNote(msg, operand_ty);
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
+ };
+ return sema.unionToTag(block, enum_ty, operand, src);
+ },
+
+ .ErrorUnion,
+ .NoReturn,
+ .Array,
+ .Struct,
+ .Undefined,
+ .Null,
+ .Optional,
+ .BoundFn,
+ .Opaque,
+ .Vector,
+ .Frame,
+ .AnyFrame,
+ => return sema.fail(block, src, "switch on type '{}'", .{operand_ty}),
+ }
}
-fn zirSwitchBlockMulti(
- sema: *Sema,
- block: *Block,
- inst: Zir.Inst.Index,
- is_ref: bool,
- special_prong: Zir.SpecialProng,
-) CompileError!Air.Inst.Ref {
+fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
+ const gpa = sema.gpa;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
- const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = inst_data.src_node };
- const extra = sema.code.extraData(Zir.Inst.SwitchBlockMulti, inst_data.payload_index);
+ const src_node_offset = inst_data.src_node;
+ const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = src_node_offset };
+ const special_prong_src: LazySrcLoc = .{ .node_offset_switch_special_prong = src_node_offset };
+ const extra = sema.code.extraData(Zir.Inst.SwitchBlock, inst_data.payload_index);
- const operand_ptr = sema.resolveInst(extra.data.operand);
- const operand = if (is_ref)
- try sema.analyzeLoad(block, src, operand_ptr, operand_src)
- else
- operand_ptr;
+ const operand = sema.resolveInst(extra.data.operand);
- return sema.analyzeSwitch(
- block,
- operand,
- extra.end,
- special_prong,
- extra.data.scalar_cases_len,
- extra.data.multi_cases_len,
- inst,
- inst_data.src_node,
- );
-}
+ var header_extra_index: usize = extra.end;
-fn analyzeSwitch(
- sema: *Sema,
- block: *Block,
- operand: Air.Inst.Ref,
- extra_end: usize,
- special_prong: Zir.SpecialProng,
- scalar_cases_len: usize,
- multi_cases_len: usize,
- switch_inst: Zir.Inst.Index,
- src_node_offset: i32,
-) CompileError!Air.Inst.Ref {
- const gpa = sema.gpa;
+ const scalar_cases_len = extra.data.bits.scalar_cases_len;
+ const multi_cases_len = if (extra.data.bits.has_multi_cases) blk: {
+ const multi_cases_len = sema.code.extra[header_extra_index];
+ header_extra_index += 1;
+ break :blk multi_cases_len;
+ } else 0;
+ const special_prong = extra.data.bits.specialProng();
const special: struct { body: []const Zir.Inst.Index, end: usize } = switch (special_prong) {
- .none => .{ .body = &.{}, .end = extra_end },
+ .none => .{ .body = &.{}, .end = header_extra_index },
.under, .@"else" => blk: {
- const body_len = sema.code.extra[extra_end];
- const extra_body_start = extra_end + 1;
+ const body_len = sema.code.extra[header_extra_index];
+ const extra_body_start = header_extra_index + 1;
break :blk .{
.body = sema.code.extra[extra_body_start..][0..body_len],
.end = extra_body_start + body_len,
@@ -5504,9 +5713,6 @@ fn analyzeSwitch(
},
};
- const src: LazySrcLoc = .{ .node_offset = src_node_offset };
- const special_prong_src: LazySrcLoc = .{ .node_offset_switch_special_prong = src_node_offset };
- const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = src_node_offset };
const operand_ty = sema.typeOf(operand);
// Validate usage of '_' prongs.
@@ -5900,7 +6106,7 @@ fn analyzeSwitch(
.data = undefined,
});
var label: Block.Label = .{
- .zir_block = switch_inst,
+ .zir_block = inst,
.merges = .{
.results = .{},
.br_list = .{},
@@ -6173,14 +6379,22 @@ fn analyzeSwitch(
}
var final_else_body: []const Air.Inst.Index = &.{};
- if (special.body.len != 0) {
+ if (special.body.len != 0 or !is_first) {
var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, child_block.wip_capture_scope);
defer wip_captures.deinit();
case_block.instructions.shrinkRetainingCapacity(0);
case_block.wip_capture_scope = wip_captures.scope;
- _ = try sema.analyzeBody(&case_block, special.body);
+ if (special.body.len != 0) {
+ _ = try sema.analyzeBody(&case_block, special.body);
+ } else {
+ // We still need a terminator in this block, but we have proven
+ // that it is unreachable.
+ // TODO this should be a special safety panic other than unreachable, something
+ // like "panic: switch operand had corrupt value not allowed by the type"
+ try case_block.addUnreachable(src, true);
+ }
try wip_captures.finalize();
@@ -6411,10 +6625,33 @@ fn validateSwitchNoRange(
fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
- _ = extra;
- const src = inst_data.src();
-
- return sema.fail(block, src, "TODO implement zirHasField", .{});
+ const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
+ const name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
+ const unresolved_ty = try sema.resolveType(block, ty_src, extra.lhs);
+ const field_name = try sema.resolveConstString(block, name_src, extra.rhs);
+ const ty = try sema.resolveTypeFields(block, ty_src, unresolved_ty);
+
+ const has_field = hf: {
+ if (ty.isSlice()) {
+ if (mem.eql(u8, field_name, "ptr")) break :hf true;
+ if (mem.eql(u8, field_name, "len")) break :hf true;
+ break :hf false;
+ }
+ break :hf switch (ty.zigTypeTag()) {
+ .Struct => ty.structFields().contains(field_name),
+ .Union => ty.unionFields().contains(field_name),
+ .Enum => ty.enumFields().contains(field_name),
+ .Array => mem.eql(u8, field_name, "len"),
+ else => return sema.fail(block, ty_src, "type '{}' does not support '@hasField'", .{
+ ty,
+ }),
+ };
+ };
+ if (has_field) {
+ return Air.Inst.Ref.bool_true;
+ } else {
+ return Air.Inst.Ref.bool_false;
+ }
}
fn zirHasDecl(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -6465,6 +6702,45 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
return sema.addConstant(file_root_decl.ty, file_root_decl.val);
}
+fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ const mod = sema.mod;
+ const inst_data = sema.code.instructions.items(.data)[inst].un_node;
+ const src = inst_data.src();
+ const name = try sema.resolveConstString(block, src, inst_data.operand);
+
+ const embed_file = mod.embedFile(block.getFileScope(), name) catch |err| switch (err) {
+ error.ImportOutsidePkgPath => {
+ return sema.fail(block, src, "embed of file outside package path: '{s}'", .{name});
+ },
+ else => {
+ // TODO: these errors are file system errors; make sure an update() will
+ // retry this and not cache the file system error, which may be transient.
+ return sema.fail(block, src, "unable to open '{s}': {s}", .{ name, @errorName(err) });
+ },
+ };
+
+ var anon_decl = try block.startAnonDecl();
+ defer anon_decl.deinit();
+
+ const bytes_including_null = embed_file.bytes[0 .. embed_file.bytes.len + 1];
+
+ // TODO instead of using `Value.Tag.bytes`, create a new value tag for pointing at
+ // a `*Module.EmbedFile`. The purpose of this would be:
+ // - If only the length is read and the bytes are not inspected by comptime code,
+ // there can be an optimization where the codegen backend does a copy_file_range
+ // into the final binary, and never loads the data into memory.
+ // - When a Decl is destroyed, it can free the `*Module.EmbedFile`.
+ embed_file.owner_decl = try anon_decl.finish(
+ try Type.Tag.array_u8_sentinel_0.create(anon_decl.arena(), embed_file.bytes.len),
+ try Value.Tag.bytes.create(anon_decl.arena(), bytes_including_null),
+ );
+
+ return sema.analyzeDeclRef(embed_file.owner_decl);
+}
+
fn zirRetErrValueCode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
_ = block;
_ = inst;
@@ -6627,8 +6903,42 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const tracy = trace(@src());
defer tracy.end();
- _ = inst;
- return sema.fail(block, sema.src, "TODO implement zirBitNot", .{});
+ const inst_data = sema.code.instructions.items(.data)[inst].un_node;
+ const src = inst_data.src();
+ const operand_src = src; // TODO put this on the operand, not the '~'
+
+ const operand = sema.resolveInst(inst_data.operand);
+ const operand_type = sema.typeOf(operand);
+ const scalar_type = operand_type.scalarType();
+
+ if (scalar_type.zigTypeTag() != .Int) {
+ return sema.fail(block, src, "unable to perform binary not operation on type '{}'", .{operand_type});
+ }
+
+ if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| {
+ const target = sema.mod.getTarget();
+ if (val.isUndef()) {
+ return sema.addConstUndef(scalar_type);
+ } else if (operand_type.zigTypeTag() == .Vector) {
+ const vec_len = operand_type.arrayLen();
+ var elem_val_buf: Value.ElemValueBuffer = undefined;
+ const elems = try sema.arena.alloc(Value, vec_len);
+ for (elems) |*elem, i| {
+ const elem_val = val.elemValueBuffer(i, &elem_val_buf);
+ elem.* = try elem_val.bitwiseNot(scalar_type, sema.arena, target);
+ }
+ return sema.addConstant(
+ operand_type,
+ try Value.Tag.array.create(sema.arena, elems),
+ );
+ } else {
+ const result_val = try val.bitwiseNot(scalar_type, sema.arena, target);
+ return sema.addConstant(scalar_type, result_val);
+ }
+ }
+
+ try sema.requireRuntimeBlock(block, src);
+ return block.addTyOp(.not, operand_type, operand);
}
fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -6666,11 +6976,11 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const final_len = lhs_info.len + rhs_info.len;
const final_len_including_sent = final_len + @boolToInt(res_sent != null);
const is_pointer = lhs_ty.zigTypeTag() == .Pointer;
+ const lhs_sub_val = if (is_pointer) (try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).? else lhs_val;
+ const rhs_sub_val = if (is_pointer) (try sema.pointerDeref(block, rhs_src, rhs_val, rhs_ty)).? else rhs_val;
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
- const lhs_sub_val = if (is_pointer) (try lhs_val.pointerDeref(anon_decl.arena())).? else lhs_val;
- const rhs_sub_val = if (is_pointer) (try rhs_val.pointerDeref(anon_decl.arena())).? else rhs_val;
const buf = try anon_decl.arena().alloc(Value, final_len_including_sent);
{
var i: u64 = 0;
@@ -6690,18 +7000,20 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
buf[final_len] = try rs.copy(anon_decl.arena());
break :ty try Type.Tag.array_sentinel.create(anon_decl.arena(), .{
.len = final_len,
- .elem_type = lhs_info.elem_type,
- .sentinel = rs,
+ .elem_type = try lhs_info.elem_type.copy(anon_decl.arena()),
+ .sentinel = try rs.copy(anon_decl.arena()),
});
} else try Type.Tag.array.create(anon_decl.arena(), .{
.len = final_len,
- .elem_type = lhs_info.elem_type,
+ .elem_type = try lhs_info.elem_type.copy(anon_decl.arena()),
});
const val = try Value.Tag.array.create(anon_decl.arena(), buf);
- return if (is_pointer)
- sema.analyzeDeclRef(try anon_decl.finish(ty, val))
- else
- sema.analyzeDeclVal(block, .unneeded, try anon_decl.finish(ty, val));
+ const decl = try anon_decl.finish(ty, val);
+ if (is_pointer) {
+ return sema.analyzeDeclRef(decl);
+ } else {
+ return sema.analyzeDeclVal(block, .unneeded, decl);
+ }
} else {
return sema.fail(block, lhs_src, "TODO runtime array_cat", .{});
}
@@ -6735,40 +7047,43 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
// In `**` rhs has to be comptime-known, but lhs can be runtime-known
- const tomulby = try sema.resolveInt(block, rhs_src, extra.rhs, Type.usize);
+ const factor = try sema.resolveInt(block, rhs_src, extra.rhs, Type.usize);
const mulinfo = getArrayCatInfo(lhs_ty) orelse
return sema.fail(block, lhs_src, "expected array, found '{}'", .{lhs_ty});
- const final_len = std.math.mul(u64, mulinfo.len, tomulby) catch
+ const final_len = std.math.mul(u64, mulinfo.len, factor) catch
return sema.fail(block, rhs_src, "operation results in overflow", .{});
const final_len_including_sent = final_len + @boolToInt(mulinfo.sentinel != null);
if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| {
+ const lhs_sub_val = if (lhs_ty.zigTypeTag() == .Pointer) (try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).? else lhs_val;
+
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
- const lhs_sub_val = if (lhs_ty.zigTypeTag() == .Pointer) (try lhs_val.pointerDeref(anon_decl.arena())).? else lhs_val;
const final_ty = if (mulinfo.sentinel) |sent|
try Type.Tag.array_sentinel.create(anon_decl.arena(), .{
.len = final_len,
- .elem_type = mulinfo.elem_type,
- .sentinel = sent,
+ .elem_type = try mulinfo.elem_type.copy(anon_decl.arena()),
+ .sentinel = try sent.copy(anon_decl.arena()),
})
else
try Type.Tag.array.create(anon_decl.arena(), .{
.len = final_len,
- .elem_type = mulinfo.elem_type,
+ .elem_type = try mulinfo.elem_type.copy(anon_decl.arena()),
});
const buf = try anon_decl.arena().alloc(Value, final_len_including_sent);
- // handles the optimisation where arr.len == 0 : [_]T { X } ** N
+ // Optimization for the common pattern of a single element repeated N times, such
+ // as zero-filling a byte array.
const val = if (mulinfo.len == 1) blk: {
- const copied_val = try (try lhs_sub_val.elemValue(sema.arena, 0)).copy(anon_decl.arena());
+ const elem_val = try lhs_sub_val.elemValue(sema.arena, 0);
+ const copied_val = try elem_val.copy(anon_decl.arena());
break :blk try Value.Tag.repeated.create(anon_decl.arena(), copied_val);
} else blk: {
// the actual loop
var i: u64 = 0;
- while (i < tomulby) : (i += 1) {
+ while (i < factor) : (i += 1) {
var j: u64 = 0;
while (j < mulinfo.len) : (j += 1) {
const val = try lhs_sub_val.elemValue(sema.arena, j);
@@ -6780,10 +7095,11 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
break :blk try Value.Tag.array.create(anon_decl.arena(), buf);
};
+ const decl = try anon_decl.finish(final_ty, val);
if (lhs_ty.zigTypeTag() == .Pointer) {
- return sema.analyzeDeclRef(try anon_decl.finish(final_ty, val));
+ return sema.analyzeDeclRef(decl);
} else {
- return sema.analyzeDeclVal(block, .unneeded, try anon_decl.finish(final_ty, val));
+ return sema.analyzeDeclVal(block, .unneeded, decl);
}
}
return sema.fail(block, lhs_src, "TODO runtime array_mul", .{});
@@ -6872,7 +7188,6 @@ fn analyzeArithmetic(
if (lhs_zig_ty_tag == .Pointer) switch (lhs_ty.ptrSize()) {
.One, .Slice => {},
.Many, .C => {
- // Pointer arithmetic.
const op_src = src; // TODO better source location
const air_tag: Air.Inst.Tag = switch (zir_tag) {
.add => .ptr_add,
@@ -6884,24 +7199,7 @@ fn analyzeArithmetic(
.{@tagName(zir_tag)},
),
};
- // TODO if the operand is comptime-known to be negative, or is a negative int,
- // coerce to isize instead of usize.
- const casted_rhs = try sema.coerce(block, Type.usize, rhs, rhs_src);
- const runtime_src = runtime_src: {
- if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| {
- if (try sema.resolveDefinedValue(block, rhs_src, casted_rhs)) |rhs_val| {
- _ = lhs_val;
- _ = rhs_val;
- return sema.fail(block, src, "TODO implement Sema for comptime pointer arithmetic", .{});
- } else {
- break :runtime_src rhs_src;
- }
- } else {
- break :runtime_src lhs_src;
- }
- };
- try sema.requireRuntimeBlock(block, runtime_src);
- return block.addBinOp(air_tag, lhs, casted_rhs);
+ return analyzePtrArithmetic(sema, block, op_src, lhs, rhs, air_tag, lhs_src, rhs_src);
},
};
@@ -7121,6 +7419,9 @@ fn analyzeArithmetic(
} else break :rs .{ .src = lhs_src, .air_tag = .sub_sat };
},
.div => {
+ // TODO: emit compile error when .div is used on integers and there would be an
+ // ambiguous result between div_floor and div_trunc.
+
// For integers:
// If the lhs is zero, then zero is returned regardless of rhs.
// If the rhs is zero, compile error for division by zero.
@@ -7130,9 +7431,11 @@ fn analyzeArithmetic(
// * if lhs type is signed:
// * if rhs is comptime-known and not -1, result is undefined
// * if rhs is -1 or runtime-known, compile error because there is a
- // possible value (-min_int * -1) for which division would be
+ // possible value (-min_int / -1) for which division would be
// illegal behavior.
// * if lhs type is unsigned, undef is returned regardless of rhs.
+ // TODO: emit runtime safety for division by zero
+ //
// For floats:
// If the rhs is zero, compile error for division by zero.
// If the rhs is undefined, compile error because there is a possible
@@ -7178,8 +7481,198 @@ fn analyzeArithmetic(
try lhs_val.floatDiv(rhs_val, scalar_type, sema.arena),
);
}
- } else break :rs .{ .src = rhs_src, .air_tag = .div };
- } else break :rs .{ .src = lhs_src, .air_tag = .div };
+ } else {
+ if (is_int) {
+ break :rs .{ .src = rhs_src, .air_tag = .div_trunc };
+ } else {
+ break :rs .{ .src = rhs_src, .air_tag = .div_float };
+ }
+ }
+ } else {
+ if (is_int) {
+ break :rs .{ .src = lhs_src, .air_tag = .div_trunc };
+ } else {
+ break :rs .{ .src = lhs_src, .air_tag = .div_float };
+ }
+ }
+ },
+ .div_trunc => {
+ // For integers:
+ // If the lhs is zero, then zero is returned regardless of rhs.
+ // If the rhs is zero, compile error for division by zero.
+ // If the rhs is undefined, compile error because there is a possible
+ // value (zero) for which the division would be illegal behavior.
+ // If the lhs is undefined:
+ // * if lhs type is signed:
+ // * if rhs is comptime-known and not -1, result is undefined
+ // * if rhs is -1 or runtime-known, compile error because there is a
+ // possible value (-min_int / -1) for which division would be
+ // illegal behavior.
+ // * if lhs type is unsigned, undef is returned regardless of rhs.
+ // TODO: emit runtime safety for division by zero
+ //
+ // For floats:
+ // If the rhs is zero, compile error for division by zero.
+ // If the rhs is undefined, compile error because there is a possible
+ // value (zero) for which the division would be illegal behavior.
+ // If the lhs is undefined, result is undefined.
+ if (maybe_lhs_val) |lhs_val| {
+ if (!lhs_val.isUndef()) {
+ if (lhs_val.compareWithZero(.eq)) {
+ return sema.addConstant(scalar_type, Value.zero);
+ }
+ }
+ }
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.isUndef()) {
+ return sema.failWithUseOfUndef(block, rhs_src);
+ }
+ if (rhs_val.compareWithZero(.eq)) {
+ return sema.failWithDivideByZero(block, rhs_src);
+ }
+ }
+ if (maybe_lhs_val) |lhs_val| {
+ if (lhs_val.isUndef()) {
+ if (lhs_ty.isSignedInt() and rhs_ty.isSignedInt()) {
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.compare(.neq, Value.negative_one, scalar_type)) {
+ return sema.addConstUndef(scalar_type);
+ }
+ }
+ return sema.failWithUseOfUndef(block, rhs_src);
+ }
+ return sema.addConstUndef(scalar_type);
+ }
+
+ if (maybe_rhs_val) |rhs_val| {
+ if (is_int) {
+ return sema.addConstant(
+ scalar_type,
+ try lhs_val.intDiv(rhs_val, sema.arena),
+ );
+ } else {
+ return sema.addConstant(
+ scalar_type,
+ try lhs_val.floatDivTrunc(rhs_val, scalar_type, sema.arena),
+ );
+ }
+ } else break :rs .{ .src = rhs_src, .air_tag = .div_trunc };
+ } else break :rs .{ .src = lhs_src, .air_tag = .div_trunc };
+ },
+ .div_floor => {
+ // For integers:
+ // If the lhs is zero, then zero is returned regardless of rhs.
+ // If the rhs is zero, compile error for division by zero.
+ // If the rhs is undefined, compile error because there is a possible
+ // value (zero) for which the division would be illegal behavior.
+ // If the lhs is undefined:
+ // * if lhs type is signed:
+ // * if rhs is comptime-known and not -1, result is undefined
+ // * if rhs is -1 or runtime-known, compile error because there is a
+ // possible value (-min_int / -1) for which division would be
+ // illegal behavior.
+ // * if lhs type is unsigned, undef is returned regardless of rhs.
+ // TODO: emit runtime safety for division by zero
+ //
+ // For floats:
+ // If the rhs is zero, compile error for division by zero.
+ // If the rhs is undefined, compile error because there is a possible
+ // value (zero) for which the division would be illegal behavior.
+ // If the lhs is undefined, result is undefined.
+ if (maybe_lhs_val) |lhs_val| {
+ if (!lhs_val.isUndef()) {
+ if (lhs_val.compareWithZero(.eq)) {
+ return sema.addConstant(scalar_type, Value.zero);
+ }
+ }
+ }
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.isUndef()) {
+ return sema.failWithUseOfUndef(block, rhs_src);
+ }
+ if (rhs_val.compareWithZero(.eq)) {
+ return sema.failWithDivideByZero(block, rhs_src);
+ }
+ }
+ if (maybe_lhs_val) |lhs_val| {
+ if (lhs_val.isUndef()) {
+ if (lhs_ty.isSignedInt() and rhs_ty.isSignedInt()) {
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.compare(.neq, Value.negative_one, scalar_type)) {
+ return sema.addConstUndef(scalar_type);
+ }
+ }
+ return sema.failWithUseOfUndef(block, rhs_src);
+ }
+ return sema.addConstUndef(scalar_type);
+ }
+
+ if (maybe_rhs_val) |rhs_val| {
+ if (is_int) {
+ return sema.addConstant(
+ scalar_type,
+ try lhs_val.intDivFloor(rhs_val, sema.arena),
+ );
+ } else {
+ return sema.addConstant(
+ scalar_type,
+ try lhs_val.floatDivFloor(rhs_val, scalar_type, sema.arena),
+ );
+ }
+ } else break :rs .{ .src = rhs_src, .air_tag = .div_floor };
+ } else break :rs .{ .src = lhs_src, .air_tag = .div_floor };
+ },
+ .div_exact => {
+ // For integers:
+ // If the lhs is zero, then zero is returned regardless of rhs.
+ // If the rhs is zero, compile error for division by zero.
+ // If the rhs is undefined, compile error because there is a possible
+ // value (zero) for which the division would be illegal behavior.
+ // If the lhs is undefined, compile error because there is a possible
+ // value for which the division would result in a remainder.
+            // TODO: emit a runtime safety check for when there is a remainder
+ // TODO: emit runtime safety for division by zero
+ //
+ // For floats:
+ // If the rhs is zero, compile error for division by zero.
+ // If the rhs is undefined, compile error because there is a possible
+ // value (zero) for which the division would be illegal behavior.
+ // If the lhs is undefined, compile error because there is a possible
+ // value for which the division would result in a remainder.
+ if (maybe_lhs_val) |lhs_val| {
+ if (lhs_val.isUndef()) {
+ return sema.failWithUseOfUndef(block, rhs_src);
+ } else {
+ if (lhs_val.compareWithZero(.eq)) {
+ return sema.addConstant(scalar_type, Value.zero);
+ }
+ }
+ }
+ if (maybe_rhs_val) |rhs_val| {
+ if (rhs_val.isUndef()) {
+ return sema.failWithUseOfUndef(block, rhs_src);
+ }
+ if (rhs_val.compareWithZero(.eq)) {
+ return sema.failWithDivideByZero(block, rhs_src);
+ }
+ }
+ if (maybe_lhs_val) |lhs_val| {
+ if (maybe_rhs_val) |rhs_val| {
+ if (is_int) {
+ // TODO: emit compile error if there is a remainder
+ return sema.addConstant(
+ scalar_type,
+ try lhs_val.intDiv(rhs_val, sema.arena),
+ );
+ } else {
+ // TODO: emit compile error if there is a remainder
+ return sema.addConstant(
+ scalar_type,
+ try lhs_val.floatDiv(rhs_val, scalar_type, sema.arena),
+ );
+ }
+ } else break :rs .{ .src = rhs_src, .air_tag = .div_exact };
+ } else break :rs .{ .src = lhs_src, .air_tag = .div_exact };
},
.mul => {
// For integers:
@@ -7505,6 +7998,51 @@ fn analyzeArithmetic(
return block.addBinOp(rs.air_tag, casted_lhs, casted_rhs);
}
+fn analyzePtrArithmetic(
+ sema: *Sema,
+ block: *Block,
+ op_src: LazySrcLoc,
+ ptr: Air.Inst.Ref,
+ uncasted_offset: Air.Inst.Ref,
+ air_tag: Air.Inst.Tag,
+ ptr_src: LazySrcLoc,
+ offset_src: LazySrcLoc,
+) CompileError!Air.Inst.Ref {
+ // TODO if the operand is comptime-known to be negative, or is a negative int,
+ // coerce to isize instead of usize.
+ const offset = try sema.coerce(block, Type.usize, uncasted_offset, offset_src);
+ // TODO adjust the return type according to alignment and other factors
+ const runtime_src = rs: {
+ if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| {
+ if (try sema.resolveDefinedValue(block, offset_src, offset)) |offset_val| {
+ const ptr_ty = sema.typeOf(ptr);
+ const offset_int = offset_val.toUnsignedInt();
+ const new_ptr_ty = ptr_ty; // TODO modify alignment
+ if (ptr_val.getUnsignedInt()) |addr| {
+ const target = sema.mod.getTarget();
+ const elem_ty = ptr_ty.childType();
+ const elem_size = elem_ty.abiSize(target);
+ const new_addr = switch (air_tag) {
+ .ptr_add => addr + elem_size * offset_int,
+ .ptr_sub => addr - elem_size * offset_int,
+ else => unreachable,
+ };
+ const new_ptr_val = try Value.Tag.int_u64.create(sema.arena, new_addr);
+ return sema.addConstant(new_ptr_ty, new_ptr_val);
+ }
+ if (air_tag == .ptr_sub) {
+ return sema.fail(block, op_src, "TODO implement Sema comptime pointer subtraction", .{});
+ }
+ const new_ptr_val = try ptr_val.elemPtr(sema.arena, offset_int);
+ return sema.addConstant(new_ptr_ty, new_ptr_val);
+ } else break :rs offset_src;
+ } else break :rs ptr_src;
+ };
+
+ try sema.requireRuntimeBlock(block, runtime_src);
+ return block.addBinOp(air_tag, ptr, offset);
+}
+
fn zirLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@@ -7629,11 +8167,13 @@ fn zirCmpEq(
rhs_ty_tag == .Null and lhs_ty_tag == .Optional))
{
// comparing null with optionals
- const opt_operand = if (lhs_ty_tag == .Optional) lhs else rhs;
+ const opt_operand = if (lhs_ty_tag == .Null) rhs else lhs;
return sema.analyzeIsNull(block, src, opt_operand, op == .neq);
}
if (((lhs_ty_tag == .Null and rhs_ty.isCPtr()) or (rhs_ty_tag == .Null and lhs_ty.isCPtr()))) {
- return sema.fail(block, src, "TODO implement C pointer cmp", .{});
+ // comparing null with C pointers
+ const opt_operand = if (lhs_ty_tag == .Null) rhs else lhs;
+ return sema.analyzeIsNull(block, src, opt_operand, op == .neq);
}
if (lhs_ty_tag == .Null or rhs_ty_tag == .Null) {
const non_null_type = if (lhs_ty_tag == .Null) rhs_ty else lhs_ty;
@@ -8030,7 +8570,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// return_type: ?type,
field_values[4] = try Value.Tag.ty.create(sema.arena, ty.fnReturnType());
// args: []const FnArg,
- field_values[5] = Value.initTag(.null_value); // TODO
+ field_values[5] = Value.@"null"; // TODO
return sema.addConstant(
type_info_ty,
@@ -8088,7 +8628,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// is_allowzero: bool,
field_values[5] = if (info.@"allowzero") Value.initTag(.bool_true) else Value.initTag(.bool_false);
// sentinel: anytype,
- field_values[6] = if (info.sentinel) |some| try Value.Tag.opt_payload.create(sema.arena, some) else Value.initTag(.null_value);
+ field_values[6] = if (info.sentinel) |some| try Value.Tag.opt_payload.create(sema.arena, some) else Value.@"null";
return sema.addConstant(
type_info_ty,
@@ -8106,7 +8646,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// child: type,
field_values[1] = try Value.Tag.ty.create(sema.arena, info.elem_type);
// sentinel: anytype,
- field_values[2] = if (info.sentinel) |some| try Value.Tag.opt_payload.create(sema.arena, some) else Value.initTag(.null_value);
+ field_values[2] = if (info.sentinel) |some| try Value.Tag.opt_payload.create(sema.arena, some) else Value.@"null";
return sema.addConstant(
type_info_ty,
@@ -8159,14 +8699,6 @@ fn zirTypeof(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
return sema.addType(operand_ty);
}
-fn zirTypeofElem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- _ = block;
- const inst_data = sema.code.instructions.items(.data)[inst].un_node;
- const operand_ptr = sema.resolveInst(inst_data.operand);
- const elem_ty = sema.typeOf(operand_ptr).elemType();
- return sema.addType(elem_ty);
-}
-
fn zirTypeofLog2IntType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
@@ -8237,12 +8769,13 @@ fn zirBoolNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const bool_type = Type.initTag(.bool);
const operand = try sema.coerce(block, bool_type, uncasted_operand, operand_src);
- if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| {
- if (val.toBool()) {
- return Air.Inst.Ref.bool_false;
- } else {
- return Air.Inst.Ref.bool_true;
- }
+ if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| {
+ return if (val.isUndef())
+ sema.addConstUndef(bool_type)
+ else if (val.toBool())
+ Air.Inst.Ref.bool_false
+ else
+ Air.Inst.Ref.bool_true;
}
try sema.requireRuntimeBlock(block, src);
return block.addTyOp(.not, bool_type, operand);
@@ -8446,15 +8979,10 @@ fn zirUnreachable(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const inst_data = sema.code.instructions.items(.data)[inst].@"unreachable";
const src = inst_data.src();
- const safety_check = inst_data.safety;
try sema.requireRuntimeBlock(block, src);
// TODO Add compile error for @optimizeFor occurring too late in a scope.
- if (safety_check and block.wantSafety()) {
- return sema.safetyPanic(block, src, .unreach);
- } else {
- _ = try block.addNoOp(.unreach);
- return always_noreturn;
- }
+ try block.addUnreachable(src, inst_data.safety);
+ return always_noreturn;
}
fn zirRetErrValue(
@@ -8466,19 +8994,13 @@ fn zirRetErrValue(
const err_name = inst_data.get(sema.code);
const src = inst_data.src();
- // Add the error tag to the inferred error set of the in-scope function.
- if (sema.fn_ret_ty.zigTypeTag() == .ErrorUnion) {
- if (sema.fn_ret_ty.errorUnionSet().castTag(.error_set_inferred)) |payload| {
- _ = try payload.data.map.getOrPut(sema.gpa, err_name);
- }
- }
// Return the error code from the function.
const kv = try sema.mod.getErrorValue(err_name);
const result_inst = try sema.addConstant(
try Type.Tag.error_set_single.create(sema.arena, kv.key),
try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key }),
);
- return sema.analyzeRet(block, result_inst, src, true);
+ return sema.analyzeRet(block, result_inst, src);
}
fn zirRetCoerce(
@@ -8493,7 +9015,7 @@ fn zirRetCoerce(
const operand = sema.resolveInst(inst_data.operand);
const src = inst_data.src();
- return sema.analyzeRet(block, operand, src, true);
+ return sema.analyzeRet(block, operand, src);
}
fn zirRetNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
@@ -8504,11 +9026,7 @@ fn zirRetNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir
const operand = sema.resolveInst(inst_data.operand);
const src = inst_data.src();
- // TODO: we pass false here for the `need_coercion` boolean, but I'm pretty sure we need
- // to remove this parameter entirely. Observe the problem by looking at the incorrect compile
- // error that occurs when a behavior test case being executed at comptime fails, e.g.
- // `test { comptime foo(); } fn foo() { try expect(false); }`
- return sema.analyzeRet(block, operand, src, false);
+ return sema.analyzeRet(block, operand, src);
}
fn zirRetLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
@@ -8521,7 +9039,7 @@ fn zirRetLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir
if (block.is_comptime or block.inlining != null) {
const operand = try sema.analyzeLoad(block, src, ret_ptr, src);
- return sema.analyzeRet(block, operand, src, false);
+ return sema.analyzeRet(block, operand, src);
}
try sema.requireRuntimeBlock(block, src);
_ = try block.addUnOp(.ret_load, ret_ptr);
@@ -8533,12 +9051,25 @@ fn analyzeRet(
block: *Block,
uncasted_operand: Air.Inst.Ref,
src: LazySrcLoc,
- need_coercion: bool,
) CompileError!Zir.Inst.Index {
- const operand = if (!need_coercion)
- uncasted_operand
- else
- try sema.coerce(block, sema.fn_ret_ty, uncasted_operand, src);
+ // Special case for returning an error to an inferred error set; we need to
+ // add the error tag to the inferred error set of the in-scope function, so
+ // that the coercion below works correctly.
+ if (sema.fn_ret_ty.zigTypeTag() == .ErrorUnion) {
+ if (sema.fn_ret_ty.errorUnionSet().castTag(.error_set_inferred)) |payload| {
+ const op_ty = sema.typeOf(uncasted_operand);
+ switch (op_ty.zigTypeTag()) {
+ .ErrorSet => {
+ try payload.data.addErrorSet(sema.gpa, op_ty);
+ },
+ .ErrorUnion => {
+ try payload.data.addErrorSet(sema.gpa, op_ty.errorUnionSet());
+ },
+ else => {},
+ }
+ }
+ }
+ const operand = try sema.coerce(block, sema.fn_ret_ty, uncasted_operand, src);
if (block.inlining) |inlining| {
if (block.is_comptime) {
@@ -8559,7 +9090,7 @@ fn analyzeRet(
fn floatOpAllowed(tag: Zir.Inst.Tag) bool {
// extend this swich as additional operators are implemented
return switch (tag) {
- .add, .sub, .mul, .div, .mod, .rem, .mod_rem => true,
+ .add, .sub, .mul, .div, .div_exact, .div_trunc, .div_floor, .mod, .rem, .mod_rem => true,
else => false,
};
}
@@ -8708,7 +9239,7 @@ fn zirStructInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_ref: bool)
const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data;
const field_name = sema.code.nullTerminatedString(field_type_extra.name_start);
const field_index = struct_obj.fields.getIndex(field_name) orelse
- return sema.failWithBadFieldAccess(block, struct_obj, field_src, field_name);
+ return sema.failWithBadStructFieldAccess(block, struct_obj, field_src, field_name);
if (found_fields[field_index] != 0) {
const other_field_type = found_fields[field_index];
const other_field_type_data = zir_datas[other_field_type].pl_node;
@@ -8789,8 +9320,9 @@ fn zirStructInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_ref: bool)
const field_src: LazySrcLoc = .{ .node_offset_back2tok = field_type_data.src_node };
const field_type_extra = sema.code.extraData(Zir.Inst.FieldType, field_type_data.payload_index).data;
const field_name = sema.code.nullTerminatedString(field_type_extra.name_start);
- const field_index = union_obj.fields.getIndex(field_name) orelse
+ const field_index_usize = union_obj.fields.getIndex(field_name) orelse
return sema.failWithBadUnionFieldAccess(block, union_obj, field_src, field_name);
+ const field_index = @intCast(u32, field_index_usize);
if (is_ref) {
return sema.fail(block, src, "TODO: Sema.zirStructInit is_ref=true union", .{});
@@ -8798,12 +9330,10 @@ fn zirStructInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_ref: bool)
const init_inst = sema.resolveInst(item.data.init);
if (try sema.resolveMaybeUndefVal(block, field_src, init_inst)) |val| {
+ const tag_val = try Value.Tag.enum_field_index.create(sema.arena, field_index);
return sema.addConstant(
resolved_ty,
- try Value.Tag.@"union".create(sema.arena, .{
- .tag = try Value.Tag.int_u64.create(sema.arena, field_index),
- .val = val,
- }),
+ try Value.Tag.@"union".create(sema.arena, .{ .tag = tag_val, .val = val }),
);
}
return sema.fail(block, src, "TODO: Sema.zirStructInit for runtime-known union values", .{});
@@ -8917,7 +9447,7 @@ fn zirFieldType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
.Struct => {
const struct_obj = resolved_ty.castTag(.@"struct").?.data;
const field = struct_obj.fields.get(field_name) orelse
- return sema.failWithBadFieldAccess(block, struct_obj, src, field_name);
+ return sema.failWithBadStructFieldAccess(block, struct_obj, src, field_name);
return sema.addType(field.ty);
},
.Union => {
@@ -8963,8 +9493,10 @@ fn zirAlignOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const ty = try sema.resolveType(block, operand_src, inst_data.operand);
+ const resolved_ty = try sema.resolveTypeFields(block, operand_src, ty);
+ try sema.resolveTypeLayout(block, operand_src, resolved_ty);
const target = sema.mod.getTarget();
- const abi_align = ty.abiAlignment(target);
+ const abi_align = resolved_ty.abiAlignment(target);
return sema.addIntUnsigned(Type.comptime_int, abi_align);
}
@@ -8980,12 +9512,6 @@ fn zirBoolToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
return block.addUnOp(.bool_to_int, operand);
}
-fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const inst_data = sema.code.instructions.items(.data)[inst].un_node;
- const src = inst_data.src();
- return sema.fail(block, src, "TODO: Sema.zirEmbedFile", .{});
-}
-
fn zirErrorName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
@@ -9013,8 +9539,8 @@ fn zirReify(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
const type_info = try sema.coerce(block, type_info_ty, uncasted_operand, operand_src);
const val = try sema.resolveConstValue(block, operand_src, type_info);
const union_val = val.cast(Value.Payload.Union).?.data;
- const TypeInfoTag = std.meta.Tag(std.builtin.TypeInfo);
- const tag_index = @intCast(std.meta.Tag(TypeInfoTag), union_val.tag.toUnsignedInt());
+ const tag_ty = type_info_ty.unionTagType().?;
+ const tag_index = tag_ty.enumTagFieldIndex(union_val.tag).?;
switch (@intToEnum(std.builtin.TypeId, tag_index)) {
.Type => return Air.Inst.Ref.type_type,
.Void => return Air.Inst.Ref.void_type,
@@ -9103,8 +9629,8 @@ fn zirIntToFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const operand = sema.resolveInst(extra.rhs);
const operand_ty = sema.typeOf(operand);
- _ = try sema.checkIntType(block, ty_src, dest_ty);
- try sema.checkFloatType(block, operand_src, operand_ty);
+ try sema.checkFloatType(block, ty_src, dest_ty);
+ _ = try sema.checkIntType(block, operand_src, operand_ty);
if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| {
const target = sema.mod.getTarget();
@@ -9169,7 +9695,7 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
try sema.addSafetyCheck(block, is_aligned, .incorrect_alignment);
}
}
- return block.addTyOp(.bitcast, type_res, operand_coerced);
+ return block.addBitCast(type_res, operand_coerced);
}
fn zirErrSetCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -9196,10 +9722,7 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
@tagName(dest_ty.zigTypeTag()), dest_ty,
});
}
- if (try sema.resolveMaybeUndefVal(block, operand_src, operand)) |val| {
- return sema.addConstant(dest_ty, val);
- }
- return block.addTyOp(.bitcast, dest_ty, operand);
+ return sema.coerceCompatiblePtrs(block, dest_ty, operand, operand_src);
}
fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -9219,14 +9742,18 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
}
const target = sema.mod.getTarget();
- const src_info = operand_ty.intInfo(target);
const dest_info = dest_ty.intInfo(target);
- if (src_info.bits == 0 or dest_info.bits == 0) {
- return sema.addConstant(dest_ty, Value.initTag(.zero));
+ if (dest_info.bits == 0) {
+ return sema.addConstant(dest_ty, Value.zero);
}
if (!src_is_comptime_int) {
+ const src_info = operand_ty.intInfo(target);
+ if (src_info.bits == 0) {
+ return sema.addConstant(dest_ty, Value.zero);
+ }
+
if (src_info.signedness != dest_info.signedness) {
return sema.fail(block, operand_src, "expected {s} integer type, found '{}'", .{
@tagName(dest_info.signedness), operand_ty,
@@ -9264,8 +9791,33 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
- const src = inst_data.src();
- return sema.fail(block, src, "TODO: Sema.zirAlignCast", .{});
+ const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
+ const align_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
+ const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
+ const dest_align = try sema.resolveAlign(block, align_src, extra.lhs);
+ const ptr = sema.resolveInst(extra.rhs);
+ const ptr_ty = sema.typeOf(ptr);
+
+ // TODO in addition to pointers, this instruction is supposed to work for
+ // pointer-like optionals and slices.
+ try sema.checkPtrType(block, ptr_src, ptr_ty);
+
+ // TODO compile error if the result pointer is comptime known and would have an
+ // alignment that disagrees with the Decl's alignment.
+
+ // TODO insert safety check that the alignment is correct
+
+ const ptr_info = ptr_ty.ptrInfo().data;
+ const dest_ty = try Type.ptr(sema.arena, .{
+ .pointee_type = ptr_info.pointee_type,
+ .@"align" = dest_align,
+ .@"addrspace" = ptr_info.@"addrspace",
+ .mutable = ptr_info.mutable,
+ .@"allowzero" = ptr_info.@"allowzero",
+ .@"volatile" = ptr_info.@"volatile",
+ .size = ptr_info.size,
+ });
+ return sema.coerceCompatiblePtrs(block, dest_ty, ptr, ptr_src);
}
fn zirClz(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@@ -9340,24 +9892,6 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
return sema.fail(block, src, "TODO: Sema.zirBitReverse", .{});
}
-fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
- const src = inst_data.src();
- return sema.fail(block, src, "TODO: Sema.zirDivExact", .{});
-}
-
-fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
- const src = inst_data.src();
- return sema.fail(block, src, "TODO: Sema.zirDivFloor", .{});
-}
-
-fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
- const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
- const src = inst_data.src();
- return sema.fail(block, src, "TODO: Sema.zirDivTrunc", .{});
-}
-
fn zirShrExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
@@ -9385,6 +9919,18 @@ fn checkIntType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileEr
}
}
+fn checkPtrType(
+ sema: *Sema,
+ block: *Block,
+ ty_src: LazySrcLoc,
+ ty: Type,
+) CompileError!void {
+ switch (ty.zigTypeTag()) {
+ .Pointer => {},
+ else => return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty}),
+ }
+}
+
fn checkFloatType(
sema: *Sema,
block: *Block,
@@ -9648,7 +10194,8 @@ fn zirCmpxchg(
const failure_order_src: LazySrcLoc = .{ .node_offset_builtin_call_arg5 = inst_data.src_node };
// zig fmt: on
const ptr = sema.resolveInst(extra.ptr);
- const elem_ty = sema.typeOf(ptr).elemType();
+ const ptr_ty = sema.typeOf(ptr);
+ const elem_ty = ptr_ty.elemType();
try sema.checkAtomicOperandType(block, elem_ty_src, elem_ty);
if (elem_ty.zigTypeTag() == .Float) {
return sema.fail(
@@ -9680,7 +10227,7 @@ fn zirCmpxchg(
// special case zero bit types
if ((try sema.typeHasOnePossibleValue(block, elem_ty_src, elem_ty)) != null) {
- return sema.addConstant(result_ty, Value.initTag(.null_value));
+ return sema.addConstant(result_ty, Value.@"null");
}
const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: {
@@ -9691,10 +10238,10 @@ fn zirCmpxchg(
// to become undef as well
return sema.addConstUndef(result_ty);
}
- const stored_val = (try ptr_val.pointerDeref(sema.arena)) orelse break :rs ptr_src;
+ const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src;
const result_val = if (stored_val.eql(expected_val, elem_ty)) blk: {
try sema.storePtr(block, src, ptr, new_value);
- break :blk Value.initTag(.null_value);
+ break :blk Value.@"null";
} else try Value.Tag.opt_payload.create(sema.arena, stored_val);
return sema.addConstant(result_ty, result_val);
@@ -9753,7 +10300,8 @@ fn zirAtomicLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
const order_src : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
// zig fmt: on
const ptr = sema.resolveInst(extra.lhs);
- const elem_ty = sema.typeOf(ptr).elemType();
+ const ptr_ty = sema.typeOf(ptr);
+ const elem_ty = ptr_ty.elemType();
try sema.checkAtomicOperandType(block, elem_ty_src, elem_ty);
const order = try sema.resolveAtomicOrder(block, order_src, extra.rhs);
@@ -9774,7 +10322,7 @@ fn zirAtomicLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
}
if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| {
- if (try ptr_val.pointerDeref(sema.arena)) |elem_val| {
+ if (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) |elem_val| {
return sema.addConstant(elem_ty, elem_val);
}
}
@@ -9801,7 +10349,8 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const order_src : LazySrcLoc = .{ .node_offset_builtin_call_arg4 = inst_data.src_node };
// zig fmt: on
const ptr = sema.resolveInst(extra.ptr);
- const operand_ty = sema.typeOf(ptr).elemType();
+ const ptr_ty = sema.typeOf(ptr);
+ const operand_ty = ptr_ty.elemType();
try sema.checkAtomicOperandType(block, operand_ty_src, operand_ty);
const op = try sema.resolveAtomicRmwOp(block, op_src, extra.operation);
@@ -9838,7 +10387,7 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
};
if (ptr_val.isComptimeMutablePtr()) {
const target = sema.mod.getTarget();
- const stored_val = (try ptr_val.pointerDeref(sema.arena)) orelse break :rs ptr_src;
+ const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src;
const new_val = switch (op) {
// zig fmt: off
.Xchg => operand_val,
@@ -10152,7 +10701,7 @@ fn zirVarExtended(
// extra_index += 1;
// const align_tv = try sema.resolveInstConst(block, align_src, align_ref);
// break :blk align_tv.val;
- //} else Value.initTag(.null_value);
+ //} else Value.@"null";
const uncasted_init: Air.Inst.Ref = if (small.has_init) blk: {
const init_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
@@ -10234,7 +10783,7 @@ fn zirFuncExtended(
extra_index += 1;
const align_tv = try sema.resolveInstConst(block, align_src, align_ref);
break :blk align_tv.val;
- } else Value.initTag(.null_value);
+ } else Value.@"null";
const ret_ty_body = sema.code.extra[extra_index..][0..extra.data.ret_body_len];
extra_index += ret_ty_body.len;
@@ -10519,7 +11068,7 @@ fn panicWithMsg(
});
const null_stack_trace = try sema.addConstant(
try Type.optional(arena, ptr_stack_trace_ty),
- Value.initTag(.null_value),
+ Value.@"null",
);
const args = try arena.create([2]Air.Inst.Ref);
args.* = .{ msg_inst, null_stack_trace };
@@ -10580,12 +11129,22 @@ fn fieldVal(
const object_src = src; // TODO better source location
const object_ty = sema.typeOf(object);
- switch (object_ty.zigTypeTag()) {
+ // Zig allows dereferencing a single pointer during field lookup. Note that
+    // we don't actually need to generate the dereference for some field lookups, like the
+ // length of arrays and other comptime operations.
+ const is_pointer_to = object_ty.isSinglePointer();
+
+ const inner_ty = if (is_pointer_to)
+ object_ty.childType()
+ else
+ object_ty;
+
+ switch (inner_ty.zigTypeTag()) {
.Array => {
if (mem.eql(u8, field_name, "len")) {
return sema.addConstant(
Type.initTag(.comptime_int),
- try Value.Tag.int_u64.create(arena, object_ty.arrayLen()),
+ try Value.Tag.int_u64.create(arena, inner_ty.arrayLen()),
);
} else {
return sema.fail(
@@ -10596,75 +11155,40 @@ fn fieldVal(
);
}
},
- .Pointer => switch (object_ty.ptrSize()) {
- .Slice => {
- if (mem.eql(u8, field_name, "ptr")) {
- const buf = try arena.create(Type.SlicePtrFieldTypeBuffer);
- const result_ty = object_ty.slicePtrFieldType(buf);
- if (try sema.resolveMaybeUndefVal(block, object_src, object)) |val| {
- if (val.isUndef()) return sema.addConstUndef(result_ty);
- return sema.addConstant(result_ty, val.slicePtr());
- }
- try sema.requireRuntimeBlock(block, src);
- return block.addTyOp(.slice_ptr, result_ty, object);
- } else if (mem.eql(u8, field_name, "len")) {
- const result_ty = Type.usize;
- if (try sema.resolveMaybeUndefVal(block, object_src, object)) |val| {
- if (val.isUndef()) return sema.addConstUndef(result_ty);
- return sema.addConstant(
- result_ty,
- try Value.Tag.int_u64.create(arena, val.sliceLen()),
- );
- }
- try sema.requireRuntimeBlock(block, src);
- return block.addTyOp(.slice_len, result_ty, object);
- } else {
- return sema.fail(
- block,
- field_name_src,
- "no member named '{s}' in '{}'",
- .{ field_name, object_ty },
- );
- }
- },
- .One => {
- const ptr_child = object_ty.elemType();
- switch (ptr_child.zigTypeTag()) {
- .Array => {
- if (mem.eql(u8, field_name, "len")) {
- return sema.addConstant(
- Type.initTag(.comptime_int),
- try Value.Tag.int_u64.create(arena, ptr_child.arrayLen()),
- );
- } else {
- return sema.fail(
- block,
- field_name_src,
- "no member named '{s}' in '{}'",
- .{ field_name, object_ty },
- );
- }
- },
- .Struct => {
- const struct_ptr_deref = try sema.analyzeLoad(block, src, object, object_src);
- return sema.unionFieldVal(block, src, struct_ptr_deref, field_name, field_name_src, ptr_child);
- },
- .Union => {
- const union_ptr_deref = try sema.analyzeLoad(block, src, object, object_src);
- return sema.unionFieldVal(block, src, union_ptr_deref, field_name, field_name_src, ptr_child);
- },
- else => {},
- }
- },
- .Many, .C => {},
+ .Pointer => if (inner_ty.isSlice()) {
+ if (mem.eql(u8, field_name, "ptr")) {
+ const slice = if (is_pointer_to)
+ try sema.analyzeLoad(block, src, object, object_src)
+ else
+ object;
+ return sema.analyzeSlicePtr(block, src, slice, inner_ty, object_src);
+ } else if (mem.eql(u8, field_name, "len")) {
+ const slice = if (is_pointer_to)
+ try sema.analyzeLoad(block, src, object, object_src)
+ else
+ object;
+ return sema.analyzeSliceLen(block, src, slice);
+ } else {
+ return sema.fail(
+ block,
+ field_name_src,
+ "no member named '{s}' in '{}'",
+ .{ field_name, object_ty },
+ );
+ }
},
.Type => {
- const val = (try sema.resolveDefinedValue(block, object_src, object)).?;
+ const dereffed_type = if (is_pointer_to)
+ try sema.analyzeLoad(block, src, object, object_src)
+ else
+ object;
+
+ const val = (try sema.resolveDefinedValue(block, object_src, dereffed_type)).?;
var to_type_buffer: Value.ToTypeBuffer = undefined;
const child_type = val.toType(&to_type_buffer);
+
switch (child_type.zigTypeTag()) {
.ErrorSet => {
- // TODO resolve inferred error sets
const name: []const u8 = if (child_type.castTag(.error_set)) |payload| blk: {
const error_set = payload.data;
// TODO this is O(N). I'm putting off solving this until we solve inferred
@@ -10685,10 +11209,39 @@ fn fieldVal(
try Value.Tag.@"error".create(arena, .{ .name = name }),
);
},
- .Struct, .Opaque, .Union => {
+ .Union => {
if (child_type.getNamespace()) |namespace| {
- if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| {
- return sema.analyzeLoad(block, src, inst, src);
+ if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| {
+ return inst;
+ }
+ }
+ if (child_type.unionTagType()) |enum_ty| {
+ if (enum_ty.enumFieldIndex(field_name)) |field_index_usize| {
+ const field_index = @intCast(u32, field_index_usize);
+ return sema.addConstant(
+ enum_ty,
+ try Value.Tag.enum_field_index.create(sema.arena, field_index),
+ );
+ }
+ }
+ return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
+ },
+ .Enum => {
+ if (child_type.getNamespace()) |namespace| {
+ if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| {
+ return inst;
+ }
+ }
+ const field_index_usize = child_type.enumFieldIndex(field_name) orelse
+ return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
+ const field_index = @intCast(u32, field_index_usize);
+ const enum_val = try Value.Tag.enum_field_index.create(arena, field_index);
+ return sema.addConstant(try child_type.copy(arena), enum_val);
+ },
+ .Struct, .Opaque => {
+ if (child_type.getNamespace()) |namespace| {
+ if (try sema.namespaceLookupVal(block, src, namespace, field_name)) |inst| {
+ return inst;
}
}
// TODO add note: declared here
@@ -10702,40 +11255,23 @@ fn fieldVal(
kw_name, child_type, field_name,
});
},
- .Enum => {
- if (child_type.getNamespace()) |namespace| {
- if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| {
- return sema.analyzeLoad(block, src, inst, src);
- }
- }
- const field_index = child_type.enumFieldIndex(field_name) orelse {
- const msg = msg: {
- const msg = try sema.errMsg(
- block,
- src,
- "enum '{}' has no member named '{s}'",
- .{ child_type, field_name },
- );
- errdefer msg.destroy(sema.gpa);
- try sema.mod.errNoteNonLazy(
- child_type.declSrcLoc(),
- msg,
- "enum declared here",
- .{},
- );
- break :msg msg;
- };
- return sema.failWithOwnedErrorMsg(msg);
- };
- const field_index_u32 = @intCast(u32, field_index);
- const enum_val = try Value.Tag.enum_field_index.create(arena, field_index_u32);
- return sema.addConstant(try child_type.copy(arena), enum_val);
- },
else => return sema.fail(block, src, "type '{}' has no members", .{child_type}),
}
},
- .Struct => return sema.structFieldVal(block, src, object, field_name, field_name_src, object_ty),
- .Union => return sema.unionFieldVal(block, src, object, field_name, field_name_src, object_ty),
+ .Struct => if (is_pointer_to) {
+ // Avoid loading the entire struct by fetching a pointer and loading that
+ const field_ptr = try sema.structFieldPtr(block, src, object, field_name, field_name_src, inner_ty);
+ return sema.analyzeLoad(block, src, field_ptr, object_src);
+ } else {
+ return sema.structFieldVal(block, src, object, field_name, field_name_src, inner_ty);
+ },
+ .Union => if (is_pointer_to) {
+ // Avoid loading the entire union by fetching a pointer and loading that
+ const field_ptr = try sema.unionFieldPtr(block, src, object, field_name, field_name_src, inner_ty);
+ return sema.analyzeLoad(block, src, field_ptr, object_src);
+ } else {
+ return sema.unionFieldVal(block, src, object, field_name, field_name_src, inner_ty);
+ },
else => {},
}
return sema.fail(block, src, "type '{}' does not support field access", .{object_ty});
@@ -10758,14 +11294,25 @@ fn fieldPtr(
.Pointer => object_ptr_ty.elemType(),
else => return sema.fail(block, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty}),
};
- switch (object_ty.zigTypeTag()) {
+
+ // Zig allows dereferencing a single pointer during field lookup. Note that
+ // we don't actually need to generate the dereference some field lookups, like the
+ // length of arrays and other comptime operations.
+ const is_pointer_to = object_ty.isSinglePointer();
+
+ const inner_ty = if (is_pointer_to)
+ object_ty.childType()
+ else
+ object_ty;
+
+ switch (inner_ty.zigTypeTag()) {
.Array => {
if (mem.eql(u8, field_name, "len")) {
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
return sema.analyzeDeclRef(try anon_decl.finish(
Type.initTag(.comptime_int),
- try Value.Tag.int_u64.create(anon_decl.arena(), object_ty.arrayLen()),
+ try Value.Tag.int_u64.create(anon_decl.arena(), inner_ty.arrayLen()),
));
} else {
return sema.fail(
@@ -10776,77 +11323,74 @@ fn fieldPtr(
);
}
},
- .Pointer => switch (object_ty.ptrSize()) {
- .Slice => {
- // Here for the ptr and len fields what we need to do is the situation
- // when a temporary has its address taken, e.g. `&a[c..d].len`.
- // This value may be known at compile-time or runtime. In the former
- // case, it should create an anonymous Decl and return a decl_ref to it.
- // In the latter case, it should add an `alloc` instruction, store
- // the runtime value to it, and then return the `alloc`.
- // In both cases the pointer should be const.
- if (mem.eql(u8, field_name, "ptr")) {
- return sema.fail(
- block,
- field_name_src,
- "TODO: implement reference to 'ptr' field of slice '{}'",
- .{object_ty},
- );
- } else if (mem.eql(u8, field_name, "len")) {
- return sema.fail(
- block,
- field_name_src,
- "TODO: implement reference to 'len' field of slice '{}'",
- .{object_ty},
- );
- } else {
- return sema.fail(
- block,
- field_name_src,
- "no member named '{s}' in '{}'",
- .{ field_name, object_ty },
- );
+ .Pointer => if (inner_ty.isSlice()) {
+ const inner_ptr = if (is_pointer_to)
+ try sema.analyzeLoad(block, src, object_ptr, object_ptr_src)
+ else
+ object_ptr;
+
+ if (mem.eql(u8, field_name, "ptr")) {
+ const buf = try sema.arena.create(Type.SlicePtrFieldTypeBuffer);
+ const slice_ptr_ty = inner_ty.slicePtrFieldType(buf);
+
+ if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| {
+ var anon_decl = try block.startAnonDecl();
+ defer anon_decl.deinit();
+
+ return sema.analyzeDeclRef(try anon_decl.finish(
+ try slice_ptr_ty.copy(anon_decl.arena()),
+ try val.slicePtr().copy(anon_decl.arena()),
+ ));
}
- },
- .One => {
- const ptr_child = object_ty.elemType();
- switch (ptr_child.zigTypeTag()) {
- .Array => {
- if (mem.eql(u8, field_name, "len")) {
- var anon_decl = try block.startAnonDecl();
- defer anon_decl.deinit();
- return sema.analyzeDeclRef(try anon_decl.finish(
- Type.initTag(.comptime_int),
- try Value.Tag.int_u64.create(anon_decl.arena(), ptr_child.arrayLen()),
- ));
- } else {
- return sema.fail(
- block,
- field_name_src,
- "no member named '{s}' in '{}'",
- .{ field_name, object_ty },
- );
- }
- },
- .Struct => {
- const struct_ptr_deref = try sema.analyzeLoad(block, src, object_ptr, object_ptr_src);
- return sema.structFieldPtr(block, src, struct_ptr_deref, field_name, field_name_src, ptr_child);
- },
- .Union => {
- const union_ptr_deref = try sema.analyzeLoad(block, src, object_ptr, object_ptr_src);
- return sema.unionFieldPtr(block, src, union_ptr_deref, field_name, field_name_src, ptr_child);
- },
- else => {},
+ try sema.requireRuntimeBlock(block, src);
+
+ const result_ty = try Type.ptr(sema.arena, .{
+ .pointee_type = slice_ptr_ty,
+ .mutable = object_ptr_ty.ptrIsMutable(),
+ .@"addrspace" = object_ptr_ty.ptrAddressSpace(),
+ });
+
+ return block.addTyOp(.ptr_slice_ptr_ptr, result_ty, inner_ptr);
+ } else if (mem.eql(u8, field_name, "len")) {
+ if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| {
+ var anon_decl = try block.startAnonDecl();
+ defer anon_decl.deinit();
+
+ return sema.analyzeDeclRef(try anon_decl.finish(
+ Type.usize,
+ try Value.Tag.int_u64.create(anon_decl.arena(), val.sliceLen()),
+ ));
}
- },
- .Many, .C => {},
+ try sema.requireRuntimeBlock(block, src);
+
+ const result_ty = try Type.ptr(sema.arena, .{
+ .pointee_type = Type.usize,
+ .mutable = object_ptr_ty.ptrIsMutable(),
+ .@"addrspace" = object_ptr_ty.ptrAddressSpace(),
+ });
+
+ return block.addTyOp(.ptr_slice_len_ptr, result_ty, inner_ptr);
+ } else {
+ return sema.fail(
+ block,
+ field_name_src,
+ "no member named '{s}' in '{}'",
+ .{ field_name, object_ty },
+ );
+ }
},
.Type => {
_ = try sema.resolveConstValue(block, object_ptr_src, object_ptr);
const result = try sema.analyzeLoad(block, src, object_ptr, object_ptr_src);
- const val = (sema.resolveDefinedValue(block, src, result) catch unreachable).?;
+ const inner = if (is_pointer_to)
+ try sema.analyzeLoad(block, src, result, object_ptr_src)
+ else
+ result;
+
+ const val = (sema.resolveDefinedValue(block, src, inner) catch unreachable).?;
var to_type_buffer: Value.ToTypeBuffer = undefined;
const child_type = val.toType(&to_type_buffer);
+
switch (child_type.zigTypeTag()) {
.ErrorSet => {
// TODO resolve inferred error sets
@@ -10872,22 +11416,24 @@ fn fieldPtr(
try Value.Tag.@"error".create(anon_decl.arena(), .{ .name = name }),
));
},
- .Struct, .Opaque, .Union => {
+ .Union => {
if (child_type.getNamespace()) |namespace| {
if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| {
return inst;
}
}
- // TODO add note: declared here
- const kw_name = switch (child_type.zigTypeTag()) {
- .Struct => "struct",
- .Opaque => "opaque",
- .Union => "union",
- else => unreachable,
- };
- return sema.fail(block, src, "{s} '{}' has no member named '{s}'", .{
- kw_name, child_type, field_name,
- });
+ if (child_type.unionTagType()) |enum_ty| {
+ if (enum_ty.enumFieldIndex(field_name)) |field_index| {
+ const field_index_u32 = @intCast(u32, field_index);
+ var anon_decl = try block.startAnonDecl();
+ defer anon_decl.deinit();
+ return sema.analyzeDeclRef(try anon_decl.finish(
+ try enum_ty.copy(anon_decl.arena()),
+ try Value.Tag.enum_field_index.create(anon_decl.arena(), field_index_u32),
+ ));
+ }
+ }
+ return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
},
.Enum => {
if (child_type.getNamespace()) |namespace| {
@@ -10896,23 +11442,7 @@ fn fieldPtr(
}
}
const field_index = child_type.enumFieldIndex(field_name) orelse {
- const msg = msg: {
- const msg = try sema.errMsg(
- block,
- src,
- "enum '{}' has no member named '{s}'",
- .{ child_type, field_name },
- );
- errdefer msg.destroy(sema.gpa);
- try sema.mod.errNoteNonLazy(
- child_type.declSrcLoc(),
- msg,
- "enum declared here",
- .{},
- );
- break :msg msg;
- };
- return sema.failWithOwnedErrorMsg(msg);
+ return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
};
const field_index_u32 = @intCast(u32, field_index);
var anon_decl = try block.startAnonDecl();
@@ -10922,14 +11452,34 @@ fn fieldPtr(
try Value.Tag.enum_field_index.create(anon_decl.arena(), field_index_u32),
));
},
+ .Struct, .Opaque => {
+ if (child_type.getNamespace()) |namespace| {
+ if (try sema.namespaceLookupRef(block, src, namespace, field_name)) |inst| {
+ return inst;
+ }
+ }
+ return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
+ },
else => return sema.fail(block, src, "type '{}' has no members", .{child_type}),
}
},
- .Struct => return sema.structFieldPtr(block, src, object_ptr, field_name, field_name_src, object_ty),
- .Union => return sema.unionFieldPtr(block, src, object_ptr, field_name, field_name_src, object_ty),
+ .Struct => {
+ const inner_ptr = if (is_pointer_to)
+ try sema.analyzeLoad(block, src, object_ptr, object_ptr_src)
+ else
+ object_ptr;
+ return sema.structFieldPtr(block, src, inner_ptr, field_name, field_name_src, inner_ty);
+ },
+ .Union => {
+ const inner_ptr = if (is_pointer_to)
+ try sema.analyzeLoad(block, src, object_ptr, object_ptr_src)
+ else
+ object_ptr;
+ return sema.unionFieldPtr(block, src, inner_ptr, field_name, field_name_src, inner_ty);
+ },
else => {},
}
- return sema.fail(block, src, "type '{}' does not support field access", .{object_ty});
+ return sema.fail(block, src, "type '{}' does not support field access (fieldPtr, {}.{s})", .{ object_ty, object_ptr_ty, field_name });
}
fn fieldCallBind(
@@ -11084,6 +11634,17 @@ fn namespaceLookupRef(
return try sema.analyzeDeclRef(decl);
}
+fn namespaceLookupVal(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ namespace: *Namespace,
+ decl_name: []const u8,
+) CompileError!?Air.Inst.Ref {
+ const decl = (try sema.namespaceLookup(block, src, namespace, decl_name)) orelse return null;
+ return try sema.analyzeDeclVal(block, src, decl);
+}
+
fn structFieldPtr(
sema: *Sema,
block: *Block,
@@ -11101,7 +11662,7 @@ fn structFieldPtr(
const struct_obj = struct_ty.castTag(.@"struct").?.data;
const field_index_big = struct_obj.fields.getIndex(field_name) orelse
- return sema.failWithBadFieldAccess(block, struct_obj, field_name_src, field_name);
+ return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name);
const field_index = @intCast(u32, field_index_big);
const field = struct_obj.fields.values()[field_index];
const ptr_field_ty = try Type.ptr(arena, .{
@@ -11138,8 +11699,9 @@ fn structFieldVal(
const struct_ty = try sema.resolveTypeFields(block, src, unresolved_struct_ty);
const struct_obj = struct_ty.castTag(.@"struct").?.data;
- const field_index = struct_obj.fields.getIndex(field_name) orelse
- return sema.failWithBadFieldAccess(block, struct_obj, field_name_src, field_name);
+ const field_index_usize = struct_obj.fields.getIndex(field_name) orelse
+ return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name);
+ const field_index = @intCast(u32, field_index_usize);
const field = struct_obj.fields.values()[field_index];
if (try sema.resolveMaybeUndefVal(block, src, struct_byval)) |struct_val| {
@@ -11150,16 +11712,7 @@ fn structFieldVal(
}
try sema.requireRuntimeBlock(block, src);
- return block.addInst(.{
- .tag = .struct_field_val,
- .data = .{ .ty_pl = .{
- .ty = try sema.addType(field.ty),
- .payload = try sema.addExtra(Air.StructField{
- .struct_operand = struct_byval,
- .field_index = @intCast(u32, field_index),
- }),
- } },
- });
+ return block.addStructFieldVal(struct_byval, field_index, field.ty);
}
fn unionFieldPtr(
@@ -11218,9 +11771,9 @@ fn unionFieldVal(
const union_ty = try sema.resolveTypeFields(block, src, unresolved_union_ty);
const union_obj = union_ty.cast(Type.Payload.Union).?.data;
- const field_index = union_obj.fields.getIndex(field_name) orelse
+ const field_index_usize = union_obj.fields.getIndex(field_name) orelse
return sema.failWithBadUnionFieldAccess(block, union_obj, field_name_src, field_name);
-
+ const field_index = @intCast(u32, field_index_usize);
const field = union_obj.fields.values()[field_index];
if (try sema.resolveMaybeUndefVal(block, src, union_byval)) |union_val| {
@@ -11232,7 +11785,7 @@ fn unionFieldVal(
}
try sema.requireRuntimeBlock(block, src);
- return sema.fail(block, src, "TODO implement runtime union field access", .{});
+ return block.addStructFieldVal(union_byval, field_index, field.ty);
}
fn elemPtr(
@@ -11250,135 +11803,124 @@ fn elemPtr(
else => return sema.fail(block, array_ptr_src, "expected pointer, found '{}'", .{array_ptr_ty}),
};
if (!array_ty.isIndexable()) {
- return sema.fail(block, src, "array access of non-array type '{}'", .{array_ty});
- }
- if (array_ty.isSinglePointer() and array_ty.elemType().zigTypeTag() == .Array) {
- // we have to deref the ptr operand to get the actual array pointer
- const array_ptr_deref = try sema.analyzeLoad(block, src, array_ptr, array_ptr_src);
- return sema.elemPtrArray(block, src, array_ptr_deref, elem_index, elem_index_src);
- }
- if (array_ty.zigTypeTag() == .Array) {
- return sema.elemPtrArray(block, src, array_ptr, elem_index, elem_index_src);
+ return sema.fail(block, src, "array access of non-indexable type '{}'", .{array_ty});
}
- return sema.fail(block, src, "TODO implement more analyze elemptr", .{});
+ switch (array_ty.zigTypeTag()) {
+ .Pointer => {
+ // In all below cases, we have to deref the ptr operand to get the actual array pointer.
+ const array = try sema.analyzeLoad(block, array_ptr_src, array_ptr, array_ptr_src);
+ const result_ty = try array_ty.elemPtrType(sema.arena);
+ switch (array_ty.ptrSize()) {
+ .Slice => {
+ const maybe_slice_val = try sema.resolveDefinedValue(block, array_ptr_src, array);
+ const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
+ const runtime_src = if (maybe_slice_val) |slice_val| rs: {
+ const index_val = maybe_index_val orelse break :rs elem_index_src;
+ const index = @intCast(usize, index_val.toUnsignedInt());
+ const elem_ptr = try slice_val.elemPtr(sema.arena, index);
+ return sema.addConstant(result_ty, elem_ptr);
+ } else array_ptr_src;
+
+ try sema.requireRuntimeBlock(block, runtime_src);
+ return block.addSliceElemPtr(array, elem_index, result_ty);
+ },
+ .Many, .C => {
+ const maybe_ptr_val = try sema.resolveDefinedValue(block, array_ptr_src, array);
+ const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
+
+ const runtime_src = rs: {
+ const ptr_val = maybe_ptr_val orelse break :rs array_ptr_src;
+ const index_val = maybe_index_val orelse break :rs elem_index_src;
+ const index = @intCast(usize, index_val.toUnsignedInt());
+ const elem_ptr = try ptr_val.elemPtr(sema.arena, index);
+ return sema.addConstant(result_ty, elem_ptr);
+ };
+
+ try sema.requireRuntimeBlock(block, runtime_src);
+ return block.addPtrElemPtr(array, elem_index, result_ty);
+ },
+ .One => {
+ assert(array_ty.childType().zigTypeTag() == .Array); // Guaranteed by isIndexable
+ return sema.elemPtrArray(block, array_ptr_src, array, elem_index, elem_index_src);
+ },
+ }
+ },
+ .Array => return sema.elemPtrArray(block, array_ptr_src, array_ptr, elem_index, elem_index_src),
+ .Vector => return sema.fail(block, src, "TODO implement Sema for elemPtr for vector", .{}),
+ else => unreachable,
+ }
}
fn elemVal(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
- array_maybe_ptr: Air.Inst.Ref,
+ array: Air.Inst.Ref,
elem_index: Air.Inst.Ref,
elem_index_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
- const array_ptr_src = src; // TODO better source location
- const maybe_ptr_ty = sema.typeOf(array_maybe_ptr);
- switch (maybe_ptr_ty.zigTypeTag()) {
- .Pointer => switch (maybe_ptr_ty.ptrSize()) {
+ const array_src = src; // TODO better source location
+ const array_ty = sema.typeOf(array);
+
+ if (!array_ty.isIndexable()) {
+ return sema.fail(block, src, "array access of non-indexable type '{}'", .{array_ty});
+ }
+
+ switch (array_ty.zigTypeTag()) {
+ .Pointer => switch (array_ty.ptrSize()) {
.Slice => {
- const maybe_slice_val = try sema.resolveDefinedValue(block, array_ptr_src, array_maybe_ptr);
+ const maybe_slice_val = try sema.resolveDefinedValue(block, array_src, array);
const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
const runtime_src = if (maybe_slice_val) |slice_val| rs: {
const index_val = maybe_index_val orelse break :rs elem_index_src;
const index = @intCast(usize, index_val.toUnsignedInt());
const elem_val = try slice_val.elemValue(sema.arena, index);
- return sema.addConstant(maybe_ptr_ty.elemType2(), elem_val);
- } else array_ptr_src;
+ return sema.addConstant(array_ty.elemType2(), elem_val);
+ } else array_src;
try sema.requireRuntimeBlock(block, runtime_src);
- return block.addBinOp(.slice_elem_val, array_maybe_ptr, elem_index);
+ return block.addBinOp(.slice_elem_val, array, elem_index);
},
.Many, .C => {
- if (try sema.resolveDefinedValue(block, src, array_maybe_ptr)) |ptr_val| {
- _ = ptr_val;
- return sema.fail(block, src, "TODO implement Sema for elemVal for comptime known pointer", .{});
- }
- try sema.requireRuntimeBlock(block, src);
- return block.addBinOp(.ptr_elem_val, array_maybe_ptr, elem_index);
+ const maybe_ptr_val = try sema.resolveDefinedValue(block, array_src, array);
+ const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
+
+ const runtime_src = rs: {
+ const ptr_val = maybe_ptr_val orelse break :rs array_src;
+ const index_val = maybe_index_val orelse break :rs elem_index_src;
+ const index = @intCast(usize, index_val.toUnsignedInt());
+ const maybe_array_val = try sema.pointerDeref(block, array_src, ptr_val, array_ty);
+ const array_val = maybe_array_val orelse break :rs array_src;
+ const elem_val = try array_val.elemValue(sema.arena, index);
+ return sema.addConstant(array_ty.elemType2(), elem_val);
+ };
+
+ try sema.requireRuntimeBlock(block, runtime_src);
+ return block.addBinOp(.ptr_elem_val, array, elem_index);
},
.One => {
- const indexable_ty = maybe_ptr_ty.childType();
- switch (indexable_ty.zigTypeTag()) {
- .Pointer => switch (indexable_ty.ptrSize()) {
- .Slice => {
- // We have a pointer to a slice and we want an element value.
- if (try sema.isComptimeKnown(block, src, array_maybe_ptr)) {
- const slice = try sema.analyzeLoad(block, src, array_maybe_ptr, array_ptr_src);
- if (try sema.resolveDefinedValue(block, src, slice)) |slice_val| {
- _ = slice_val;
- return sema.fail(block, src, "TODO implement Sema for elemVal for comptime known slice", .{});
- }
- try sema.requireRuntimeBlock(block, src);
- return block.addBinOp(.slice_elem_val, slice, elem_index);
- }
- try sema.requireRuntimeBlock(block, src);
- return block.addBinOp(.ptr_slice_elem_val, array_maybe_ptr, elem_index);
- },
- .Many, .C => {
- // We have a pointer to a pointer and we want an element value.
- if (try sema.isComptimeKnown(block, src, array_maybe_ptr)) {
- const ptr = try sema.analyzeLoad(block, src, array_maybe_ptr, array_ptr_src);
- if (try sema.resolveDefinedValue(block, src, ptr)) |ptr_val| {
- _ = ptr_val;
- return sema.fail(block, src, "TODO implement Sema for elemVal for comptime known pointer", .{});
- }
- try sema.requireRuntimeBlock(block, src);
- return block.addBinOp(.ptr_elem_val, ptr, elem_index);
- }
- try sema.requireRuntimeBlock(block, src);
- return block.addBinOp(.ptr_ptr_elem_val, array_maybe_ptr, elem_index);
- },
- .One => {
- const array_ty = indexable_ty.childType();
- if (array_ty.zigTypeTag() == .Array) {
- // We have a double pointer to an array, and we want an element
- // value. This can happen with this code for example:
- // var a: *[1]u8 = undefined; _ = a[0];
- const array_ptr = try sema.analyzeLoad(block, src, array_maybe_ptr, array_ptr_src);
- const ptr = try sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src);
- return sema.analyzeLoad(block, src, ptr, elem_index_src);
- } else return sema.fail(
- block,
- array_ptr_src,
- "expected pointer, found '{}'",
- .{array_ty},
- );
- },
- },
- .Array => {
- const ptr = try sema.elemPtr(block, src, array_maybe_ptr, elem_index, elem_index_src);
- return sema.analyzeLoad(block, src, ptr, elem_index_src);
- },
- else => return sema.fail(
- block,
- array_ptr_src,
- "expected pointer, found '{}'",
- .{indexable_ty},
- ),
- }
+ assert(array_ty.childType().zigTypeTag() == .Array); // Guaranteed by isIndexable
+ const elem_ptr = try sema.elemPtr(block, array_src, array, elem_index, elem_index_src);
+ return sema.analyzeLoad(block, array_src, elem_ptr, elem_index_src);
},
},
.Array => {
- if (try sema.resolveMaybeUndefVal(block, src, array_maybe_ptr)) |array_val| {
- const elem_ty = maybe_ptr_ty.childType();
- const opt_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
+ if (try sema.resolveMaybeUndefVal(block, array_src, array)) |array_val| {
+ const elem_ty = array_ty.childType();
if (array_val.isUndef()) return sema.addConstUndef(elem_ty);
- if (opt_index_val) |index_val| {
+ const maybe_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
+ if (maybe_index_val) |index_val| {
const index = @intCast(usize, index_val.toUnsignedInt());
const elem_val = try array_val.elemValue(sema.arena, index);
return sema.addConstant(elem_ty, elem_val);
}
}
- try sema.requireRuntimeBlock(block, src);
- return block.addBinOp(.array_elem_val, array_maybe_ptr, elem_index);
+ try sema.requireRuntimeBlock(block, array_src);
+ return block.addBinOp(.array_elem_val, array, elem_index);
},
- else => return sema.fail(
- block,
- array_ptr_src,
- "expected pointer or array; found '{}'",
- .{maybe_ptr_ty},
- ),
+ .Vector => return sema.fail(block, array_src, "TODO implement Sema for elemVal for vector", .{}),
+ else => unreachable,
}
}
@@ -11391,12 +11933,7 @@ fn elemPtrArray(
elem_index_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
const array_ptr_ty = sema.typeOf(array_ptr);
- const pointee_type = array_ptr_ty.elemType().elemType();
- const result_ty = try Type.ptr(sema.arena, .{
- .pointee_type = pointee_type,
- .mutable = array_ptr_ty.ptrIsMutable(),
- .@"addrspace" = array_ptr_ty.ptrAddressSpace(),
- });
+ const result_ty = try array_ptr_ty.elemPtrType(sema.arena);
if (try sema.resolveDefinedValue(block, src, array_ptr)) |array_ptr_val| {
if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| {
@@ -11410,16 +11947,7 @@ fn elemPtrArray(
}
// TODO safety check for array bounds
try sema.requireRuntimeBlock(block, src);
- return block.addInst(.{
- .tag = .ptr_elem_ptr,
- .data = .{ .ty_pl = .{
- .ty = try sema.addType(result_ty),
- .payload = try sema.addExtra(Air.Bin{
- .lhs = array_ptr,
- .rhs = elem_index,
- }),
- } },
- });
+ return block.addPtrElemPtr(array_ptr, elem_index, result_ty);
}
fn coerce(
@@ -11452,7 +11980,7 @@ fn coerce(
return sema.addConstant(dest_ty, val);
}
try sema.requireRuntimeBlock(block, inst_src);
- return block.addTyOp(.bitcast, dest_ty, inst);
+ return block.addBitCast(dest_ty, inst);
}
// undefined to anything
@@ -11464,6 +11992,8 @@ fn coerce(
assert(inst_ty.zigTypeTag() != .Undefined);
// comptime known number to other number
+ // TODO why is this a separate function? should just be flattened into the
+ // switch expression below.
if (try sema.coerceNum(block, dest_ty, inst, inst_src)) |some|
return some;
@@ -11471,16 +12001,17 @@ fn coerce(
.Optional => {
// null to ?T
if (inst_ty.zigTypeTag() == .Null) {
- return sema.addConstant(dest_ty, Value.initTag(.null_value));
+ return sema.addConstant(dest_ty, Value.@"null");
}
// T to ?T
- var buf: Type.Payload.ElemType = undefined;
- const child_type = dest_ty.optionalChild(&buf);
+ const child_type = try dest_ty.optionalChildAlloc(sema.arena);
const intermediate = try sema.coerce(block, child_type, inst, inst_src);
return sema.wrapOptional(block, dest_ty, intermediate, inst_src);
},
.Pointer => {
+ const dest_info = dest_ty.ptrInfo().data;
+
// Function body to function pointer.
if (inst_ty.zigTypeTag() == .Fn) {
const fn_val = try sema.resolveConstValue(block, inst_src, inst);
@@ -11489,49 +12020,98 @@ fn coerce(
return sema.coerce(block, dest_ty, inst_as_ptr, inst_src);
}
+ // *T to *[1]T
+ single_item: {
+ if (dest_info.size != .One) break :single_item;
+ if (!inst_ty.isSinglePointer()) break :single_item;
+ const ptr_elem_ty = inst_ty.childType();
+ const array_ty = dest_info.pointee_type;
+ if (array_ty.zigTypeTag() != .Array) break :single_item;
+ const array_elem_ty = array_ty.childType();
+ const dest_is_mut = dest_info.mutable;
+ if (inst_ty.isConstPtr() and dest_is_mut) break :single_item;
+ if (inst_ty.isVolatilePtr() and !dest_info.@"volatile") break :single_item;
+ if (inst_ty.ptrAddressSpace() != dest_info.@"addrspace") break :single_item;
+ switch (coerceInMemoryAllowed(array_elem_ty, ptr_elem_ty, dest_is_mut, target)) {
+ .ok => {},
+ .no_match => break :single_item,
+ }
+ return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
+ }
+
// Coercions where the source is a single pointer to an array.
src_array_ptr: {
if (!inst_ty.isSinglePointer()) break :src_array_ptr;
- const array_type = inst_ty.elemType();
- if (array_type.zigTypeTag() != .Array) break :src_array_ptr;
- const array_elem_type = array_type.elemType();
- const dest_is_mut = !dest_ty.isConstPtr();
+ const array_ty = inst_ty.childType();
+ if (array_ty.zigTypeTag() != .Array) break :src_array_ptr;
+ const array_elem_type = array_ty.childType();
+ const dest_is_mut = dest_info.mutable;
if (inst_ty.isConstPtr() and dest_is_mut) break :src_array_ptr;
- if (inst_ty.isVolatilePtr() and !dest_ty.isVolatilePtr()) break :src_array_ptr;
- if (inst_ty.ptrAddressSpace() != dest_ty.ptrAddressSpace()) break :src_array_ptr;
+ if (inst_ty.isVolatilePtr() and !dest_info.@"volatile") break :src_array_ptr;
+ if (inst_ty.ptrAddressSpace() != dest_info.@"addrspace") break :src_array_ptr;
- const dst_elem_type = dest_ty.elemType();
+ const dst_elem_type = dest_info.pointee_type;
switch (coerceInMemoryAllowed(dst_elem_type, array_elem_type, dest_is_mut, target)) {
.ok => {},
.no_match => break :src_array_ptr,
}
- switch (dest_ty.ptrSize()) {
+ switch (dest_info.size) {
.Slice => {
// *[N]T to []T
return sema.coerceArrayPtrToSlice(block, dest_ty, inst, inst_src);
},
.C => {
// *[N]T to [*c]T
- return sema.coerceArrayPtrToMany(block, dest_ty, inst, inst_src);
+ return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
},
.Many => {
// *[N]T to [*]T
// *[N:s]T to [*:s]T
// *[N:s]T to [*]T
- if (dest_ty.sentinel()) |dst_sentinel| {
- if (array_type.sentinel()) |src_sentinel| {
+ if (dest_info.sentinel) |dst_sentinel| {
+ if (array_ty.sentinel()) |src_sentinel| {
if (src_sentinel.eql(dst_sentinel, dst_elem_type)) {
- return sema.coerceArrayPtrToMany(block, dest_ty, inst, inst_src);
+ return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
}
}
} else {
- return sema.coerceArrayPtrToMany(block, dest_ty, inst, inst_src);
+ return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
}
},
.One => {},
}
}
+
+ // coercion to C pointer
+ if (dest_info.size == .C) {
+ switch (inst_ty.zigTypeTag()) {
+ .Null => {
+ return sema.addConstant(dest_ty, Value.@"null");
+ },
+ .ComptimeInt => {
+ const addr = try sema.coerce(block, Type.usize, inst, inst_src);
+ return sema.coerceCompatiblePtrs(block, dest_ty, addr, inst_src);
+ },
+ .Int => {
+ const ptr_size_ty = switch (inst_ty.intInfo(target).signedness) {
+ .signed => Type.isize,
+ .unsigned => Type.usize,
+ };
+ const addr = try sema.coerce(block, ptr_size_ty, inst, inst_src);
+ return sema.coerceCompatiblePtrs(block, dest_ty, addr, inst_src);
+ },
+ else => {},
+ }
+ }
+
+ // cast from *T and [*]T to *c_void
+ // but don't do it if the source type is a double pointer
+ if (dest_info.pointee_type.tag() == .c_void and inst_ty.zigTypeTag() == .Pointer and
+ inst_ty.childType().zigTypeTag() != .Pointer)
+ {
+ return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src);
+ }
},
.Int => {
// integer widening
@@ -11605,21 +12185,41 @@ fn coerce(
// T to E!T or E to E!T
return sema.wrapErrorUnion(block, dest_ty, inst, inst_src);
},
- .ErrorSet => {
- // Coercion to `anyerror`.
- // TODO If the dest type tag is not `anyerror` it still could
- // resolve to anyerror. `dest_ty` needs to have inferred error set resolution
- // happen before this check.
- if (dest_ty.tag() == .anyerror and inst_ty.zigTypeTag() == .ErrorSet) {
- return sema.coerceErrSetToAnyError(block, inst, inst_src);
- }
+ .ErrorSet => switch (inst_ty.zigTypeTag()) {
+ .ErrorSet => {
+ // Coercion to `anyerror`. Note that this check can return false positives
+ // in case the error sets did not get resolved.
+ if (dest_ty.isAnyError()) {
+ return sema.coerceCompatibleErrorSets(block, inst, inst_src);
+ }
+ // If both are inferred error sets of functions, and
+ // the dest includes the source function, the coercion is OK.
+ // This check is important because it works without forcing a full resolution
+ // of inferred error sets.
+ if (inst_ty.castTag(.error_set_inferred)) |src_payload| {
+ if (dest_ty.castTag(.error_set_inferred)) |dst_payload| {
+ const src_func = src_payload.data.func;
+ const dst_func = dst_payload.data.func;
+
+ if (src_func == dst_func or dst_payload.data.functions.contains(src_func)) {
+ return sema.coerceCompatibleErrorSets(block, inst, inst_src);
+ }
+ }
+ }
+ // TODO full error set resolution and compare sets by names.
+ },
+ else => {},
},
.Union => switch (inst_ty.zigTypeTag()) {
.Enum, .EnumLiteral => return sema.coerceEnumToUnion(block, dest_ty, dest_ty_src, inst, inst_src),
else => {},
},
.Array => switch (inst_ty.zigTypeTag()) {
- .Vector => return sema.coerceVectorToArray(block, dest_ty, dest_ty_src, inst, inst_src),
+ .Vector => return sema.coerceVectorInMemory(block, dest_ty, dest_ty_src, inst, inst_src),
+ else => {},
+ },
+ .Vector => switch (inst_ty.zigTypeTag()) {
+ .Array => return sema.coerceVectorInMemory(block, dest_ty, dest_ty_src, inst, inst_src),
else => {},
},
else => {},
@@ -12064,6 +12664,121 @@ fn beginComptimePtrMutation(
}
}
+const ComptimePtrLoadKit = struct {
+ /// The Value of the Decl that owns this memory.
+ root_val: Value,
+ /// Parent Value.
+ val: Value,
+ /// The Type of the parent Value.
+ ty: Type,
+ /// The starting byte offset of `val` from `root_val`.
+ byte_offset: usize,
+ /// Whether the `root_val` could be mutated by further
+ /// semantic analysis and a copy must be performed.
+ is_mutable: bool,
+};
+
+const ComptimePtrLoadError = CompileError || error{
+ RuntimeLoad,
+};
+
+fn beginComptimePtrLoad(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ ptr_val: Value,
+) ComptimePtrLoadError!ComptimePtrLoadKit {
+ const target = sema.mod.getTarget();
+ switch (ptr_val.tag()) {
+ .decl_ref => {
+ const decl = ptr_val.castTag(.decl_ref).?.data;
+ const decl_val = try decl.value();
+ if (decl_val.tag() == .variable) return error.RuntimeLoad;
+ return ComptimePtrLoadKit{
+ .root_val = decl_val,
+ .val = decl_val,
+ .ty = decl.ty,
+ .byte_offset = 0,
+ .is_mutable = false,
+ };
+ },
+ .decl_ref_mut => {
+ const decl = ptr_val.castTag(.decl_ref_mut).?.data.decl;
+ const decl_val = try decl.value();
+ if (decl_val.tag() == .variable) return error.RuntimeLoad;
+ return ComptimePtrLoadKit{
+ .root_val = decl_val,
+ .val = decl_val,
+ .ty = decl.ty,
+ .byte_offset = 0,
+ .is_mutable = true,
+ };
+ },
+ .elem_ptr => {
+ const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
+ const parent = try beginComptimePtrLoad(sema, block, src, elem_ptr.array_ptr);
+ const elem_ty = parent.ty.childType();
+ const elem_size = elem_ty.abiSize(target);
+ return ComptimePtrLoadKit{
+ .root_val = parent.root_val,
+ .val = try parent.val.elemValue(sema.arena, elem_ptr.index),
+ .ty = elem_ty,
+ .byte_offset = parent.byte_offset + elem_size * elem_ptr.index,
+ .is_mutable = parent.is_mutable,
+ };
+ },
+ .field_ptr => {
+ const field_ptr = ptr_val.castTag(.field_ptr).?.data;
+ const parent = try beginComptimePtrLoad(sema, block, src, field_ptr.container_ptr);
+ const field_index = @intCast(u32, field_ptr.field_index);
+ try sema.resolveTypeLayout(block, src, parent.ty);
+ const field_offset = parent.ty.structFieldOffset(field_index, target);
+ return ComptimePtrLoadKit{
+ .root_val = parent.root_val,
+ .val = try parent.val.fieldValue(sema.arena, field_index),
+ .ty = parent.ty.structFieldType(field_index),
+ .byte_offset = parent.byte_offset + field_offset,
+ .is_mutable = parent.is_mutable,
+ };
+ },
+ .eu_payload_ptr => {
+ const err_union_ptr = ptr_val.castTag(.eu_payload_ptr).?.data;
+ const parent = try beginComptimePtrLoad(sema, block, src, err_union_ptr);
+ return ComptimePtrLoadKit{
+ .root_val = parent.root_val,
+ .val = parent.val.castTag(.eu_payload).?.data,
+ .ty = parent.ty.errorUnionPayload(),
+ .byte_offset = undefined,
+ .is_mutable = parent.is_mutable,
+ };
+ },
+ .opt_payload_ptr => {
+ const opt_ptr = ptr_val.castTag(.opt_payload_ptr).?.data;
+ const parent = try beginComptimePtrLoad(sema, block, src, opt_ptr);
+ return ComptimePtrLoadKit{
+ .root_val = parent.root_val,
+ .val = parent.val.castTag(.opt_payload).?.data,
+ .ty = try parent.ty.optionalChildAlloc(sema.arena),
+ .byte_offset = undefined,
+ .is_mutable = parent.is_mutable,
+ };
+ },
+
+ .zero,
+ .one,
+ .int_u64,
+ .int_i64,
+ .int_big_positive,
+ .int_big_negative,
+ .variable,
+ .extern_fn,
+ .function,
+ => return error.RuntimeLoad,
+
+ else => unreachable,
+ }
+}
+
fn bitCast(
sema: *Sema,
block: *Block,
@@ -12079,7 +12794,7 @@ fn bitCast(
return sema.addConstant(dest_ty, result_val);
}
try sema.requireRuntimeBlock(block, inst_src);
- return block.addTyOp(.bitcast, dest_ty, inst);
+ return block.addBitCast(dest_ty, inst);
}
fn coerceArrayPtrToSlice(
@@ -12090,21 +12805,26 @@ fn coerceArrayPtrToSlice(
inst_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
if (try sema.resolveDefinedValue(block, inst_src, inst)) |val| {
- // The comptime Value representation is compatible with both types.
- return sema.addConstant(dest_ty, val);
+ const ptr_array_ty = sema.typeOf(inst);
+ const array_ty = ptr_array_ty.childType();
+ const slice_val = try Value.Tag.slice.create(sema.arena, .{
+ .ptr = val,
+ .len = try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen()),
+ });
+ return sema.addConstant(dest_ty, slice_val);
}
try sema.requireRuntimeBlock(block, inst_src);
return block.addTyOp(.array_to_slice, dest_ty, inst);
}
-fn coerceArrayPtrToMany(
+fn coerceCompatiblePtrs(
sema: *Sema,
block: *Block,
dest_ty: Type,
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
- if (try sema.resolveDefinedValue(block, inst_src, inst)) |val| {
+ if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| {
// The comptime Value representation is compatible with both types.
return sema.addConstant(dest_ty, val);
}
@@ -12187,7 +12907,7 @@ fn coerceEnumToUnion(
// If the union has all fields 0 bits, the union value is just the enum value.
if (union_ty.unionHasAllZeroBitFieldTypes()) {
- return block.addTyOp(.bitcast, union_ty, enum_tag);
+ return block.addBitCast(union_ty, enum_tag);
}
// TODO resolve the field names and add a hint that says "field 'foo' has type 'bar'"
@@ -12203,49 +12923,52 @@ fn coerceEnumToUnion(
return sema.failWithOwnedErrorMsg(msg);
}
-fn coerceVectorToArray(
+// Coerces vectors/arrays which have the same in-memory layout. This can be used for
+// both coercing from and to vectors.
+fn coerceVectorInMemory(
sema: *Sema,
block: *Block,
- array_ty: Type,
- array_ty_src: LazySrcLoc,
- vector: Air.Inst.Ref,
- vector_src: LazySrcLoc,
+ dest_ty: Type,
+ dest_ty_src: LazySrcLoc,
+ inst: Air.Inst.Ref,
+ inst_src: LazySrcLoc,
) !Air.Inst.Ref {
- const vector_ty = sema.typeOf(vector);
- const array_len = array_ty.arrayLen();
- const vector_len = vector_ty.arrayLen();
- if (array_len != vector_len) {
+ const inst_ty = sema.typeOf(inst);
+ const inst_len = inst_ty.arrayLen();
+ const dest_len = dest_ty.arrayLen();
+
+ if (dest_len != inst_len) {
const msg = msg: {
- const msg = try sema.errMsg(block, vector_src, "expected {}, found {}", .{
- array_ty, vector_ty,
+ const msg = try sema.errMsg(block, inst_src, "expected {}, found {}", .{
+ dest_ty, inst_ty,
});
errdefer msg.destroy(sema.gpa);
- try sema.errNote(block, array_ty_src, msg, "array has length {d}", .{array_len});
- try sema.errNote(block, vector_src, msg, "vector has length {d}", .{vector_len});
+ try sema.errNote(block, dest_ty_src, msg, "destination has length {d}", .{dest_len});
+ try sema.errNote(block, inst_src, msg, "source has length {d}", .{inst_len});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
const target = sema.mod.getTarget();
- const array_elem_ty = array_ty.childType();
- const vector_elem_ty = vector_ty.childType();
- const in_memory_result = coerceInMemoryAllowed(array_elem_ty, vector_elem_ty, false, target);
+ const dest_elem_ty = dest_ty.childType();
+ const inst_elem_ty = inst_ty.childType();
+ const in_memory_result = coerceInMemoryAllowed(dest_elem_ty, inst_elem_ty, false, target);
if (in_memory_result != .ok) {
// TODO recursive error notes for coerceInMemoryAllowed failure
- return sema.fail(block, vector_src, "expected {}, found {}", .{ array_ty, vector_ty });
+ return sema.fail(block, inst_src, "expected {}, found {}", .{ dest_ty, inst_ty });
}
- if (try sema.resolveMaybeUndefVal(block, vector_src, vector)) |vector_val| {
+ if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |inst_val| {
// These types share the same comptime value representation.
- return sema.addConstant(array_ty, vector_val);
+ return sema.addConstant(dest_ty, inst_val);
}
- try sema.requireRuntimeBlock(block, vector_src);
- return block.addTyOp(.bitcast, array_ty, vector);
+ try sema.requireRuntimeBlock(block, inst_src);
+ return block.addBitCast(dest_ty, inst);
}
-fn coerceErrSetToAnyError(
+fn coerceCompatibleErrorSets(
sema: *Sema,
block: *Block,
err_set: Air.Inst.Ref,
@@ -12256,7 +12979,13 @@ fn coerceErrSetToAnyError(
return sema.addConstant(Type.anyerror, err_set_val);
}
try sema.requireRuntimeBlock(block, err_set_src);
- return block.addTyOp(.bitcast, Type.anyerror, err_set);
+ return block.addInst(.{
+ .tag = .bitcast,
+ .data = .{ .ty_op = .{
+ .ty = Air.Inst.Ref.anyerror_type,
+ .operand = err_set,
+ } },
+ });
}
fn analyzeDeclVal(
@@ -12296,10 +13025,15 @@ fn analyzeDeclRef(sema: *Sema, decl: *Decl) CompileError!Air.Inst.Ref {
const decl_tv = try decl.typedValue();
if (decl_tv.val.castTag(.variable)) |payload| {
const variable = payload.data;
+ const alignment: u32 = if (decl.align_val.tag() == .null_value)
+ 0
+ else
+ @intCast(u32, decl.align_val.toUnsignedInt());
const ty = try Type.ptr(sema.arena, .{
.pointee_type = decl_tv.ty,
.mutable = variable.is_mutable,
.@"addrspace" = decl.@"addrspace",
+ .@"align" = alignment,
});
return sema.addConstant(ty, try Value.Tag.decl_ref.create(sema.arena, decl));
}
@@ -12361,7 +13095,7 @@ fn analyzeLoad(
else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty}),
};
if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| {
- if (try ptr_val.pointerDeref(sema.arena)) |elem_val| {
+ if (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) |elem_val| {
return sema.addConstant(elem_ty, elem_val);
}
}
@@ -12370,6 +13104,25 @@ fn analyzeLoad(
return block.addTyOp(.load, elem_ty, ptr);
}
+fn analyzeSlicePtr(
+ sema: *Sema,
+ block: *Block,
+ src: LazySrcLoc,
+ slice: Air.Inst.Ref,
+ slice_ty: Type,
+ slice_src: LazySrcLoc,
+) CompileError!Air.Inst.Ref {
+ const buf = try sema.arena.create(Type.SlicePtrFieldTypeBuffer);
+ const result_ty = slice_ty.slicePtrFieldType(buf);
+
+ if (try sema.resolveMaybeUndefVal(block, slice_src, slice)) |val| {
+ if (val.isUndef()) return sema.addConstUndef(result_ty);
+ return sema.addConstant(result_ty, val.slicePtr());
+ }
+ try sema.requireRuntimeBlock(block, src);
+ return block.addTyOp(.slice_ptr, result_ty, slice);
+}
+
fn analyzeSliceLen(
sema: *Sema,
block: *Block,
@@ -12441,74 +13194,128 @@ fn analyzeSlice(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
- array_ptr: Air.Inst.Ref,
- start: Air.Inst.Ref,
- end_opt: Air.Inst.Ref,
+ ptr_ptr: Air.Inst.Ref,
+ uncasted_start: Air.Inst.Ref,
+ uncasted_end_opt: Air.Inst.Ref,
sentinel_opt: Air.Inst.Ref,
sentinel_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
- const array_ptr_ty = sema.typeOf(array_ptr);
- const ptr_child = switch (array_ptr_ty.zigTypeTag()) {
- .Pointer => array_ptr_ty.elemType(),
- else => return sema.fail(block, src, "expected pointer, found '{}'", .{array_ptr_ty}),
+ const ptr_src = src; // TODO better source location
+ const start_src = src; // TODO better source location
+ const end_src = src; // TODO better source location
+ // Slice expressions can operate on a variable whose type is an array. This requires
+ // the slice operand to be a pointer. In the case of a non-array, it will be a double pointer.
+ const ptr_ptr_ty = sema.typeOf(ptr_ptr);
+ const ptr_ptr_child_ty = switch (ptr_ptr_ty.zigTypeTag()) {
+ .Pointer => ptr_ptr_ty.elemType(),
+ else => return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ptr_ty}),
};
- var array_type = ptr_child;
- const elem_type = switch (ptr_child.zigTypeTag()) {
- .Array => ptr_child.elemType(),
- .Pointer => blk: {
- if (ptr_child.isSinglePointer()) {
- if (ptr_child.elemType().zigTypeTag() == .Array) {
- array_type = ptr_child.elemType();
- break :blk ptr_child.elemType().elemType();
+ var array_ty = ptr_ptr_child_ty;
+ var slice_ty = ptr_ptr_ty;
+ var ptr_or_slice = ptr_ptr;
+ var elem_ty = ptr_ptr_child_ty.childType();
+ switch (ptr_ptr_child_ty.zigTypeTag()) {
+ .Array => {},
+ .Pointer => {
+ if (ptr_ptr_child_ty.isSinglePointer()) {
+ const double_child_ty = ptr_ptr_child_ty.childType();
+ if (double_child_ty.zigTypeTag() == .Array) {
+ ptr_or_slice = try sema.analyzeLoad(block, src, ptr_ptr, ptr_src);
+ slice_ty = ptr_ptr_child_ty;
+ array_ty = double_child_ty;
+ elem_ty = double_child_ty.childType();
+ } else {
+ return sema.fail(block, ptr_src, "slice of single-item pointer", .{});
}
-
- return sema.fail(block, src, "slice of single-item pointer", .{});
}
- break :blk ptr_child.elemType();
},
- else => return sema.fail(block, src, "slice of non-array type '{}'", .{ptr_child}),
+ else => return sema.fail(block, ptr_src, "slice of non-array type '{}'", .{ptr_ptr_child_ty}),
+ }
+ const ptr = if (slice_ty.isSlice())
+ try sema.analyzeSlicePtr(block, src, ptr_or_slice, slice_ty, ptr_src)
+ else
+ ptr_or_slice;
+
+ const start = try sema.coerce(block, Type.usize, uncasted_start, start_src);
+ const new_ptr = try analyzePtrArithmetic(sema, block, src, ptr, start, .ptr_add, ptr_src, start_src);
+
+ const end = e: {
+ if (uncasted_end_opt != .none) {
+ break :e try sema.coerce(block, Type.usize, uncasted_end_opt, end_src);
+ }
+
+ if (array_ty.zigTypeTag() == .Array) {
+ break :e try sema.addConstant(
+ Type.usize,
+ try Value.Tag.int_u64.create(sema.arena, array_ty.arrayLen()),
+ );
+ } else if (slice_ty.isSlice()) {
+ break :e try sema.analyzeSliceLen(block, src, ptr_or_slice);
+ }
+ return sema.fail(block, end_src, "slice of pointer must include end value", .{});
};
const slice_sentinel = if (sentinel_opt != .none) blk: {
- const casted = try sema.coerce(block, elem_type, sentinel_opt, sentinel_src);
+ const casted = try sema.coerce(block, elem_ty, sentinel_opt, sentinel_src);
break :blk try sema.resolveConstValue(block, sentinel_src, casted);
} else null;
- var return_ptr_size: std.builtin.TypeInfo.Pointer.Size = .Slice;
- var return_elem_type = elem_type;
- if (end_opt != .none) {
- if (try sema.resolveDefinedValue(block, src, end_opt)) |end_val| {
- if (try sema.resolveDefinedValue(block, src, start)) |start_val| {
- const start_u64 = start_val.toUnsignedInt();
- const end_u64 = end_val.toUnsignedInt();
- if (start_u64 > end_u64) {
- return sema.fail(block, src, "out of bounds slice", .{});
- }
+ const new_len = try sema.analyzeArithmetic(block, .sub, end, start, src, end_src, start_src);
- const len = end_u64 - start_u64;
- const array_sentinel = if (array_type.zigTypeTag() == .Array and end_u64 == array_type.arrayLen())
- array_type.sentinel()
- else
- slice_sentinel;
- return_elem_type = try Type.array(sema.arena, len, array_sentinel, elem_type);
- return_ptr_size = .One;
- }
+ const opt_new_ptr_val = try sema.resolveDefinedValue(block, ptr_src, new_ptr);
+ const opt_new_len_val = try sema.resolveDefinedValue(block, src, new_len);
+
+ const new_ptr_ty_info = sema.typeOf(new_ptr).ptrInfo().data;
+
+ if (opt_new_len_val) |new_len_val| {
+ const new_len_int = new_len_val.toUnsignedInt();
+
+ const sentinel = if (array_ty.zigTypeTag() == .Array and new_len_int == array_ty.arrayLen())
+ array_ty.sentinel()
+ else
+ slice_sentinel;
+
+ const return_ty = try Type.ptr(sema.arena, .{
+ .pointee_type = try Type.array(sema.arena, new_len_int, sentinel, elem_ty),
+ .sentinel = null,
+ .@"align" = new_ptr_ty_info.@"align",
+ .@"addrspace" = new_ptr_ty_info.@"addrspace",
+ .mutable = new_ptr_ty_info.mutable,
+ .@"allowzero" = new_ptr_ty_info.@"allowzero",
+ .@"volatile" = new_ptr_ty_info.@"volatile",
+ .size = .One,
+ });
+
+ if (opt_new_ptr_val) |new_ptr_val| {
+ return sema.addConstant(return_ty, new_ptr_val);
+ } else {
+ return block.addBitCast(return_ty, new_ptr);
}
}
- const return_type = try Type.ptr(sema.arena, .{
- .pointee_type = return_elem_type,
- .sentinel = if (end_opt == .none) slice_sentinel else null,
- .@"align" = 0, // TODO alignment
- .@"addrspace" = if (ptr_child.zigTypeTag() == .Pointer) ptr_child.ptrAddressSpace() else .generic,
- .mutable = !ptr_child.isConstPtr(),
- .@"allowzero" = ptr_child.isAllowzeroPtr(),
- .@"volatile" = ptr_child.isVolatilePtr(),
- .size = return_ptr_size,
+
+ const return_ty = try Type.ptr(sema.arena, .{
+ .pointee_type = elem_ty,
+ .sentinel = slice_sentinel,
+ .@"align" = new_ptr_ty_info.@"align",
+ .@"addrspace" = new_ptr_ty_info.@"addrspace",
+ .mutable = new_ptr_ty_info.mutable,
+ .@"allowzero" = new_ptr_ty_info.@"allowzero",
+ .@"volatile" = new_ptr_ty_info.@"volatile",
+ .size = .Slice,
});
- _ = return_type;
- return sema.fail(block, src, "TODO implement analysis of slice", .{});
+ try sema.requireRuntimeBlock(block, src);
+ return block.addInst(.{
+ .tag = .slice,
+ .data = .{ .ty_pl = .{
+ .ty = try sema.addType(return_ty),
+ .payload = try sema.addExtra(Air.Bin{
+ .lhs = new_ptr,
+ .rhs = new_len,
+ }),
+ } },
+ });
}
/// Asserts that lhs and rhs types are both numeric.
@@ -12732,47 +13539,30 @@ fn wrapErrorUnion(
}
switch (dest_err_set_ty.tag()) {
.anyerror => {},
- .error_set_single => {
+ .error_set_single => ok: {
const expected_name = val.castTag(.@"error").?.data.name;
const n = dest_err_set_ty.castTag(.error_set_single).?.data;
- if (!mem.eql(u8, expected_name, n)) {
- return sema.fail(
- block,
- inst_src,
- "expected type '{}', found type '{}'",
- .{ dest_err_set_ty, inst_ty },
- );
- }
+ if (mem.eql(u8, expected_name, n)) break :ok;
+ return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty);
},
- .error_set => {
+ .error_set => ok: {
const expected_name = val.castTag(.@"error").?.data.name;
const error_set = dest_err_set_ty.castTag(.error_set).?.data;
const names = error_set.names_ptr[0..error_set.names_len];
// TODO this is O(N). I'm putting off solving this until we solve inferred
// error sets at the same time.
- const found = for (names) |name| {
- if (mem.eql(u8, expected_name, name)) break true;
- } else false;
- if (!found) {
- return sema.fail(
- block,
- inst_src,
- "expected type '{}', found type '{}'",
- .{ dest_err_set_ty, inst_ty },
- );
+ for (names) |name| {
+ if (mem.eql(u8, expected_name, name)) break :ok;
}
+ return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty);
},
- .error_set_inferred => {
+ .error_set_inferred => ok: {
+ const err_set_payload = dest_err_set_ty.castTag(.error_set_inferred).?.data;
+ if (err_set_payload.is_anyerror) break :ok;
const expected_name = val.castTag(.@"error").?.data.name;
- const map = &dest_err_set_ty.castTag(.error_set_inferred).?.data.map;
- if (!map.contains(expected_name)) {
- return sema.fail(
- block,
- inst_src,
- "expected type '{}', found type '{}'",
- .{ dest_err_set_ty, inst_ty },
- );
- }
+ if (err_set_payload.map.contains(expected_name)) break :ok;
+ // TODO error set resolution here before emitting a compile error
+ return sema.failWithErrorSetCodeMissing(block, inst_src, dest_err_set_ty, inst_ty);
},
else => unreachable,
}
@@ -12794,15 +13584,18 @@ fn wrapErrorUnion(
fn unionToTag(
sema: *Sema,
block: *Block,
- dest_ty: Type,
+ enum_ty: Type,
un: Air.Inst.Ref,
un_src: LazySrcLoc,
) !Air.Inst.Ref {
+ if ((try sema.typeHasOnePossibleValue(block, un_src, enum_ty))) |opv| {
+ return sema.addConstant(enum_ty, opv);
+ }
if (try sema.resolveMaybeUndefVal(block, un_src, un)) |un_val| {
- return sema.addConstant(dest_ty, un_val.unionTag());
+ return sema.addConstant(enum_ty, un_val.unionTag());
}
try sema.requireRuntimeBlock(block, un_src);
- return block.addTyOp(.get_union_tag, dest_ty, un);
+ return block.addTyOp(.get_union_tag, enum_ty, un);
}
fn resolvePeerTypes(
@@ -12831,114 +13624,127 @@ fn resolvePeerTypes(
const candidate_ty_tag = candidate_ty.zigTypeTag();
const chosen_ty_tag = chosen_ty.zigTypeTag();
- if (candidate_ty_tag == .NoReturn)
- continue;
- if (chosen_ty_tag == .NoReturn) {
- chosen = candidate;
- chosen_i = candidate_i + 1;
- continue;
- }
- if (candidate_ty_tag == .Undefined)
- continue;
- if (chosen_ty_tag == .Undefined) {
- chosen = candidate;
- chosen_i = candidate_i + 1;
- continue;
- }
- if (chosen_ty.isInt() and
- candidate_ty.isInt() and
- chosen_ty.isSignedInt() == candidate_ty.isSignedInt())
- {
- if (chosen_ty.intInfo(target).bits < candidate_ty.intInfo(target).bits) {
- chosen = candidate;
- chosen_i = candidate_i + 1;
- }
- continue;
- }
- if (chosen_ty.isRuntimeFloat() and candidate_ty.isRuntimeFloat()) {
- if (chosen_ty.floatBits(target) < candidate_ty.floatBits(target)) {
- chosen = candidate;
- chosen_i = candidate_i + 1;
- }
- continue;
- }
-
- if (chosen_ty_tag == .ComptimeInt and candidate_ty.isInt()) {
- chosen = candidate;
- chosen_i = candidate_i + 1;
- continue;
- }
-
- if (chosen_ty.isInt() and candidate_ty_tag == .ComptimeInt) {
- continue;
- }
-
- if ((chosen_ty_tag == .ComptimeFloat or chosen_ty_tag == .ComptimeInt) and
- candidate_ty.isRuntimeFloat())
- {
- chosen = candidate;
- chosen_i = candidate_i + 1;
- continue;
- }
- if (chosen_ty.isRuntimeFloat() and
- (candidate_ty_tag == .ComptimeFloat or candidate_ty_tag == .ComptimeInt))
- {
- continue;
- }
-
- if (chosen_ty_tag == .Enum and candidate_ty_tag == .EnumLiteral) {
- continue;
- }
- if (chosen_ty_tag == .EnumLiteral and candidate_ty_tag == .Enum) {
- chosen = candidate;
- chosen_i = candidate_i + 1;
- continue;
- }
+ switch (candidate_ty_tag) {
+ .NoReturn, .Undefined => continue,
- if (chosen_ty_tag == .ComptimeFloat and candidate_ty_tag == .ComptimeInt)
- continue;
- if (chosen_ty_tag == .ComptimeInt and candidate_ty_tag == .ComptimeFloat) {
- chosen = candidate;
- chosen_i = candidate_i + 1;
- continue;
- }
+ .Null => {
+ any_are_null = true;
+ continue;
+ },
- if (chosen_ty_tag == .Null) {
- any_are_null = true;
- chosen = candidate;
- chosen_i = candidate_i + 1;
- continue;
- }
- if (candidate_ty_tag == .Null) {
- any_are_null = true;
- continue;
+ .Int => switch (chosen_ty_tag) {
+ .ComptimeInt => {
+ chosen = candidate;
+ chosen_i = candidate_i + 1;
+ continue;
+ },
+ .Int => {
+ if (chosen_ty.isSignedInt() == candidate_ty.isSignedInt()) {
+ if (chosen_ty.intInfo(target).bits < candidate_ty.intInfo(target).bits) {
+ chosen = candidate;
+ chosen_i = candidate_i + 1;
+ }
+ continue;
+ }
+ },
+ .Pointer => if (chosen_ty.ptrSize() == .C) continue,
+ else => {},
+ },
+ .ComptimeInt => switch (chosen_ty_tag) {
+ .Int, .Float, .ComptimeFloat => continue,
+ .Pointer => if (chosen_ty.ptrSize() == .C) continue,
+ else => {},
+ },
+ .Float => switch (chosen_ty_tag) {
+ .Float => {
+ if (chosen_ty.floatBits(target) < candidate_ty.floatBits(target)) {
+ chosen = candidate;
+ chosen_i = candidate_i + 1;
+ }
+ continue;
+ },
+ .ComptimeFloat, .ComptimeInt => {
+ chosen = candidate;
+ chosen_i = candidate_i + 1;
+ continue;
+ },
+ else => {},
+ },
+ .ComptimeFloat => switch (chosen_ty_tag) {
+ .Float => continue,
+ .ComptimeInt => {
+ chosen = candidate;
+ chosen_i = candidate_i + 1;
+ continue;
+ },
+ else => {},
+ },
+ .Enum => switch (chosen_ty_tag) {
+ .EnumLiteral => {
+ chosen = candidate;
+ chosen_i = candidate_i + 1;
+ continue;
+ },
+ else => {},
+ },
+ .EnumLiteral => switch (chosen_ty_tag) {
+ .Enum => continue,
+ else => {},
+ },
+ .Pointer => {
+ if (candidate_ty.ptrSize() == .C) {
+ if (chosen_ty_tag == .Int or chosen_ty_tag == .ComptimeInt) {
+ chosen = candidate;
+ chosen_i = candidate_i + 1;
+ continue;
+ }
+ if (chosen_ty_tag == .Pointer and chosen_ty.ptrSize() != .Slice) {
+ continue;
+ }
+ }
+ },
+ .Optional => {
+ var opt_child_buf: Type.Payload.ElemType = undefined;
+ const opt_child_ty = candidate_ty.optionalChild(&opt_child_buf);
+ if (coerceInMemoryAllowed(opt_child_ty, chosen_ty, false, target) == .ok) {
+ chosen = candidate;
+ chosen_i = candidate_i + 1;
+ continue;
+ }
+ if (coerceInMemoryAllowed(chosen_ty, opt_child_ty, false, target) == .ok) {
+ any_are_null = true;
+ continue;
+ }
+ },
+ else => {},
}
- if (chosen_ty_tag == .Optional) {
- var opt_child_buf: Type.Payload.ElemType = undefined;
- const opt_child_ty = chosen_ty.optionalChild(&opt_child_buf);
- if (coerceInMemoryAllowed(opt_child_ty, candidate_ty, false, target) == .ok) {
- continue;
- }
- if (coerceInMemoryAllowed(candidate_ty, opt_child_ty, false, target) == .ok) {
- any_are_null = true;
+ switch (chosen_ty_tag) {
+ .NoReturn, .Undefined => {
chosen = candidate;
chosen_i = candidate_i + 1;
continue;
- }
- }
- if (candidate_ty_tag == .Optional) {
- var opt_child_buf: Type.Payload.ElemType = undefined;
- const opt_child_ty = candidate_ty.optionalChild(&opt_child_buf);
- if (coerceInMemoryAllowed(opt_child_ty, chosen_ty, false, target) == .ok) {
+ },
+ .Null => {
+ any_are_null = true;
chosen = candidate;
chosen_i = candidate_i + 1;
continue;
- }
- if (coerceInMemoryAllowed(chosen_ty, opt_child_ty, false, target) == .ok) {
- any_are_null = true;
- continue;
- }
+ },
+ .Optional => {
+ var opt_child_buf: Type.Payload.ElemType = undefined;
+ const opt_child_ty = chosen_ty.optionalChild(&opt_child_buf);
+ if (coerceInMemoryAllowed(opt_child_ty, candidate_ty, false, target) == .ok) {
+ continue;
+ }
+ if (coerceInMemoryAllowed(candidate_ty, opt_child_ty, false, target) == .ok) {
+ any_are_null = true;
+ chosen = candidate;
+ chosen_i = candidate_i + 1;
+ continue;
+ }
+ },
+ else => {},
}
// At this point, we hit a compile error. We need to recover
@@ -13020,6 +13826,19 @@ pub fn resolveTypeLayout(
}
union_obj.status = .have_layout;
},
+ .Array => {
+ const elem_ty = ty.childType();
+ return sema.resolveTypeLayout(block, src, elem_ty);
+ },
+ .Optional => {
+ var buf: Type.Payload.ElemType = undefined;
+ const payload_ty = ty.optionalChild(&buf);
+ return sema.resolveTypeLayout(block, src, payload_ty);
+ },
+ .ErrorUnion => {
+ const payload_ty = ty.errorUnionPayload();
+ return sema.resolveTypeLayout(block, src, payload_ty);
+ },
else => {},
}
}
@@ -13572,10 +14391,9 @@ fn typeHasOnePossibleValue(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
- starting_type: Type,
+ ty: Type,
) CompileError!?Value {
- var ty = starting_type;
- while (true) switch (ty.tag()) {
+ switch (ty.tag()) {
.f16,
.f32,
.f64,
@@ -13669,7 +14487,7 @@ fn typeHasOnePossibleValue(
const enum_obj = resolved_ty.castTag(.enum_numbered).?.data;
if (enum_obj.fields.count() == 1) {
if (enum_obj.values.count() == 0) {
- return Value.initTag(.zero); // auto-numbered
+ return Value.zero; // auto-numbered
} else {
return enum_obj.values.keys()[0];
}
@@ -13682,7 +14500,7 @@ fn typeHasOnePossibleValue(
const enum_obj = resolved_ty.castTag(.enum_full).?.data;
if (enum_obj.fields.count() == 1) {
if (enum_obj.values.count() == 0) {
- return Value.initTag(.zero); // auto-numbered
+ return Value.zero; // auto-numbered
} else {
return enum_obj.values.keys()[0];
}
@@ -13694,12 +14512,19 @@ fn typeHasOnePossibleValue(
const resolved_ty = try sema.resolveTypeFields(block, src, ty);
const enum_simple = resolved_ty.castTag(.enum_simple).?.data;
if (enum_simple.fields.count() == 1) {
- return Value.initTag(.zero);
+ return Value.zero;
+ } else {
+ return null;
+ }
+ },
+ .enum_nonexhaustive => {
+ const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty;
+ if (!tag_ty.hasCodeGenBits()) {
+ return Value.zero;
} else {
return null;
}
},
- .enum_nonexhaustive => ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty,
.@"union" => {
return null; // TODO
},
@@ -13710,12 +14535,12 @@ fn typeHasOnePossibleValue(
.empty_struct, .empty_struct_literal => return Value.initTag(.empty_struct_value),
.void => return Value.void,
.noreturn => return Value.initTag(.unreachable_value),
- .@"null" => return Value.initTag(.null_value),
+ .@"null" => return Value.@"null",
.@"undefined" => return Value.initTag(.undef),
.int_unsigned, .int_signed => {
if (ty.cast(Type.Payload.Bits).?.data == 0) {
- return Value.initTag(.zero);
+ return Value.zero;
} else {
return null;
}
@@ -13723,14 +14548,16 @@ fn typeHasOnePossibleValue(
.vector, .array, .array_u8 => {
if (ty.arrayLen() == 0)
return Value.initTag(.empty_array);
- ty = ty.elemType();
- continue;
+ if ((try sema.typeHasOnePossibleValue(block, src, ty.elemType())) != null) {
+ return Value.initTag(.the_only_possible_value);
+ }
+ return null;
},
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
.generic_poison => return error.GenericPoison,
- };
+ }
}
fn getAstTree(sema: *Sema, block: *Block) CompileError!*const std.zig.Ast {
@@ -13934,14 +14761,22 @@ fn analyzeComptimeAlloc(
sema: *Sema,
block: *Block,
var_type: Type,
+ alignment: u32,
) CompileError!Air.Inst.Ref {
const ptr_type = try Type.ptr(sema.arena, .{
.pointee_type = var_type,
.@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .global_constant),
+ .@"align" = alignment,
});
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
+
+ const align_val = if (alignment == 0)
+ Value.@"null"
+ else
+ try Value.Tag.int_u64.create(anon_decl.arena(), alignment);
+
const decl = try anon_decl.finish(
try var_type.copy(anon_decl.arena()),
// There will be stores before the first load, but they may be to sub-elements or
@@ -13949,6 +14784,8 @@ fn analyzeComptimeAlloc(
// into fields/elements and have those overridden with stored values.
Value.undef,
);
+ decl.align_val = align_val;
+
try sema.mod.declareDeclDependency(sema.owner_decl, decl);
return sema.addConstant(ptr_type, try Value.Tag.decl_ref_mut.create(sema.arena, .{
.runtime_index = block.runtime_index,
@@ -14010,3 +14847,45 @@ pub fn analyzeAddrspace(
return address_space;
}
+
+/// Asserts the value is a pointer and dereferences it.
+/// Returns `null` if the pointer contents cannot be loaded at comptime.
+fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr_ty: Type) CompileError!?Value {
+ const target = sema.mod.getTarget();
+ const load_ty = ptr_ty.childType();
+ const parent = sema.beginComptimePtrLoad(block, src, ptr_val) catch |err| switch (err) {
+ error.RuntimeLoad => return null,
+ else => |e| return e,
+ };
+ // We have a Value that lines up in virtual memory exactly with what we want to load.
+ // If the Type is in-memory coercable to `load_ty`, it may be returned without modifications.
+ const coerce_in_mem_ok =
+ coerceInMemoryAllowed(load_ty, parent.ty, false, target) == .ok or
+ coerceInMemoryAllowed(parent.ty, load_ty, false, target) == .ok;
+ if (coerce_in_mem_ok) {
+ if (parent.is_mutable) {
+ // The decl whose value we are obtaining here may be overwritten with
+ // a different value upon further semantic analysis, which would
+ // invalidate this memory. So we must copy here.
+ return try parent.val.copy(sema.arena);
+ }
+ return parent.val;
+ }
+
+ // The type is not in-memory coercable, so it must be bitcasted according
+ // to the pointer type we are performing the load through.
+
+ // TODO emit a compile error if the types are not allowed to be bitcasted
+
+ if (parent.ty.abiSize(target) >= load_ty.abiSize(target)) {
+ // The Type it is stored as in the compiler has an ABI size greater or equal to
+ // the ABI size of `load_ty`. We may perform the bitcast based on
+ // `parent.val` alone (more efficient).
+ return try parent.val.bitCast(parent.ty, load_ty, target, sema.gpa, sema.arena);
+ }
+
+ // The Type it is stored as in the compiler has an ABI size less than the ABI size
+ // of `load_ty`. The bitcast must be performed based on the `parent.root_val`
+ // and reinterpreted starting at `parent.byte_offset`.
+ return sema.fail(block, src, "TODO: implement bitcast with index offset", .{});
+}
diff --git a/src/Zir.zig b/src/Zir.zig
index e45aac1a6f..7e5937e40d 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -72,6 +72,7 @@ pub fn extraData(code: Zir, comptime T: type, index: usize) struct { data: T, en
Inst.Ref => @intToEnum(Inst.Ref, code.extra[i]),
i32 => @bitCast(i32, code.extra[i]),
Inst.Call.Flags => @bitCast(Inst.Call.Flags, code.extra[i]),
+ Inst.SwitchBlock.Bits => @bitCast(Inst.SwitchBlock.Bits, code.extra[i]),
else => @compileError("bad field type"),
};
i += 1;
@@ -239,10 +240,6 @@ pub const Inst = struct {
/// Reinterpret the memory representation of a value as a different type.
/// Uses the pl_node field with payload `Bin`.
bitcast,
- /// A typed result location pointer is bitcasted to a new result location pointer.
- /// The new result location pointer has an inferred type.
- /// Uses the pl_node field with payload `Bin`.
- bitcast_result_ptr,
/// Bitwise NOT. `~`
/// Uses `un_node`.
bit_not,
@@ -481,6 +478,7 @@ pub const Inst = struct {
/// Includes a token source location.
/// Uses the `un_tok` union field.
/// The operand needs to get coerced to the function's return type.
+ /// TODO rename this to `ret_tok` because coercion is now done unconditionally in Sema.
ret_coerce,
/// Sends control flow back to the function's callee.
/// The return operand is `error.foo` where `foo` is given by the string.
@@ -546,9 +544,6 @@ pub const Inst = struct {
/// Returns the type of a value.
/// Uses the `un_node` field.
typeof,
- /// Given a value which is a pointer, returns the element type.
- /// Uses the `un_node` field.
- typeof_elem,
/// Given a value, look at the type of it, which must be an integer type.
/// Returns the integer type for the RHS of a shift operation.
/// Uses the `un_node` field.
@@ -618,39 +613,16 @@ pub const Inst = struct {
enum_literal,
/// A switch expression. Uses the `pl_node` union field.
/// AST node is the switch, payload is `SwitchBlock`.
- /// All prongs of target handled.
switch_block,
- /// Same as switch_block, except one or more prongs have multiple items.
- /// Payload is `SwitchBlockMulti`
- switch_block_multi,
- /// Same as switch_block, except has an else prong.
- switch_block_else,
- /// Same as switch_block_else, except one or more prongs have multiple items.
- /// Payload is `SwitchBlockMulti`
- switch_block_else_multi,
- /// Same as switch_block, except has an underscore prong.
- switch_block_under,
- /// Same as switch_block, except one or more prongs have multiple items.
- /// Payload is `SwitchBlockMulti`
- switch_block_under_multi,
- /// Same as `switch_block` but the target is a pointer to the value being switched on.
- switch_block_ref,
- /// Same as `switch_block_multi` but the target is a pointer to the value being switched on.
- /// Payload is `SwitchBlockMulti`
- switch_block_ref_multi,
- /// Same as `switch_block_else` but the target is a pointer to the value being switched on.
- switch_block_ref_else,
- /// Same as `switch_block_else_multi` but the target is a pointer to the
- /// value being switched on.
- /// Payload is `SwitchBlockMulti`
- switch_block_ref_else_multi,
- /// Same as `switch_block_under` but the target is a pointer to the value
- /// being switched on.
- switch_block_ref_under,
- /// Same as `switch_block_under_multi` but the target is a pointer to
- /// the value being switched on.
- /// Payload is `SwitchBlockMulti`
- switch_block_ref_under_multi,
+ /// Produces the value that will be switched on. For example, for
+ /// integers, it returns the integer with no modifications. For tagged unions, it
+ /// returns the active enum tag.
+ /// Uses the `un_node` union field.
+ switch_cond,
+ /// Same as `switch_cond`, except the input operand is a pointer to
+ /// what will be switched on.
+ /// Uses the `un_node` union field.
+ switch_cond_ref,
/// Produces the capture value for a switch prong.
/// Uses the `switch_capture` field.
switch_capture,
@@ -998,7 +970,6 @@ pub const Inst = struct {
.as_node,
.bit_and,
.bitcast,
- .bitcast_result_ptr,
.bit_or,
.block,
.block_inline,
@@ -1071,7 +1042,6 @@ pub const Inst = struct {
.negate,
.negate_wrap,
.typeof,
- .typeof_elem,
.xor,
.optional_type,
.optional_payload_safe,
@@ -1109,17 +1079,8 @@ pub const Inst = struct {
.switch_capture_else,
.switch_capture_else_ref,
.switch_block,
- .switch_block_multi,
- .switch_block_else,
- .switch_block_else_multi,
- .switch_block_under,
- .switch_block_under_multi,
- .switch_block_ref,
- .switch_block_ref_multi,
- .switch_block_ref_else,
- .switch_block_ref_else_multi,
- .switch_block_ref_under,
- .switch_block_ref_under_multi,
+ .switch_cond,
+ .switch_cond_ref,
.validate_struct_init,
.validate_array_init,
.struct_init_empty,
@@ -1265,7 +1226,6 @@ pub const Inst = struct {
.as_node = .pl_node,
.bit_and = .pl_node,
.bitcast = .pl_node,
- .bitcast_result_ptr = .pl_node,
.bit_not = .un_node,
.bit_or = .pl_node,
.block = .pl_node,
@@ -1348,7 +1308,6 @@ pub const Inst = struct {
.negate = .un_node,
.negate_wrap = .un_node,
.typeof = .un_node,
- .typeof_elem = .un_node,
.typeof_log2_int_type = .un_node,
.log2_int_type = .un_node,
.@"unreachable" = .@"unreachable",
@@ -1367,17 +1326,8 @@ pub const Inst = struct {
.ensure_err_payload_void = .un_tok,
.enum_literal = .str_tok,
.switch_block = .pl_node,
- .switch_block_multi = .pl_node,
- .switch_block_else = .pl_node,
- .switch_block_else_multi = .pl_node,
- .switch_block_under = .pl_node,
- .switch_block_under_multi = .pl_node,
- .switch_block_ref = .pl_node,
- .switch_block_ref_multi = .pl_node,
- .switch_block_ref_else = .pl_node,
- .switch_block_ref_else_multi = .pl_node,
- .switch_block_ref_under = .pl_node,
- .switch_block_ref_under_multi = .pl_node,
+ .switch_cond = .un_node,
+ .switch_cond_ref = .un_node,
.switch_capture = .switch_capture,
.switch_capture_ref = .switch_capture,
.switch_capture_multi = .switch_capture,
@@ -2466,37 +2416,17 @@ pub const Inst = struct {
index: u32,
};
- /// This form is supported when there are no ranges, and exactly 1 item per block.
- /// Depending on zir tag and len fields, extra fields trail
- /// this one in the extra array.
- /// 0. else_body { // If the tag has "_else" or "_under" in it.
+ /// 0. multi_cases_len: u32 // If has_multi_cases is set.
+ /// 1. else_body { // If has_else or has_under is set.
/// body_len: u32,
/// body member Index for every body_len
/// }
- /// 1. cases: {
+ /// 2. scalar_cases: { // for every scalar_cases_len
/// item: Ref,
/// body_len: u32,
/// body member Index for every body_len
- /// } for every cases_len
- pub const SwitchBlock = struct {
- operand: Ref,
- cases_len: u32,
- };
-
- /// This form is required when there exists a block which has more than one item,
- /// or a range.
- /// Depending on zir tag and len fields, extra fields trail
- /// this one in the extra array.
- /// 0. else_body { // If the tag has "_else" or "_under" in it.
- /// body_len: u32,
- /// body member Index for every body_len
/// }
- /// 1. scalar_cases: { // for every scalar_cases_len
- /// item: Ref,
- /// body_len: u32,
- /// body member Index for every body_len
- /// }
- /// 2. multi_cases: { // for every multi_cases_len
+ /// 3. multi_cases: { // for every multi_cases_len
/// items_len: u32,
/// ranges_len: u32,
/// body_len: u32,
@@ -2507,10 +2437,88 @@ pub const Inst = struct {
/// }
/// body member Index for every body_len
/// }
- pub const SwitchBlockMulti = struct {
+ pub const SwitchBlock = struct {
+ /// This is always a `switch_cond` or `switch_cond_ref` instruction.
+ /// If it is a `switch_cond_ref` instruction, bits.is_ref is always true.
+ /// If it is a `switch_cond` instruction, bits.is_ref is always false.
+ /// Both `switch_cond` and `switch_cond_ref` return a value, not a pointer,
+ /// that is useful for the case items, but cannot be used for capture values.
+ /// For the capture values, Sema is expected to find the operand of this operand
+ /// and use that.
operand: Ref,
- scalar_cases_len: u32,
- multi_cases_len: u32,
+ bits: Bits,
+
+ pub const Bits = packed struct {
+ /// If true, one or more prongs have multiple items.
+ has_multi_cases: bool,
+ /// If true, there is an else prong. This is mutually exclusive with `has_under`.
+ has_else: bool,
+ /// If true, there is an underscore prong. This is mutually exclusive with `has_else`.
+ has_under: bool,
+ /// If true, the `operand` is a pointer to the value being switched on.
+ /// TODO this flag is redundant with the tag of operand and can be removed.
+ is_ref: bool,
+ scalar_cases_len: ScalarCasesLen,
+
+ pub const ScalarCasesLen = u28;
+
+ pub fn specialProng(bits: Bits) SpecialProng {
+ const has_else: u2 = @boolToInt(bits.has_else);
+ const has_under: u2 = @boolToInt(bits.has_under);
+ return switch ((has_else << 1) | has_under) {
+ 0b00 => .none,
+ 0b01 => .under,
+ 0b10 => .@"else",
+ 0b11 => unreachable,
+ };
+ }
+ };
+
+ pub const ScalarProng = struct {
+ item: Ref,
+ body: []const Index,
+ };
+
+ /// TODO performance optimization: instead of having this helper method
+ /// change the definition of switch_capture instruction to store extra_index
+ /// instead of prong_index. This way, Sema won't be doing O(N^2) iterations
+ /// over the switch prongs.
+ pub fn getScalarProng(
+ self: SwitchBlock,
+ zir: Zir,
+ extra_end: usize,
+ prong_index: usize,
+ ) ScalarProng {
+ var extra_index: usize = extra_end;
+
+ if (self.bits.has_multi_cases) {
+ extra_index += 1;
+ }
+
+ if (self.bits.specialProng() != .none) {
+ const body_len = zir.extra[extra_index];
+ extra_index += 1;
+ const body = zir.extra[extra_index..][0..body_len];
+ extra_index += body.len;
+ }
+
+ var scalar_i: usize = 0;
+ while (true) : (scalar_i += 1) {
+ const item = @intToEnum(Ref, zir.extra[extra_index]);
+ extra_index += 1;
+ const body_len = zir.extra[extra_index];
+ extra_index += 1;
+ const body = zir.extra[extra_index..][0..body_len];
+ extra_index += body.len;
+
+ if (scalar_i < prong_index) continue;
+
+ return .{
+ .item = item,
+ .body = body,
+ };
+ }
+ }
};
pub const Field = struct {
@@ -2934,7 +2942,7 @@ pub const Inst = struct {
/// Trailing: for each `imports_len` there is an Item
pub const Imports = struct {
- imports_len: Zir.Inst.Index,
+ imports_len: Inst.Index,
pub const Item = struct {
/// null terminated string index
@@ -3077,7 +3085,7 @@ pub fn declIteratorInner(zir: Zir, extra_index: usize, decls_len: u32) DeclItera
/// The iterator would have to allocate memory anyway to iterate. So here we populate
/// an ArrayList as the result.
-pub fn findDecls(zir: Zir, list: *std.ArrayList(Zir.Inst.Index), decl_sub_index: u32) !void {
+pub fn findDecls(zir: Zir, list: *std.ArrayList(Inst.Index), decl_sub_index: u32) !void {
const block_inst = zir.extra[decl_sub_index + 6];
list.clearRetainingCapacity();
@@ -3086,8 +3094,8 @@ pub fn findDecls(zir: Zir, list: *std.ArrayList(Zir.Inst.Index), decl_sub_index:
fn findDeclsInner(
zir: Zir,
- list: *std.ArrayList(Zir.Inst.Index),
- inst: Zir.Inst.Index,
+ list: *std.ArrayList(Inst.Index),
+ inst: Inst.Index,
) Allocator.Error!void {
const tags = zir.instructions.items(.tag);
const datas = zir.instructions.items(.data);
@@ -3148,19 +3156,7 @@ fn findDeclsInner(
try zir.findDeclsBody(list, then_body);
try zir.findDeclsBody(list, else_body);
},
- .switch_block => return findDeclsSwitch(zir, list, inst, .none),
- .switch_block_else => return findDeclsSwitch(zir, list, inst, .@"else"),
- .switch_block_under => return findDeclsSwitch(zir, list, inst, .under),
- .switch_block_ref => return findDeclsSwitch(zir, list, inst, .none),
- .switch_block_ref_else => return findDeclsSwitch(zir, list, inst, .@"else"),
- .switch_block_ref_under => return findDeclsSwitch(zir, list, inst, .under),
-
- .switch_block_multi => return findDeclsSwitchMulti(zir, list, inst, .none),
- .switch_block_else_multi => return findDeclsSwitchMulti(zir, list, inst, .@"else"),
- .switch_block_under_multi => return findDeclsSwitchMulti(zir, list, inst, .under),
- .switch_block_ref_multi => return findDeclsSwitchMulti(zir, list, inst, .none),
- .switch_block_ref_else_multi => return findDeclsSwitchMulti(zir, list, inst, .@"else"),
- .switch_block_ref_under_multi => return findDeclsSwitchMulti(zir, list, inst, .under),
+ .switch_block => return findDeclsSwitch(zir, list, inst),
.suspend_block => @panic("TODO iterate suspend block"),
@@ -3170,71 +3166,34 @@ fn findDeclsInner(
fn findDeclsSwitch(
zir: Zir,
- list: *std.ArrayList(Zir.Inst.Index),
- inst: Zir.Inst.Index,
- special_prong: SpecialProng,
+ list: *std.ArrayList(Inst.Index),
+ inst: Inst.Index,
) Allocator.Error!void {
const inst_data = zir.instructions.items(.data)[inst].pl_node;
const extra = zir.extraData(Inst.SwitchBlock, inst_data.payload_index);
- const special: struct {
- body: []const Inst.Index,
- end: usize,
- } = switch (special_prong) {
- .none => .{ .body = &.{}, .end = extra.end },
- .under, .@"else" => blk: {
- const body_len = zir.extra[extra.end];
- const extra_body_start = extra.end + 1;
- break :blk .{
- .body = zir.extra[extra_body_start..][0..body_len],
- .end = extra_body_start + body_len,
- };
- },
- };
- try zir.findDeclsBody(list, special.body);
+ var extra_index: usize = extra.end;
- var extra_index: usize = special.end;
- var scalar_i: usize = 0;
- while (scalar_i < extra.data.cases_len) : (scalar_i += 1) {
+ const multi_cases_len = if (extra.data.bits.has_multi_cases) blk: {
+ const multi_cases_len = zir.extra[extra_index];
extra_index += 1;
+ break :blk multi_cases_len;
+ } else 0;
+
+ const special_prong = extra.data.bits.specialProng();
+ if (special_prong != .none) {
const body_len = zir.extra[extra_index];
extra_index += 1;
const body = zir.extra[extra_index..][0..body_len];
- extra_index += body_len;
+ extra_index += body.len;
try zir.findDeclsBody(list, body);
}
-}
-
-fn findDeclsSwitchMulti(
- zir: Zir,
- list: *std.ArrayList(Zir.Inst.Index),
- inst: Zir.Inst.Index,
- special_prong: SpecialProng,
-) Allocator.Error!void {
- const inst_data = zir.instructions.items(.data)[inst].pl_node;
- const extra = zir.extraData(Inst.SwitchBlockMulti, inst_data.payload_index);
- const special: struct {
- body: []const Inst.Index,
- end: usize,
- } = switch (special_prong) {
- .none => .{ .body = &.{}, .end = extra.end },
- .under, .@"else" => blk: {
- const body_len = zir.extra[extra.end];
- const extra_body_start = extra.end + 1;
- break :blk .{
- .body = zir.extra[extra_body_start..][0..body_len],
- .end = extra_body_start + body_len,
- };
- },
- };
- try zir.findDeclsBody(list, special.body);
-
- var extra_index: usize = special.end;
{
+ const scalar_cases_len = extra.data.bits.scalar_cases_len;
var scalar_i: usize = 0;
- while (scalar_i < extra.data.scalar_cases_len) : (scalar_i += 1) {
+ while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
extra_index += 1;
const body_len = zir.extra[extra_index];
extra_index += 1;
@@ -3246,7 +3205,7 @@ fn findDeclsSwitchMulti(
}
{
var multi_i: usize = 0;
- while (multi_i < extra.data.multi_cases_len) : (multi_i += 1) {
+ while (multi_i < multi_cases_len) : (multi_i += 1) {
const items_len = zir.extra[extra_index];
extra_index += 1;
const ranges_len = zir.extra[extra_index];
@@ -3353,3 +3312,18 @@ pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo {
.total_params_len = total_params_len,
};
}
+
+const ref_start_index: u32 = Inst.Ref.typed_value_map.len;
+
+pub fn indexToRef(inst: Inst.Index) Inst.Ref {
+ return @intToEnum(Inst.Ref, ref_start_index + inst);
+}
+
+pub fn refToIndex(inst: Inst.Ref) ?Inst.Index {
+ const ref_int = @enumToInt(inst);
+ if (ref_int >= ref_start_index) {
+ return ref_int - ref_start_index;
+ } else {
+ return null;
+ }
+}
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 73ada3a9ca..2c6feec70c 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -410,13 +410,15 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.mul => try self.airMul(inst),
.mulwrap => try self.airMulWrap(inst),
.mul_sat => try self.airMulSat(inst),
- .div => try self.airDiv(inst),
.rem => try self.airRem(inst),
.mod => try self.airMod(inst),
.shl, .shl_exact => try self.airShl(inst),
.shl_sat => try self.airShlSat(inst),
.min => try self.airMin(inst),
.max => try self.airMax(inst),
+ .slice => try self.airSlice(inst),
+
+ .div_float, .div_trunc, .div_floor, .div_exact => try self.airDiv(inst),
.cmp_lt => try self.airCmp(inst, .lt),
.cmp_lte => try self.airCmp(inst, .lte),
@@ -494,12 +496,14 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.slice_ptr => try self.airSlicePtr(inst),
.slice_len => try self.airSliceLen(inst),
+ .ptr_slice_len_ptr => try self.airPtrSliceLenPtr(inst),
+ .ptr_slice_ptr_ptr => try self.airPtrSlicePtrPtr(inst),
+
.array_elem_val => try self.airArrayElemVal(inst),
.slice_elem_val => try self.airSliceElemVal(inst),
- .ptr_slice_elem_val => try self.airPtrSliceElemVal(inst),
+ .slice_elem_ptr => try self.airSliceElemPtr(inst),
.ptr_elem_val => try self.airPtrElemVal(inst),
.ptr_elem_ptr => try self.airPtrElemPtr(inst),
- .ptr_ptr_elem_val => try self.airPtrPtrElemVal(inst),
.constant => unreachable, // excluded from function bodies
.const_ty => unreachable, // excluded from function bodies
@@ -871,6 +875,13 @@ fn airMax(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
+fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement slice for {}", .{self.target.cpu.arch});
+ return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+}
+
fn airAdd(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement add for {}", .{self.target.cpu.arch});
@@ -1057,6 +1068,18 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
+fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement ptr_slice_len_ptr for {}", .{self.target.cpu.arch});
+ return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+}
+
+fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement ptr_slice_ptr_ptr for {}", .{self.target.cpu.arch});
+ return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+}
+
fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
const is_volatile = false; // TODO
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -1064,16 +1087,16 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
-fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement array_elem_val for {}", .{self.target.cpu.arch});
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement slice_elem_ptr for {}", .{self.target.cpu.arch});
+ return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}
-fn airPtrSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
- const is_volatile = false; // TODO
+fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (!is_volatile and self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement ptr_slice_elem_val for {}", .{self.target.cpu.arch});
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement array_elem_val for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
@@ -1091,13 +1114,6 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}
-fn airPtrPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
- const is_volatile = false; // TODO
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (!is_volatile and self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement ptr_ptr_elem_val for {}", .{self.target.cpu.arch});
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
-}
-
fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
_ = bin_op;
diff --git a/src/clang.zig b/src/clang.zig
index 430c9093f2..8a8d794e41 100644
--- a/src/clang.zig
+++ b/src/clang.zig
@@ -536,6 +536,9 @@ pub const FunctionDecl = opaque {
pub const isInlineSpecified = ZigClangFunctionDecl_isInlineSpecified;
extern fn ZigClangFunctionDecl_isInlineSpecified(*const FunctionDecl) bool;
+ pub const hasAlwaysInlineAttr = ZigClangFunctionDecl_hasAlwaysInlineAttr;
+ extern fn ZigClangFunctionDecl_hasAlwaysInlineAttr(*const FunctionDecl) bool;
+
pub const isDefined = ZigClangFunctionDecl_isDefined;
extern fn ZigClangFunctionDecl_isDefined(*const FunctionDecl) bool;
diff --git a/src/codegen.zig b/src/codegen.zig
index 0371f32a8a..5f5ee1b549 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -758,13 +758,15 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.mul => try self.airMul(inst),
.mulwrap => try self.airMulWrap(inst),
.mul_sat => try self.airMulSat(inst),
- .div => try self.airDiv(inst),
.rem => try self.airRem(inst),
.mod => try self.airMod(inst),
.shl, .shl_exact => try self.airShl(inst),
.shl_sat => try self.airShlSat(inst),
.min => try self.airMin(inst),
.max => try self.airMax(inst),
+ .slice => try self.airSlice(inst),
+
+ .div_float, .div_trunc, .div_floor, .div_exact => try self.airDiv(inst),
.cmp_lt => try self.airCmp(inst, .lt),
.cmp_lte => try self.airCmp(inst, .lte),
@@ -842,12 +844,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.slice_ptr => try self.airSlicePtr(inst),
.slice_len => try self.airSliceLen(inst),
+ .ptr_slice_len_ptr => try self.airPtrSliceLenPtr(inst),
+ .ptr_slice_ptr_ptr => try self.airPtrSlicePtrPtr(inst),
+
.array_elem_val => try self.airArrayElemVal(inst),
.slice_elem_val => try self.airSliceElemVal(inst),
- .ptr_slice_elem_val => try self.airPtrSliceElemVal(inst),
+ .slice_elem_ptr => try self.airSliceElemPtr(inst),
.ptr_elem_val => try self.airPtrElemVal(inst),
.ptr_elem_ptr => try self.airPtrElemPtr(inst),
- .ptr_ptr_elem_val => try self.airPtrPtrElemVal(inst),
.constant => unreachable, // excluded from function bodies
.const_ty => unreachable, // excluded from function bodies
@@ -1241,6 +1245,15 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
+ fn airSlice(self: *Self, inst: Air.Inst.Index) !void {
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
+ else => return self.fail("TODO implement slice for {}", .{self.target.cpu.arch}),
+ };
+ return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+ }
+
fn airAdd(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
@@ -1498,6 +1511,22 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
+ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
+ else => return self.fail("TODO implement ptr_slice_len_ptr for {}", .{self.target.cpu.arch}),
+ };
+ return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ }
+
+ fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
+ else => return self.fail("TODO implement ptr_slice_ptr_ptr for {}", .{self.target.cpu.arch}),
+ };
+ return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
+ }
+
fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
const is_volatile = false; // TODO
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -1507,19 +1536,19 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
- fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void {
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
- else => return self.fail("TODO implement array_elem_val for {}", .{self.target.cpu.arch}),
+ else => return self.fail("TODO implement slice_elem_ptr for {}", .{self.target.cpu.arch}),
};
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+ return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}
- fn airPtrSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
- const is_volatile = false; // TODO
+ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (!is_volatile and self.liveness.isUnused(inst)) .dead else switch (arch) {
- else => return self.fail("TODO implement ptr_slice_elem_val for {}", .{self.target.cpu.arch}),
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
+ else => return self.fail("TODO implement array_elem_val for {}", .{self.target.cpu.arch}),
};
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
@@ -1542,15 +1571,6 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}
- fn airPtrPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
- const is_volatile = false; // TODO
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const result: MCValue = if (!is_volatile and self.liveness.isUnused(inst)) .dead else switch (arch) {
- else => return self.fail("TODO implement ptr_ptr_elem_val for {}", .{self.target.cpu.arch}),
- };
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
- }
-
fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const result: MCValue = switch (arch) {
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index ad98dc87c1..dd71590566 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -976,7 +976,8 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
.mul => try airBinOp (f, inst, " * "),
// TODO use a different strategy for div that communicates to the optimizer
// that wrapping is UB.
- .div => try airBinOp( f, inst, " / "),
+ .div_float, .div_exact, .div_trunc => try airBinOp( f, inst, " / "),
+ .div_floor => try airBinOp( f, inst, " divfloor "),
.rem => try airBinOp( f, inst, " % "),
.mod => try airBinOp( f, inst, " mod "), // TODO implement modulus division
@@ -992,6 +993,8 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
.min => try airMinMax(f, inst, "<"),
.max => try airMinMax(f, inst, ">"),
+ .slice => try airSlice(f, inst),
+
.cmp_eq => try airBinOp(f, inst, " == "),
.cmp_gt => try airBinOp(f, inst, " > "),
.cmp_gte => try airBinOp(f, inst, " >= "),
@@ -1075,11 +1078,13 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
.slice_ptr => try airSliceField(f, inst, ".ptr;\n"),
.slice_len => try airSliceField(f, inst, ".len;\n"),
+ .ptr_slice_len_ptr => try airPtrSliceFieldPtr(f, inst, ".len;\n"),
+ .ptr_slice_ptr_ptr => try airPtrSliceFieldPtr(f, inst, ".ptr;\n"),
+
.ptr_elem_val => try airPtrElemVal(f, inst, "["),
- .ptr_ptr_elem_val => try airPtrElemVal(f, inst, "[0]["),
.ptr_elem_ptr => try airPtrElemPtr(f, inst),
.slice_elem_val => try airSliceElemVal(f, inst, "["),
- .ptr_slice_elem_val => try airSliceElemVal(f, inst, "[0]["),
+ .slice_elem_ptr => try airSliceElemPtr(f, inst),
.array_elem_val => try airArrayElemVal(f, inst),
.unwrap_errunion_payload => try airUnwrapErrUnionPay(f, inst),
@@ -1101,8 +1106,7 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
}
fn airSliceField(f: *Function, inst: Air.Inst.Index, suffix: []const u8) !CValue {
- if (f.liveness.isUnused(inst))
- return CValue.none;
+ if (f.liveness.isUnused(inst)) return CValue.none;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const operand = try f.resolveInst(ty_op.operand);
@@ -1114,6 +1118,21 @@ fn airSliceField(f: *Function, inst: Air.Inst.Index, suffix: []const u8) !CValue
return local;
}
+fn airPtrSliceFieldPtr(f: *Function, inst: Air.Inst.Index, suffix: []const u8) !CValue {
+ if (f.liveness.isUnused(inst))
+ return CValue.none;
+
+ const ty_op = f.air.instructions.items(.data)[inst].ty_op;
+ const operand = try f.resolveInst(ty_op.operand);
+ const writer = f.object.writer();
+
+ _ = writer;
+ _ = operand;
+ _ = suffix;
+
+ return f.fail("TODO: C backend: airPtrSliceFieldPtr", .{});
+}
+
fn airPtrElemVal(f: *Function, inst: Air.Inst.Index, prefix: []const u8) !CValue {
const is_volatile = false; // TODO
if (!is_volatile and f.liveness.isUnused(inst))
@@ -1148,6 +1167,24 @@ fn airSliceElemVal(f: *Function, inst: Air.Inst.Index, prefix: []const u8) !CVal
return local;
}
+fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
+ if (f.liveness.isUnused(inst))
+ return CValue.none;
+ const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
+ const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
+
+ const slice = try f.resolveInst(bin_op.lhs);
+ const index = try f.resolveInst(bin_op.rhs);
+ const writer = f.object.writer();
+ const local = try f.allocLocal(f.air.typeOfIndex(inst), .Const);
+ try writer.writeAll(" = &");
+ try f.writeCValue(writer, slice);
+ try writer.writeByte('[');
+ try f.writeCValue(writer, index);
+ try writer.writeAll("];\n");
+ return local;
+}
+
fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
if (f.liveness.isUnused(inst)) return CValue.none;
@@ -1623,6 +1660,27 @@ fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: [*:0]const u8) !CValu
return local;
}
+fn airSlice(f: *Function, inst: Air.Inst.Index) !CValue {
+ if (f.liveness.isUnused(inst)) return CValue.none;
+
+ const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
+ const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data;
+ const ptr = try f.resolveInst(bin_op.lhs);
+ const len = try f.resolveInst(bin_op.rhs);
+
+ const writer = f.object.writer();
+ const inst_ty = f.air.typeOfIndex(inst);
+ const local = try f.allocLocal(inst_ty, .Const);
+
+ try writer.writeAll(" = {");
+ try f.writeCValue(writer, ptr);
+ try writer.writeAll(", ");
+ try f.writeCValue(writer, len);
+ try writer.writeAll("};\n");
+
+ return local;
+}
+
fn airCall(f: *Function, inst: Air.Inst.Index) !CValue {
const pl_op = f.air.instructions.items(.data)[inst].pl_op;
const extra = f.air.extraData(Air.Call, pl_op.payload);
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 681b3c36db..80625928cb 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -527,19 +527,11 @@ pub const Object = struct {
if (self.llvm_module.getNamedGlobalAlias(exp_name_z.ptr, exp_name_z.len)) |alias| {
alias.setAliasee(llvm_global);
} else {
- const alias = self.llvm_module.addAlias(llvm_global.typeOf(), llvm_global, exp_name_z);
- switch (exp.options.linkage) {
- .Internal => alias.setLinkage(.Internal),
- .Strong => alias.setLinkage(.External),
- .Weak => {
- if (is_extern) {
- alias.setLinkage(.ExternalWeak);
- } else {
- alias.setLinkage(.WeakODR);
- }
- },
- .LinkOnce => alias.setLinkage(.LinkOnceODR),
- }
+ _ = self.llvm_module.addAlias(
+ llvm_global.typeOf(),
+ llvm_global,
+ exp_name_z,
+ );
}
}
} else {
@@ -589,7 +581,9 @@ pub const DeclGen = struct {
} else if (decl.val.castTag(.extern_fn)) |extern_fn| {
_ = try self.resolveLlvmFunction(extern_fn.data);
} else {
+ const target = self.module.getTarget();
const global = try self.resolveGlobalDecl(decl);
+ global.setAlignment(decl.getAlignment(target));
assert(decl.has_tv);
const init_val = if (decl.val.castTag(.variable)) |payload| init_val: {
const variable = payload.data;
@@ -1090,6 +1084,37 @@ pub const DeclGen = struct {
const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(), .False);
return llvm_int.constIntToPtr(try self.llvmType(tv.ty));
},
+ .field_ptr => {
+ const field_ptr = tv.val.castTag(.field_ptr).?.data;
+ const parent_ptr = try self.lowerParentPtr(field_ptr.container_ptr);
+ const llvm_u32 = self.context.intType(32);
+ const indices: [2]*const llvm.Value = .{
+ llvm_u32.constInt(0, .False),
+ llvm_u32.constInt(field_ptr.field_index, .False),
+ };
+ return parent_ptr.constInBoundsGEP(&indices, indices.len);
+ },
+ .elem_ptr => {
+ const elem_ptr = tv.val.castTag(.elem_ptr).?.data;
+ const parent_ptr = try self.lowerParentPtr(elem_ptr.array_ptr);
+ const llvm_usize = try self.llvmType(Type.usize);
+ if (parent_ptr.typeOf().getElementType().getTypeKind() == .Array) {
+ const indices: [2]*const llvm.Value = .{
+ llvm_usize.constInt(0, .False),
+ llvm_usize.constInt(elem_ptr.index, .False),
+ };
+ return parent_ptr.constInBoundsGEP(&indices, indices.len);
+ } else {
+ const indices: [1]*const llvm.Value = .{
+ llvm_usize.constInt(elem_ptr.index, .False),
+ };
+ return parent_ptr.constInBoundsGEP(&indices, indices.len);
+ }
+ },
+ .null_value, .zero => {
+ const llvm_type = try self.llvmType(tv.ty);
+ return llvm_type.constNull();
+ },
else => |tag| return self.todo("implement const of pointer type '{}' ({})", .{ tv.ty, tag }),
},
.Array => switch (tv.val.tag()) {
@@ -1254,6 +1279,10 @@ pub const DeclGen = struct {
}
const field_ty = tv.ty.unionFieldType(tag_and_val.tag);
const payload = p: {
+ if (!field_ty.hasCodeGenBits()) {
+ const padding_len = @intCast(c_uint, layout.payload_size);
+ break :p self.context.intType(8).arrayType(padding_len).getUndef();
+ }
const field = try genTypedValue(self, .{ .ty = field_ty, .val = tag_and_val.val });
const field_size = field_ty.abiSize(target);
if (field_size == layout.payload_size) {
@@ -1284,6 +1313,66 @@ pub const DeclGen = struct {
}
return llvm_union_ty.constNamedStruct(&fields, fields.len);
},
+ .Vector => switch (tv.val.tag()) {
+ .bytes => {
+ // Note, sentinel is not stored even if the type has a sentinel.
+ const bytes = tv.val.castTag(.bytes).?.data;
+ const vector_len = tv.ty.arrayLen();
+ assert(vector_len == bytes.len or vector_len + 1 == bytes.len);
+
+ const elem_ty = tv.ty.elemType();
+ const llvm_elems = try self.gpa.alloc(*const llvm.Value, vector_len);
+ defer self.gpa.free(llvm_elems);
+ for (llvm_elems) |*elem, i| {
+ var byte_payload: Value.Payload.U64 = .{
+ .base = .{ .tag = .int_u64 },
+ .data = bytes[i],
+ };
+
+ elem.* = try self.genTypedValue(.{
+ .ty = elem_ty,
+ .val = Value.initPayload(&byte_payload.base),
+ });
+ }
+ return llvm.constVector(
+ llvm_elems.ptr,
+ @intCast(c_uint, llvm_elems.len),
+ );
+ },
+ .array => {
+ // Note, sentinel is not stored even if the type has a sentinel.
+ // The value includes the sentinel in those cases.
+ const elem_vals = tv.val.castTag(.array).?.data;
+ const vector_len = tv.ty.arrayLen();
+ assert(vector_len == elem_vals.len or vector_len + 1 == elem_vals.len);
+ const elem_ty = tv.ty.elemType();
+ const llvm_elems = try self.gpa.alloc(*const llvm.Value, vector_len);
+ defer self.gpa.free(llvm_elems);
+ for (llvm_elems) |*elem, i| {
+ elem.* = try self.genTypedValue(.{ .ty = elem_ty, .val = elem_vals[i] });
+ }
+ return llvm.constVector(
+ llvm_elems.ptr,
+ @intCast(c_uint, llvm_elems.len),
+ );
+ },
+ .repeated => {
+ // Note, sentinel is not stored even if the type has a sentinel.
+ const val = tv.val.castTag(.repeated).?.data;
+ const elem_ty = tv.ty.elemType();
+ const len = tv.ty.arrayLen();
+ const llvm_elems = try self.gpa.alloc(*const llvm.Value, len);
+ defer self.gpa.free(llvm_elems);
+ for (llvm_elems) |*elem| {
+ elem.* = try self.genTypedValue(.{ .ty = elem_ty, .val = val });
+ }
+ return llvm.constVector(
+ llvm_elems.ptr,
+ @intCast(c_uint, llvm_elems.len),
+ );
+ },
+ else => unreachable,
+ },
.ComptimeInt => unreachable,
.ComptimeFloat => unreachable,
@@ -1298,11 +1387,73 @@ pub const DeclGen = struct {
.Frame,
.AnyFrame,
- .Vector,
=> return self.todo("implement const of type '{}'", .{tv.ty}),
}
}
+ const ParentPtr = struct {
+ ty: Type,
+ llvm_ptr: *const llvm.Value,
+ };
+
+ fn lowerParentPtrDecl(
+ dg: *DeclGen,
+ ptr_val: Value,
+ decl: *Module.Decl,
+ ) Error!ParentPtr {
+ decl.alive = true;
+ var ptr_ty_payload: Type.Payload.ElemType = .{
+ .base = .{ .tag = .single_mut_pointer },
+ .data = decl.ty,
+ };
+ const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ const llvm_ptr = try dg.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl);
+ return ParentPtr{
+ .llvm_ptr = llvm_ptr,
+ .ty = decl.ty,
+ };
+ }
+
+ fn lowerParentPtr(dg: *DeclGen, ptr_val: Value) Error!*const llvm.Value {
+ switch (ptr_val.tag()) {
+ .decl_ref_mut => {
+ const decl = ptr_val.castTag(.decl_ref_mut).?.data.decl;
+ return (try dg.lowerParentPtrDecl(ptr_val, decl)).llvm_ptr;
+ },
+ .decl_ref => {
+ const decl = ptr_val.castTag(.decl_ref).?.data;
+ return (try dg.lowerParentPtrDecl(ptr_val, decl)).llvm_ptr;
+ },
+ .variable => {
+ const decl = ptr_val.castTag(.variable).?.data.owner_decl;
+ return (try dg.lowerParentPtrDecl(ptr_val, decl)).llvm_ptr;
+ },
+ .field_ptr => {
+ const field_ptr = ptr_val.castTag(.field_ptr).?.data;
+ const parent_ptr = try dg.lowerParentPtr(field_ptr.container_ptr);
+ const llvm_u32 = dg.context.intType(32);
+ const indices: [2]*const llvm.Value = .{
+ llvm_u32.constInt(0, .False),
+ llvm_u32.constInt(field_ptr.field_index, .False),
+ };
+ return parent_ptr.constInBoundsGEP(&indices, indices.len);
+ },
+ .elem_ptr => {
+ const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
+ const parent_ptr = try dg.lowerParentPtr(elem_ptr.array_ptr);
+ const llvm_usize = try dg.llvmType(Type.usize);
+ const indices: [2]*const llvm.Value = .{
+ llvm_usize.constInt(0, .False),
+ llvm_usize.constInt(elem_ptr.index, .False),
+ };
+ return parent_ptr.constInBoundsGEP(&indices, indices.len);
+ },
+ .opt_payload_ptr => return dg.todo("implement lowerParentPtr for optional payload", .{}),
+ .eu_payload_ptr => return dg.todo("implement lowerParentPtr for error union payload", .{}),
+ else => unreachable,
+ }
+ }
+
fn lowerDeclRefValue(
self: *DeclGen,
tv: TypedValue,
@@ -1328,12 +1479,13 @@ pub const DeclGen = struct {
return self.context.constStruct(&fields, fields.len, .False);
}
- decl.alive = true;
const llvm_type = try self.llvmType(tv.ty);
if (!tv.ty.childType().hasCodeGenBits()) {
return self.lowerPtrToVoid(tv.ty);
}
+ decl.alive = true;
+
const llvm_val = if (decl.ty.zigTypeTag() == .Fn)
try self.resolveLlvmFunction(decl)
else
@@ -1523,6 +1675,7 @@ pub const FuncGen = struct {
global.setInitializer(llvm_val);
global.setLinkage(.Private);
global.setGlobalConstant(.True);
+ global.setUnnamedAddr(.True);
global.setAlignment(ty.abiAlignment(target));
gop.value_ptr.* = global;
return global;
@@ -1542,7 +1695,10 @@ pub const FuncGen = struct {
.mul => try self.airMul(inst),
.mulwrap => try self.airMulWrap(inst),
.mul_sat => try self.airMulSat(inst),
- .div => try self.airDiv(inst),
+ .div_float => try self.airDivFloat(inst),
+ .div_trunc => try self.airDivTrunc(inst),
+ .div_floor => try self.airDivFloor(inst),
+ .div_exact => try self.airDivExact(inst),
.rem => try self.airRem(inst),
.mod => try self.airMod(inst),
.ptr_add => try self.airPtrAdd(inst),
@@ -1552,6 +1708,7 @@ pub const FuncGen = struct {
.shl_exact => try self.airShlExact(inst),
.min => try self.airMin(inst),
.max => try self.airMax(inst),
+ .slice => try self.airSlice(inst),
.bit_and, .bool_and => try self.airAnd(inst),
.bit_or, .bool_or => try self.airOr(inst),
@@ -1600,6 +1757,10 @@ pub const FuncGen = struct {
.assembly => try self.airAssembly(inst),
.slice_ptr => try self.airSliceField(inst, 0),
.slice_len => try self.airSliceField(inst, 1),
+
+ .ptr_slice_ptr_ptr => try self.airPtrSliceFieldPtr(inst, 0),
+ .ptr_slice_len_ptr => try self.airPtrSliceFieldPtr(inst, 1),
+
.array_to_slice => try self.airArrayToSlice(inst),
.float_to_int => try self.airFloatToInt(inst),
.int_to_float => try self.airIntToFloat(inst),
@@ -1630,13 +1791,12 @@ pub const FuncGen = struct {
.array_elem_val => try self.airArrayElemVal(inst),
.slice_elem_val => try self.airSliceElemVal(inst),
- .ptr_slice_elem_val => try self.airPtrSliceElemVal(inst),
+ .slice_elem_ptr => try self.airSliceElemPtr(inst),
.ptr_elem_val => try self.airPtrElemVal(inst),
.ptr_elem_ptr => try self.airPtrElemPtr(inst),
- .ptr_ptr_elem_val => try self.airPtrPtrElemVal(inst),
- .optional_payload => try self.airOptionalPayload(inst, false),
- .optional_payload_ptr => try self.airOptionalPayload(inst, true),
+ .optional_payload => try self.airOptionalPayload(inst),
+ .optional_payload_ptr => try self.airOptionalPayloadPtr(inst),
.unwrap_errunion_payload => try self.airErrUnionPayload(inst, false),
.unwrap_errunion_payload_ptr => try self.airErrUnionPayload(inst, true),
@@ -1829,14 +1989,16 @@ pub const FuncGen = struct {
const raw_llvm_ty = try self.dg.llvmType(inst_ty);
- // If the zig tag type is a function, this represents an actual function body; not
- // a pointer to it. LLVM IR allows the call instruction to use function bodies instead
- // of function pointers, however the phi makes it a runtime value and therefore
- // the LLVM type has to be wrapped in a pointer.
- const llvm_ty = if (inst_ty.zigTypeTag() == .Fn)
- raw_llvm_ty.pointerType(0)
- else
- raw_llvm_ty;
+ const llvm_ty = ty: {
+ // If the zig tag type is a function, this represents an actual function body; not
+ // a pointer to it. LLVM IR allows the call instruction to use function bodies instead
+ // of function pointers, however the phi makes it a runtime value and therefore
+ // the LLVM type has to be wrapped in a pointer.
+ if (inst_ty.zigTypeTag() == .Fn or isByRef(inst_ty)) {
+ break :ty raw_llvm_ty.pointerType(0);
+ }
+ break :ty raw_llvm_ty;
+ };
const phi_node = self.builder.buildPhi(llvm_ty, "");
phi_node.addIncoming(
@@ -1874,23 +2036,55 @@ pub const FuncGen = struct {
const then_block = self.context.appendBasicBlock(self.llvm_func, "Then");
const else_block = self.context.appendBasicBlock(self.llvm_func, "Else");
- {
- const prev_block = self.builder.getInsertBlock();
- defer self.builder.positionBuilderAtEnd(prev_block);
+ _ = self.builder.buildCondBr(cond, then_block, else_block);
- self.builder.positionBuilderAtEnd(then_block);
- try self.genBody(then_body);
+ self.builder.positionBuilderAtEnd(then_block);
+ try self.genBody(then_body);
- self.builder.positionBuilderAtEnd(else_block);
- try self.genBody(else_body);
- }
- _ = self.builder.buildCondBr(cond, then_block, else_block);
+ self.builder.positionBuilderAtEnd(else_block);
+ try self.genBody(else_body);
+
+ // No need to reset the insert cursor since this instruction is noreturn.
return null;
}
fn airSwitchBr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
- _ = inst;
- return self.todo("implement llvm codegen for switch_br", .{});
+ const pl_op = self.air.instructions.items(.data)[inst].pl_op;
+ const cond = try self.resolveInst(pl_op.operand);
+ const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload);
+ const else_block = self.context.appendBasicBlock(self.llvm_func, "Else");
+ const llvm_switch = self.builder.buildSwitch(cond, else_block, switch_br.data.cases_len);
+
+ var extra_index: usize = switch_br.end;
+ var case_i: u32 = 0;
+
+ while (case_i < switch_br.data.cases_len) : (case_i += 1) {
+ const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
+ const items = @bitCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]);
+ const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
+ extra_index = case.end + case.data.items_len + case_body.len;
+
+ const case_block = self.context.appendBasicBlock(self.llvm_func, "Case");
+
+ for (items) |item| {
+ const llvm_item = try self.resolveInst(item);
+ llvm_switch.addCase(llvm_item, case_block);
+ }
+
+ self.builder.positionBuilderAtEnd(case_block);
+ try self.genBody(case_body);
+ }
+
+ self.builder.positionBuilderAtEnd(else_block);
+ const else_body = self.air.extra[extra_index..][0..switch_br.data.else_body_len];
+ if (else_body.len != 0) {
+ try self.genBody(else_body);
+ } else {
+ _ = self.builder.buildUnreachable();
+ }
+
+ // No need to reset the insert cursor since this instruction is noreturn.
+ return null;
}
fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
@@ -1972,14 +2166,22 @@ pub const FuncGen = struct {
}
fn airSliceField(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*const llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
+ if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
return self.builder.buildExtractValue(operand, index, "");
}
+ fn airPtrSliceFieldPtr(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const slice_ptr = try self.resolveInst(ty_op.operand);
+
+ return self.builder.buildStructGEP(slice_ptr, index, "");
+ }
+
fn airSliceElemVal(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const slice_ty = self.air.typeOf(bin_op.lhs);
@@ -1987,28 +2189,18 @@ pub const FuncGen = struct {
const slice = try self.resolveInst(bin_op.lhs);
const index = try self.resolveInst(bin_op.rhs);
- const base_ptr = self.builder.buildExtractValue(slice, 0, "");
- const indices: [1]*const llvm.Value = .{index};
- const ptr = self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
+ const ptr = self.sliceElemPtr(slice, index);
return self.load(ptr, slice_ty);
}
- fn airPtrSliceElemVal(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const slice_ty = self.air.typeOf(bin_op.lhs).childType();
- if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null;
-
- const lhs = try self.resolveInst(bin_op.lhs);
- const rhs = try self.resolveInst(bin_op.rhs);
-
- const base_ptr = ptr: {
- const ptr_field_ptr = self.builder.buildStructGEP(lhs, 0, "");
- break :ptr self.builder.buildLoad(ptr_field_ptr, "");
- };
+ fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
- const indices: [1]*const llvm.Value = .{rhs};
- const ptr = self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
- return self.load(ptr, slice_ty);
+ const slice = try self.resolveInst(bin_op.lhs);
+ const index = try self.resolveInst(bin_op.rhs);
+ return self.sliceElemPtr(slice, index);
}
fn airArrayElemVal(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
@@ -2068,19 +2260,6 @@ pub const FuncGen = struct {
}
}
- fn airPtrPtrElemVal(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
- const bin_op = self.air.instructions.items(.data)[inst].bin_op;
- const ptr_ty = self.air.typeOf(bin_op.lhs).childType();
- if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null;
-
- const lhs = try self.resolveInst(bin_op.lhs);
- const rhs = try self.resolveInst(bin_op.rhs);
- const base_ptr = self.builder.buildLoad(lhs, "");
- const indices: [1]*const llvm.Value = .{rhs};
- const ptr = self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
- return self.load(ptr, ptr_ty);
- }
-
fn airStructFieldPtr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst))
return null;
@@ -2112,17 +2291,34 @@ pub const FuncGen = struct {
const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
const struct_ty = self.air.typeOf(struct_field.struct_operand);
const struct_llvm_val = try self.resolveInst(struct_field.struct_operand);
- const field_index = llvmFieldIndex(struct_ty, struct_field.field_index);
- if (isByRef(struct_ty)) {
- const field_ptr = self.builder.buildStructGEP(struct_llvm_val, field_index, "");
- const field_ty = struct_ty.structFieldType(struct_field.field_index);
- if (isByRef(field_ty)) {
- return field_ptr;
- } else {
- return self.builder.buildLoad(field_ptr, "");
- }
+ const field_index = struct_field.field_index;
+ const field_ty = struct_ty.structFieldType(field_index);
+ if (!field_ty.hasCodeGenBits()) {
+ return null;
+ }
+
+ assert(isByRef(struct_ty));
+
+ const field_ptr = switch (struct_ty.zigTypeTag()) {
+ .Struct => blk: {
+ const llvm_field_index = llvmFieldIndex(struct_ty, field_index);
+ break :blk self.builder.buildStructGEP(struct_llvm_val, llvm_field_index, "");
+ },
+ .Union => blk: {
+ const llvm_field_ty = try self.dg.llvmType(field_ty);
+ const target = self.dg.module.getTarget();
+ const layout = struct_ty.unionGetLayout(target);
+ const payload_index = @boolToInt(layout.tag_align >= layout.payload_align);
+ const union_field_ptr = self.builder.buildStructGEP(struct_llvm_val, payload_index, "");
+ break :blk self.builder.buildBitCast(union_field_ptr, llvm_field_ty.pointerType(0), "");
+ },
+ else => unreachable,
+ };
+
+ if (isByRef(field_ty)) {
+ return field_ptr;
} else {
- return self.builder.buildExtractValue(struct_llvm_val, field_index, "");
+ return self.builder.buildLoad(field_ptr, "");
}
}
@@ -2154,17 +2350,19 @@ pub const FuncGen = struct {
const air_asm = self.air.extraData(Air.Asm, ty_pl.payload);
const zir = self.dg.decl.getFileScope().zir;
const extended = zir.instructions.items(.data)[air_asm.data.zir_index].extended;
- const zir_extra = zir.extraData(Zir.Inst.Asm, extended.operand);
- const asm_source = zir.nullTerminatedString(zir_extra.data.asm_source);
- const outputs_len = @truncate(u5, extended.small);
- const args_len = @truncate(u5, extended.small >> 5);
- const clobbers_len = @truncate(u5, extended.small >> 10);
const is_volatile = @truncate(u1, extended.small >> 15) != 0;
- const outputs = @bitCast([]const Air.Inst.Ref, self.air.extra[air_asm.end..][0..outputs_len]);
- const args = @bitCast([]const Air.Inst.Ref, self.air.extra[air_asm.end + outputs.len ..][0..args_len]);
+ if (!is_volatile and self.liveness.isUnused(inst)) {
+ return null;
+ }
+ const outputs_len = @truncate(u5, extended.small);
if (outputs_len > 1) {
return self.todo("implement llvm codegen for asm with more than 1 output", .{});
}
+ const args_len = @truncate(u5, extended.small >> 5);
+ const clobbers_len = @truncate(u5, extended.small >> 10);
+ const zir_extra = zir.extraData(Zir.Inst.Asm, extended.operand);
+ const asm_source = zir.nullTerminatedString(zir_extra.data.asm_source);
+ const args = @bitCast([]const Air.Inst.Ref, self.air.extra[air_asm.end..][0..args_len]);
var extra_i: usize = zir_extra.end;
const output_constraint: ?[]const u8 = out: {
@@ -2177,10 +2375,6 @@ pub const FuncGen = struct {
break :out null;
};
- if (!is_volatile and self.liveness.isUnused(inst)) {
- return null;
- }
-
var llvm_constraints: std.ArrayListUnmanaged(u8) = .{};
defer llvm_constraints.deinit(self.gpa);
@@ -2188,7 +2382,7 @@ pub const FuncGen = struct {
defer arena_allocator.deinit();
const arena = &arena_allocator.allocator;
- const llvm_params_len = args.len + @boolToInt(output_constraint != null);
+ const llvm_params_len = args.len;
const llvm_param_types = try arena.alloc(*const llvm.Type, llvm_params_len);
const llvm_param_values = try arena.alloc(*const llvm.Value, llvm_params_len);
@@ -2200,7 +2394,8 @@ pub const FuncGen = struct {
if (total_i != 0) {
llvm_constraints.appendAssumeCapacity(',');
}
- llvm_constraints.appendSliceAssumeCapacity(constraint);
+ llvm_constraints.appendAssumeCapacity('=');
+ llvm_constraints.appendSliceAssumeCapacity(constraint[1..]);
total_i += 1;
}
@@ -2326,8 +2521,7 @@ pub const FuncGen = struct {
op: llvm.IntPredicate,
operand_is_ptr: bool,
) !?*const llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
+ if (self.liveness.isUnused(inst)) return null;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
@@ -2341,7 +2535,7 @@ pub const FuncGen = struct {
return self.builder.buildICmp(op, loaded, zero, "");
}
- if (operand_is_ptr) {
+ if (operand_is_ptr or isByRef(err_union_ty)) {
const err_field_ptr = self.builder.buildStructGEP(operand, 0, "");
const loaded = self.builder.buildLoad(err_field_ptr, "");
return self.builder.buildICmp(op, loaded, zero, "");
@@ -2351,35 +2545,55 @@ pub const FuncGen = struct {
return self.builder.buildICmp(op, loaded, zero, "");
}
- fn airOptionalPayload(
- self: *FuncGen,
- inst: Air.Inst.Index,
- operand_is_ptr: bool,
- ) !?*const llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
+ fn airOptionalPayloadPtr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
+ const optional_ty = self.air.typeOf(ty_op.operand).childType();
+ var buf: Type.Payload.ElemType = undefined;
+ const payload_ty = optional_ty.optionalChild(&buf);
+ if (!payload_ty.hasCodeGenBits()) {
+ // We have a pointer to a zero-bit value and we need to return
+ // a pointer to a zero-bit value.
+ return operand;
+ }
+ if (optional_ty.isPtrLikeOptional()) {
+ // The payload and the optional are the same value.
+ return operand;
+ }
+ const index_type = self.context.intType(32);
+ const indices: [2]*const llvm.Value = .{
+ index_type.constNull(), // dereference the pointer
+ index_type.constNull(), // first field is the payload
+ };
+ return self.builder.buildInBoundsGEP(operand, &indices, indices.len, "");
+ }
- if (operand_is_ptr) {
- const operand_ty = self.air.typeOf(ty_op.operand).elemType();
- if (operand_ty.isPtrLikeOptional()) {
- return self.builder.buildLoad(operand, "");
- }
+ fn airOptionalPayload(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
- const index_type = self.context.intType(32);
- var indices: [2]*const llvm.Value = .{
- index_type.constNull(), index_type.constNull(),
- };
- return self.builder.buildInBoundsGEP(operand, &indices, 2, "");
- }
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const operand = try self.resolveInst(ty_op.operand);
+ const optional_ty = self.air.typeOf(ty_op.operand);
+ const payload_ty = self.air.typeOfIndex(inst);
+ if (!payload_ty.hasCodeGenBits()) return null;
- const operand_ty = self.air.typeOf(ty_op.operand);
- if (operand_ty.isPtrLikeOptional()) {
+ if (optional_ty.isPtrLikeOptional()) {
+ // Payload value is the same as the optional value.
return operand;
}
+ if (isByRef(payload_ty)) {
+ // We have a pointer and we need to return a pointer to the first field.
+ const index_type = self.context.intType(32);
+ const indices: [2]*const llvm.Value = .{
+ index_type.constNull(), // dereference the pointer
+ index_type.constNull(), // first field is the payload
+ };
+ return self.builder.buildInBoundsGEP(operand, &indices, indices.len, "");
+ }
+
return self.builder.buildExtractValue(operand, 0, "");
}
@@ -2388,22 +2602,16 @@ pub const FuncGen = struct {
inst: Air.Inst.Index,
operand_is_ptr: bool,
) !?*const llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
+ if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const err_union_ty = self.air.typeOf(ty_op.operand);
const payload_ty = err_union_ty.errorUnionPayload();
-
- if (!payload_ty.hasCodeGenBits()) {
- return null;
- }
-
- if (operand_is_ptr) {
+ if (!payload_ty.hasCodeGenBits()) return null;
+ if (operand_is_ptr or isByRef(payload_ty)) {
return self.builder.buildStructGEP(operand, 1, "");
}
-
return self.builder.buildExtractValue(operand, 1, "");
}
@@ -2425,7 +2633,7 @@ pub const FuncGen = struct {
return self.builder.buildLoad(operand, "");
}
- if (operand_is_ptr) {
+ if (operand_is_ptr or isByRef(payload_ty)) {
const err_field_ptr = self.builder.buildStructGEP(operand, 0, "");
return self.builder.buildLoad(err_field_ptr, "");
}
@@ -2437,9 +2645,9 @@ pub const FuncGen = struct {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const operand_ty = self.air.typeOf(ty_op.operand);
+ const payload_ty = self.air.typeOf(ty_op.operand);
const non_null_bit = self.context.intType(1).constAllOnes();
- if (!operand_ty.hasCodeGenBits()) return non_null_bit;
+ if (!payload_ty.hasCodeGenBits()) return non_null_bit;
const operand = try self.resolveInst(ty_op.operand);
const optional_ty = self.air.typeOfIndex(inst);
if (optional_ty.isPtrLikeOptional()) return operand;
@@ -2447,8 +2655,6 @@ pub const FuncGen = struct {
if (isByRef(optional_ty)) {
const optional_ptr = self.buildAlloca(llvm_optional_ty);
const payload_ptr = self.builder.buildStructGEP(optional_ptr, 0, "");
- var buf: Type.Payload.ElemType = undefined;
- const payload_ty = operand_ty.optionalChild(&buf);
var ptr_ty_payload: Type.Payload.ElemType = .{
.base = .{ .tag = .single_mut_pointer },
.data = payload_ty,
@@ -2494,10 +2700,35 @@ pub const FuncGen = struct {
}
fn airWrapErrUnionErr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
+ if (self.liveness.isUnused(inst)) return null;
+
+ const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const err_un_ty = self.air.typeOfIndex(inst);
+ const payload_ty = err_un_ty.errorUnionPayload();
+ const operand = try self.resolveInst(ty_op.operand);
+ if (!payload_ty.hasCodeGenBits()) {
+ return operand;
+ }
+ const err_un_llvm_ty = try self.dg.llvmType(err_un_ty);
+ if (isByRef(err_un_ty)) {
+ const result_ptr = self.buildAlloca(err_un_llvm_ty);
+ const err_ptr = self.builder.buildStructGEP(result_ptr, 0, "");
+ _ = self.builder.buildStore(operand, err_ptr);
+ const payload_ptr = self.builder.buildStructGEP(result_ptr, 1, "");
+ var ptr_ty_payload: Type.Payload.ElemType = .{
+ .base = .{ .tag = .single_mut_pointer },
+ .data = payload_ty,
+ };
+ const payload_ptr_ty = Type.initPayload(&ptr_ty_payload.base);
+ // TODO store undef to payload_ptr
+ _ = payload_ptr;
+ _ = payload_ptr_ty;
+ return result_ptr;
+ }
- return self.todo("implement llvm codegen for 'airWrapErrUnionErr'", .{});
+ const partial = self.builder.buildInsertValue(err_un_llvm_ty.getUndef(), operand, 0, "");
+ // TODO set payload bytes to undef
+ return partial;
}
fn airMin(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
@@ -2526,6 +2757,20 @@ pub const FuncGen = struct {
return self.builder.buildUMax(lhs, rhs, "");
}
+ fn airSlice(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
+ const ptr = try self.resolveInst(bin_op.lhs);
+ const len = try self.resolveInst(bin_op.rhs);
+ const inst_ty = self.air.typeOfIndex(inst);
+ const llvm_slice_ty = try self.dg.llvmType(inst_ty);
+
+ const partial = self.builder.buildInsertValue(llvm_slice_ty.getUndef(), ptr, 0, "");
+ return self.builder.buildInsertValue(partial, len, 1, "");
+ }
+
fn airAdd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
@@ -2635,20 +2880,76 @@ pub const FuncGen = struct {
return self.builder.buildUMulFixSat(lhs, rhs, "");
}
- fn airDiv(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
+ fn airDivFloat(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+
+ return self.builder.buildFDiv(lhs, rhs, "");
+ }
+
+ fn airDivTrunc(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.air.typeOfIndex(inst);
- if (inst_ty.isRuntimeFloat()) return self.builder.buildFDiv(lhs, rhs, "");
+ if (inst_ty.isRuntimeFloat()) {
+ const result = self.builder.buildFDiv(lhs, rhs, "");
+ return self.callTrunc(result, inst_ty);
+ }
if (inst_ty.isSignedInt()) return self.builder.buildSDiv(lhs, rhs, "");
return self.builder.buildUDiv(lhs, rhs, "");
}
+ fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+ const inst_ty = self.air.typeOfIndex(inst);
+
+ if (inst_ty.isRuntimeFloat()) {
+ const result = self.builder.buildFDiv(lhs, rhs, "");
+ return try self.callFloor(result, inst_ty);
+ }
+ if (inst_ty.isSignedInt()) {
+ // const d = @divTrunc(a, b);
+ // const r = @rem(a, b);
+ // return if (r == 0) d else d - ((a < 0) ^ (b < 0));
+ const result_llvm_ty = try self.dg.llvmType(inst_ty);
+ const zero = result_llvm_ty.constNull();
+ const div_trunc = self.builder.buildSDiv(lhs, rhs, "");
+ const rem = self.builder.buildSRem(lhs, rhs, "");
+ const rem_eq_0 = self.builder.buildICmp(.EQ, rem, zero, "");
+ const a_lt_0 = self.builder.buildICmp(.SLT, lhs, zero, "");
+ const b_lt_0 = self.builder.buildICmp(.SLT, rhs, zero, "");
+ const a_b_xor = self.builder.buildXor(a_lt_0, b_lt_0, "");
+ const a_b_xor_ext = self.builder.buildZExt(a_b_xor, div_trunc.typeOf(), "");
+ const d_sub_xor = self.builder.buildSub(div_trunc, a_b_xor_ext, "");
+ return self.builder.buildSelect(rem_eq_0, div_trunc, d_sub_xor, "");
+ }
+ return self.builder.buildUDiv(lhs, rhs, "");
+ }
+
+ fn airDivExact(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ if (self.liveness.isUnused(inst)) return null;
+
+ const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+ const lhs = try self.resolveInst(bin_op.lhs);
+ const rhs = try self.resolveInst(bin_op.rhs);
+ const inst_ty = self.air.typeOfIndex(inst);
+
+ if (inst_ty.isRuntimeFloat()) return self.builder.buildFDiv(lhs, rhs, "");
+ if (inst_ty.isSignedInt()) return self.builder.buildExactSDiv(lhs, rhs, "");
+ return self.builder.buildExactUDiv(lhs, rhs, "");
+ }
+
fn airRem(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
@@ -2691,26 +2992,42 @@ pub const FuncGen = struct {
}
fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
+ if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const base_ptr = try self.resolveInst(bin_op.lhs);
const offset = try self.resolveInst(bin_op.rhs);
- const indices: [1]*const llvm.Value = .{offset};
- return self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
+ const ptr_ty = self.air.typeOf(bin_op.lhs);
+ if (ptr_ty.ptrSize() == .One) {
+ // It's a pointer to an array, so according to LLVM we need an extra GEP index.
+ const indices: [2]*const llvm.Value = .{
+ self.context.intType(32).constNull(), offset,
+ };
+ return self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
+ } else {
+ const indices: [1]*const llvm.Value = .{offset};
+ return self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
+ }
}
fn airPtrSub(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
- if (self.liveness.isUnused(inst))
- return null;
+ if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const base_ptr = try self.resolveInst(bin_op.lhs);
const offset = try self.resolveInst(bin_op.rhs);
const negative_offset = self.builder.buildNeg(offset, "");
- const indices: [1]*const llvm.Value = .{negative_offset};
- return self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
+ const ptr_ty = self.air.typeOf(bin_op.lhs);
+ if (ptr_ty.ptrSize() == .One) {
+ // It's a pointer to an array, so according to LLVM we need an extra GEP index.
+ const indices: [2]*const llvm.Value = .{
+ self.context.intType(32).constNull(), negative_offset,
+ };
+ return self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
+ } else {
+ const indices: [1]*const llvm.Value = .{negative_offset};
+ return self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
+ }
}
fn airAnd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
@@ -2883,8 +3200,9 @@ pub const FuncGen = struct {
const inst_ty = self.air.typeOfIndex(inst);
const llvm_dest_ty = try self.dg.llvmType(inst_ty);
- // TODO look into pulling this logic out into a different AIR instruction than bitcast
- if (operand_ty.zigTypeTag() == .Vector and inst_ty.zigTypeTag() == .Array) {
+ if (operand_ty.zigTypeTag() == .Int and inst_ty.zigTypeTag() == .Pointer) {
+ return self.builder.buildIntToPtr(operand, llvm_dest_ty, "");
+ } else if (operand_ty.zigTypeTag() == .Vector and inst_ty.zigTypeTag() == .Array) {
const target = self.dg.module.getTarget();
const elem_ty = operand_ty.childType();
if (!isByRef(inst_ty)) {
@@ -2914,6 +3232,43 @@ pub const FuncGen = struct {
}
}
return array_ptr;
+ } else if (operand_ty.zigTypeTag() == .Array and inst_ty.zigTypeTag() == .Vector) {
+ const target = self.dg.module.getTarget();
+ const elem_ty = operand_ty.childType();
+ const llvm_vector_ty = try self.dg.llvmType(inst_ty);
+ if (!isByRef(operand_ty)) {
+ return self.dg.todo("implement bitcast non-ref array to vector", .{});
+ }
+
+ const bitcast_ok = elem_ty.bitSize(target) == elem_ty.abiSize(target) * 8;
+ if (bitcast_ok) {
+ const llvm_vector_ptr_ty = llvm_vector_ty.pointerType(0);
+ const casted_ptr = self.builder.buildBitCast(operand, llvm_vector_ptr_ty, "");
+ const vector = self.builder.buildLoad(casted_ptr, "");
+ // The array is aligned to the element's alignment, while the vector might have a completely
+ // different alignment. This means we need to enforce the alignment of this load.
+ vector.setAlignment(elem_ty.abiAlignment(target));
+ return vector;
+ } else {
+ // If the ABI size of the element type is not evenly divisible by size in bits;
+ // a simple bitcast will not work, and we fall back to extractelement.
+ const llvm_usize = try self.dg.llvmType(Type.usize);
+ const llvm_u32 = self.context.intType(32);
+ const zero = llvm_usize.constNull();
+ const vector_len = operand_ty.arrayLen();
+ var vector = llvm_vector_ty.getUndef();
+ var i: u64 = 0;
+ while (i < vector_len) : (i += 1) {
+ const index_usize = llvm_usize.constInt(i, .False);
+ const index_u32 = llvm_u32.constInt(i, .False);
+ const indexes: [2]*const llvm.Value = .{ zero, index_usize };
+ const elem_ptr = self.builder.buildInBoundsGEP(operand, &indexes, indexes.len, "");
+ const elem = self.builder.buildLoad(elem_ptr, "");
+ vector = self.builder.buildInsertElement(vector, elem, index_u32, "");
+ }
+
+ return vector;
+ }
}
return self.builder.buildBitCast(operand, llvm_dest_ty, "");
@@ -3298,6 +3653,37 @@ pub const FuncGen = struct {
}
}
+ fn callFloor(self: *FuncGen, arg: *const llvm.Value, ty: Type) !*const llvm.Value {
+ return self.callFloatUnary(arg, ty, "floor");
+ }
+
+ fn callCeil(self: *FuncGen, arg: *const llvm.Value, ty: Type) !*const llvm.Value {
+ return self.callFloatUnary(arg, ty, "ceil");
+ }
+
+ fn callTrunc(self: *FuncGen, arg: *const llvm.Value, ty: Type) !*const llvm.Value {
+ return self.callFloatUnary(arg, ty, "trunc");
+ }
+
+ fn callFloatUnary(self: *FuncGen, arg: *const llvm.Value, ty: Type, name: []const u8) !*const llvm.Value {
+ const target = self.dg.module.getTarget();
+
+ var fn_name_buf: [100]u8 = undefined;
+ const llvm_fn_name = std.fmt.bufPrintZ(&fn_name_buf, "llvm.{s}.f{d}", .{
+ name, ty.floatBits(target),
+ }) catch unreachable;
+
+ const llvm_fn = self.dg.object.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: {
+ const operand_llvm_ty = try self.dg.llvmType(ty);
+ const param_types = [_]*const llvm.Type{operand_llvm_ty};
+ const fn_type = llvm.functionType(operand_llvm_ty, &param_types, param_types.len, .False);
+ break :blk self.dg.object.llvm_module.addFunction(llvm_fn_name, fn_type);
+ };
+
+ const args: [1]*const llvm.Value = .{arg};
+ return self.builder.buildCall(llvm_fn, &args, args.len, .C, .Auto, "");
+ }
+
fn fieldPtr(
self: *FuncGen,
inst: Air.Inst.Index,
@@ -3336,6 +3722,16 @@ pub const FuncGen = struct {
return self.builder.buildBitCast(union_field_ptr, result_llvm_ty, "");
}
+ fn sliceElemPtr(
+ self: *FuncGen,
+ slice: *const llvm.Value,
+ index: *const llvm.Value,
+ ) *const llvm.Value {
+ const base_ptr = self.builder.buildExtractValue(slice, 0, "");
+ const indices: [1]*const llvm.Value = .{index};
+ return self.builder.buildInBoundsGEP(base_ptr, &indices, indices.len, "");
+ }
+
fn getIntrinsic(self: *FuncGen, name: []const u8) *const llvm.Value {
const id = llvm.lookupIntrinsicID(name.ptr, name.len);
assert(id != 0);
diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig
index ab4cf97350..43aca87532 100644
--- a/src/codegen/llvm/bindings.zig
+++ b/src/codegen/llvm/bindings.zig
@@ -181,6 +181,9 @@ pub const Value = opaque {
pub const setInitializer = LLVMSetInitializer;
extern fn LLVMSetInitializer(GlobalVar: *const Value, ConstantVal: *const Value) void;
+
+ pub const addCase = LLVMAddCase;
+ extern fn LLVMAddCase(Switch: *const Value, OnVal: *const Value, Dest: *const BasicBlock) void;
};
pub const Type = opaque {
@@ -234,6 +237,9 @@ pub const Type = opaque {
pub const getTypeKind = LLVMGetTypeKind;
extern fn LLVMGetTypeKind(Ty: *const Type) TypeKind;
+
+ pub const getElementType = LLVMGetElementType;
+ extern fn LLVMGetElementType(Ty: *const Type) *const Type;
};
pub const Module = opaque {
@@ -316,6 +322,12 @@ pub const VerifierFailureAction = enum(c_int) {
pub const constNeg = LLVMConstNeg;
extern fn LLVMConstNeg(ConstantVal: *const Value) *const Value;
+pub const constVector = LLVMConstVector;
+extern fn LLVMConstVector(
+ ScalarConstantVals: [*]*const Value,
+ Size: c_uint,
+) *const Value;
+
pub const getEnumAttributeKindForName = LLVMGetEnumAttributeKindForName;
extern fn LLVMGetEnumAttributeKindForName(Name: [*]const u8, SLen: usize) c_uint;
@@ -551,6 +563,9 @@ pub const Builder = opaque {
pub const buildCondBr = LLVMBuildCondBr;
extern fn LLVMBuildCondBr(*const Builder, If: *const Value, Then: *const BasicBlock, Else: *const BasicBlock) *const Value;
+ pub const buildSwitch = LLVMBuildSwitch;
+ extern fn LLVMBuildSwitch(*const Builder, V: *const Value, Else: *const BasicBlock, NumCases: c_uint) *const Value;
+
pub const buildPhi = LLVMBuildPhi;
extern fn LLVMBuildPhi(*const Builder, Ty: *const Type, Name: [*:0]const u8) *const Value;
@@ -570,6 +585,15 @@ pub const Builder = opaque {
Name: [*:0]const u8,
) *const Value;
+ pub const buildInsertElement = LLVMBuildInsertElement;
+ extern fn LLVMBuildInsertElement(
+ *const Builder,
+ VecVal: *const Value,
+ EltVal: *const Value,
+ Index: *const Value,
+ Name: [*:0]const u8,
+ ) *const Value;
+
pub const buildPtrToInt = LLVMBuildPtrToInt;
extern fn LLVMBuildPtrToInt(
*const Builder,
@@ -735,6 +759,12 @@ pub const Builder = opaque {
pub const buildSMin = ZigLLVMBuildSMin;
extern fn ZigLLVMBuildSMin(builder: *const Builder, LHS: *const Value, RHS: *const Value, name: [*:0]const u8) *const Value;
+
+ pub const buildExactUDiv = LLVMBuildExactUDiv;
+ extern fn LLVMBuildExactUDiv(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
+
+ pub const buildExactSDiv = LLVMBuildExactSDiv;
+ extern fn LLVMBuildExactSDiv(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;
};
pub const IntPredicate = enum(c_uint) {
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 25a1d228e0..da2fa66fee 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -669,7 +669,6 @@ pub const DeclGen = struct {
.add, .addwrap => try self.airArithOp(inst, .{.OpFAdd, .OpIAdd, .OpIAdd}),
.sub, .subwrap => try self.airArithOp(inst, .{.OpFSub, .OpISub, .OpISub}),
.mul, .mulwrap => try self.airArithOp(inst, .{.OpFMul, .OpIMul, .OpIMul}),
- .div => try self.airArithOp(inst, .{.OpFDiv, .OpSDiv, .OpUDiv}),
.bit_and => try self.airBinOpSimple(inst, .OpBitwiseAnd),
.bit_or => try self.airBinOpSimple(inst, .OpBitwiseOr),
diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig
index 6902553257..75e6a1d78e 100644
--- a/src/codegen/wasm.zig
+++ b/src/codegen/wasm.zig
@@ -822,7 +822,7 @@ pub const Context = struct {
.subwrap => self.airWrapBinOp(inst, .sub),
.mul => self.airBinOp(inst, .mul),
.mulwrap => self.airWrapBinOp(inst, .mul),
- .div => self.airBinOp(inst, .div),
+ .div_trunc => self.airBinOp(inst, .div),
.bit_and => self.airBinOp(inst, .@"and"),
.bit_or => self.airBinOp(inst, .@"or"),
.bool_and => self.airBinOp(inst, .@"and"),
@@ -866,6 +866,7 @@ pub const Context = struct {
.struct_field_ptr_index_1 => self.airStructFieldPtrIndex(inst, 1),
.struct_field_ptr_index_2 => self.airStructFieldPtrIndex(inst, 2),
.struct_field_ptr_index_3 => self.airStructFieldPtrIndex(inst, 3),
+ .struct_field_val => self.airStructFieldVal(inst),
.switch_br => self.airSwitchBr(inst),
.unreach => self.airUnreachable(inst),
.wrap_optional => self.airWrapOptional(inst),
@@ -1456,6 +1457,15 @@ pub const Context = struct {
return WValue{ .local = struct_ptr.multi_value.index + index };
}
+ fn airStructFieldVal(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
+ if (self.liveness.isUnused(inst)) return WValue.none;
+
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
+ const struct_multivalue = self.resolveInst(extra.struct_operand).multi_value;
+ return WValue{ .local = struct_multivalue.index + extra.field_index };
+ }
+
fn airSwitchBr(self: *Context, inst: Air.Inst.Index) InnerError!WValue {
// result type is always 'noreturn'
const blocktype = wasm.block_empty;
diff --git a/src/config.zig.in b/src/config.zig.in
index 62e8785ccb..f193fddb20 100644
--- a/src/config.zig.in
+++ b/src/config.zig.in
@@ -6,6 +6,7 @@ pub const llvm_has_arc = false;
pub const version: [:0]const u8 = "@ZIG_VERSION@";
pub const semver = @import("std").SemanticVersion.parse(version) catch unreachable;
pub const enable_logging: bool = @ZIG_ENABLE_LOGGING_BOOL@;
+pub const enable_link_snapshots: bool = false;
pub const enable_tracy = false;
pub const is_stage1 = true;
pub const skip_non_native = false;
diff --git a/src/link.zig b/src/link.zig
index 675c218d68..a1df48f759 100644
--- a/src/link.zig
+++ b/src/link.zig
@@ -127,6 +127,9 @@ pub const Options = struct {
/// WASI-only. Type of WASI execution model ("command" or "reactor").
wasi_exec_model: std.builtin.WasiExecModel = undefined,
+ /// (Zig compiler development) Enable dumping of linker's state as JSON.
+ enable_link_snapshots: bool = false,
+
pub fn effectiveOutputMode(options: Options) std.builtin.OutputMode {
return if (options.use_lld) .Obj else options.output_mode;
}
@@ -193,12 +196,16 @@ pub const File = struct {
/// rewriting it. A malicious file is detected as incremental link failure
/// and does not cause Illegal Behavior. This operation is not atomic.
pub fn openPath(allocator: *Allocator, options: Options) !*File {
+ if (options.object_format == .macho) {
+ return &(try MachO.openPath(allocator, options)).base;
+ }
+
const use_stage1 = build_options.is_stage1 and options.use_stage1;
if (use_stage1 or options.emit == null) {
return switch (options.object_format) {
.coff => &(try Coff.createEmpty(allocator, options)).base,
.elf => &(try Elf.createEmpty(allocator, options)).base,
- .macho => &(try MachO.createEmpty(allocator, options)).base,
+ .macho => unreachable,
.wasm => &(try Wasm.createEmpty(allocator, options)).base,
.plan9 => return &(try Plan9.createEmpty(allocator, options)).base,
.c => unreachable, // Reported error earlier.
@@ -216,7 +223,7 @@ pub const File = struct {
return switch (options.object_format) {
.coff => &(try Coff.createEmpty(allocator, options)).base,
.elf => &(try Elf.createEmpty(allocator, options)).base,
- .macho => &(try MachO.createEmpty(allocator, options)).base,
+ .macho => unreachable,
.plan9 => &(try Plan9.createEmpty(allocator, options)).base,
.wasm => &(try Wasm.createEmpty(allocator, options)).base,
.c => unreachable, // Reported error earlier.
@@ -236,7 +243,7 @@ pub const File = struct {
const file: *File = switch (options.object_format) {
.coff => &(try Coff.openPath(allocator, sub_path, options)).base,
.elf => &(try Elf.openPath(allocator, sub_path, options)).base,
- .macho => &(try MachO.openPath(allocator, sub_path, options)).base,
+ .macho => unreachable,
.plan9 => &(try Plan9.openPath(allocator, sub_path, options)).base,
.wasm => &(try Wasm.openPath(allocator, sub_path, options)).base,
.c => &(try C.openPath(allocator, sub_path, options)).base,
@@ -577,7 +584,11 @@ pub const File = struct {
const full_obj_path = try o_directory.join(arena, &[_][]const u8{obj_basename});
break :blk full_obj_path;
}
- try base.flushModule(comp);
+ if (base.options.object_format == .macho) {
+ try base.cast(MachO).?.flushObject(comp);
+ } else {
+ try base.flushModule(comp);
+ }
const obj_basename = base.intermediary_basename.?;
const full_obj_path = try directory.join(arena, &[_][]const u8{obj_basename});
break :blk full_obj_path;
diff --git a/src/link/C/zig.h b/src/link/C/zig.h
index 72868e4400..32612fcc41 100644
--- a/src/link/C/zig.h
+++ b/src/link/C/zig.h
@@ -419,7 +419,7 @@ zig_mul_sat_u(u32, uint32_t, uint64_t)
zig_mul_sat_s(i32, int32_t, int64_t)
zig_mul_sat_u(u64, uint64_t, uint128_t)
zig_mul_sat_s(i64, int64_t, int128_t)
-zig_mul_sat_s(isize, intptr_t, int128_t)
+zig_mul_sat_s(isize, intptr_t, int128_t)
zig_mul_sat_s(short, short, int)
zig_mul_sat_s(int, int, long)
zig_mul_sat_s(long, long, long long)
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index fd009ca9f8..a371efaa8a 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -1263,6 +1263,10 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void {
if (self.base.options.link_libcpp) {
try argv.append(comp.libcxxabi_static_lib.?.full_object_path);
try argv.append(comp.libcxx_static_lib.?.full_object_path);
+ }
+
+ // libunwind dep
+ if (self.base.options.link_libunwind) {
try argv.append(comp.libunwind_static_lib.?.full_object_path);
}
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 0d8487ad79..2490ec9124 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -275,18 +275,15 @@ pub const SrcFn = struct {
};
};
-pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*MachO {
+pub fn openPath(allocator: *Allocator, options: link.Options) !*MachO {
assert(options.object_format == .macho);
- if (build_options.have_llvm and options.use_llvm) {
- const self = try createEmpty(allocator, options);
- errdefer self.base.destroy();
-
- self.llvm_object = try LlvmObject.create(allocator, sub_path, options);
- return self;
+ const use_stage1 = build_options.is_stage1 and options.use_stage1;
+ if (use_stage1 or options.emit == null) {
+ return createEmpty(allocator, options);
}
-
- const file = try options.emit.?.directory.handle.createFile(sub_path, .{
+ const emit = options.emit.?;
+ const file = try emit.directory.handle.createFile(emit.sub_path, .{
.truncate = false,
.read = true,
.mode = link.determineMode(options),
@@ -301,7 +298,20 @@ pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Optio
self.base.file = file;
- if (options.output_mode == .Lib and options.link_mode == .Static) {
+ if (build_options.have_llvm and options.use_llvm and options.module != null) {
+ // TODO this intermediary_basename isn't enough; in the case of `zig build-exe`,
+ // we also want to put the intermediary object file in the cache while the
+ // main emit directory is the cwd.
+ const sub_path = try std.fmt.allocPrint(allocator, "{s}{s}", .{
+ emit.sub_path, options.object_format.fileExt(options.target.cpu.arch),
+ });
+ self.llvm_object = try LlvmObject.create(allocator, sub_path, options);
+ self.base.intermediary_basename = sub_path;
+ }
+
+ if (options.output_mode == .Lib and
+ options.link_mode == .Static and self.base.intermediary_basename != null)
+ {
return self;
}
@@ -384,16 +394,22 @@ pub fn flush(self: *MachO, comp: *Compilation) !void {
return error.TODOImplementWritingStaticLibFiles;
}
}
+ try self.flushModule(comp);
+}
+pub fn flushModule(self: *MachO, comp: *Compilation) !void {
const tracy = trace(@src());
defer tracy.end();
+ const use_stage1 = build_options.is_stage1 and self.base.options.use_stage1;
+ if (!use_stage1 and self.base.options.output_mode == .Obj)
+ return self.flushObject(comp);
+
var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator);
defer arena_allocator.deinit();
const arena = &arena_allocator.allocator;
const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type.
- const use_stage1 = build_options.is_stage1 and self.base.options.use_stage1;
// If there is no Zig code to compile, then we should skip flushing the output file because it
// will not be part of the linker line anyway.
@@ -410,7 +426,7 @@ pub fn flush(self: *MachO, comp: *Compilation) !void {
}
const obj_basename = self.base.intermediary_basename orelse break :blk null;
- try self.flushModule(comp);
+ try self.flushObject(comp);
const full_obj_path = try directory.join(arena, &[_][]const u8{obj_basename});
break :blk full_obj_path;
} else null;
@@ -534,15 +550,16 @@ pub fn flush(self: *MachO, comp: *Compilation) !void {
.read = true,
.mode = link.determineMode(self.base.options),
});
- try self.populateMissingMetadata();
+ // Index 0 is always a null symbol.
try self.locals.append(self.base.allocator, .{
.n_strx = 0,
- .n_type = macho.N_UNDF,
+ .n_type = 0,
.n_sect = 0,
.n_desc = 0,
.n_value = 0,
});
try self.strtab.append(self.base.allocator, 0);
+ try self.populateMissingMetadata();
}
if (needs_full_relink) {
@@ -887,7 +904,45 @@ pub fn flush(self: *MachO, comp: *Compilation) !void {
sect.offset = 0;
}
- try self.flushModule(comp);
+ try self.setEntryPoint();
+ try self.updateSectionOrdinals();
+ try self.writeLinkeditSegment();
+
+ if (self.d_sym) |*ds| {
+ // Flush debug symbols bundle.
+ try ds.flushModule(self.base.allocator, self.base.options);
+ }
+
+ if (self.requires_adhoc_codesig) {
+ // Preallocate space for the code signature.
+ // We need to do this at this stage so that we have the load commands with proper values
+ // written out to the file.
+ // The most important here is to have the correct vm and filesize of the __LINKEDIT segment
+ // where the code signature goes into.
+ try self.writeCodeSignaturePadding();
+ }
+
+ try self.writeLoadCommands();
+ try self.writeHeader();
+
+ if (self.entry_addr == null and self.base.options.output_mode == .Exe) {
+ log.debug("flushing. no_entry_point_found = true", .{});
+ self.error_flags.no_entry_point_found = true;
+ } else {
+ log.debug("flushing. no_entry_point_found = false", .{});
+ self.error_flags.no_entry_point_found = false;
+ }
+
+ assert(!self.load_commands_dirty);
+
+ if (self.requires_adhoc_codesig) {
+ try self.writeCodeSignature(); // code signing always comes last
+ }
+
+ if (build_options.enable_link_snapshots) {
+ if (self.base.options.enable_link_snapshots)
+ try self.snapshotState();
+ }
}
cache: {
@@ -909,46 +964,14 @@ pub fn flush(self: *MachO, comp: *Compilation) !void {
self.cold_start = false;
}
-pub fn flushModule(self: *MachO, comp: *Compilation) !void {
- _ = comp;
-
+pub fn flushObject(self: *MachO, comp: *Compilation) !void {
const tracy = trace(@src());
defer tracy.end();
- try self.setEntryPoint();
- try self.updateSectionOrdinals();
- try self.writeLinkeditSegment();
-
- if (self.d_sym) |*ds| {
- // Flush debug symbols bundle.
- try ds.flushModule(self.base.allocator, self.base.options);
- }
+ if (build_options.have_llvm)
+ if (self.llvm_object) |llvm_object| return llvm_object.flushModule(comp);
- if (self.requires_adhoc_codesig) {
- // Preallocate space for the code signature.
- // We need to do this at this stage so that we have the load commands with proper values
- // written out to the file.
- // The most important here is to have the correct vm and filesize of the __LINKEDIT segment
- // where the code signature goes into.
- try self.writeCodeSignaturePadding();
- }
-
- try self.writeLoadCommands();
- try self.writeHeader();
-
- if (self.entry_addr == null and self.base.options.output_mode == .Exe) {
- log.debug("flushing. no_entry_point_found = true", .{});
- self.error_flags.no_entry_point_found = true;
- } else {
- log.debug("flushing. no_entry_point_found = false", .{});
- self.error_flags.no_entry_point_found = false;
- }
-
- assert(!self.load_commands_dirty);
-
- if (self.requires_adhoc_codesig) {
- try self.writeCodeSignature(); // code signing always comes last
- }
+ return error.TODOImplementWritingObjFiles;
}
fn resolveSearchDir(
@@ -2288,7 +2311,7 @@ fn createDsoHandleAtom(self: *MachO) !void {
nlist.n_desc = macho.N_WEAK_DEF;
try self.globals.append(self.base.allocator, nlist);
- _ = self.unresolved.fetchSwapRemove(resolv.where_index);
+ assert(self.unresolved.swapRemove(resolv.where_index));
undef.* = .{
.n_strx = 0,
@@ -2386,7 +2409,7 @@ fn resolveSymbolsInObject(self: *MachO, object_id: u16) !void {
const global = &self.globals.items[resolv.where_index];
if (symbolIsTentative(global.*)) {
- _ = self.tentatives.fetchSwapRemove(resolv.where_index);
+ assert(self.tentatives.swapRemove(resolv.where_index));
} else if (!(symbolIsWeakDef(sym) or symbolIsPext(sym)) and
!(symbolIsWeakDef(global.*) or symbolIsPext(global.*)))
{
@@ -2406,7 +2429,15 @@ fn resolveSymbolsInObject(self: *MachO, object_id: u16) !void {
continue;
},
.undef => {
- _ = self.unresolved.fetchSwapRemove(resolv.where_index);
+ const undef = &self.undefs.items[resolv.where_index];
+ undef.* = .{
+ .n_strx = 0,
+ .n_type = macho.N_UNDF,
+ .n_sect = 0,
+ .n_desc = 0,
+ .n_value = 0,
+ };
+ assert(self.unresolved.swapRemove(resolv.where_index));
},
}
@@ -2465,6 +2496,8 @@ fn resolveSymbolsInObject(self: *MachO, object_id: u16) !void {
.n_value = sym.n_value,
});
_ = try self.tentatives.getOrPut(self.base.allocator, global_sym_index);
+ assert(self.unresolved.swapRemove(resolv.where_index));
+
resolv.* = .{
.where = .global,
.where_index = global_sym_index,
@@ -2477,7 +2510,6 @@ fn resolveSymbolsInObject(self: *MachO, object_id: u16) !void {
.n_desc = 0,
.n_value = 0,
};
- _ = self.unresolved.fetchSwapRemove(resolv.where_index);
},
}
} else {
@@ -3035,6 +3067,7 @@ fn growAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64, match
}
pub fn allocateDeclIndexes(self: *MachO, decl: *Module.Decl) !void {
+ if (self.llvm_object) |_| return;
if (decl.link.macho.local_sym_index != 0) return;
try self.locals.ensureUnusedCapacity(self.base.allocator, 1);
@@ -3380,7 +3413,7 @@ pub fn updateDeclExports(
const sym = &self.globals.items[resolv.where_index];
if (symbolIsTentative(sym.*)) {
- _ = self.tentatives.fetchSwapRemove(resolv.where_index);
+ assert(self.tentatives.swapRemove(resolv.where_index));
} else if (!is_weak and !(symbolIsWeakDef(sym.*) or symbolIsPext(sym.*))) {
_ = try module.failed_exports.put(
module.gpa,
@@ -3406,7 +3439,7 @@ pub fn updateDeclExports(
continue;
},
.undef => {
- _ = self.unresolved.fetchSwapRemove(resolv.where_index);
+ assert(self.unresolved.swapRemove(resolv.where_index));
_ = self.symbol_resolver.remove(n_strx);
},
}
@@ -3458,6 +3491,7 @@ pub fn updateDeclExports(
}
pub fn deleteExport(self: *MachO, exp: Export) void {
+ if (self.llvm_object) |_| return;
const sym_index = exp.sym_index orelse return;
self.globals_free_list.append(self.base.allocator, sym_index) catch {};
const global = &self.globals.items[sym_index];
@@ -4806,9 +4840,17 @@ fn writeSymbolTable(self: *MachO) !void {
}
}
+ var undefs = std.ArrayList(macho.nlist_64).init(self.base.allocator);
+ defer undefs.deinit();
+
+ for (self.undefs.items) |sym| {
+ if (sym.n_strx == 0) continue;
+ try undefs.append(sym);
+ }
+
const nlocals = locals.items.len;
const nexports = self.globals.items.len;
- const nundefs = self.undefs.items.len;
+ const nundefs = undefs.items.len;
const locals_off = symtab.symoff;
const locals_size = nlocals * @sizeOf(macho.nlist_64);
@@ -4823,7 +4865,7 @@ fn writeSymbolTable(self: *MachO) !void {
const undefs_off = exports_off + exports_size;
const undefs_size = nundefs * @sizeOf(macho.nlist_64);
log.debug("writing undefined symbols from 0x{x} to 0x{x}", .{ undefs_off, undefs_size + undefs_off });
- try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.undefs.items), undefs_off);
+ try self.base.file.?.pwriteAll(mem.sliceAsBytes(undefs.items), undefs_off);
symtab.nsyms = @intCast(u32, nlocals + nexports + nundefs);
seg.inner.filesize += locals_size + exports_size + undefs_size;
@@ -5168,3 +5210,274 @@ pub fn findFirst(comptime T: type, haystack: []T, start: usize, predicate: anyty
}
return i;
}
+
+fn snapshotState(self: *MachO) !void {
+ const emit = self.base.options.emit orelse {
+ log.debug("no emit directory found; skipping snapshot...", .{});
+ return;
+ };
+
+ const Snapshot = struct {
+ const Node = struct {
+ const Tag = enum {
+ section_start,
+ section_end,
+ atom_start,
+ atom_end,
+ relocation,
+
+ pub fn jsonStringify(
+ tag: Tag,
+ options: std.json.StringifyOptions,
+ out_stream: anytype,
+ ) !void {
+ _ = options;
+ switch (tag) {
+ .section_start => try out_stream.writeAll("\"section_start\""),
+ .section_end => try out_stream.writeAll("\"section_end\""),
+ .atom_start => try out_stream.writeAll("\"atom_start\""),
+ .atom_end => try out_stream.writeAll("\"atom_end\""),
+ .relocation => try out_stream.writeAll("\"relocation\""),
+ }
+ }
+ };
+ const Payload = struct {
+ name: []const u8 = "",
+ aliases: [][]const u8 = &[0][]const u8{},
+ is_global: bool = false,
+ target: u64 = 0,
+ };
+ address: u64,
+ tag: Tag,
+ payload: Payload,
+ };
+ timestamp: i128,
+ nodes: []Node,
+ };
+
+ var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator);
+ defer arena_allocator.deinit();
+ const arena = &arena_allocator.allocator;
+
+ const out_file = try emit.directory.handle.createFile("snapshots.json", .{
+ .truncate = self.cold_start,
+ .read = true,
+ });
+ defer out_file.close();
+
+ if (out_file.seekFromEnd(-1)) {
+ try out_file.writer().writeByte(',');
+ } else |err| switch (err) {
+ error.Unseekable => try out_file.writer().writeByte('['),
+ else => |e| return e,
+ }
+ var writer = out_file.writer();
+
+ var snapshot = Snapshot{
+ .timestamp = std.time.nanoTimestamp(),
+ .nodes = undefined,
+ };
+ var nodes = std.ArrayList(Snapshot.Node).init(arena);
+
+ for (self.section_ordinals.keys()) |key| {
+ const seg = self.load_commands.items[key.seg].Segment;
+ const sect = seg.sections.items[key.sect];
+ const sect_name = try std.fmt.allocPrint(arena, "{s},{s}", .{
+ commands.segmentName(sect),
+ commands.sectionName(sect),
+ });
+ try nodes.append(.{
+ .address = sect.addr,
+ .tag = .section_start,
+ .payload = .{ .name = sect_name },
+ });
+
+ var atom: *Atom = self.atoms.get(key) orelse {
+ try nodes.append(.{
+ .address = sect.addr + sect.size,
+ .tag = .section_end,
+ .payload = .{},
+ });
+ continue;
+ };
+
+ while (atom.prev) |prev| {
+ atom = prev;
+ }
+
+ while (true) {
+ const atom_sym = self.locals.items[atom.local_sym_index];
+ var node = Snapshot.Node{
+ .address = atom_sym.n_value,
+ .tag = .atom_start,
+ .payload = .{
+ .name = self.getString(atom_sym.n_strx),
+ .is_global = self.symbol_resolver.contains(atom_sym.n_strx),
+ },
+ };
+
+ var aliases = std.ArrayList([]const u8).init(arena);
+ for (atom.aliases.items) |loc| {
+ try aliases.append(self.getString(self.locals.items[loc].n_strx));
+ }
+ node.payload.aliases = aliases.toOwnedSlice();
+ try nodes.append(node);
+
+ var relocs = std.ArrayList(Snapshot.Node).init(arena);
+ try relocs.ensureTotalCapacity(atom.relocs.items.len);
+ for (atom.relocs.items) |rel| {
+ const arch = self.base.options.target.cpu.arch;
+ const source_addr = blk: {
+ const sym = self.locals.items[atom.local_sym_index];
+ break :blk sym.n_value + rel.offset;
+ };
+ const target_addr = blk: {
+ const is_via_got = got: {
+ switch (arch) {
+ .aarch64 => break :got switch (@intToEnum(macho.reloc_type_arm64, rel.@"type")) {
+ .ARM64_RELOC_GOT_LOAD_PAGE21, .ARM64_RELOC_GOT_LOAD_PAGEOFF12 => true,
+ else => false,
+ },
+ .x86_64 => break :got switch (@intToEnum(macho.reloc_type_x86_64, rel.@"type")) {
+ .X86_64_RELOC_GOT, .X86_64_RELOC_GOT_LOAD => true,
+ else => false,
+ },
+ else => unreachable,
+ }
+ };
+
+ if (is_via_got) {
+ const got_atom = self.got_entries_map.get(rel.target).?;
+ break :blk self.locals.items[got_atom.local_sym_index].n_value;
+ }
+
+ switch (rel.target) {
+ .local => |sym_index| {
+ const sym = self.locals.items[sym_index];
+ const is_tlv = is_tlv: {
+ const source_sym = self.locals.items[atom.local_sym_index];
+ const match = self.section_ordinals.keys()[source_sym.n_sect - 1];
+ const match_seg = self.load_commands.items[match.seg].Segment;
+ const match_sect = match_seg.sections.items[match.sect];
+ break :is_tlv commands.sectionType(match_sect) == macho.S_THREAD_LOCAL_VARIABLES;
+ };
+ if (is_tlv) {
+ const match_seg = self.load_commands.items[self.data_segment_cmd_index.?].Segment;
+ const base_address = inner: {
+ if (self.tlv_data_section_index) |i| {
+ break :inner match_seg.sections.items[i].addr;
+ } else if (self.tlv_bss_section_index) |i| {
+ break :inner match_seg.sections.items[i].addr;
+ } else unreachable;
+ };
+ break :blk sym.n_value - base_address;
+ }
+ break :blk sym.n_value;
+ },
+ .global => |n_strx| {
+ const resolv = self.symbol_resolver.get(n_strx).?;
+ switch (resolv.where) {
+ .global => break :blk self.globals.items[resolv.where_index].n_value,
+ .undef => {
+ break :blk if (self.stubs_map.get(n_strx)) |stub_atom|
+ self.locals.items[stub_atom.local_sym_index].n_value
+ else
+ 0;
+ },
+ }
+ },
+ }
+ };
+
+ relocs.appendAssumeCapacity(.{
+ .address = source_addr,
+ .tag = .relocation,
+ .payload = .{ .target = target_addr },
+ });
+ }
+
+ if (atom.contained.items.len == 0) {
+ try nodes.appendSlice(relocs.items);
+ } else {
+ // Need to reverse iteration order of relocs since by default for relocatable sources
+ // they come in reverse. For linking, this doesn't matter in any way, however, for
+ // arranging the memoryline for displaying it does.
+ std.mem.reverse(Snapshot.Node, relocs.items);
+
+ var next_i: usize = 0;
+ var last_rel: usize = 0;
+ while (next_i < atom.contained.items.len) : (next_i += 1) {
+ const loc = atom.contained.items[next_i];
+ const cont_sym = self.locals.items[loc.local_sym_index];
+ const cont_sym_name = self.getString(cont_sym.n_strx);
+ var contained_node = Snapshot.Node{
+ .address = cont_sym.n_value,
+ .tag = .atom_start,
+ .payload = .{
+ .name = cont_sym_name,
+ .is_global = self.symbol_resolver.contains(cont_sym.n_strx),
+ },
+ };
+
+ // Accumulate aliases
+ var inner_aliases = std.ArrayList([]const u8).init(arena);
+ while (true) {
+ if (next_i + 1 >= atom.contained.items.len) break;
+ const next_sym = self.locals.items[atom.contained.items[next_i + 1].local_sym_index];
+ if (next_sym.n_value != cont_sym.n_value) break;
+ const next_sym_name = self.getString(next_sym.n_strx);
+ if (self.symbol_resolver.contains(next_sym.n_strx)) {
+ try inner_aliases.append(contained_node.payload.name);
+ contained_node.payload.name = next_sym_name;
+ contained_node.payload.is_global = true;
+ } else try inner_aliases.append(next_sym_name);
+ next_i += 1;
+ }
+
+ const cont_size = if (next_i + 1 < atom.contained.items.len)
+ self.locals.items[atom.contained.items[next_i + 1].local_sym_index].n_value - cont_sym.n_value
+ else
+ atom_sym.n_value + atom.size - cont_sym.n_value;
+
+ contained_node.payload.aliases = inner_aliases.toOwnedSlice();
+ try nodes.append(contained_node);
+
+ for (relocs.items[last_rel..]) |rel, rel_i| {
+ if (rel.address >= cont_sym.n_value + cont_size) {
+ last_rel = rel_i;
+ break;
+ }
+ try nodes.append(rel);
+ }
+
+ try nodes.append(.{
+ .address = cont_sym.n_value + cont_size,
+ .tag = .atom_end,
+ .payload = .{},
+ });
+ }
+ }
+
+ try nodes.append(.{
+ .address = atom_sym.n_value + atom.size,
+ .tag = .atom_end,
+ .payload = .{},
+ });
+
+ if (atom.next) |next| {
+ atom = next;
+ } else break;
+ }
+
+ try nodes.append(.{
+ .address = sect.addr + sect.size,
+ .tag = .section_end,
+ .payload = .{},
+ });
+ }
+
+ snapshot.nodes = nodes.toOwnedSlice();
+
+ try std.json.stringify(snapshot, .{}, writer);
+ try writer.writeByte(']');
+}
diff --git a/src/link/MachO/Atom.zig b/src/link/MachO/Atom.zig
index c32d1f1d8f..07a20ac336 100644
--- a/src/link/MachO/Atom.zig
+++ b/src/link/MachO/Atom.zig
@@ -345,15 +345,9 @@ pub fn parseRelocs(self: *Atom, relocs: []macho.relocation_info, context: RelocC
const seg = context.object.load_commands.items[context.object.segment_cmd_index.?].Segment;
const sect = seg.sections.items[sect_id];
const match = (try context.macho_file.getMatchingSection(sect)) orelse unreachable;
- const sym_name = try std.fmt.allocPrint(context.allocator, "{s}_{s}_{s}", .{
- context.object.name,
- commands.segmentName(sect),
- commands.sectionName(sect),
- });
- defer context.allocator.free(sym_name);
const local_sym_index = @intCast(u32, context.macho_file.locals.items.len);
try context.macho_file.locals.append(context.allocator, .{
- .n_strx = try context.macho_file.makeString(sym_name),
+ .n_strx = 0,
.n_type = macho.N_SECT,
.n_sect = @intCast(u8, context.macho_file.section_ordinals.getIndex(match).? + 1),
.n_desc = 0,
diff --git a/src/link/MachO/Object.zig b/src/link/MachO/Object.zig
index de747eb4c7..f0a299182c 100644
--- a/src/link/MachO/Object.zig
+++ b/src/link/MachO/Object.zig
@@ -174,7 +174,13 @@ pub fn free(self: *Object, allocator: *Allocator, macho_file: *MachO) void {
if (atom.local_sym_index != 0) {
macho_file.locals_free_list.append(allocator, atom.local_sym_index) catch {};
const local = &macho_file.locals.items[atom.local_sym_index];
- local.n_type = 0;
+ local.* = .{
+ .n_strx = 0,
+ .n_type = 0,
+ .n_sect = 0,
+ .n_desc = 0,
+ .n_value = 0,
+ };
atom.local_sym_index = 0;
}
if (atom == last_atom) {
@@ -458,15 +464,9 @@ pub fn parseIntoAtoms(self: *Object, allocator: *Allocator, macho_file: *MachO)
// a temp one, unless we already did that when working out the relocations
// of other atoms.
const atom_local_sym_index = self.sections_as_symbols.get(sect_id) orelse blk: {
- const sym_name = try std.fmt.allocPrint(allocator, "{s}_{s}_{s}", .{
- self.name,
- segmentName(sect),
- sectionName(sect),
- });
- defer allocator.free(sym_name);
const atom_local_sym_index = @intCast(u32, macho_file.locals.items.len);
try macho_file.locals.append(allocator, .{
- .n_strx = try macho_file.makeString(sym_name),
+ .n_strx = 0,
.n_type = macho.N_SECT,
.n_sect = @intCast(u8, macho_file.section_ordinals.getIndex(match).? + 1),
.n_desc = 0,
diff --git a/src/main.zig b/src/main.zig
index d978df565d..a2dd8d1d96 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -27,7 +27,7 @@ const crash_report = @import("crash_report.zig");
pub usingnamespace crash_report.root_decls;
pub fn fatal(comptime format: []const u8, args: anytype) noreturn {
- std.log.emerg(format, args);
+ std.log.err(format, args);
process.exit(1);
}
@@ -94,7 +94,7 @@ const usage = if (debug_extensions_enabled) debug_usage else normal_usage;
pub const log_level: std.log.Level = switch (builtin.mode) {
.Debug => .debug,
.ReleaseSafe, .ReleaseFast => .info,
- .ReleaseSmall => .crit,
+ .ReleaseSmall => .err,
};
var log_scopes: std.ArrayListUnmanaged([]const u8) = .{};
@@ -120,14 +120,7 @@ pub fn log(
} else return;
}
- // We only recognize 4 log levels in this application.
- const level_txt = switch (level) {
- .emerg, .alert, .crit, .err => "error",
- .warn => "warning",
- .notice, .info => "info",
- .debug => "debug",
- };
- const prefix1 = level_txt;
+ const prefix1 = comptime level.asText();
const prefix2 = if (scope == .default) ": " else "(" ++ @tagName(scope) ++ "): ";
// Print the message to stderr, silently ignoring any errors
@@ -436,6 +429,7 @@ const usage_build_generic =
\\ --verbose-llvm-cpu-features Enable compiler debug output for LLVM CPU features
\\ --debug-log [scope] Enable printing debug/info log messages for scope
\\ --debug-compile-errors Crash with helpful diagnostics at the first compile error
+ \\ --debug-link-snapshot Enable dumping of the linker's state in JSON format
\\
;
@@ -635,6 +629,7 @@ fn buildOutputType(
var major_subsystem_version: ?u32 = null;
var minor_subsystem_version: ?u32 = null;
var wasi_exec_model: ?std.builtin.WasiExecModel = null;
+ var enable_link_snapshots: bool = false;
var system_libs = std.ArrayList([]const u8).init(gpa);
defer system_libs.deinit();
@@ -932,6 +927,12 @@ fn buildOutputType(
} else {
try log_scopes.append(gpa, args[i]);
}
+ } else if (mem.eql(u8, arg, "--debug-link-snapshot")) {
+ if (!build_options.enable_link_snapshots) {
+ std.log.warn("Zig was compiled without linker snapshots enabled (-Dlink-snapshot). --debug-link-snapshot has no effect.", .{});
+ } else {
+ enable_link_snapshots = true;
+ }
} else if (mem.eql(u8, arg, "-fcompiler-rt")) {
want_compiler_rt = true;
} else if (mem.eql(u8, arg, "-fno-compiler-rt")) {
@@ -2149,6 +2150,7 @@ fn buildOutputType(
.subsystem = subsystem,
.wasi_exec_model = wasi_exec_model,
.debug_compile_errors = debug_compile_errors,
+ .enable_link_snapshots = enable_link_snapshots,
}) catch |err| {
fatal("unable to create compilation: {s}", .{@errorName(err)});
};
diff --git a/src/print_air.zig b/src/print_air.zig
index 861483abac..17efa8297d 100644
--- a/src/print_air.zig
+++ b/src/print_air.zig
@@ -111,7 +111,10 @@ const Writer = struct {
.mul,
.mulwrap,
.mul_sat,
- .div,
+ .div_float,
+ .div_trunc,
+ .div_floor,
+ .div_exact,
.rem,
.mod,
.ptr_add,
@@ -130,9 +133,7 @@ const Writer = struct {
.store,
.array_elem_val,
.slice_elem_val,
- .ptr_slice_elem_val,
.ptr_elem_val,
- .ptr_ptr_elem_val,
.shl,
.shl_exact,
.shl_sat,
@@ -183,6 +184,8 @@ const Writer = struct {
.wrap_errunion_err,
.slice_ptr,
.slice_len,
+ .ptr_slice_len_ptr,
+ .ptr_slice_ptr_ptr,
.struct_field_ptr_index_0,
.struct_field_ptr_index_1,
.struct_field_ptr_index_2,
@@ -199,7 +202,11 @@ const Writer = struct {
.loop,
=> try w.writeBlock(s, inst),
- .ptr_elem_ptr => try w.writePtrElemPtr(s, inst),
+ .slice,
+ .slice_elem_ptr,
+ .ptr_elem_ptr,
+ => try w.writeTyPlBin(s, inst),
+
.struct_field_ptr => try w.writeStructField(s, inst),
.struct_field_val => try w.writeStructField(s, inst),
.constant => try w.writeConstant(s, inst),
@@ -280,7 +287,7 @@ const Writer = struct {
try s.print(", {d}", .{extra.field_index});
}
- fn writePtrElemPtr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
+ fn writeTyPlBin(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const ty_pl = w.air.instructions.items(.data)[inst].ty_pl;
const extra = w.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -368,9 +375,52 @@ const Writer = struct {
}
fn writeAssembly(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
- _ = w;
- _ = inst;
- try s.writeAll("TODO");
+ const ty_pl = w.air.instructions.items(.data)[inst].ty_pl;
+ const air_asm = w.air.extraData(Air.Asm, ty_pl.payload);
+ const zir = w.zir;
+ const extended = zir.instructions.items(.data)[air_asm.data.zir_index].extended;
+ const zir_extra = zir.extraData(Zir.Inst.Asm, extended.operand);
+ const asm_source = zir.nullTerminatedString(zir_extra.data.asm_source);
+ const outputs_len = @truncate(u5, extended.small);
+ const args_len = @truncate(u5, extended.small >> 5);
+ const clobbers_len = @truncate(u5, extended.small >> 10);
+ const args = @bitCast([]const Air.Inst.Ref, w.air.extra[air_asm.end..][0..args_len]);
+
+ var extra_i: usize = zir_extra.end;
+ const output_constraint: ?[]const u8 = out: {
+ var i: usize = 0;
+ while (i < outputs_len) : (i += 1) {
+ const output = zir.extraData(Zir.Inst.Asm.Output, extra_i);
+ extra_i = output.end;
+ break :out zir.nullTerminatedString(output.data.constraint);
+ }
+ break :out null;
+ };
+
+ try s.print("\"{s}\"", .{asm_source});
+
+ if (output_constraint) |constraint| {
+ const ret_ty = w.air.typeOfIndex(inst);
+ try s.print(", {s} -> {}", .{ constraint, ret_ty });
+ }
+
+ for (args) |arg| {
+ const input = zir.extraData(Zir.Inst.Asm.Input, extra_i);
+ extra_i = input.end;
+ const constraint = zir.nullTerminatedString(input.data.constraint);
+
+ try s.print(", {s} = (", .{constraint});
+ try w.writeOperand(s, inst, 0, arg);
+ try s.writeByte(')');
+ }
+
+ const clobbers = zir.extra[extra_i..][0..clobbers_len];
+ for (clobbers) |clobber_index| {
+ const clobber = zir.nullTerminatedString(clobber_index);
+ try s.writeAll(", ~{");
+ try s.writeAll(clobber);
+ try s.writeAll("}");
+ }
}
fn writeDbgStmt(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
diff --git a/src/print_zir.zig b/src/print_zir.zig
index f0f282f55d..a3988986f0 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -184,7 +184,6 @@ const Writer = struct {
.is_non_err,
.is_non_err_ptr,
.typeof,
- .typeof_elem,
.struct_init_empty,
.type_info,
.size_of,
@@ -234,6 +233,8 @@ const Writer = struct {
.@"await",
.await_nosuspend,
.fence,
+ .switch_cond,
+ .switch_cond_ref,
=> try self.writeUnNode(stream, inst),
.ref,
@@ -347,7 +348,6 @@ const Writer = struct {
.reduce,
.atomic_load,
.bitcast,
- .bitcast_result_ptr,
.vector_type,
.maximum,
.minimum,
@@ -379,19 +379,7 @@ const Writer = struct {
.error_set_decl_anon => try self.writeErrorSetDecl(stream, inst, .anon),
.error_set_decl_func => try self.writeErrorSetDecl(stream, inst, .func),
- .switch_block => try self.writePlNodeSwitchBr(stream, inst, .none),
- .switch_block_else => try self.writePlNodeSwitchBr(stream, inst, .@"else"),
- .switch_block_under => try self.writePlNodeSwitchBr(stream, inst, .under),
- .switch_block_ref => try self.writePlNodeSwitchBr(stream, inst, .none),
- .switch_block_ref_else => try self.writePlNodeSwitchBr(stream, inst, .@"else"),
- .switch_block_ref_under => try self.writePlNodeSwitchBr(stream, inst, .under),
-
- .switch_block_multi => try self.writePlNodeSwitchBlockMulti(stream, inst, .none),
- .switch_block_else_multi => try self.writePlNodeSwitchBlockMulti(stream, inst, .@"else"),
- .switch_block_under_multi => try self.writePlNodeSwitchBlockMulti(stream, inst, .under),
- .switch_block_ref_multi => try self.writePlNodeSwitchBlockMulti(stream, inst, .none),
- .switch_block_ref_else_multi => try self.writePlNodeSwitchBlockMulti(stream, inst, .@"else"),
- .switch_block_ref_under_multi => try self.writePlNodeSwitchBlockMulti(stream, inst, .under),
+ .switch_block => try self.writePlNodeSwitchBlock(stream, inst),
.field_ptr,
.field_val,
@@ -1649,113 +1637,46 @@ const Writer = struct {
try self.writeSrc(stream, inst_data.src());
}
- fn writePlNodeSwitchBr(
- self: *Writer,
- stream: anytype,
- inst: Zir.Inst.Index,
- special_prong: Zir.SpecialProng,
- ) !void {
+ fn writePlNodeSwitchBlock(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
const inst_data = self.code.instructions.items(.data)[inst].pl_node;
const extra = self.code.extraData(Zir.Inst.SwitchBlock, inst_data.payload_index);
- const special: struct {
- body: []const Zir.Inst.Index,
- end: usize,
- } = switch (special_prong) {
- .none => .{ .body = &.{}, .end = extra.end },
- .under, .@"else" => blk: {
- const body_len = self.code.extra[extra.end];
- const extra_body_start = extra.end + 1;
- break :blk .{
- .body = self.code.extra[extra_body_start..][0..body_len],
- .end = extra_body_start + body_len,
- };
- },
- };
-
- try self.writeInstRef(stream, extra.data.operand);
-
- self.indent += 2;
-
- if (special.body.len != 0) {
- const prong_name = switch (special_prong) {
- .@"else" => "else",
- .under => "_",
- else => unreachable,
- };
- try stream.writeAll(",\n");
- try stream.writeByteNTimes(' ', self.indent);
- try stream.print("{s} => ", .{prong_name});
- try self.writeBracedBody(stream, special.body);
- }
-
- var extra_index: usize = special.end;
- {
- var scalar_i: usize = 0;
- while (scalar_i < extra.data.cases_len) : (scalar_i += 1) {
- const item_ref = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
- extra_index += 1;
- const body_len = self.code.extra[extra_index];
- extra_index += 1;
- const body = self.code.extra[extra_index..][0..body_len];
- extra_index += body_len;
-
- try stream.writeAll(",\n");
- try stream.writeByteNTimes(' ', self.indent);
- try self.writeInstRef(stream, item_ref);
- try stream.writeAll(" => ");
- try self.writeBracedBody(stream, body);
- }
- }
-
- self.indent -= 2;
- try stream.writeAll(") ");
- try self.writeSrc(stream, inst_data.src());
- }
+ var extra_index: usize = extra.end;
- fn writePlNodeSwitchBlockMulti(
- self: *Writer,
- stream: anytype,
- inst: Zir.Inst.Index,
- special_prong: Zir.SpecialProng,
- ) !void {
- const inst_data = self.code.instructions.items(.data)[inst].pl_node;
- const extra = self.code.extraData(Zir.Inst.SwitchBlockMulti, inst_data.payload_index);
- const special: struct {
- body: []const Zir.Inst.Index,
- end: usize,
- } = switch (special_prong) {
- .none => .{ .body = &.{}, .end = extra.end },
- .under, .@"else" => blk: {
- const body_len = self.code.extra[extra.end];
- const extra_body_start = extra.end + 1;
- break :blk .{
- .body = self.code.extra[extra_body_start..][0..body_len],
- .end = extra_body_start + body_len,
- };
- },
- };
+ const multi_cases_len = if (extra.data.bits.has_multi_cases) blk: {
+ const multi_cases_len = self.code.extra[extra_index];
+ extra_index += 1;
+ break :blk multi_cases_len;
+ } else 0;
try self.writeInstRef(stream, extra.data.operand);
+ try self.writeFlag(stream, ", ref", extra.data.bits.is_ref);
self.indent += 2;
- if (special.body.len != 0) {
+ else_prong: {
+ const special_prong = extra.data.bits.specialProng();
const prong_name = switch (special_prong) {
.@"else" => "else",
.under => "_",
- else => unreachable,
+ else => break :else_prong,
};
+
+ const body_len = self.code.extra[extra_index];
+ extra_index += 1;
+ const body = self.code.extra[extra_index..][0..body_len];
+ extra_index += body.len;
+
try stream.writeAll(",\n");
try stream.writeByteNTimes(' ', self.indent);
try stream.print("{s} => ", .{prong_name});
- try self.writeBracedBody(stream, special.body);
+ try self.writeBracedBody(stream, body);
}
- var extra_index: usize = special.end;
{
+ const scalar_cases_len = extra.data.bits.scalar_cases_len;
var scalar_i: usize = 0;
- while (scalar_i < extra.data.scalar_cases_len) : (scalar_i += 1) {
+ while (scalar_i < scalar_cases_len) : (scalar_i += 1) {
const item_ref = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]);
extra_index += 1;
const body_len = self.code.extra[extra_index];
@@ -1772,7 +1693,7 @@ const Writer = struct {
}
{
var multi_i: usize = 0;
- while (multi_i < extra.data.multi_cases_len) : (multi_i += 1) {
+ while (multi_i < multi_cases_len) : (multi_i += 1) {
const items_len = self.code.extra[extra_index];
extra_index += 1;
const ranges_len = self.code.extra[extra_index];
diff --git a/src/stage1/codegen.cpp b/src/stage1/codegen.cpp
index 10dbd3b359..047fb54b85 100644
--- a/src/stage1/codegen.cpp
+++ b/src/stage1/codegen.cpp
@@ -2960,33 +2960,7 @@ static LLVMValueRef gen_div(CodeGen *g, bool want_runtime_safety, bool want_fast
}
return result;
case DivKindTrunc:
- {
- LLVMBasicBlockRef ltz_block = LLVMAppendBasicBlock(g->cur_fn_val, "DivTruncLTZero");
- LLVMBasicBlockRef gez_block = LLVMAppendBasicBlock(g->cur_fn_val, "DivTruncGEZero");
- LLVMBasicBlockRef end_block = LLVMAppendBasicBlock(g->cur_fn_val, "DivTruncEnd");
- LLVMValueRef ltz = LLVMBuildFCmp(g->builder, LLVMRealOLT, val1, zero, "");
- if (operand_type->id == ZigTypeIdVector) {
- ltz = ZigLLVMBuildOrReduce(g->builder, ltz);
- }
- LLVMBuildCondBr(g->builder, ltz, ltz_block, gez_block);
-
- LLVMPositionBuilderAtEnd(g->builder, ltz_block);
- LLVMValueRef ceiled = gen_float_op(g, result, operand_type, BuiltinFnIdCeil);
- LLVMBasicBlockRef ceiled_end_block = LLVMGetInsertBlock(g->builder);
- LLVMBuildBr(g->builder, end_block);
-
- LLVMPositionBuilderAtEnd(g->builder, gez_block);
- LLVMValueRef floored = gen_float_op(g, result, operand_type, BuiltinFnIdFloor);
- LLVMBasicBlockRef floored_end_block = LLVMGetInsertBlock(g->builder);
- LLVMBuildBr(g->builder, end_block);
-
- LLVMPositionBuilderAtEnd(g->builder, end_block);
- LLVMValueRef phi = LLVMBuildPhi(g->builder, get_llvm_type(g, operand_type), "");
- LLVMValueRef incoming_values[] = { ceiled, floored };
- LLVMBasicBlockRef incoming_blocks[] = { ceiled_end_block, floored_end_block };
- LLVMAddIncoming(phi, incoming_values, incoming_blocks, 2);
- return phi;
- }
+ return gen_float_op(g, result, operand_type, BuiltinFnIdTrunc);
case DivKindFloor:
return gen_float_op(g, result, operand_type, BuiltinFnIdFloor);
}
diff --git a/src/translate_c.zig b/src/translate_c.zig
index 7247ed50a9..84e809730e 100644
--- a/src/translate_c.zig
+++ b/src/translate_c.zig
@@ -575,12 +575,14 @@ fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void {
const fn_decl_loc = fn_decl.getLocation();
const has_body = fn_decl.hasBody();
const storage_class = fn_decl.getStorageClass();
+ const is_always_inline = has_body and fn_decl.hasAlwaysInlineAttr();
var decl_ctx = FnDeclContext{
.fn_name = fn_name,
.has_body = has_body,
.storage_class = storage_class,
+ .is_always_inline = is_always_inline,
.is_export = switch (storage_class) {
- .None => has_body and !fn_decl.isInlineSpecified(),
+ .None => has_body and !is_always_inline and !fn_decl.isInlineSpecified(),
.Extern, .Static => false,
.PrivateExtern => return failDecl(c, fn_decl_loc, fn_name, "unsupported storage class: private extern", .{}),
.Auto => unreachable, // Not legal on functions
@@ -615,6 +617,7 @@ fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void {
decl_ctx.has_body = false;
decl_ctx.storage_class = .Extern;
decl_ctx.is_export = false;
+ decl_ctx.is_always_inline = false;
try warn(c, &c.global_scope.base, fn_decl_loc, "TODO unable to translate variadic function, demoted to extern", .{});
}
break :blk transFnProto(c, fn_decl, fn_proto_type, fn_decl_loc, decl_ctx, true) catch |err| switch (err) {
@@ -653,6 +656,7 @@ fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void {
const param_name = param.name orelse {
proto_node.data.is_extern = true;
proto_node.data.is_export = false;
+ proto_node.data.is_inline = false;
try warn(c, &c.global_scope.base, fn_decl_loc, "function {s} parameter has no name, demoted to extern", .{fn_name});
return addTopLevelDecl(c, fn_name, Node.initPayload(&proto_node.base));
};
@@ -685,6 +689,7 @@ fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void {
=> {
proto_node.data.is_extern = true;
proto_node.data.is_export = false;
+ proto_node.data.is_inline = false;
try warn(c, &c.global_scope.base, fn_decl_loc, "unable to translate function, demoted to extern", .{});
return addTopLevelDecl(c, fn_name, Node.initPayload(&proto_node.base));
},
@@ -704,6 +709,7 @@ fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void {
=> {
proto_node.data.is_extern = true;
proto_node.data.is_export = false;
+ proto_node.data.is_inline = false;
try warn(c, &c.global_scope.base, fn_decl_loc, "unable to create a return value for function, demoted to extern", .{});
return addTopLevelDecl(c, fn_name, Node.initPayload(&proto_node.base));
},
@@ -974,6 +980,7 @@ fn buildFlexibleArrayFn(
.is_pub = true,
.is_extern = false,
.is_export = false,
+ .is_inline = false,
.is_var_args = false,
.name = field_name,
.linksection_string = null,
@@ -2808,16 +2815,18 @@ fn maybeBlockify(c: *Context, scope: *Scope, stmt: *const clang.Stmt) TransError
.NullStmtClass,
.WhileStmtClass,
=> return transStmt(c, scope, stmt, .unused),
- else => {
- var block_scope = try Scope.Block.init(c, scope, false);
- defer block_scope.deinit();
- const result = try transStmt(c, &block_scope.base, stmt, .unused);
- try block_scope.statements.append(result);
- return block_scope.complete(c);
- },
+ else => return blockify(c, scope, stmt),
}
}
+fn blockify(c: *Context, scope: *Scope, stmt: *const clang.Stmt) TransError!Node {
+ var block_scope = try Scope.Block.init(c, scope, false);
+ defer block_scope.deinit();
+ const result = try transStmt(c, &block_scope.base, stmt, .unused);
+ try block_scope.statements.append(result);
+ return block_scope.complete(c);
+}
+
fn transIfStmt(
c: *Context,
scope: *Scope,
@@ -2835,9 +2844,21 @@ fn transIfStmt(
const cond_expr = @ptrCast(*const clang.Expr, stmt.getCond());
const cond = try transBoolExpr(c, &cond_scope.base, cond_expr, .used);
- const then_body = try maybeBlockify(c, scope, stmt.getThen());
+ const then_stmt = stmt.getThen();
+ const else_stmt = stmt.getElse();
+ const then_class = then_stmt.getStmtClass();
+ // block needed to keep else statement from attaching to inner while
+ const must_blockify = (else_stmt != null) and switch (then_class) {
+ .DoStmtClass, .ForStmtClass, .WhileStmtClass => true,
+ else => false,
+ };
+
+ const then_body = if (must_blockify)
+ try blockify(c, scope, then_stmt)
+ else
+ try maybeBlockify(c, scope, then_stmt);
- const else_body = if (stmt.getElse()) |expr|
+ const else_body = if (else_stmt) |expr|
try maybeBlockify(c, scope, expr)
else
null;
@@ -4807,6 +4828,7 @@ const FnDeclContext = struct {
fn_name: []const u8,
has_body: bool,
storage_class: clang.StorageClass,
+ is_always_inline: bool,
is_export: bool,
};
@@ -4857,7 +4879,7 @@ fn transFnNoProto(
is_pub: bool,
) !*ast.Payload.Func {
const cc = try transCC(c, fn_ty, source_loc);
- const is_var_args = if (fn_decl_context) |ctx| (!ctx.is_export and ctx.storage_class != .Static) else true;
+ const is_var_args = if (fn_decl_context) |ctx| (!ctx.is_export and ctx.storage_class != .Static and !ctx.is_always_inline) else true;
return finishTransFnProto(c, null, null, fn_ty, source_loc, fn_decl_context, is_var_args, cc, is_pub);
}
@@ -4874,9 +4896,9 @@ fn finishTransFnProto(
) !*ast.Payload.Func {
const is_export = if (fn_decl_context) |ctx| ctx.is_export else false;
const is_extern = if (fn_decl_context) |ctx| !ctx.has_body else false;
+ const is_inline = if (fn_decl_context) |ctx| ctx.is_always_inline else false;
const scope = &c.global_scope.base;
- // TODO check for always_inline attribute
// TODO check for align attribute
var fn_params = std.ArrayList(ast.Payload.Param).init(c.gpa);
@@ -4920,7 +4942,7 @@ fn finishTransFnProto(
const alignment = if (fn_decl) |decl| zigAlignment(decl.getAlignedAttribute(c.clang_context)) else null;
- const explicit_callconv = if ((is_export or is_extern) and cc == .C) null else cc;
+ const explicit_callconv = if ((is_inline or is_export or is_extern) and cc == .C) null else cc;
const return_type_node = blk: {
if (fn_ty.getNoReturnAttr()) {
@@ -4949,6 +4971,7 @@ fn finishTransFnProto(
.is_pub = is_pub,
.is_extern = is_extern,
.is_export = is_export,
+ .is_inline = is_inline,
.is_var_args = is_var_args,
.name = name,
.linksection_string = linksection_string,
diff --git a/src/translate_c/ast.zig b/src/translate_c/ast.zig
index a86ec0d414..315f22d7f2 100644
--- a/src/translate_c/ast.zig
+++ b/src/translate_c/ast.zig
@@ -540,6 +540,7 @@ pub const Payload = struct {
is_pub: bool,
is_extern: bool,
is_export: bool,
+ is_inline: bool,
is_var_args: bool,
name: ?[]const u8,
linksection_string: ?[]const u8,
@@ -2614,6 +2615,7 @@ fn renderFunc(c: *Context, node: Node) !NodeIndex {
if (payload.is_pub) _ = try c.addToken(.keyword_pub, "pub");
if (payload.is_extern) _ = try c.addToken(.keyword_extern, "extern");
if (payload.is_export) _ = try c.addToken(.keyword_export, "export");
+ if (payload.is_inline) _ = try c.addToken(.keyword_inline, "inline");
const fn_token = try c.addToken(.keyword_fn, "fn");
if (payload.name) |some| _ = try c.addIdentifier(some);
diff --git a/src/type.zig b/src/type.zig
index d845d8a49a..4682b3ed10 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -1529,6 +1529,7 @@ pub const Type = extern union {
return fast_result;
}
+ /// Returns 0 if the pointer is naturally aligned and the element type is 0-bit.
pub fn ptrAlignment(self: Type, target: Target) u32 {
switch (self.tag()) {
.single_const_pointer,
@@ -1693,15 +1694,15 @@ pub const Type = extern union {
},
.error_union => {
- const payload = self.castTag(.error_union).?.data;
- if (!payload.error_set.hasCodeGenBits()) {
- return payload.payload.abiAlignment(target);
- } else if (!payload.payload.hasCodeGenBits()) {
- return payload.error_set.abiAlignment(target);
+ const data = self.castTag(.error_union).?.data;
+ if (!data.error_set.hasCodeGenBits()) {
+ return data.payload.abiAlignment(target);
+ } else if (!data.payload.hasCodeGenBits()) {
+ return data.error_set.abiAlignment(target);
}
- return std.math.max(
- payload.payload.abiAlignment(target),
- payload.error_set.abiAlignment(target),
+ return @maximum(
+ data.payload.abiAlignment(target),
+ data.error_set.abiAlignment(target),
);
},
@@ -1739,10 +1740,10 @@ pub const Type = extern union {
.empty_struct,
.void,
+ .c_void,
=> return 0,
.empty_struct_literal,
- .c_void,
.type,
.comptime_int,
.comptime_float,
@@ -1763,6 +1764,7 @@ pub const Type = extern union {
}
/// Asserts the type has the ABI size already resolved.
+ /// Types that return false for hasCodeGenBits() return 0.
pub fn abiSize(self: Type, target: Target) u64 {
return switch (self.tag()) {
.fn_noreturn_no_args => unreachable, // represents machine code; not a pointer
@@ -1770,53 +1772,32 @@ pub const Type = extern union {
.fn_naked_noreturn_no_args => unreachable, // represents machine code; not a pointer
.fn_ccc_void_no_args => unreachable, // represents machine code; not a pointer
.function => unreachable, // represents machine code; not a pointer
- .c_void => unreachable,
- .type => unreachable,
- .comptime_int => unreachable,
- .comptime_float => unreachable,
+ .@"opaque" => unreachable, // no size available
+ .bound_fn => unreachable, // TODO remove from the language
.noreturn => unreachable,
- .@"null" => unreachable,
- .@"undefined" => unreachable,
- .enum_literal => unreachable,
- .single_const_pointer_to_comptime_int => unreachable,
- .empty_struct_literal => unreachable,
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
- .@"opaque" => unreachable,
.var_args_param => unreachable,
.generic_poison => unreachable,
- .type_info => unreachable,
- .bound_fn => unreachable,
+ .call_options => unreachable, // missing call to resolveTypeFields
+ .export_options => unreachable, // missing call to resolveTypeFields
+ .extern_options => unreachable, // missing call to resolveTypeFields
+ .type_info => unreachable, // missing call to resolveTypeFields
- .empty_struct, .void => 0,
-
- .@"struct" => {
- const fields = self.structFields();
- if (self.castTag(.@"struct")) |payload| {
- const struct_obj = payload.data;
- assert(struct_obj.status == .have_layout);
- const is_packed = struct_obj.layout == .Packed;
- if (is_packed) @panic("TODO packed structs");
- }
- var size: u64 = 0;
- var big_align: u32 = 0;
- for (fields.values()) |field| {
- if (!field.ty.hasCodeGenBits()) continue;
+ .c_void,
+ .type,
+ .comptime_int,
+ .comptime_float,
+ .@"null",
+ .@"undefined",
+ .enum_literal,
+ .single_const_pointer_to_comptime_int,
+ .empty_struct_literal,
+ .empty_struct,
+ .void,
+ => 0,
- const field_align = a: {
- if (field.abi_align.tag() == .abi_align_default) {
- break :a field.ty.abiAlignment(target);
- } else {
- break :a @intCast(u32, field.abi_align.toUnsignedInt());
- }
- };
- big_align = @maximum(big_align, field_align);
- size = std.mem.alignForwardGeneric(u64, size, field_align);
- size += field.ty.abiSize(target);
- }
- size = std.mem.alignForwardGeneric(u64, size, big_align);
- return size;
- },
+ .@"struct" => return self.structFieldOffset(self.structFieldCount(), target),
.enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => {
var buffer: Payload.Bits = undefined;
const int_tag_ty = self.intTagType(&buffer);
@@ -1836,9 +1817,6 @@ pub const Type = extern union {
.address_space,
.float_mode,
.reduce_op,
- .call_options,
- .export_options,
- .extern_options,
=> return 1,
.array_u8 => self.castTag(.array_u8).?.data,
@@ -1942,15 +1920,25 @@ pub const Type = extern union {
},
.error_union => {
- const payload = self.castTag(.error_union).?.data;
- if (!payload.error_set.hasCodeGenBits() and !payload.payload.hasCodeGenBits()) {
+ const data = self.castTag(.error_union).?.data;
+ if (!data.error_set.hasCodeGenBits() and !data.payload.hasCodeGenBits()) {
return 0;
- } else if (!payload.error_set.hasCodeGenBits()) {
- return payload.payload.abiSize(target);
- } else if (!payload.payload.hasCodeGenBits()) {
- return payload.error_set.abiSize(target);
+ } else if (!data.error_set.hasCodeGenBits()) {
+ return data.payload.abiSize(target);
+ } else if (!data.payload.hasCodeGenBits()) {
+ return data.error_set.abiSize(target);
}
- std.debug.panic("TODO abiSize error union {}", .{self});
+ const code_align = abiAlignment(data.error_set, target);
+ const payload_align = abiAlignment(data.payload, target);
+ const big_align = @maximum(code_align, payload_align);
+ const payload_size = abiSize(data.payload, target);
+
+ var size: u64 = 0;
+ size += abiSize(data.error_set, target);
+ size = std.mem.alignForwardGeneric(u64, size, payload_align);
+ size += payload_size;
+ size = std.mem.alignForwardGeneric(u64, size, big_align);
+ return size;
},
};
}
@@ -2509,6 +2497,20 @@ pub const Type = extern union {
};
}
+ /// Returns the type of a pointer to an element.
+ /// Asserts that the type is a pointer, and that the element type is indexable.
+ /// For *[N]T, return *T
+ /// For [*]T, returns *T
+ /// For []T, returns *T
+ /// Handles const-ness and address spaces in particular.
+ pub fn elemPtrType(ptr_ty: Type, arena: *Allocator) !Type {
+ return try Type.ptr(arena, .{
+ .pointee_type = ptr_ty.elemType2(),
+ .mutable = ptr_ty.ptrIsMutable(),
+ .@"addrspace" = ptr_ty.ptrAddressSpace(),
+ });
+ }
+
fn shallowElemType(child_ty: Type) Type {
return switch (child_ty.zigTypeTag()) {
.Array, .Vector => child_ty.childType(),
@@ -2573,12 +2575,14 @@ pub const Type = extern union {
pub fn unionFields(ty: Type) Module.Union.Fields {
const union_obj = ty.cast(Payload.Union).?.data;
+ assert(union_obj.haveFieldTypes());
return union_obj.fields;
}
pub fn unionFieldType(ty: Type, enum_tag: Value) Type {
const union_obj = ty.cast(Payload.Union).?.data;
const index = union_obj.tag_ty.enumTagFieldIndex(enum_tag).?;
+ assert(union_obj.haveFieldTypes());
return union_obj.fields.values()[index].ty;
}
@@ -2617,6 +2621,17 @@ pub const Type = extern union {
};
}
+ /// Returns true if it is an error set that includes anyerror, false otherwise.
+ /// Note that the result may be a false negative if the type did not get error set
+ /// resolution prior to this call.
+ pub fn isAnyError(ty: Type) bool {
+ return switch (ty.tag()) {
+ .anyerror => true,
+ .error_set_inferred => ty.castTag(.error_set_inferred).?.data.is_anyerror,
+ else => false,
+ };
+ }
+
/// Asserts the type is an array or vector.
pub fn arrayLen(ty: Type) u64 {
return switch (ty.tag()) {
@@ -2653,7 +2668,7 @@ pub const Type = extern union {
.pointer => return self.castTag(.pointer).?.data.sentinel,
.array_sentinel => return self.castTag(.array_sentinel).?.data.sentinel,
- .array_u8_sentinel_0 => return Value.initTag(.zero),
+ .array_u8_sentinel_0 => return Value.zero,
else => unreachable,
};
@@ -3073,6 +3088,14 @@ pub const Type = extern union {
}
return Value.initTag(.empty_struct_value);
},
+ .enum_numbered => {
+ const enum_numbered = ty.castTag(.enum_numbered).?.data;
+ if (enum_numbered.fields.count() == 1) {
+ return enum_numbered.values.keys()[0];
+ } else {
+ return null;
+ }
+ },
.enum_full => {
const enum_full = ty.castTag(.enum_full).?.data;
if (enum_full.fields.count() == 1) {
@@ -3084,13 +3107,19 @@ pub const Type = extern union {
.enum_simple => {
const enum_simple = ty.castTag(.enum_simple).?.data;
if (enum_simple.fields.count() == 1) {
- return Value.initTag(.zero);
+ return Value.zero;
+ } else {
+ return null;
+ }
+ },
+ .enum_nonexhaustive => {
+ const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty;
+ if (!tag_ty.hasCodeGenBits()) {
+ return Value.zero;
} else {
return null;
}
},
- .enum_nonexhaustive => ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty,
- .enum_numbered => ty = ty.castTag(.enum_numbered).?.data.tag_ty,
.@"union" => {
return null; // TODO
},
@@ -3106,7 +3135,7 @@ pub const Type = extern union {
.int_unsigned, .int_signed => {
if (ty.cast(Payload.Bits).?.data == 0) {
- return Value.initTag(.zero);
+ return Value.zero;
} else {
return null;
}
@@ -3114,8 +3143,9 @@ pub const Type = extern union {
.vector, .array, .array_u8 => {
if (ty.arrayLen() == 0)
return Value.initTag(.empty_array);
- ty = ty.elemType();
- continue;
+ if (ty.elemType().onePossibleValue() != null)
+ return Value.initTag(.the_only_possible_value);
+ return null;
},
.inferred_alloc_const => unreachable,
@@ -3156,7 +3186,7 @@ pub const Type = extern union {
const info = self.intInfo(target);
if (info.signedness == .unsigned) {
- return Value.initTag(.zero);
+ return Value.zero;
}
if (info.bits <= 6) {
@@ -3229,14 +3259,11 @@ pub const Type = extern union {
};
}
- pub fn enumFieldCount(ty: Type) usize {
- switch (ty.tag()) {
- .enum_full, .enum_nonexhaustive => {
- const enum_full = ty.cast(Payload.EnumFull).?.data;
- return enum_full.fields.count();
- },
- .enum_simple => return ty.castTag(.enum_simple).?.data.fields.count(),
- .enum_numbered => return ty.castTag(.enum_numbered).?.data.fields.count(),
+ pub fn enumFields(ty: Type) Module.EnumFull.NameMap {
+ return switch (ty.tag()) {
+ .enum_full, .enum_nonexhaustive => ty.cast(Payload.EnumFull).?.data.fields,
+ .enum_simple => ty.castTag(.enum_simple).?.data.fields,
+ .enum_numbered => ty.castTag(.enum_numbered).?.data.fields,
.atomic_order,
.atomic_rmw_op,
.calling_convention,
@@ -3247,65 +3274,20 @@ pub const Type = extern union {
.export_options,
.extern_options,
=> @panic("TODO resolve std.builtin types"),
-
else => unreachable,
- }
+ };
+ }
+
+ pub fn enumFieldCount(ty: Type) usize {
+ return ty.enumFields().count();
}
pub fn enumFieldName(ty: Type, field_index: usize) []const u8 {
- switch (ty.tag()) {
- .enum_full, .enum_nonexhaustive => {
- const enum_full = ty.cast(Payload.EnumFull).?.data;
- return enum_full.fields.keys()[field_index];
- },
- .enum_simple => {
- const enum_simple = ty.castTag(.enum_simple).?.data;
- return enum_simple.fields.keys()[field_index];
- },
- .enum_numbered => {
- const enum_numbered = ty.castTag(.enum_numbered).?.data;
- return enum_numbered.fields.keys()[field_index];
- },
- .atomic_order,
- .atomic_rmw_op,
- .calling_convention,
- .address_space,
- .float_mode,
- .reduce_op,
- .call_options,
- .export_options,
- .extern_options,
- => @panic("TODO resolve std.builtin types"),
- else => unreachable,
- }
+ return ty.enumFields().keys()[field_index];
}
pub fn enumFieldIndex(ty: Type, field_name: []const u8) ?usize {
- switch (ty.tag()) {
- .enum_full, .enum_nonexhaustive => {
- const enum_full = ty.cast(Payload.EnumFull).?.data;
- return enum_full.fields.getIndex(field_name);
- },
- .enum_simple => {
- const enum_simple = ty.castTag(.enum_simple).?.data;
- return enum_simple.fields.getIndex(field_name);
- },
- .enum_numbered => {
- const enum_numbered = ty.castTag(.enum_numbered).?.data;
- return enum_numbered.fields.getIndex(field_name);
- },
- .atomic_order,
- .atomic_rmw_op,
- .calling_convention,
- .address_space,
- .float_mode,
- .reduce_op,
- .call_options,
- .export_options,
- .extern_options,
- => @panic("TODO resolve std.builtin types"),
- else => unreachable,
- }
+ return ty.enumFields().getIndex(field_name);
}
/// Asserts `ty` is an enum. `enum_tag` can either be `enum_field_index` or
@@ -3376,6 +3358,7 @@ pub const Type = extern union {
.empty_struct => return .{},
.@"struct" => {
const struct_obj = ty.castTag(.@"struct").?.data;
+ assert(struct_obj.haveFieldTypes());
return struct_obj.fields;
},
else => unreachable,
@@ -3408,6 +3391,51 @@ pub const Type = extern union {
}
}
+ /// Supports structs and unions.
+ pub fn structFieldOffset(ty: Type, index: usize, target: Target) u64 {
+ switch (ty.tag()) {
+ .@"struct" => {
+ const struct_obj = ty.castTag(.@"struct").?.data;
+ assert(struct_obj.status == .have_layout);
+ const is_packed = struct_obj.layout == .Packed;
+ if (is_packed) @panic("TODO packed structs");
+
+ var offset: u64 = 0;
+ var big_align: u32 = 0;
+ for (struct_obj.fields.values()) |field, i| {
+ if (!field.ty.hasCodeGenBits()) continue;
+
+ const field_align = a: {
+ if (field.abi_align.tag() == .abi_align_default) {
+ break :a field.ty.abiAlignment(target);
+ } else {
+ break :a @intCast(u32, field.abi_align.toUnsignedInt());
+ }
+ };
+ big_align = @maximum(big_align, field_align);
+ offset = std.mem.alignForwardGeneric(u64, offset, field_align);
+ if (i == index) return offset;
+ offset += field.ty.abiSize(target);
+ }
+ offset = std.mem.alignForwardGeneric(u64, offset, big_align);
+ return offset;
+ },
+ .@"union" => return 0,
+ .union_tagged => {
+ const union_obj = ty.castTag(.union_tagged).?.data;
+ const layout = union_obj.getLayout(target, true);
+ if (layout.tag_align >= layout.payload_align) {
+ // {Tag, Payload}
+ return std.mem.alignForwardGeneric(u64, layout.tag_size, layout.payload_align);
+ } else {
+ // {Payload, Tag}
+ return 0;
+ }
+ },
+ else => unreachable,
+ }
+ }
+
pub fn declSrcLoc(ty: Type) Module.SrcLoc {
return declSrcLocOrNull(ty).?;
}
@@ -3868,10 +3896,44 @@ pub const Type = extern union {
pub const base_tag = Tag.error_set_inferred;
base: Payload = Payload{ .tag = base_tag },
- data: struct {
+ data: Data,
+
+ pub const Data = struct {
func: *Module.Fn,
+ /// Direct additions to the inferred error set via `return error.Foo;`.
map: std.StringHashMapUnmanaged(void),
- },
+ /// Other functions with inferred error sets which this error set includes.
+ functions: std.AutoHashMapUnmanaged(*Module.Fn, void),
+ is_anyerror: bool,
+
+ pub fn addErrorSet(self: *Data, gpa: *Allocator, err_set_ty: Type) !void {
+ switch (err_set_ty.tag()) {
+ .error_set => {
+ const names = err_set_ty.castTag(.error_set).?.data.names();
+ for (names) |name| {
+ try self.map.put(gpa, name, {});
+ }
+ },
+ .error_set_single => {
+ const name = err_set_ty.castTag(.error_set_single).?.data;
+ try self.map.put(gpa, name, {});
+ },
+ .error_set_inferred => {
+ const func = err_set_ty.castTag(.error_set_inferred).?.data.func;
+ try self.functions.put(gpa, func, {});
+ var it = func.owner_decl.ty.fnReturnType().errorUnionSet()
+ .castTag(.error_set_inferred).?.data.map.iterator();
+ while (it.next()) |entry| {
+ try self.map.put(gpa, entry.key_ptr.*, {});
+ }
+ },
+ .anyerror => {
+ self.is_anyerror = true;
+ },
+ else => unreachable,
+ }
+ }
+ };
};
pub const Pointer = struct {
@@ -3959,6 +4021,7 @@ pub const Type = extern union {
pub const @"u8" = initTag(.u8);
pub const @"bool" = initTag(.bool);
pub const @"usize" = initTag(.usize);
+ pub const @"isize" = initTag(.isize);
pub const @"comptime_int" = initTag(.comptime_int);
pub const @"void" = initTag(.void);
pub const @"type" = initTag(.type);
@@ -4003,7 +4066,7 @@ pub const Type = extern union {
) Allocator.Error!Type {
if (elem_type.eql(Type.u8)) {
if (sent) |some| {
- if (some.eql(Value.initTag(.zero), elem_type)) {
+ if (some.eql(Value.zero, elem_type)) {
return Tag.array_u8_sentinel_0.create(arena, len);
}
} else {
diff --git a/src/value.zig b/src/value.zig
index 127c6c27b4..19c2a73666 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -86,6 +86,8 @@ pub const Value = extern union {
one,
void_value,
unreachable_value,
+ /// The only possible value for a particular type, which is stored externally.
+ the_only_possible_value,
null_value,
bool_true,
bool_false,
@@ -112,9 +114,9 @@ pub const Value = extern union {
/// This Tag will never be seen by machine codegen backends. It is changed into a
/// `decl_ref` when a comptime variable goes out of scope.
decl_ref_mut,
- /// Pointer to a specific element of an array.
+ /// Pointer to a specific element of an array, vector or slice.
elem_ptr,
- /// Pointer to a specific field of a struct.
+ /// Pointer to a specific field of a struct or union.
field_ptr,
/// A slice of u8 whose memory is managed externally.
bytes,
@@ -226,6 +228,7 @@ pub const Value = extern union {
.one,
.void_value,
.unreachable_value,
+ .the_only_possible_value,
.empty_struct_value,
.empty_array,
.null_value,
@@ -415,6 +418,7 @@ pub const Value = extern union {
.one,
.void_value,
.unreachable_value,
+ .the_only_possible_value,
.empty_array,
.null_value,
.bool_true,
@@ -510,7 +514,9 @@ pub const Value = extern union {
.base = payload.base,
.data = try arena.alloc(Value, payload.data.len),
};
- std.mem.copy(Value, new_payload.data, payload.data);
+ for (new_payload.data) |*elem, i| {
+ elem.* = try payload.data[i].copy(arena);
+ }
return Value{ .ptr_otherwise = &new_payload.base };
},
.slice => {
@@ -664,6 +670,7 @@ pub const Value = extern union {
.one => return out_stream.writeAll("1"),
.void_value => return out_stream.writeAll("{}"),
.unreachable_value => return out_stream.writeAll("unreachable"),
+ .the_only_possible_value => return out_stream.writeAll("(the only possible value)"),
.bool_true => return out_stream.writeAll("true"),
.bool_false => return out_stream.writeAll("false"),
.ty => return val.castTag(.ty).?.data.format("", options, out_stream),
@@ -755,6 +762,8 @@ pub const Value = extern union {
const decl_val = try decl.value();
return decl_val.toAllocatedBytes(decl.ty, allocator);
},
+ .the_only_possible_value => return &[_]u8{},
+ .slice => return toAllocatedBytes(val.castTag(.slice).?.data.ptr, ty, allocator),
else => unreachable,
}
}
@@ -847,53 +856,63 @@ pub const Value = extern union {
// TODO should `@intToEnum` do this `@intCast` for you?
return @intToEnum(E, @intCast(@typeInfo(E).Enum.tag_type, field_index));
},
+ .the_only_possible_value => {
+ const fields = std.meta.fields(E);
+ assert(fields.len == 1);
+ return @intToEnum(E, fields[0].value);
+ },
else => unreachable,
}
}
pub fn enumToInt(val: Value, ty: Type, buffer: *Payload.U64) Value {
- if (val.castTag(.enum_field_index)) |enum_field_payload| {
- const field_index = enum_field_payload.data;
- switch (ty.tag()) {
- .enum_full, .enum_nonexhaustive => {
- const enum_full = ty.cast(Type.Payload.EnumFull).?.data;
- if (enum_full.values.count() != 0) {
- return enum_full.values.keys()[field_index];
- } else {
- // Field index and integer values are the same.
- buffer.* = .{
- .base = .{ .tag = .int_u64 },
- .data = field_index,
- };
- return Value.initPayload(&buffer.base);
- }
- },
- .enum_numbered => {
- const enum_obj = ty.castTag(.enum_numbered).?.data;
- if (enum_obj.values.count() != 0) {
- return enum_obj.values.keys()[field_index];
- } else {
- // Field index and integer values are the same.
- buffer.* = .{
- .base = .{ .tag = .int_u64 },
- .data = field_index,
- };
- return Value.initPayload(&buffer.base);
- }
- },
- .enum_simple => {
+ const field_index = switch (val.tag()) {
+ .enum_field_index => val.castTag(.enum_field_index).?.data,
+ .the_only_possible_value => blk: {
+ assert(ty.enumFieldCount() == 1);
+ break :blk 0;
+ },
+ // Assume it is already an integer and return it directly.
+ else => return val,
+ };
+
+ switch (ty.tag()) {
+ .enum_full, .enum_nonexhaustive => {
+ const enum_full = ty.cast(Type.Payload.EnumFull).?.data;
+ if (enum_full.values.count() != 0) {
+ return enum_full.values.keys()[field_index];
+ } else {
// Field index and integer values are the same.
buffer.* = .{
.base = .{ .tag = .int_u64 },
.data = field_index,
};
return Value.initPayload(&buffer.base);
- },
- else => unreachable,
- }
+ }
+ },
+ .enum_numbered => {
+ const enum_obj = ty.castTag(.enum_numbered).?.data;
+ if (enum_obj.values.count() != 0) {
+ return enum_obj.values.keys()[field_index];
+ } else {
+ // Field index and integer values are the same.
+ buffer.* = .{
+ .base = .{ .tag = .int_u64 },
+ .data = field_index,
+ };
+ return Value.initPayload(&buffer.base);
+ }
+ },
+ .enum_simple => {
+ // Field index and integer values are the same.
+ buffer.* = .{
+ .base = .{ .tag = .int_u64 },
+ .data = field_index,
+ };
+ return Value.initPayload(&buffer.base);
+ },
+ else => unreachable,
}
- // Assume it is already an integer and return it directly.
- return val;
}
/// Asserts the value is an integer.
@@ -901,6 +920,7 @@ pub const Value = extern union {
switch (self.tag()) {
.zero,
.bool_false,
+ .the_only_possible_value, // i0, u0
=> return BigIntMutable.init(&space.limbs, 0).toConst(),
.one,
@@ -917,32 +937,40 @@ pub const Value = extern union {
}
}
- /// Asserts the value is an integer and it fits in a u64
- pub fn toUnsignedInt(self: Value) u64 {
- switch (self.tag()) {
+ /// If the value fits in a u64, return it, otherwise null.
+ /// Asserts not undefined.
+ pub fn getUnsignedInt(val: Value) ?u64 {
+ switch (val.tag()) {
.zero,
.bool_false,
+ .the_only_possible_value, // i0, u0
=> return 0,
.one,
.bool_true,
=> return 1,
- .int_u64 => return self.castTag(.int_u64).?.data,
- .int_i64 => return @intCast(u64, self.castTag(.int_i64).?.data),
- .int_big_positive => return self.castTag(.int_big_positive).?.asBigInt().to(u64) catch unreachable,
- .int_big_negative => return self.castTag(.int_big_negative).?.asBigInt().to(u64) catch unreachable,
+ .int_u64 => return val.castTag(.int_u64).?.data,
+ .int_i64 => return @intCast(u64, val.castTag(.int_i64).?.data),
+ .int_big_positive => return val.castTag(.int_big_positive).?.asBigInt().to(u64) catch null,
+ .int_big_negative => return val.castTag(.int_big_negative).?.asBigInt().to(u64) catch null,
.undef => unreachable,
- else => unreachable,
+ else => return null,
}
}
+ /// Asserts the value is an integer and it fits in a u64
+ pub fn toUnsignedInt(val: Value) u64 {
+ return getUnsignedInt(val).?;
+ }
+
/// Asserts the value is an integer and it fits in a i64
pub fn toSignedInt(self: Value) i64 {
switch (self.tag()) {
.zero,
.bool_false,
+ .the_only_possible_value, // i0, u0
=> return 0,
.one,
@@ -993,6 +1021,14 @@ pub const Value = extern union {
const bits = ty.intInfo(target).bits;
bigint.writeTwosComplement(buffer, bits, target.cpu.arch.endian());
},
+ .Enum => {
+ var enum_buffer: Payload.U64 = undefined;
+ const int_val = val.enumToInt(ty, &enum_buffer);
+ var bigint_buffer: BigIntSpace = undefined;
+ const bigint = int_val.toBigInt(&bigint_buffer);
+ const bits = ty.intInfo(target).bits;
+ bigint.writeTwosComplement(buffer, bits, target.cpu.arch.endian());
+ },
.Float => switch (ty.floatBits(target)) {
16 => return floatWriteToMemory(f16, val.toFloat(f16), target, buffer),
32 => return floatWriteToMemory(f32, val.toFloat(f32), target, buffer),
@@ -1000,6 +1036,19 @@ pub const Value = extern union {
128 => return floatWriteToMemory(f128, val.toFloat(f128), target, buffer),
else => unreachable,
},
+ .Array, .Vector => {
+ const len = ty.arrayLen();
+ const elem_ty = ty.childType();
+ const elem_size = elem_ty.abiSize(target);
+ var elem_i: usize = 0;
+ var elem_value_buf: ElemValueBuffer = undefined;
+ var buf_off: usize = 0;
+ while (elem_i < len) : (elem_i += 1) {
+ const elem_val = val.elemValueBuffer(elem_i, &elem_value_buf);
+ writeToMemory(elem_val, elem_ty, target, buffer[buf_off..]);
+ buf_off += elem_size;
+ }
+ },
else => @panic("TODO implement writeToMemory for more types"),
}
}
@@ -1124,6 +1173,11 @@ pub const Value = extern union {
@panic("TODO implement int_big_negative Value clz");
},
+ .the_only_possible_value => {
+ assert(ty_bits == 0);
+ return ty_bits;
+ },
+
else => unreachable,
}
}
@@ -1134,6 +1188,7 @@ pub const Value = extern union {
switch (self.tag()) {
.zero,
.bool_false,
+ .the_only_possible_value,
=> return 0,
.one,
@@ -1213,6 +1268,11 @@ pub const Value = extern union {
else => unreachable,
},
+ .the_only_possible_value => {
+ assert(ty.intInfo(target).bits == 0);
+ return true;
+ },
+
else => unreachable,
}
}
@@ -1251,7 +1311,7 @@ pub const Value = extern union {
/// Asserts the value is numeric
pub fn isZero(self: Value) bool {
return switch (self.tag()) {
- .zero => true,
+ .zero, .the_only_possible_value => true,
.one => false,
.int_u64 => self.castTag(.int_u64).?.data == 0,
@@ -1272,6 +1332,7 @@ pub const Value = extern union {
return switch (lhs.tag()) {
.zero,
.bool_false,
+ .the_only_possible_value,
=> .eq,
.one,
@@ -1354,7 +1415,7 @@ pub const Value = extern union {
assert(b_tag != .undef);
if (a_tag == b_tag) {
switch (a_tag) {
- .void_value, .null_value => return true,
+ .void_value, .null_value, .the_only_possible_value => return true,
.enum_literal => {
const a_name = a.castTag(.enum_literal).?.data;
const b_name = b.castTag(.enum_literal).?.data;
@@ -1371,6 +1432,16 @@ pub const Value = extern union {
var buffer: Type.Payload.ElemType = undefined;
return eql(a_payload, b_payload, ty.optionalChild(&buffer));
},
+ .slice => {
+ const a_payload = a.castTag(.slice).?.data;
+ const b_payload = b.castTag(.slice).?.data;
+ if (!eql(a_payload.len, b_payload.len, Type.usize)) return false;
+
+ var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
+ const ptr_ty = ty.slicePtrFieldType(&ptr_buf);
+
+ return eql(a_payload.ptr, b_payload.ptr, ptr_ty);
+ },
.elem_ptr => @panic("TODO: Implement more pointer eql cases"),
.field_ptr => @panic("TODO: Implement more pointer eql cases"),
.eu_payload_ptr => @panic("TODO: Implement more pointer eql cases"),
@@ -1444,6 +1515,14 @@ pub const Value = extern union {
.variable,
=> std.hash.autoHash(hasher, val.pointerDecl().?),
+ .slice => {
+ const slice = val.castTag(.slice).?.data;
+ var ptr_buf: Type.SlicePtrFieldTypeBuffer = undefined;
+ const ptr_ty = ty.slicePtrFieldType(&ptr_buf);
+ hash(slice.ptr, ptr_ty, hasher);
+ hash(slice.len, Type.usize, hasher);
+ },
+
.elem_ptr => @panic("TODO: Implement more pointer hashing cases"),
.field_ptr => @panic("TODO: Implement more pointer hashing cases"),
.eu_payload_ptr => @panic("TODO: Implement more pointer hashing cases"),
@@ -1555,60 +1634,6 @@ pub const Value = extern union {
}
};
- /// Asserts the value is a pointer and dereferences it.
- /// Returns error.AnalysisFail if the pointer points to a Decl that failed semantic analysis.
- pub fn pointerDeref(val: Value, arena: *Allocator) error{ AnalysisFail, OutOfMemory }!?Value {
- const sub_val: Value = switch (val.tag()) {
- .decl_ref_mut => sub_val: {
- // The decl whose value we are obtaining here may be overwritten with
- // a different value, which would invalidate this memory. So we must
- // copy here.
- const sub_val = try val.castTag(.decl_ref_mut).?.data.decl.value();
- break :sub_val try sub_val.copy(arena);
- },
- .decl_ref => try val.castTag(.decl_ref).?.data.value(),
- .elem_ptr => blk: {
- const elem_ptr = val.castTag(.elem_ptr).?.data;
- const array_val = (try elem_ptr.array_ptr.pointerDeref(arena)) orelse return null;
- break :blk try array_val.elemValue(arena, elem_ptr.index);
- },
- .field_ptr => blk: {
- const field_ptr = val.castTag(.field_ptr).?.data;
- const container_val = (try field_ptr.container_ptr.pointerDeref(arena)) orelse return null;
- break :blk try container_val.fieldValue(arena, field_ptr.field_index);
- },
- .eu_payload_ptr => blk: {
- const err_union_ptr = val.castTag(.eu_payload_ptr).?.data;
- const err_union_val = (try err_union_ptr.pointerDeref(arena)) orelse return null;
- break :blk err_union_val.castTag(.eu_payload).?.data;
- },
- .opt_payload_ptr => blk: {
- const opt_ptr = val.castTag(.opt_payload_ptr).?.data;
- const opt_val = (try opt_ptr.pointerDeref(arena)) orelse return null;
- break :blk opt_val.castTag(.opt_payload).?.data;
- },
-
- .zero,
- .one,
- .int_u64,
- .int_i64,
- .int_big_positive,
- .int_big_negative,
- .variable,
- .extern_fn,
- .function,
- => return null,
-
- else => unreachable,
- };
- if (sub_val.tag() == .variable) {
- // This would be loading a runtime value at compile-time so we return
- // the indicator that this pointer dereference requires being done at runtime.
- return null;
- }
- return sub_val;
- }
-
pub fn isComptimeMutablePtr(val: Value) bool {
return switch (val.tag()) {
.decl_ref_mut => true,
@@ -1706,6 +1731,9 @@ pub const Value = extern union {
.decl_ref => return val.castTag(.decl_ref).?.data.val.elemValueAdvanced(index, arena, buffer),
.decl_ref_mut => return val.castTag(.decl_ref_mut).?.data.decl.val.elemValueAdvanced(index, arena, buffer),
+ // The child type of arrays which have only one possible value need to have only one possible value itself.
+ .the_only_possible_value => return val,
+
else => unreachable,
}
}
@@ -1722,6 +1750,8 @@ pub const Value = extern union {
// TODO assert the tag is correct
return payload.val;
},
+ // Structs which have only one possible value need to consist of members which have only one possible value.
+ .the_only_possible_value => return val,
else => unreachable,
}
@@ -1737,29 +1767,54 @@ pub const Value = extern union {
/// Returns a pointer to the element value at the index.
pub fn elemPtr(self: Value, allocator: *Allocator, index: usize) !Value {
- if (self.castTag(.elem_ptr)) |elem_ptr| {
- return Tag.elem_ptr.create(allocator, .{
- .array_ptr = elem_ptr.data.array_ptr,
- .index = elem_ptr.data.index + index,
- });
+ switch (self.tag()) {
+ .elem_ptr => {
+ const elem_ptr = self.castTag(.elem_ptr).?.data;
+ return Tag.elem_ptr.create(allocator, .{
+ .array_ptr = elem_ptr.array_ptr,
+ .index = elem_ptr.index + index,
+ });
+ },
+ .slice => return Tag.elem_ptr.create(allocator, .{
+ .array_ptr = self.castTag(.slice).?.data.ptr,
+ .index = index,
+ }),
+ else => return Tag.elem_ptr.create(allocator, .{
+ .array_ptr = self,
+ .index = index,
+ }),
}
-
- return Tag.elem_ptr.create(allocator, .{
- .array_ptr = self,
- .index = index,
- });
}
pub fn isUndef(self: Value) bool {
return self.tag() == .undef;
}
- /// Valid for all types. Asserts the value is not undefined and not unreachable.
+ /// Asserts the value is not undefined and not unreachable.
+ /// Integer value 0 is considered null because of C pointers.
pub fn isNull(self: Value) bool {
return switch (self.tag()) {
.null_value => true,
.opt_payload => false,
+ // If it's not one of those two tags then it must be a C pointer value,
+ // in which case the value 0 is null and other values are non-null.
+
+ .zero,
+ .bool_false,
+ .the_only_possible_value,
+ => true,
+
+ .one,
+ .bool_true,
+ => false,
+
+ .int_u64,
+ .int_i64,
+ .int_big_positive,
+ .int_big_negative,
+ => compareWithZero(self, .eq),
+
.undef => unreachable,
.unreachable_value => unreachable,
.inferred_alloc => unreachable,
@@ -1817,16 +1872,26 @@ pub const Value = extern union {
};
}
- pub fn intToFloat(val: Value, allocator: *Allocator, dest_ty: Type, target: Target) !Value {
+ pub fn intToFloat(val: Value, arena: *Allocator, dest_ty: Type, target: Target) !Value {
switch (val.tag()) {
.undef, .zero, .one => return val,
+ .the_only_possible_value => return Value.initTag(.zero), // for i0, u0
.int_u64 => {
- return intToFloatInner(val.castTag(.int_u64).?.data, allocator, dest_ty, target);
+ return intToFloatInner(val.castTag(.int_u64).?.data, arena, dest_ty, target);
},
.int_i64 => {
- return intToFloatInner(val.castTag(.int_i64).?.data, allocator, dest_ty, target);
+ return intToFloatInner(val.castTag(.int_i64).?.data, arena, dest_ty, target);
+ },
+ .int_big_positive => {
+ const limbs = val.castTag(.int_big_positive).?.data;
+ const float = bigIntToFloat(limbs, true);
+ return floatToValue(float, arena, dest_ty, target);
+ },
+ .int_big_negative => {
+ const limbs = val.castTag(.int_big_negative).?.data;
+ const float = bigIntToFloat(limbs, false);
+ return floatToValue(float, arena, dest_ty, target);
},
- .int_big_positive, .int_big_negative => @panic("big int to float"),
else => unreachable,
}
}
@@ -1841,6 +1906,16 @@ pub const Value = extern union {
}
}
+ fn floatToValue(float: f128, arena: *Allocator, dest_ty: Type, target: Target) !Value {
+ switch (dest_ty.floatBits(target)) {
+ 16 => return Value.Tag.float_16.create(arena, @floatCast(f16, float)),
+ 32 => return Value.Tag.float_32.create(arena, @floatCast(f32, float)),
+ 64 => return Value.Tag.float_64.create(arena, @floatCast(f64, float)),
+ 128 => return Value.Tag.float_128.create(arena, float),
+ else => unreachable,
+ }
+ }
+
/// Supports both floats and ints; handles undefined.
pub fn numberAddWrap(
lhs: Value,
@@ -2081,6 +2156,32 @@ pub const Value = extern union {
};
}
+ /// Operand must be an integer; handles undefined.
+ pub fn bitwiseNot(val: Value, ty: Type, arena: *Allocator, target: Target) !Value {
+ if (val.isUndef()) return Value.initTag(.undef);
+
+ const info = ty.intInfo(target);
+
+ // TODO is this a performance issue? maybe we should try the operation without
+ // resorting to BigInt first.
+ var val_space: Value.BigIntSpace = undefined;
+ const val_bigint = val.toBigInt(&val_space);
+ const limbs = try arena.alloc(
+ std.math.big.Limb,
+ std.math.big.int.calcTwosCompLimbCount(info.bits),
+ );
+
+ var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
+ result_bigint.bitNotWrap(val_bigint, info.signedness, info.bits);
+ const result_limbs = result_bigint.limbs[0..result_bigint.len];
+
+ if (result_bigint.positive) {
+ return Value.Tag.int_big_positive.create(arena, result_limbs);
+ } else {
+ return Value.Tag.int_big_negative.create(arena, result_limbs);
+ }
+ }
+
/// operands must be integers; handles undefined.
pub fn bitwiseAnd(lhs: Value, rhs: Value, arena: *Allocator) !Value {
if (lhs.isUndef() or rhs.isUndef()) return Value.initTag(.undef);
@@ -2093,7 +2194,8 @@ pub const Value = extern union {
const rhs_bigint = rhs.toBigInt(&rhs_space);
const limbs = try arena.alloc(
std.math.big.Limb,
- std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
+ // + 1 limb needed for the case where an operand is negative
+ std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.bitAnd(lhs_bigint, rhs_bigint);
@@ -2157,7 +2259,8 @@ pub const Value = extern union {
const rhs_bigint = rhs.toBigInt(&rhs_space);
const limbs = try arena.alloc(
std.math.big.Limb,
- std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
+ // + 1 limb needed for the case where an operand is negative
+ std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
);
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
result_bigint.bitXor(lhs_bigint, rhs_bigint);
@@ -2223,19 +2326,50 @@ pub const Value = extern union {
const rhs_bigint = rhs.toBigInt(&rhs_space);
const limbs_q = try allocator.alloc(
std.math.big.Limb,
- lhs_bigint.limbs.len + rhs_bigint.limbs.len + 1,
+ lhs_bigint.limbs.len,
);
const limbs_r = try allocator.alloc(
std.math.big.Limb,
+ rhs_bigint.limbs.len,
+ );
+ const limbs_buffer = try allocator.alloc(
+ std.math.big.Limb,
+ std.math.big.int.calcDivLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
+ );
+ var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined };
+ var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined };
+ result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
+ const result_limbs = result_q.limbs[0..result_q.len];
+
+ if (result_q.positive) {
+ return Value.Tag.int_big_positive.create(allocator, result_limbs);
+ } else {
+ return Value.Tag.int_big_negative.create(allocator, result_limbs);
+ }
+ }
+
+ pub fn intDivFloor(lhs: Value, rhs: Value, allocator: *Allocator) !Value {
+ // TODO is this a performance issue? maybe we should try the operation without
+ // resorting to BigInt first.
+ var lhs_space: Value.BigIntSpace = undefined;
+ var rhs_space: Value.BigIntSpace = undefined;
+ const lhs_bigint = lhs.toBigInt(&lhs_space);
+ const rhs_bigint = rhs.toBigInt(&rhs_space);
+ const limbs_q = try allocator.alloc(
+ std.math.big.Limb,
lhs_bigint.limbs.len,
);
+ const limbs_r = try allocator.alloc(
+ std.math.big.Limb,
+ rhs_bigint.limbs.len,
+ );
const limbs_buffer = try allocator.alloc(
std.math.big.Limb,
std.math.big.int.calcDivLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len),
);
var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined };
var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined };
- result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer, null);
+ result_q.divFloor(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
const result_limbs = result_q.limbs[0..result_q.len];
if (result_q.positive) {
@@ -2254,11 +2388,13 @@ pub const Value = extern union {
const rhs_bigint = rhs.toBigInt(&rhs_space);
const limbs_q = try allocator.alloc(
std.math.big.Limb,
- lhs_bigint.limbs.len + rhs_bigint.limbs.len + 1,
+ lhs_bigint.limbs.len,
);
const limbs_r = try allocator.alloc(
std.math.big.Limb,
- lhs_bigint.limbs.len,
+ // TODO: consider reworking Sema to re-use Values rather than
+ // always producing new Value objects.
+ rhs_bigint.limbs.len,
);
const limbs_buffer = try allocator.alloc(
std.math.big.Limb,
@@ -2266,7 +2402,7 @@ pub const Value = extern union {
);
var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined };
var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined };
- result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer, null);
+ result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
const result_limbs = result_r.limbs[0..result_r.len];
if (result_r.positive) {
@@ -2285,11 +2421,11 @@ pub const Value = extern union {
const rhs_bigint = rhs.toBigInt(&rhs_space);
const limbs_q = try allocator.alloc(
std.math.big.Limb,
- lhs_bigint.limbs.len + rhs_bigint.limbs.len + 1,
+ lhs_bigint.limbs.len,
);
const limbs_r = try allocator.alloc(
std.math.big.Limb,
- lhs_bigint.limbs.len,
+ rhs_bigint.limbs.len,
);
const limbs_buffer = try allocator.alloc(
std.math.big.Limb,
@@ -2297,7 +2433,7 @@ pub const Value = extern union {
);
var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined };
var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined };
- result_q.divFloor(&result_r, lhs_bigint, rhs_bigint, limbs_buffer, null);
+ result_q.divFloor(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
const result_limbs = result_r.limbs[0..result_r.len];
if (result_r.positive) {
@@ -2555,6 +2691,68 @@ pub const Value = extern union {
}
}
+ pub fn floatDivFloor(
+ lhs: Value,
+ rhs: Value,
+ float_type: Type,
+ arena: *Allocator,
+ ) !Value {
+ switch (float_type.tag()) {
+ .f16 => {
+ const lhs_val = lhs.toFloat(f16);
+ const rhs_val = rhs.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @divFloor(lhs_val, rhs_val));
+ },
+ .f32 => {
+ const lhs_val = lhs.toFloat(f32);
+ const rhs_val = rhs.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @divFloor(lhs_val, rhs_val));
+ },
+ .f64 => {
+ const lhs_val = lhs.toFloat(f64);
+ const rhs_val = rhs.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @divFloor(lhs_val, rhs_val));
+ },
+ .f128, .comptime_float, .c_longdouble => {
+ const lhs_val = lhs.toFloat(f128);
+ const rhs_val = rhs.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @divFloor(lhs_val, rhs_val));
+ },
+ else => unreachable,
+ }
+ }
+
+ pub fn floatDivTrunc(
+ lhs: Value,
+ rhs: Value,
+ float_type: Type,
+ arena: *Allocator,
+ ) !Value {
+ switch (float_type.tag()) {
+ .f16 => {
+ const lhs_val = lhs.toFloat(f16);
+ const rhs_val = rhs.toFloat(f16);
+ return Value.Tag.float_16.create(arena, @divTrunc(lhs_val, rhs_val));
+ },
+ .f32 => {
+ const lhs_val = lhs.toFloat(f32);
+ const rhs_val = rhs.toFloat(f32);
+ return Value.Tag.float_32.create(arena, @divTrunc(lhs_val, rhs_val));
+ },
+ .f64 => {
+ const lhs_val = lhs.toFloat(f64);
+ const rhs_val = rhs.toFloat(f64);
+ return Value.Tag.float_64.create(arena, @divTrunc(lhs_val, rhs_val));
+ },
+ .f128, .comptime_float, .c_longdouble => {
+ const lhs_val = lhs.toFloat(f128);
+ const rhs_val = rhs.toFloat(f128);
+ return Value.Tag.float_128.create(arena, @divTrunc(lhs_val, rhs_val));
+ },
+ else => unreachable,
+ }
+ }
+
pub fn floatMul(
lhs: Value,
rhs: Value,
@@ -2798,6 +2996,7 @@ pub const Value = extern union {
pub const negative_one: Value = .{ .ptr_otherwise = &negative_one_payload.base };
pub const undef = initTag(.undef);
pub const @"void" = initTag(.void_value);
+ pub const @"null" = initTag(.null_value);
};
var negative_one_payload: Value.Payload.I64 = .{
diff --git a/src/zig_clang.cpp b/src/zig_clang.cpp
index f5b04ddd9d..deefc04c0a 100644
--- a/src/zig_clang.cpp
+++ b/src/zig_clang.cpp
@@ -2120,6 +2120,11 @@ bool ZigClangFunctionDecl_isInlineSpecified(const struct ZigClangFunctionDecl *s
return casted->isInlineSpecified();
}
+bool ZigClangFunctionDecl_hasAlwaysInlineAttr(const struct ZigClangFunctionDecl *self) {
+ auto casted = reinterpret_cast<const clang::FunctionDecl *>(self);
+ return casted->hasAttr<clang::AlwaysInlineAttr>();
+}
+
const char* ZigClangFunctionDecl_getSectionAttribute(const struct ZigClangFunctionDecl *self, size_t *len) {
auto casted = reinterpret_cast<const clang::FunctionDecl *>(self);
if (const clang::SectionAttr *SA = casted->getAttr<clang::SectionAttr>()) {
diff --git a/src/zig_clang.h b/src/zig_clang.h
index f704b50b18..af44e51cdd 100644
--- a/src/zig_clang.h
+++ b/src/zig_clang.h
@@ -1111,6 +1111,7 @@ ZIG_EXTERN_C bool ZigClangFunctionDecl_doesDeclarationForceExternallyVisibleDefi
ZIG_EXTERN_C bool ZigClangFunctionDecl_isThisDeclarationADefinition(const struct ZigClangFunctionDecl *);
ZIG_EXTERN_C bool ZigClangFunctionDecl_doesThisDeclarationHaveABody(const struct ZigClangFunctionDecl *);
ZIG_EXTERN_C bool ZigClangFunctionDecl_isInlineSpecified(const struct ZigClangFunctionDecl *);
+ZIG_EXTERN_C bool ZigClangFunctionDecl_hasAlwaysInlineAttr(const struct ZigClangFunctionDecl *);
ZIG_EXTERN_C bool ZigClangFunctionDecl_isDefined(const struct ZigClangFunctionDecl *);
ZIG_EXTERN_C const struct ZigClangFunctionDecl* ZigClangFunctionDecl_getDefinition(const struct ZigClangFunctionDecl *);
ZIG_EXTERN_C const char* ZigClangFunctionDecl_getSectionAttribute(const struct ZigClangFunctionDecl *, size_t *);
diff --git a/test/behavior.zig b/test/behavior.zig
index f94f47c703..05e05d51fc 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -2,6 +2,7 @@ const builtin = @import("builtin");
test {
// Tests that pass for both.
+ _ = @import("behavior/align.zig");
_ = @import("behavior/array.zig");
_ = @import("behavior/atomics.zig");
_ = @import("behavior/basic.zig");
@@ -32,22 +33,29 @@ test {
_ = @import("behavior/enum.zig");
_ = @import("behavior/error.zig");
_ = @import("behavior/eval.zig");
+ _ = @import("behavior/floatop.zig");
+ _ = @import("behavior/fn.zig");
_ = @import("behavior/for.zig");
_ = @import("behavior/generics.zig");
_ = @import("behavior/hasdecl.zig");
+ _ = @import("behavior/hasfield.zig");
_ = @import("behavior/if.zig");
_ = @import("behavior/math.zig");
_ = @import("behavior/maximum_minimum.zig");
_ = @import("behavior/member_func.zig");
+ _ = @import("behavior/null.zig");
_ = @import("behavior/optional.zig");
_ = @import("behavior/pointers.zig");
+ _ = @import("behavior/ptrcast.zig");
_ = @import("behavior/pub_enum.zig");
- _ = @import("behavior/slice.zig");
+ _ = @import("behavior/saturating_arithmetic.zig");
_ = @import("behavior/sizeof_and_typeof.zig");
+ _ = @import("behavior/slice.zig");
_ = @import("behavior/struct.zig");
_ = @import("behavior/switch.zig");
_ = @import("behavior/this.zig");
_ = @import("behavior/translate_c_macros.zig");
+ _ = @import("behavior/truncate.zig");
_ = @import("behavior/underscore.zig");
_ = @import("behavior/union.zig");
_ = @import("behavior/usingnamespace.zig");
@@ -58,7 +66,7 @@ test {
// When all comptime_memory.zig tests pass, #9646 can be closed.
// _ = @import("behavior/comptime_memory.zig");
} else {
- _ = @import("behavior/align.zig");
+ _ = @import("behavior/align_stage1.zig");
_ = @import("behavior/alignof.zig");
_ = @import("behavior/array_stage1.zig");
if (builtin.os.tag != .wasi) {
@@ -110,6 +118,7 @@ test {
_ = @import("behavior/bugs/7047.zig");
_ = @import("behavior/bugs/7250.zig");
_ = @import("behavior/bugs/9584.zig");
+ _ = @import("behavior/bugs/9967.zig");
_ = @import("behavior/byteswap.zig");
_ = @import("behavior/byval_arg_var.zig");
_ = @import("behavior/call_stage1.zig");
@@ -120,13 +129,12 @@ test {
_ = @import("behavior/error_stage1.zig");
_ = @import("behavior/eval_stage1.zig");
_ = @import("behavior/field_parent_ptr.zig");
- _ = @import("behavior/floatop.zig");
- _ = @import("behavior/fn.zig");
- _ = @import("behavior/fn_in_struct_in_comptime.zig");
+ _ = @import("behavior/floatop_stage1.zig");
+ _ = @import("behavior/fn_stage1.zig");
_ = @import("behavior/fn_delegation.zig");
+ _ = @import("behavior/fn_in_struct_in_comptime.zig");
_ = @import("behavior/for_stage1.zig");
_ = @import("behavior/generics_stage1.zig");
- _ = @import("behavior/hasfield.zig");
_ = @import("behavior/if_stage1.zig");
_ = @import("behavior/import.zig");
_ = @import("behavior/incomplete_struct_param_tld.zig");
@@ -137,30 +145,24 @@ test {
_ = @import("behavior/misc.zig");
_ = @import("behavior/muladd.zig");
_ = @import("behavior/namespace_depends_on_compile_var.zig");
- _ = @import("behavior/null.zig");
+ _ = @import("behavior/null_stage1.zig");
_ = @import("behavior/optional_stage1.zig");
_ = @import("behavior/pointers_stage1.zig");
_ = @import("behavior/popcount.zig");
- _ = @import("behavior/ptrcast.zig");
+ _ = @import("behavior/ptrcast_stage1.zig");
_ = @import("behavior/ref_var_in_if_after_if_2nd_switch_prong.zig");
_ = @import("behavior/reflection.zig");
- {
- // Checklist for getting saturating_arithmetic.zig passing for stage2:
- // * add __muloti4 to compiler-rt
- _ = @import("behavior/saturating_arithmetic.zig");
- }
- _ = @import("behavior/shuffle.zig");
_ = @import("behavior/select.zig");
+ _ = @import("behavior/shuffle.zig");
_ = @import("behavior/sizeof_and_typeof_stage1.zig");
- _ = @import("behavior/slice_stage1.zig");
_ = @import("behavior/slice_sentinel_comptime.zig");
- _ = @import("behavior/struct_stage1.zig");
+ _ = @import("behavior/slice_stage1.zig");
_ = @import("behavior/struct_contains_null_ptr_itself.zig");
_ = @import("behavior/struct_contains_slice_of_itself.zig");
- _ = @import("behavior/switch_stage1.zig");
+ _ = @import("behavior/struct_stage1.zig");
_ = @import("behavior/switch_prong_err_enum.zig");
_ = @import("behavior/switch_prong_implicit_cast.zig");
- _ = @import("behavior/truncate.zig");
+ _ = @import("behavior/switch_stage1.zig");
_ = @import("behavior/try.zig");
_ = @import("behavior/tuple.zig");
_ = @import("behavior/type.zig");
diff --git a/test/behavior/align.zig b/test/behavior/align.zig
index e99cf1e0e2..76a024b88f 100644
--- a/test/behavior/align.zig
+++ b/test/behavior/align.zig
@@ -19,42 +19,6 @@ test "global variable alignment" {
}
}
-fn derp() align(@sizeOf(usize) * 2) i32 {
- return 1234;
-}
-fn noop1() align(1) void {}
-fn noop4() align(4) void {}
-
-test "function alignment" {
- // function alignment is a compile error on wasm32/wasm64
- if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest;
-
- try expect(derp() == 1234);
- try expect(@TypeOf(noop1) == fn () align(1) void);
- try expect(@TypeOf(noop4) == fn () align(4) void);
- noop1();
- noop4();
-}
-
-var baz: packed struct {
- a: u32,
- b: u32,
-} = undefined;
-
-test "packed struct alignment" {
- try expect(@TypeOf(&baz.b) == *align(1) u32);
-}
-
-const blah: packed struct {
- a: u3,
- b: u3,
- c: u2,
-} = undefined;
-
-test "bit field alignment" {
- try expect(@TypeOf(&blah.b) == *align(1:3:1) const u3);
-}
-
test "default alignment allows unspecified in type syntax" {
try expect(*u32 == *align(@alignOf(u32)) u32);
}
@@ -78,20 +42,6 @@ fn addUnalignedSlice(a: []align(1) const u32, b: []align(1) const u32) u32 {
return a[0] + b[0];
}
-test "specifying alignment allows pointer cast" {
- try testBytesAlign(0x33);
-}
-fn testBytesAlign(b: u8) !void {
- var bytes align(4) = [_]u8{
- b,
- b,
- b,
- b,
- };
- const ptr = @ptrCast(*u32, &bytes[0]);
- try expect(ptr.* == 0x33333333);
-}
-
test "@alignCast pointers" {
var x: u32 align(4) = 1;
expectsOnly1(&x);
@@ -104,11 +54,17 @@ fn expects4(x: *align(4) u32) void {
x.* += 1;
}
+test "specifying alignment allows pointer cast" {
+ try testBytesAlign(0x33);
+}
+fn testBytesAlign(b: u8) !void {
+ var bytes align(4) = [_]u8{ b, b, b, b };
+ const ptr = @ptrCast(*u32, &bytes[0]);
+ try expect(ptr.* == 0x33333333);
+}
+
test "@alignCast slices" {
- var array align(4) = [_]u32{
- 1,
- 1,
- };
+ var array align(4) = [_]u32{ 1, 1 };
const slice = array[0..];
sliceExpectsOnly1(slice);
try expect(slice[0] == 2);
@@ -120,110 +76,6 @@ fn sliceExpects4(slice: []align(4) u32) void {
slice[0] += 1;
}
-test "implicitly decreasing fn alignment" {
- // function alignment is a compile error on wasm32/wasm64
- if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest;
-
- try testImplicitlyDecreaseFnAlign(alignedSmall, 1234);
- try testImplicitlyDecreaseFnAlign(alignedBig, 5678);
-}
-
-fn testImplicitlyDecreaseFnAlign(ptr: fn () align(1) i32, answer: i32) !void {
- try expect(ptr() == answer);
-}
-
-fn alignedSmall() align(8) i32 {
- return 1234;
-}
-fn alignedBig() align(16) i32 {
- return 5678;
-}
-
-test "@alignCast functions" {
- // function alignment is a compile error on wasm32/wasm64
- if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest;
- if (native_arch == .thumb) return error.SkipZigTest;
-
- try expect(fnExpectsOnly1(simple4) == 0x19);
-}
-fn fnExpectsOnly1(ptr: fn () align(1) i32) i32 {
- return fnExpects4(@alignCast(4, ptr));
-}
-fn fnExpects4(ptr: fn () align(4) i32) i32 {
- return ptr();
-}
-fn simple4() align(4) i32 {
- return 0x19;
-}
-
-test "generic function with align param" {
- // function alignment is a compile error on wasm32/wasm64
- if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest;
- if (native_arch == .thumb) return error.SkipZigTest;
-
- try expect(whyWouldYouEverDoThis(1) == 0x1);
- try expect(whyWouldYouEverDoThis(4) == 0x1);
- try expect(whyWouldYouEverDoThis(8) == 0x1);
-}
-
-fn whyWouldYouEverDoThis(comptime align_bytes: u8) align(align_bytes) u8 {
- _ = align_bytes;
- return 0x1;
-}
-
-test "@ptrCast preserves alignment of bigger source" {
- var x: u32 align(16) = 1234;
- const ptr = @ptrCast(*u8, &x);
- try expect(@TypeOf(ptr) == *align(16) u8);
-}
-
-test "runtime known array index has best alignment possible" {
- // take full advantage of over-alignment
- var array align(4) = [_]u8{ 1, 2, 3, 4 };
- try expect(@TypeOf(&array[0]) == *align(4) u8);
- try expect(@TypeOf(&array[1]) == *u8);
- try expect(@TypeOf(&array[2]) == *align(2) u8);
- try expect(@TypeOf(&array[3]) == *u8);
-
- // because align is too small but we still figure out to use 2
- var bigger align(2) = [_]u64{ 1, 2, 3, 4 };
- try expect(@TypeOf(&bigger[0]) == *align(2) u64);
- try expect(@TypeOf(&bigger[1]) == *align(2) u64);
- try expect(@TypeOf(&bigger[2]) == *align(2) u64);
- try expect(@TypeOf(&bigger[3]) == *align(2) u64);
-
- // because pointer is align 2 and u32 align % 2 == 0 we can assume align 2
- var smaller align(2) = [_]u32{ 1, 2, 3, 4 };
- var runtime_zero: usize = 0;
- comptime try expect(@TypeOf(smaller[runtime_zero..]) == []align(2) u32);
- comptime try expect(@TypeOf(smaller[runtime_zero..].ptr) == [*]align(2) u32);
- try testIndex(smaller[runtime_zero..].ptr, 0, *align(2) u32);
- try testIndex(smaller[runtime_zero..].ptr, 1, *align(2) u32);
- try testIndex(smaller[runtime_zero..].ptr, 2, *align(2) u32);
- try testIndex(smaller[runtime_zero..].ptr, 3, *align(2) u32);
-
- // has to use ABI alignment because index known at runtime only
- try testIndex2(array[runtime_zero..].ptr, 0, *u8);
- try testIndex2(array[runtime_zero..].ptr, 1, *u8);
- try testIndex2(array[runtime_zero..].ptr, 2, *u8);
- try testIndex2(array[runtime_zero..].ptr, 3, *u8);
-}
-fn testIndex(smaller: [*]align(2) u32, index: usize, comptime T: type) !void {
- comptime try expect(@TypeOf(&smaller[index]) == T);
-}
-fn testIndex2(ptr: [*]align(4) u8, index: usize, comptime T: type) !void {
- comptime try expect(@TypeOf(&ptr[index]) == T);
-}
-
-test "alignstack" {
- try expect(fnWithAlignedStack() == 1234);
-}
-
-fn fnWithAlignedStack() i32 {
- @setAlignStack(256);
- return 1234;
-}
-
test "alignment of structs" {
try expect(@alignOf(struct {
a: i32,
@@ -231,15 +83,6 @@ test "alignment of structs" {
}) == @alignOf(usize));
}
-test "alignment of function with c calling convention" {
- var runtime_nothing = nothing;
- const casted1 = @ptrCast(*const u8, runtime_nothing);
- const casted2 = @ptrCast(fn () callconv(.C) void, casted1);
- casted2();
-}
-
-fn nothing() callconv(.C) void {}
-
test "return error union with 128-bit integer" {
try expect(3 == try give());
}
@@ -277,75 +120,3 @@ test "size of extern struct with 128-bit field" {
}) == 32);
}
}
-
-const DefaultAligned = struct {
- nevermind: u32,
- badguy: i128,
-};
-
-test "read 128-bit field from default aligned struct in stack memory" {
- var default_aligned = DefaultAligned{
- .nevermind = 1,
- .badguy = 12,
- };
- try expect((@ptrToInt(&default_aligned.badguy) % 16) == 0);
- try expect(12 == default_aligned.badguy);
-}
-
-var default_aligned_global = DefaultAligned{
- .nevermind = 1,
- .badguy = 12,
-};
-
-test "read 128-bit field from default aligned struct in global memory" {
- try expect((@ptrToInt(&default_aligned_global.badguy) % 16) == 0);
- try expect(12 == default_aligned_global.badguy);
-}
-
-test "struct field explicit alignment" {
- const S = struct {
- const Node = struct {
- next: *Node,
- massive_byte: u8 align(64),
- };
- };
-
- var node: S.Node = undefined;
- node.massive_byte = 100;
- try expect(node.massive_byte == 100);
- comptime try expect(@TypeOf(&node.massive_byte) == *align(64) u8);
- try expect(@ptrToInt(&node.massive_byte) % 64 == 0);
-}
-
-test "align(@alignOf(T)) T does not force resolution of T" {
- const S = struct {
- const A = struct {
- a: *align(@alignOf(A)) A,
- };
- fn doTheTest() void {
- suspend {
- resume @frame();
- }
- _ = bar(@Frame(doTheTest));
- }
- fn bar(comptime T: type) *align(@alignOf(T)) T {
- ok = true;
- return undefined;
- }
-
- var ok = false;
- };
- _ = async S.doTheTest();
- try expect(S.ok);
-}
-
-test "align(N) on functions" {
- // function alignment is a compile error on wasm32/wasm64
- if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest;
- if (native_arch == .thumb) return error.SkipZigTest;
-
- try expect((@ptrToInt(overaligned_fn) & (0x1000 - 1)) == 0);
-}
-fn overaligned_fn() align(0x1000) i32 {
- return 42;
-}
diff --git a/test/behavior/align_stage1.zig b/test/behavior/align_stage1.zig
new file mode 100644
index 0000000000..a4af92368e
--- /dev/null
+++ b/test/behavior/align_stage1.zig
@@ -0,0 +1,225 @@
+const std = @import("std");
+const expect = std.testing.expect;
+const builtin = @import("builtin");
+const native_arch = builtin.target.cpu.arch;
+
+fn derp() align(@sizeOf(usize) * 2) i32 {
+ return 1234;
+}
+fn noop1() align(1) void {}
+fn noop4() align(4) void {}
+
+test "function alignment" {
+ // function alignment is a compile error on wasm32/wasm64
+ if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest;
+
+ try expect(derp() == 1234);
+ try expect(@TypeOf(noop1) == fn () align(1) void);
+ try expect(@TypeOf(noop4) == fn () align(4) void);
+ noop1();
+ noop4();
+}
+
+var baz: packed struct {
+ a: u32,
+ b: u32,
+} = undefined;
+
+test "packed struct alignment" {
+ try expect(@TypeOf(&baz.b) == *align(1) u32);
+}
+
+const blah: packed struct {
+ a: u3,
+ b: u3,
+ c: u2,
+} = undefined;
+
+test "bit field alignment" {
+ try expect(@TypeOf(&blah.b) == *align(1:3:1) const u3);
+}
+
+test "implicitly decreasing fn alignment" {
+ // function alignment is a compile error on wasm32/wasm64
+ if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest;
+
+ try testImplicitlyDecreaseFnAlign(alignedSmall, 1234);
+ try testImplicitlyDecreaseFnAlign(alignedBig, 5678);
+}
+
+fn testImplicitlyDecreaseFnAlign(ptr: fn () align(1) i32, answer: i32) !void {
+ try expect(ptr() == answer);
+}
+
+fn alignedSmall() align(8) i32 {
+ return 1234;
+}
+fn alignedBig() align(16) i32 {
+ return 5678;
+}
+
+test "@alignCast functions" {
+ // function alignment is a compile error on wasm32/wasm64
+ if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest;
+ if (native_arch == .thumb) return error.SkipZigTest;
+
+ try expect(fnExpectsOnly1(simple4) == 0x19);
+}
+fn fnExpectsOnly1(ptr: fn () align(1) i32) i32 {
+ return fnExpects4(@alignCast(4, ptr));
+}
+fn fnExpects4(ptr: fn () align(4) i32) i32 {
+ return ptr();
+}
+fn simple4() align(4) i32 {
+ return 0x19;
+}
+
+test "generic function with align param" {
+ // function alignment is a compile error on wasm32/wasm64
+ if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest;
+ if (native_arch == .thumb) return error.SkipZigTest;
+
+ try expect(whyWouldYouEverDoThis(1) == 0x1);
+ try expect(whyWouldYouEverDoThis(4) == 0x1);
+ try expect(whyWouldYouEverDoThis(8) == 0x1);
+}
+
+fn whyWouldYouEverDoThis(comptime align_bytes: u8) align(align_bytes) u8 {
+ _ = align_bytes;
+ return 0x1;
+}
+
+test "@ptrCast preserves alignment of bigger source" {
+ var x: u32 align(16) = 1234;
+ const ptr = @ptrCast(*u8, &x);
+ try expect(@TypeOf(ptr) == *align(16) u8);
+}
+
+test "runtime known array index has best alignment possible" {
+ // take full advantage of over-alignment
+ var array align(4) = [_]u8{ 1, 2, 3, 4 };
+ try expect(@TypeOf(&array[0]) == *align(4) u8);
+ try expect(@TypeOf(&array[1]) == *u8);
+ try expect(@TypeOf(&array[2]) == *align(2) u8);
+ try expect(@TypeOf(&array[3]) == *u8);
+
+ // because align is too small but we still figure out to use 2
+ var bigger align(2) = [_]u64{ 1, 2, 3, 4 };
+ try expect(@TypeOf(&bigger[0]) == *align(2) u64);
+ try expect(@TypeOf(&bigger[1]) == *align(2) u64);
+ try expect(@TypeOf(&bigger[2]) == *align(2) u64);
+ try expect(@TypeOf(&bigger[3]) == *align(2) u64);
+
+ // because pointer is align 2 and u32 align % 2 == 0 we can assume align 2
+ var smaller align(2) = [_]u32{ 1, 2, 3, 4 };
+ var runtime_zero: usize = 0;
+ comptime try expect(@TypeOf(smaller[runtime_zero..]) == []align(2) u32);
+ comptime try expect(@TypeOf(smaller[runtime_zero..].ptr) == [*]align(2) u32);
+ try testIndex(smaller[runtime_zero..].ptr, 0, *align(2) u32);
+ try testIndex(smaller[runtime_zero..].ptr, 1, *align(2) u32);
+ try testIndex(smaller[runtime_zero..].ptr, 2, *align(2) u32);
+ try testIndex(smaller[runtime_zero..].ptr, 3, *align(2) u32);
+
+ // has to use ABI alignment because index known at runtime only
+ try testIndex2(array[runtime_zero..].ptr, 0, *u8);
+ try testIndex2(array[runtime_zero..].ptr, 1, *u8);
+ try testIndex2(array[runtime_zero..].ptr, 2, *u8);
+ try testIndex2(array[runtime_zero..].ptr, 3, *u8);
+}
+fn testIndex(smaller: [*]align(2) u32, index: usize, comptime T: type) !void {
+ comptime try expect(@TypeOf(&smaller[index]) == T);
+}
+fn testIndex2(ptr: [*]align(4) u8, index: usize, comptime T: type) !void {
+ comptime try expect(@TypeOf(&ptr[index]) == T);
+}
+
+test "alignstack" {
+ try expect(fnWithAlignedStack() == 1234);
+}
+
+fn fnWithAlignedStack() i32 {
+ @setAlignStack(256);
+ return 1234;
+}
+
+test "alignment of function with c calling convention" {
+ var runtime_nothing = nothing;
+ const casted1 = @ptrCast(*const u8, runtime_nothing);
+ const casted2 = @ptrCast(fn () callconv(.C) void, casted1);
+ casted2();
+}
+
+fn nothing() callconv(.C) void {}
+
+const DefaultAligned = struct {
+ nevermind: u32,
+ badguy: i128,
+};
+
+test "read 128-bit field from default aligned struct in stack memory" {
+ var default_aligned = DefaultAligned{
+ .nevermind = 1,
+ .badguy = 12,
+ };
+ try expect((@ptrToInt(&default_aligned.badguy) % 16) == 0);
+ try expect(12 == default_aligned.badguy);
+}
+
+var default_aligned_global = DefaultAligned{
+ .nevermind = 1,
+ .badguy = 12,
+};
+
+test "read 128-bit field from default aligned struct in global memory" {
+ try expect((@ptrToInt(&default_aligned_global.badguy) % 16) == 0);
+ try expect(12 == default_aligned_global.badguy);
+}
+
+test "struct field explicit alignment" {
+ const S = struct {
+ const Node = struct {
+ next: *Node,
+ massive_byte: u8 align(64),
+ };
+ };
+
+ var node: S.Node = undefined;
+ node.massive_byte = 100;
+ try expect(node.massive_byte == 100);
+ comptime try expect(@TypeOf(&node.massive_byte) == *align(64) u8);
+ try expect(@ptrToInt(&node.massive_byte) % 64 == 0);
+}
+
+test "align(@alignOf(T)) T does not force resolution of T" {
+ const S = struct {
+ const A = struct {
+ a: *align(@alignOf(A)) A,
+ };
+ fn doTheTest() void {
+ suspend {
+ resume @frame();
+ }
+ _ = bar(@Frame(doTheTest));
+ }
+ fn bar(comptime T: type) *align(@alignOf(T)) T {
+ ok = true;
+ return undefined;
+ }
+
+ var ok = false;
+ };
+ _ = async S.doTheTest();
+ try expect(S.ok);
+}
+
+test "align(N) on functions" {
+ // function alignment is a compile error on wasm32/wasm64
+ if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest;
+ if (native_arch == .thumb) return error.SkipZigTest;
+
+ try expect((@ptrToInt(overaligned_fn) & (0x1000 - 1)) == 0);
+}
+fn overaligned_fn() align(0x1000) i32 {
+ return 42;
+}
diff --git a/test/behavior/array.zig b/test/behavior/array.zig
index 0b4babca67..3ff339b140 100644
--- a/test/behavior/array.zig
+++ b/test/behavior/array.zig
@@ -112,3 +112,55 @@ test "void arrays" {
try expect(@sizeOf(@TypeOf(array)) == 0);
try expect(array.len == 4);
}
+
+test "nested arrays" {
+ const array_of_strings = [_][]const u8{ "hello", "this", "is", "my", "thing" };
+ for (array_of_strings) |s, i| {
+ if (i == 0) try expect(mem.eql(u8, s, "hello"));
+ if (i == 1) try expect(mem.eql(u8, s, "this"));
+ if (i == 2) try expect(mem.eql(u8, s, "is"));
+ if (i == 3) try expect(mem.eql(u8, s, "my"));
+ if (i == 4) try expect(mem.eql(u8, s, "thing"));
+ }
+}
+
+var s_array: [8]Sub = undefined;
+const Sub = struct { b: u8 };
+const Str = struct { a: []Sub };
+test "set global var array via slice embedded in struct" {
+ var s = Str{ .a = s_array[0..] };
+
+ s.a[0].b = 1;
+ s.a[1].b = 2;
+ s.a[2].b = 3;
+
+ try expect(s_array[0].b == 1);
+ try expect(s_array[1].b == 2);
+ try expect(s_array[2].b == 3);
+}
+
+test "implicit comptime in array type size" {
+ var arr: [plusOne(10)]bool = undefined;
+ try expect(arr.len == 11);
+}
+
+fn plusOne(x: u32) u32 {
+ return x + 1;
+}
+
+test "read/write through global variable array of struct fields initialized via array mult" {
+ const S = struct {
+ fn doTheTest() !void {
+ try expect(storage[0].term == 1);
+ storage[0] = MyStruct{ .term = 123 };
+ try expect(storage[0].term == 123);
+ }
+
+ pub const MyStruct = struct {
+ term: usize,
+ };
+
+ var storage: [1]MyStruct = [_]MyStruct{MyStruct{ .term = 1 }} ** 1;
+ };
+ try S.doTheTest();
+}
diff --git a/test/behavior/array_stage1.zig b/test/behavior/array_stage1.zig
index 62860a9deb..b374c9dd06 100644
--- a/test/behavior/array_stage1.zig
+++ b/test/behavior/array_stage1.zig
@@ -4,32 +4,6 @@ const mem = std.mem;
const expect = testing.expect;
const expectEqual = testing.expectEqual;
-test "nested arrays" {
- const array_of_strings = [_][]const u8{ "hello", "this", "is", "my", "thing" };
- for (array_of_strings) |s, i| {
- if (i == 0) try expect(mem.eql(u8, s, "hello"));
- if (i == 1) try expect(mem.eql(u8, s, "this"));
- if (i == 2) try expect(mem.eql(u8, s, "is"));
- if (i == 3) try expect(mem.eql(u8, s, "my"));
- if (i == 4) try expect(mem.eql(u8, s, "thing"));
- }
-}
-
-var s_array: [8]Sub = undefined;
-const Sub = struct { b: u8 };
-const Str = struct { a: []Sub };
-test "set global var array via slice embedded in struct" {
- var s = Str{ .a = s_array[0..] };
-
- s.a[0].b = 1;
- s.a[1].b = 2;
- s.a[2].b = 3;
-
- try expect(s_array[0].b == 1);
- try expect(s_array[1].b == 2);
- try expect(s_array[2].b == 3);
-}
-
test "single-item pointer to array indexing and slicing" {
try testSingleItemPtrArrayIndexSlice();
comptime try testSingleItemPtrArrayIndexSlice();
@@ -75,15 +49,6 @@ test "comptime evaluating function that takes array by value" {
_ = comptime testArrayByValAtComptime(arr);
}
-test "implicit comptime in array type size" {
- var arr: [plusOne(10)]bool = undefined;
- try expect(arr.len == 11);
-}
-
-fn plusOne(x: u32) u32 {
- return x + 1;
-}
-
test "runtime initialize array elem and then implicit cast to slice" {
var two: i32 = 2;
const x: []const i32 = &[_]i32{two};
@@ -171,23 +136,6 @@ test "double nested array to const slice cast in array literal" {
comptime try S.entry(2);
}
-test "read/write through global variable array of struct fields initialized via array mult" {
- const S = struct {
- fn doTheTest() !void {
- try expect(storage[0].term == 1);
- storage[0] = MyStruct{ .term = 123 };
- try expect(storage[0].term == 123);
- }
-
- pub const MyStruct = struct {
- term: usize,
- };
-
- var storage: [1]MyStruct = [_]MyStruct{MyStruct{ .term = 1 }} ** 1;
- };
- try S.doTheTest();
-}
-
test "implicit cast zero sized array ptr to slice" {
{
var b = "".*;
diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig
index ae4cb26354..f3d511916f 100644
--- a/test/behavior/basic.zig
+++ b/test/behavior/basic.zig
@@ -2,6 +2,7 @@ const std = @import("std");
const builtin = @import("builtin");
const mem = std.mem;
const expect = std.testing.expect;
+const expectEqualStrings = std.testing.expectEqualStrings;
// normal comment
@@ -446,3 +447,15 @@ test "self reference through fn ptr field" {
a.f = S.foo;
try expect(a.f(a) == 12);
}
+
+test "global variable initialized to global variable array element" {
+ try expect(global_ptr == &gdt[0]);
+}
+const GDTEntry = struct {
+ field: i32,
+};
+var gdt = [_]GDTEntry{
+ GDTEntry{ .field = 1 },
+ GDTEntry{ .field = 2 },
+};
+var global_ptr = &gdt[0];
diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig
index ff64652074..3a2f8a0a8a 100644
--- a/test/behavior/bitcast.zig
+++ b/test/behavior/bitcast.zig
@@ -42,3 +42,25 @@ test "nested bitcast" {
try S.foo(42);
comptime try S.foo(42);
}
+
+test "@bitCast enum to its integer type" {
+ const SOCK = enum(c_int) {
+ A,
+ B,
+
+ fn testBitCastExternEnum() !void {
+ var SOCK_DGRAM = @This().B;
+ var sock_dgram = @bitCast(c_int, SOCK_DGRAM);
+ try expect(sock_dgram == 1);
+ }
+ };
+
+ try SOCK.testBitCastExternEnum();
+ comptime try SOCK.testBitCastExternEnum();
+}
+
+// issue #3010: compiler segfault
+test "bitcast literal [4]u8 param to u32" {
+ const ip = @bitCast(u32, [_]u8{ 255, 255, 255, 255 });
+ try expect(ip == maxInt(u32));
+}
diff --git a/test/behavior/bitcast_stage1.zig b/test/behavior/bitcast_stage1.zig
index 98db00a4dd..92dd88e34f 100644
--- a/test/behavior/bitcast_stage1.zig
+++ b/test/behavior/bitcast_stage1.zig
@@ -5,22 +5,6 @@ const expectEqual = std.testing.expectEqual;
const maxInt = std.math.maxInt;
const native_endian = builtin.target.cpu.arch.endian();
-test "@bitCast enum to its integer type" {
- const SOCK = enum(c_int) {
- A,
- B,
-
- fn testBitCastExternEnum() !void {
- var SOCK_DGRAM = @This().B;
- var sock_dgram = @bitCast(c_int, SOCK_DGRAM);
- try expect(sock_dgram == 1);
- }
- };
-
- try SOCK.testBitCastExternEnum();
- comptime try SOCK.testBitCastExternEnum();
-}
-
test "@bitCast packed structs at runtime and comptime" {
const Full = packed struct {
number: u16,
@@ -111,12 +95,6 @@ test "implicit cast to error union by returning" {
comptime try S.entry();
}
-// issue #3010: compiler segfault
-test "bitcast literal [4]u8 param to u32" {
- const ip = @bitCast(u32, [_]u8{ 255, 255, 255, 255 });
- try expect(ip == maxInt(u32));
-}
-
test "bitcast packed struct literal to byte" {
const Foo = packed struct {
value: u8,
diff --git a/test/behavior/bugs/9967.zig b/test/behavior/bugs/9967.zig
new file mode 100644
index 0000000000..d2c5909689
--- /dev/null
+++ b/test/behavior/bugs/9967.zig
@@ -0,0 +1,8 @@
+const std = @import("std");
+
+test "nested breaks to same labeled block" {
+ const a = blk: {
+ break :blk break :blk @as(u32, 1);
+ };
+ try std.testing.expectEqual(a, 1);
+}
diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig
index 3b6b5a02e7..42e6016061 100644
--- a/test/behavior/cast.zig
+++ b/test/behavior/cast.zig
@@ -65,3 +65,58 @@ test "implicit cast comptime_int to comptime_float" {
comptime try expect(@as(comptime_float, 10) == @as(f32, 10));
try expect(2 == 2.0);
}
+
+test "pointer reinterpret const float to int" {
+ // The hex representation is 0x3fe3333333333303.
+ const float: f64 = 5.99999999999994648725e-01;
+ const float_ptr = &float;
+ const int_ptr = @ptrCast(*const i32, float_ptr);
+ const int_val = int_ptr.*;
+ if (native_endian == .Little)
+ try expect(int_val == 0x33333303)
+ else
+ try expect(int_val == 0x3fe33333);
+}
+
+test "comptime_int @intToFloat" {
+ {
+ const result = @intToFloat(f16, 1234);
+ try expect(@TypeOf(result) == f16);
+ try expect(result == 1234.0);
+ }
+ {
+ const result = @intToFloat(f32, 1234);
+ try expect(@TypeOf(result) == f32);
+ try expect(result == 1234.0);
+ }
+ {
+ const result = @intToFloat(f64, 1234);
+ try expect(@TypeOf(result) == f64);
+ try expect(result == 1234.0);
+ }
+ {
+ const result = @intToFloat(f128, 1234);
+ try expect(@TypeOf(result) == f128);
+ try expect(result == 1234.0);
+ }
+ // big comptime_int (> 64 bits) to f128 conversion
+ {
+ const result = @intToFloat(f128, 0x1_0000_0000_0000_0000);
+ try expect(@TypeOf(result) == f128);
+ try expect(result == 0x1_0000_0000_0000_0000.0);
+ }
+}
+
+test "implicit cast from [*]T to ?*c_void" {
+ var a = [_]u8{ 3, 2, 1 };
+ var runtime_zero: usize = 0;
+ incrementVoidPtrArray(a[runtime_zero..].ptr, 3);
+ try expect(std.mem.eql(u8, &a, &[_]u8{ 4, 3, 2 }));
+}
+
+fn incrementVoidPtrArray(array: ?*c_void, len: usize) void {
+ var n: usize = 0;
+ while (n < len) : (n += 1) {
+ @ptrCast([*]u8, array.?)[n] += 1;
+ }
+}
diff --git a/test/behavior/cast_stage1.zig b/test/behavior/cast_stage1.zig
index 0705047937..8eb6302e60 100644
--- a/test/behavior/cast_stage1.zig
+++ b/test/behavior/cast_stage1.zig
@@ -5,18 +5,6 @@ const maxInt = std.math.maxInt;
const Vector = std.meta.Vector;
const native_endian = @import("builtin").target.cpu.arch.endian();
-test "pointer reinterpret const float to int" {
- // The hex representation is 0x3fe3333333333303.
- const float: f64 = 5.99999999999994648725e-01;
- const float_ptr = &float;
- const int_ptr = @ptrCast(*const i32, float_ptr);
- const int_val = int_ptr.*;
- if (native_endian == .Little)
- try expect(int_val == 0x33333303)
- else
- try expect(int_val == 0x3fe33333);
-}
-
test "implicitly cast indirect pointer to maybe-indirect pointer" {
const S = struct {
const Self = @This();
@@ -368,35 +356,6 @@ test "vector casts" {
comptime try S.doTheTestFloat();
}
-test "comptime_int @intToFloat" {
- {
- const result = @intToFloat(f16, 1234);
- try expect(@TypeOf(result) == f16);
- try expect(result == 1234.0);
- }
- {
- const result = @intToFloat(f32, 1234);
- try expect(@TypeOf(result) == f32);
- try expect(result == 1234.0);
- }
- {
- const result = @intToFloat(f64, 1234);
- try expect(@TypeOf(result) == f64);
- try expect(result == 1234.0);
- }
- {
- const result = @intToFloat(f128, 1234);
- try expect(@TypeOf(result) == f128);
- try expect(result == 1234.0);
- }
- // big comptime_int (> 64 bits) to f128 conversion
- {
- const result = @intToFloat(f128, 0x1_0000_0000_0000_0000);
- try expect(@TypeOf(result) == f128);
- try expect(result == 0x1_0000_0000_0000_0000.0);
- }
-}
-
test "@floatCast cast down" {
{
var double: f64 = 0.001534;
@@ -458,20 +417,6 @@ fn incrementVoidPtrValue(value: ?*c_void) void {
@ptrCast(*u8, value.?).* += 1;
}
-test "implicit cast from [*]T to ?*c_void" {
- var a = [_]u8{ 3, 2, 1 };
- var runtime_zero: usize = 0;
- incrementVoidPtrArray(a[runtime_zero..].ptr, 3);
- try expect(std.mem.eql(u8, &a, &[_]u8{ 4, 3, 2 }));
-}
-
-fn incrementVoidPtrArray(array: ?*c_void, len: usize) void {
- var n: usize = 0;
- while (n < len) : (n += 1) {
- @ptrCast([*]u8, array.?)[n] += 1;
- }
-}
-
test "*usize to *void" {
var i = @as(usize, 0);
var v = @ptrCast(*void, &i);
diff --git a/test/behavior/error.zig b/test/behavior/error.zig
index fe647ee7c5..edbe866b95 100644
--- a/test/behavior/error.zig
+++ b/test/behavior/error.zig
@@ -31,3 +31,87 @@ test "empty error union" {
const x = error{} || error{};
_ = x;
}
+
+pub fn foo() anyerror!i32 {
+ const x = try bar();
+ return x + 1;
+}
+
+pub fn bar() anyerror!i32 {
+ return 13;
+}
+
+pub fn baz() anyerror!i32 {
+ const y = foo() catch 1234;
+ return y + 1;
+}
+
+test "error wrapping" {
+ try expect((baz() catch unreachable) == 15);
+}
+
+test "unwrap simple value from error" {
+ const i = unwrapSimpleValueFromErrorDo() catch unreachable;
+ try expect(i == 13);
+}
+fn unwrapSimpleValueFromErrorDo() anyerror!isize {
+ return 13;
+}
+
+test "error return in assignment" {
+ doErrReturnInAssignment() catch unreachable;
+}
+
+fn doErrReturnInAssignment() anyerror!void {
+ var x: i32 = undefined;
+ x = try makeANonErr();
+}
+
+fn makeANonErr() anyerror!i32 {
+ return 1;
+}
+
+test "syntax: optional operator in front of error union operator" {
+ comptime {
+ try expect(?(anyerror!i32) == ?(anyerror!i32));
+ }
+}
+
+test "widen cast integer payload of error union function call" {
+ const S = struct {
+ fn errorable() !u64 {
+ var x = @as(u64, try number());
+ return x;
+ }
+
+ fn number() anyerror!u32 {
+ return 1234;
+ }
+ };
+ try expect((try S.errorable()) == 1234);
+}
+
+test "debug info for optional error set" {
+ const SomeError = error{Hello};
+ var a_local_variable: ?SomeError = null;
+ _ = a_local_variable;
+}
+
+test "implicit cast to optional to error union to return result loc" {
+ const S = struct {
+ fn entry() !void {
+ var x: Foo = undefined;
+ if (func(&x)) |opt| {
+ try expect(opt != null);
+ } else |_| @panic("expected non error");
+ }
+ fn func(f: *Foo) anyerror!?*Foo {
+ return f;
+ }
+ const Foo = struct {
+ field: i32,
+ };
+ };
+ try S.entry();
+ //comptime S.entry(); TODO
+}
diff --git a/test/behavior/error_stage1.zig b/test/behavior/error_stage1.zig
index 03186a0aaa..c379257b99 100644
--- a/test/behavior/error_stage1.zig
+++ b/test/behavior/error_stage1.zig
@@ -4,52 +4,14 @@ const expectError = std.testing.expectError;
const expectEqual = std.testing.expectEqual;
const mem = std.mem;
-pub fn foo() anyerror!i32 {
- const x = try bar();
- return x + 1;
-}
-
-pub fn bar() anyerror!i32 {
- return 13;
-}
-
-pub fn baz() anyerror!i32 {
- const y = foo() catch 1234;
- return y + 1;
-}
-
-test "error wrapping" {
- try expect((baz() catch unreachable) == 15);
-}
-
-fn gimmeItBroke() []const u8 {
- return @errorName(error.ItBroke);
+fn gimmeItBroke() anyerror {
+ return error.ItBroke;
}
test "@errorName" {
try expect(mem.eql(u8, @errorName(error.AnError), "AnError"));
try expect(mem.eql(u8, @errorName(error.ALongerErrorName), "ALongerErrorName"));
-}
-
-test "unwrap simple value from error" {
- const i = unwrapSimpleValueFromErrorDo() catch unreachable;
- try expect(i == 13);
-}
-fn unwrapSimpleValueFromErrorDo() anyerror!isize {
- return 13;
-}
-
-test "error return in assignment" {
- doErrReturnInAssignment() catch unreachable;
-}
-
-fn doErrReturnInAssignment() anyerror!void {
- var x: i32 = undefined;
- x = try makeANonErr();
-}
-
-fn makeANonErr() anyerror!i32 {
- return 1;
+ try expect(mem.eql(u8, @errorName(gimmeItBroke()), "ItBroke"));
}
test "error union type " {
@@ -116,12 +78,6 @@ fn testComptimeTestErrorEmptySet(x: EmptyErrorSet!i32) !void {
}
}
-test "syntax: optional operator in front of error union operator" {
- comptime {
- try expect(?(anyerror!i32) == ?(anyerror!i32));
- }
-}
-
test "comptime err to int of error set with only 1 possible value" {
testErrToIntWithOnePossibleValue(error.A, @errorToInt(error.A));
comptime testErrToIntWithOnePossibleValue(error.A, @errorToInt(error.A));
@@ -268,20 +224,6 @@ test "nested error union function call in optional unwrap" {
}
}
-test "widen cast integer payload of error union function call" {
- const S = struct {
- fn errorable() !u64 {
- var x = @as(u64, try number());
- return x;
- }
-
- fn number() anyerror!u32 {
- return 1234;
- }
- };
- try expect((try S.errorable()) == 1234);
-}
-
test "return function call to error set from error union function" {
const S = struct {
fn errorable() anyerror!i32 {
@@ -307,12 +249,6 @@ test "optional error set is the same size as error set" {
comptime try expect(S.returnsOptErrSet() == null);
}
-test "debug info for optional error set" {
- const SomeError = error{Hello};
- var a_local_variable: ?SomeError = null;
- _ = a_local_variable;
-}
-
test "nested catch" {
const S = struct {
fn entry() !void {
@@ -335,25 +271,6 @@ test "nested catch" {
comptime try S.entry();
}
-test "implicit cast to optional to error union to return result loc" {
- const S = struct {
- fn entry() !void {
- var x: Foo = undefined;
- if (func(&x)) |opt| {
- try expect(opt != null);
- } else |_| @panic("expected non error");
- }
- fn func(f: *Foo) anyerror!?*Foo {
- return f;
- }
- const Foo = struct {
- field: i32,
- };
- };
- try S.entry();
- //comptime S.entry(); TODO
-}
-
test "function pointer with return type that is error union with payload which is pointer of parent struct" {
const S = struct {
const Foo = struct {
diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig
index ecc10859cd..84af3ecfbb 100644
--- a/test/behavior/eval.zig
+++ b/test/behavior/eval.zig
@@ -368,3 +368,86 @@ test "return 0 from function that has u0 return type" {
}
}
}
+
+test "statically initialized struct" {
+ st_init_str_foo.x += 1;
+ try expect(st_init_str_foo.x == 14);
+}
+const StInitStrFoo = struct {
+ x: i32,
+ y: bool,
+};
+var st_init_str_foo = StInitStrFoo{
+ .x = 13,
+ .y = true,
+};
+
+test "inline for with same type but different values" {
+ var res: usize = 0;
+ inline for ([_]type{ [2]u8, [1]u8, [2]u8 }) |T| {
+ var a: T = undefined;
+ res += a.len;
+ }
+ try expect(res == 5);
+}
+
+test "f32 at compile time is lossy" {
+ try expect(@as(f32, 1 << 24) + 1 == 1 << 24);
+}
+
+test "f64 at compile time is lossy" {
+ try expect(@as(f64, 1 << 53) + 1 == 1 << 53);
+}
+
+test {
+ comptime try expect(@as(f128, 1 << 113) == 10384593717069655257060992658440192);
+}
+
+fn copyWithPartialInline(s: []u32, b: []u8) void {
+ comptime var i: usize = 0;
+ inline while (i < 4) : (i += 1) {
+ s[i] = 0;
+ s[i] |= @as(u32, b[i * 4 + 0]) << 24;
+ s[i] |= @as(u32, b[i * 4 + 1]) << 16;
+ s[i] |= @as(u32, b[i * 4 + 2]) << 8;
+ s[i] |= @as(u32, b[i * 4 + 3]) << 0;
+ }
+}
+
+test "binary math operator in partially inlined function" {
+ var s: [4]u32 = undefined;
+ var b: [16]u8 = undefined;
+
+ for (b) |*r, i|
+ r.* = @intCast(u8, i + 1);
+
+ copyWithPartialInline(s[0..], b[0..]);
+ try expect(s[0] == 0x1020304);
+ try expect(s[1] == 0x5060708);
+ try expect(s[2] == 0x90a0b0c);
+ try expect(s[3] == 0xd0e0f10);
+}
+
+test "comptime shl" {
+ var a: u128 = 3;
+ var b: u7 = 63;
+ var c: u128 = 3 << 63;
+ try expect((a << b) == c);
+}
+
+test "comptime bitwise operators" {
+ comptime {
+ try expect(3 & 1 == 1);
+ try expect(3 & -1 == 3);
+ try expect(-3 & -1 == -3);
+ try expect(3 | -1 == -1);
+ try expect(-3 | -1 == -1);
+ try expect(3 ^ -1 == -4);
+ try expect(-3 ^ -1 == 2);
+ try expect(~@as(i8, -1) == 0);
+ try expect(~@as(i128, -1) == 0);
+ try expect(18446744073709551615 & 18446744073709551611 == 18446744073709551611);
+ try expect(-18446744073709551615 & -18446744073709551611 == -18446744073709551615);
+ try expect(~@as(u128, 0) == 0xffffffffffffffffffffffffffffffff);
+ }
+}
diff --git a/test/behavior/eval_stage1.zig b/test/behavior/eval_stage1.zig
index c19c58455b..348c685a26 100644
--- a/test/behavior/eval_stage1.zig
+++ b/test/behavior/eval_stage1.zig
@@ -12,27 +12,10 @@ pub const Vec3 = struct {
};
pub fn vec3(x: f32, y: f32, z: f32) Vec3 {
return Vec3{
- .data = [_]f32{
- x,
- y,
- z,
- },
+ .data = [_]f32{ x, y, z },
};
}
-test "statically initialized struct" {
- st_init_str_foo.x += 1;
- try expect(st_init_str_foo.x == 14);
-}
-const StInitStrFoo = struct {
- x: i32,
- y: bool,
-};
-var st_init_str_foo = StInitStrFoo{
- .x = 13,
- .y = true,
-};
-
test "inlined loop has array literal with elided runtime scope on first iteration but not second iteration" {
var runtime = [1]i32{3};
comptime var i: usize = 0;
@@ -131,22 +114,10 @@ test "float literal at compile time not lossy" {
try expect(9007199254740992.0 + 1.0 == 9007199254740993.0);
}
-test "f32 at compile time is lossy" {
- try expect(@as(f32, 1 << 24) + 1 == 1 << 24);
-}
-
-test "f64 at compile time is lossy" {
- try expect(@as(f64, 1 << 53) + 1 == 1 << 53);
-}
-
test "f128 at compile time is lossy" {
try expect(@as(f128, 10384593717069655257060992658440192.0) + 1 == 10384593717069655257060992658440192.0);
}
-test {
- comptime try expect(@as(f128, 1 << 113) == 10384593717069655257060992658440192);
-}
-
pub fn TypeWithCompTimeSlice(comptime field_name: []const u8) type {
_ = field_name;
return struct {
@@ -161,31 +132,6 @@ test "string literal used as comptime slice is memoized" {
comptime try expect(TypeWithCompTimeSlice("link").Node == TypeWithCompTimeSlice("link").Node);
}
-fn copyWithPartialInline(s: []u32, b: []u8) void {
- comptime var i: usize = 0;
- inline while (i < 4) : (i += 1) {
- s[i] = 0;
- s[i] |= @as(u32, b[i * 4 + 0]) << 24;
- s[i] |= @as(u32, b[i * 4 + 1]) << 16;
- s[i] |= @as(u32, b[i * 4 + 2]) << 8;
- s[i] |= @as(u32, b[i * 4 + 3]) << 0;
- }
-}
-
-test "binary math operator in partially inlined function" {
- var s: [4]u32 = undefined;
- var b: [16]u8 = undefined;
-
- for (b) |*r, i|
- r.* = @intCast(u8, i + 1);
-
- copyWithPartialInline(s[0..], b[0..]);
- try expect(s[0] == 0x1020304);
- try expect(s[1] == 0x5060708);
- try expect(s[2] == 0x90a0b0c);
- try expect(s[3] == 0xd0e0f10);
-}
-
test "comptime function with mutable pointer is not memoized" {
comptime {
var x: i32 = 1;
@@ -232,13 +178,6 @@ test "comptime shlWithOverflow" {
try expect(ct_shifted == rt_shifted);
}
-test "comptime shl" {
- var a: u128 = 3;
- var b: u7 = 63;
- var c: u128 = 3 << 63;
- try expectEqual(a << b, c);
-}
-
test "runtime 128 bit integer division" {
var a: u128 = 152313999999999991610955792383;
var b: u128 = 10000000000000000000;
@@ -301,38 +240,12 @@ fn testVarInsideInlineLoop(args: anytype) !void {
}
}
-test "inline for with same type but different values" {
- var res: usize = 0;
- inline for ([_]type{ [2]u8, [1]u8, [2]u8 }) |T| {
- var a: T = undefined;
- res += a.len;
- }
- try expect(res == 5);
-}
-
test "bit shift a u1" {
var x: u1 = 1;
var y = x << 0;
try expect(y == 1);
}
-test "comptime bitwise operators" {
- comptime {
- try expect(3 & 1 == 1);
- try expect(3 & -1 == 3);
- try expect(-3 & -1 == -3);
- try expect(3 | -1 == -1);
- try expect(-3 | -1 == -1);
- try expect(3 ^ -1 == -4);
- try expect(-3 ^ -1 == 2);
- try expect(~@as(i8, -1) == 0);
- try expect(~@as(i128, -1) == 0);
- try expect(18446744073709551615 & 18446744073709551611 == 18446744073709551611);
- try expect(-18446744073709551615 & -18446744073709551611 == -18446744073709551615);
- try expect(~@as(u128, 0) == 0xffffffffffffffffffffffffffffffff);
- }
-}
-
test "*align(1) u16 is the same as *align(1:0:2) u16" {
comptime {
try expect(*align(1:0:2) u16 == *align(1) u16);
diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig
index ec8641340f..cbf0d2532b 100644
--- a/test/behavior/floatop.zig
+++ b/test/behavior/floatop.zig
@@ -7,403 +7,6 @@ const Vector = std.meta.Vector;
const epsilon = 0.000001;
-test "@sqrt" {
- comptime try testSqrt();
- try testSqrt();
-}
-
-fn testSqrt() !void {
- {
- var a: f16 = 4;
- try expect(@sqrt(a) == 2);
- }
- {
- var a: f32 = 9;
- try expect(@sqrt(a) == 3);
- var b: f32 = 1.1;
- try expect(math.approxEqAbs(f32, @sqrt(b), 1.0488088481701516, epsilon));
- }
- {
- var a: f64 = 25;
- try expect(@sqrt(a) == 5);
- }
- {
- const a: comptime_float = 25.0;
- try expect(@sqrt(a) == 5.0);
- }
- // TODO https://github.com/ziglang/zig/issues/4026
- //{
- // var a: f128 = 49;
- //try expect(@sqrt(a) == 7);
- //}
- {
- var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 3.3, 4.4 };
- var result = @sqrt(v);
- try expect(math.approxEqAbs(f32, @sqrt(@as(f32, 1.1)), result[0], epsilon));
- try expect(math.approxEqAbs(f32, @sqrt(@as(f32, 2.2)), result[1], epsilon));
- try expect(math.approxEqAbs(f32, @sqrt(@as(f32, 3.3)), result[2], epsilon));
- try expect(math.approxEqAbs(f32, @sqrt(@as(f32, 4.4)), result[3], epsilon));
- }
-}
-
-test "more @sqrt f16 tests" {
- // TODO these are not all passing at comptime
- try expect(@sqrt(@as(f16, 0.0)) == 0.0);
- try expect(math.approxEqAbs(f16, @sqrt(@as(f16, 2.0)), 1.414214, epsilon));
- try expect(math.approxEqAbs(f16, @sqrt(@as(f16, 3.6)), 1.897367, epsilon));
- try expect(@sqrt(@as(f16, 4.0)) == 2.0);
- try expect(math.approxEqAbs(f16, @sqrt(@as(f16, 7.539840)), 2.745877, epsilon));
- try expect(math.approxEqAbs(f16, @sqrt(@as(f16, 19.230934)), 4.385309, epsilon));
- try expect(@sqrt(@as(f16, 64.0)) == 8.0);
- try expect(math.approxEqAbs(f16, @sqrt(@as(f16, 64.1)), 8.006248, epsilon));
- try expect(math.approxEqAbs(f16, @sqrt(@as(f16, 8942.230469)), 94.563370, epsilon));
-
- // special cases
- try expect(math.isPositiveInf(@sqrt(@as(f16, math.inf(f16)))));
- try expect(@sqrt(@as(f16, 0.0)) == 0.0);
- try expect(@sqrt(@as(f16, -0.0)) == -0.0);
- try expect(math.isNan(@sqrt(@as(f16, -1.0))));
- try expect(math.isNan(@sqrt(@as(f16, math.nan(f16)))));
-}
-
-test "@sin" {
- comptime try testSin();
- try testSin();
-}
-
-fn testSin() !void {
- // TODO test f128, and c_longdouble
- // https://github.com/ziglang/zig/issues/4026
- {
- var a: f16 = 0;
- try expect(@sin(a) == 0);
- }
- {
- var a: f32 = 0;
- try expect(@sin(a) == 0);
- }
- {
- var a: f64 = 0;
- try expect(@sin(a) == 0);
- }
- {
- var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 3.3, 4.4 };
- var result = @sin(v);
- try expect(math.approxEqAbs(f32, @sin(@as(f32, 1.1)), result[0], epsilon));
- try expect(math.approxEqAbs(f32, @sin(@as(f32, 2.2)), result[1], epsilon));
- try expect(math.approxEqAbs(f32, @sin(@as(f32, 3.3)), result[2], epsilon));
- try expect(math.approxEqAbs(f32, @sin(@as(f32, 4.4)), result[3], epsilon));
- }
-}
-
-test "@cos" {
- comptime try testCos();
- try testCos();
-}
-
-fn testCos() !void {
- // TODO test f128, and c_longdouble
- // https://github.com/ziglang/zig/issues/4026
- {
- var a: f16 = 0;
- try expect(@cos(a) == 1);
- }
- {
- var a: f32 = 0;
- try expect(@cos(a) == 1);
- }
- {
- var a: f64 = 0;
- try expect(@cos(a) == 1);
- }
- {
- var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 3.3, 4.4 };
- var result = @cos(v);
- try expect(math.approxEqAbs(f32, @cos(@as(f32, 1.1)), result[0], epsilon));
- try expect(math.approxEqAbs(f32, @cos(@as(f32, 2.2)), result[1], epsilon));
- try expect(math.approxEqAbs(f32, @cos(@as(f32, 3.3)), result[2], epsilon));
- try expect(math.approxEqAbs(f32, @cos(@as(f32, 4.4)), result[3], epsilon));
- }
-}
-
-test "@exp" {
- comptime try testExp();
- try testExp();
-}
-
-fn testExp() !void {
- // TODO test f128, and c_longdouble
- // https://github.com/ziglang/zig/issues/4026
- {
- var a: f16 = 0;
- try expect(@exp(a) == 1);
- }
- {
- var a: f32 = 0;
- try expect(@exp(a) == 1);
- }
- {
- var a: f64 = 0;
- try expect(@exp(a) == 1);
- }
- {
- var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 0.3, 0.4 };
- var result = @exp(v);
- try expect(math.approxEqAbs(f32, @exp(@as(f32, 1.1)), result[0], epsilon));
- try expect(math.approxEqAbs(f32, @exp(@as(f32, 2.2)), result[1], epsilon));
- try expect(math.approxEqAbs(f32, @exp(@as(f32, 0.3)), result[2], epsilon));
- try expect(math.approxEqAbs(f32, @exp(@as(f32, 0.4)), result[3], epsilon));
- }
-}
-
-test "@exp2" {
- comptime try testExp2();
- try testExp2();
-}
-
-fn testExp2() !void {
- // TODO test f128, and c_longdouble
- // https://github.com/ziglang/zig/issues/4026
- {
- var a: f16 = 2;
- try expect(@exp2(a) == 4);
- }
- {
- var a: f32 = 2;
- try expect(@exp2(a) == 4);
- }
- {
- var a: f64 = 2;
- try expect(@exp2(a) == 4);
- }
- {
- var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 0.3, 0.4 };
- var result = @exp2(v);
- try expect(math.approxEqAbs(f32, @exp2(@as(f32, 1.1)), result[0], epsilon));
- try expect(math.approxEqAbs(f32, @exp2(@as(f32, 2.2)), result[1], epsilon));
- try expect(math.approxEqAbs(f32, @exp2(@as(f32, 0.3)), result[2], epsilon));
- try expect(math.approxEqAbs(f32, @exp2(@as(f32, 0.4)), result[3], epsilon));
- }
-}
-
-test "@log" {
- // Old musl (and glibc?), and our current math.ln implementation do not return 1
- // so also accept those values.
- comptime try testLog();
- try testLog();
-}
-
-fn testLog() !void {
- // TODO test f128, and c_longdouble
- // https://github.com/ziglang/zig/issues/4026
- {
- var a: f16 = e;
- try expect(math.approxEqAbs(f16, @log(a), 1, epsilon));
- }
- {
- var a: f32 = e;
- try expect(@log(a) == 1 or @log(a) == @bitCast(f32, @as(u32, 0x3f7fffff)));
- }
- {
- var a: f64 = e;
- try expect(@log(a) == 1 or @log(a) == @bitCast(f64, @as(u64, 0x3ff0000000000000)));
- }
- {
- var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 0.3, 0.4 };
- var result = @log(v);
- try expect(math.approxEqAbs(f32, @log(@as(f32, 1.1)), result[0], epsilon));
- try expect(math.approxEqAbs(f32, @log(@as(f32, 2.2)), result[1], epsilon));
- try expect(math.approxEqAbs(f32, @log(@as(f32, 0.3)), result[2], epsilon));
- try expect(math.approxEqAbs(f32, @log(@as(f32, 0.4)), result[3], epsilon));
- }
-}
-
-test "@log2" {
- comptime try testLog2();
- try testLog2();
-}
-
-fn testLog2() !void {
- // TODO test f128, and c_longdouble
- // https://github.com/ziglang/zig/issues/4026
- {
- var a: f16 = 4;
- try expect(@log2(a) == 2);
- }
- {
- var a: f32 = 4;
- try expect(@log2(a) == 2);
- }
- {
- var a: f64 = 4;
- try expect(@log2(a) == 2);
- }
- {
- var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 0.3, 0.4 };
- var result = @log2(v);
- try expect(math.approxEqAbs(f32, @log2(@as(f32, 1.1)), result[0], epsilon));
- try expect(math.approxEqAbs(f32, @log2(@as(f32, 2.2)), result[1], epsilon));
- try expect(math.approxEqAbs(f32, @log2(@as(f32, 0.3)), result[2], epsilon));
- try expect(math.approxEqAbs(f32, @log2(@as(f32, 0.4)), result[3], epsilon));
- }
-}
-
-test "@log10" {
- comptime try testLog10();
- try testLog10();
-}
-
-fn testLog10() !void {
- // TODO test f128, and c_longdouble
- // https://github.com/ziglang/zig/issues/4026
- {
- var a: f16 = 100;
- try expect(@log10(a) == 2);
- }
- {
- var a: f32 = 100;
- try expect(@log10(a) == 2);
- }
- {
- var a: f64 = 1000;
- try expect(@log10(a) == 3);
- }
- {
- var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 0.3, 0.4 };
- var result = @log10(v);
- try expect(math.approxEqAbs(f32, @log10(@as(f32, 1.1)), result[0], epsilon));
- try expect(math.approxEqAbs(f32, @log10(@as(f32, 2.2)), result[1], epsilon));
- try expect(math.approxEqAbs(f32, @log10(@as(f32, 0.3)), result[2], epsilon));
- try expect(math.approxEqAbs(f32, @log10(@as(f32, 0.4)), result[3], epsilon));
- }
-}
-
-test "@fabs" {
- comptime try testFabs();
- try testFabs();
-}
-
-fn testFabs() !void {
- // TODO test f128, and c_longdouble
- // https://github.com/ziglang/zig/issues/4026
- {
- var a: f16 = -2.5;
- var b: f16 = 2.5;
- try expect(@fabs(a) == 2.5);
- try expect(@fabs(b) == 2.5);
- }
- {
- var a: f32 = -2.5;
- var b: f32 = 2.5;
- try expect(@fabs(a) == 2.5);
- try expect(@fabs(b) == 2.5);
- }
- {
- var a: f64 = -2.5;
- var b: f64 = 2.5;
- try expect(@fabs(a) == 2.5);
- try expect(@fabs(b) == 2.5);
- }
- {
- var v: Vector(4, f32) = [_]f32{ 1.1, -2.2, 0.3, -0.4 };
- var result = @fabs(v);
- try expect(math.approxEqAbs(f32, @fabs(@as(f32, 1.1)), result[0], epsilon));
- try expect(math.approxEqAbs(f32, @fabs(@as(f32, -2.2)), result[1], epsilon));
- try expect(math.approxEqAbs(f32, @fabs(@as(f32, 0.3)), result[2], epsilon));
- try expect(math.approxEqAbs(f32, @fabs(@as(f32, -0.4)), result[3], epsilon));
- }
-}
-
-test "@floor" {
- comptime try testFloor();
- try testFloor();
-}
-
-fn testFloor() !void {
- // TODO test f128, and c_longdouble
- // https://github.com/ziglang/zig/issues/4026
- {
- var a: f16 = 2.1;
- try expect(@floor(a) == 2);
- }
- {
- var a: f32 = 2.1;
- try expect(@floor(a) == 2);
- }
- {
- var a: f64 = 3.5;
- try expect(@floor(a) == 3);
- }
- {
- var v: Vector(4, f32) = [_]f32{ 1.1, -2.2, 0.3, -0.4 };
- var result = @floor(v);
- try expect(math.approxEqAbs(f32, @floor(@as(f32, 1.1)), result[0], epsilon));
- try expect(math.approxEqAbs(f32, @floor(@as(f32, -2.2)), result[1], epsilon));
- try expect(math.approxEqAbs(f32, @floor(@as(f32, 0.3)), result[2], epsilon));
- try expect(math.approxEqAbs(f32, @floor(@as(f32, -0.4)), result[3], epsilon));
- }
-}
-
-test "@ceil" {
- comptime try testCeil();
- try testCeil();
-}
-
-fn testCeil() !void {
- // TODO test f128, and c_longdouble
- // https://github.com/ziglang/zig/issues/4026
- {
- var a: f16 = 2.1;
- try expect(@ceil(a) == 3);
- }
- {
- var a: f32 = 2.1;
- try expect(@ceil(a) == 3);
- }
- {
- var a: f64 = 3.5;
- try expect(@ceil(a) == 4);
- }
- {
- var v: Vector(4, f32) = [_]f32{ 1.1, -2.2, 0.3, -0.4 };
- var result = @ceil(v);
- try expect(math.approxEqAbs(f32, @ceil(@as(f32, 1.1)), result[0], epsilon));
- try expect(math.approxEqAbs(f32, @ceil(@as(f32, -2.2)), result[1], epsilon));
- try expect(math.approxEqAbs(f32, @ceil(@as(f32, 0.3)), result[2], epsilon));
- try expect(math.approxEqAbs(f32, @ceil(@as(f32, -0.4)), result[3], epsilon));
- }
-}
-
-test "@trunc" {
- comptime try testTrunc();
- try testTrunc();
-}
-
-fn testTrunc() !void {
- // TODO test f128, and c_longdouble
- // https://github.com/ziglang/zig/issues/4026
- {
- var a: f16 = 2.1;
- try expect(@trunc(a) == 2);
- }
- {
- var a: f32 = 2.1;
- try expect(@trunc(a) == 2);
- }
- {
- var a: f64 = -3.5;
- try expect(@trunc(a) == -3);
- }
- {
- var v: Vector(4, f32) = [_]f32{ 1.1, -2.2, 0.3, -0.4 };
- var result = @trunc(v);
- try expect(math.approxEqAbs(f32, @trunc(@as(f32, 1.1)), result[0], epsilon));
- try expect(math.approxEqAbs(f32, @trunc(@as(f32, -2.2)), result[1], epsilon));
- try expect(math.approxEqAbs(f32, @trunc(@as(f32, 0.3)), result[2], epsilon));
- try expect(math.approxEqAbs(f32, @trunc(@as(f32, -0.4)), result[3], epsilon));
- }
-}
-
test "floating point comparisons" {
try testFloatComparisons();
comptime try testFloatComparisons();
diff --git a/test/behavior/floatop_stage1.zig b/test/behavior/floatop_stage1.zig
new file mode 100644
index 0000000000..b1b7eb2b92
--- /dev/null
+++ b/test/behavior/floatop_stage1.zig
@@ -0,0 +1,405 @@
+const std = @import("std");
+const expect = std.testing.expect;
+const math = std.math;
+const pi = std.math.pi;
+const e = std.math.e;
+const Vector = std.meta.Vector;
+
+const epsilon = 0.000001;
+
+test "@sqrt" {
+ comptime try testSqrt();
+ try testSqrt();
+}
+
+fn testSqrt() !void {
+ {
+ var a: f16 = 4;
+ try expect(@sqrt(a) == 2);
+ }
+ {
+ var a: f32 = 9;
+ try expect(@sqrt(a) == 3);
+ var b: f32 = 1.1;
+ try expect(math.approxEqAbs(f32, @sqrt(b), 1.0488088481701516, epsilon));
+ }
+ {
+ var a: f64 = 25;
+ try expect(@sqrt(a) == 5);
+ }
+ {
+ const a: comptime_float = 25.0;
+ try expect(@sqrt(a) == 5.0);
+ }
+ // TODO https://github.com/ziglang/zig/issues/4026
+ //{
+ // var a: f128 = 49;
+ //try expect(@sqrt(a) == 7);
+ //}
+ {
+ var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 3.3, 4.4 };
+ var result = @sqrt(v);
+ try expect(math.approxEqAbs(f32, @sqrt(@as(f32, 1.1)), result[0], epsilon));
+ try expect(math.approxEqAbs(f32, @sqrt(@as(f32, 2.2)), result[1], epsilon));
+ try expect(math.approxEqAbs(f32, @sqrt(@as(f32, 3.3)), result[2], epsilon));
+ try expect(math.approxEqAbs(f32, @sqrt(@as(f32, 4.4)), result[3], epsilon));
+ }
+}
+
+test "more @sqrt f16 tests" {
+ // TODO these are not all passing at comptime
+ try expect(@sqrt(@as(f16, 0.0)) == 0.0);
+ try expect(math.approxEqAbs(f16, @sqrt(@as(f16, 2.0)), 1.414214, epsilon));
+ try expect(math.approxEqAbs(f16, @sqrt(@as(f16, 3.6)), 1.897367, epsilon));
+ try expect(@sqrt(@as(f16, 4.0)) == 2.0);
+ try expect(math.approxEqAbs(f16, @sqrt(@as(f16, 7.539840)), 2.745877, epsilon));
+ try expect(math.approxEqAbs(f16, @sqrt(@as(f16, 19.230934)), 4.385309, epsilon));
+ try expect(@sqrt(@as(f16, 64.0)) == 8.0);
+ try expect(math.approxEqAbs(f16, @sqrt(@as(f16, 64.1)), 8.006248, epsilon));
+ try expect(math.approxEqAbs(f16, @sqrt(@as(f16, 8942.230469)), 94.563370, epsilon));
+
+ // special cases
+ try expect(math.isPositiveInf(@sqrt(@as(f16, math.inf(f16)))));
+ try expect(@sqrt(@as(f16, 0.0)) == 0.0);
+ try expect(@sqrt(@as(f16, -0.0)) == -0.0);
+ try expect(math.isNan(@sqrt(@as(f16, -1.0))));
+ try expect(math.isNan(@sqrt(@as(f16, math.nan(f16)))));
+}
+
+test "@sin" {
+ comptime try testSin();
+ try testSin();
+}
+
+fn testSin() !void {
+ // TODO test f128, and c_longdouble
+ // https://github.com/ziglang/zig/issues/4026
+ {
+ var a: f16 = 0;
+ try expect(@sin(a) == 0);
+ }
+ {
+ var a: f32 = 0;
+ try expect(@sin(a) == 0);
+ }
+ {
+ var a: f64 = 0;
+ try expect(@sin(a) == 0);
+ }
+ {
+ var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 3.3, 4.4 };
+ var result = @sin(v);
+ try expect(math.approxEqAbs(f32, @sin(@as(f32, 1.1)), result[0], epsilon));
+ try expect(math.approxEqAbs(f32, @sin(@as(f32, 2.2)), result[1], epsilon));
+ try expect(math.approxEqAbs(f32, @sin(@as(f32, 3.3)), result[2], epsilon));
+ try expect(math.approxEqAbs(f32, @sin(@as(f32, 4.4)), result[3], epsilon));
+ }
+}
+
+test "@cos" {
+ comptime try testCos();
+ try testCos();
+}
+
+fn testCos() !void {
+ // TODO test f128, and c_longdouble
+ // https://github.com/ziglang/zig/issues/4026
+ {
+ var a: f16 = 0;
+ try expect(@cos(a) == 1);
+ }
+ {
+ var a: f32 = 0;
+ try expect(@cos(a) == 1);
+ }
+ {
+ var a: f64 = 0;
+ try expect(@cos(a) == 1);
+ }
+ {
+ var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 3.3, 4.4 };
+ var result = @cos(v);
+ try expect(math.approxEqAbs(f32, @cos(@as(f32, 1.1)), result[0], epsilon));
+ try expect(math.approxEqAbs(f32, @cos(@as(f32, 2.2)), result[1], epsilon));
+ try expect(math.approxEqAbs(f32, @cos(@as(f32, 3.3)), result[2], epsilon));
+ try expect(math.approxEqAbs(f32, @cos(@as(f32, 4.4)), result[3], epsilon));
+ }
+}
+
+test "@exp" {
+ comptime try testExp();
+ try testExp();
+}
+
+fn testExp() !void {
+ // TODO test f128, and c_longdouble
+ // https://github.com/ziglang/zig/issues/4026
+ {
+ var a: f16 = 0;
+ try expect(@exp(a) == 1);
+ }
+ {
+ var a: f32 = 0;
+ try expect(@exp(a) == 1);
+ }
+ {
+ var a: f64 = 0;
+ try expect(@exp(a) == 1);
+ }
+ {
+ var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 0.3, 0.4 };
+ var result = @exp(v);
+ try expect(math.approxEqAbs(f32, @exp(@as(f32, 1.1)), result[0], epsilon));
+ try expect(math.approxEqAbs(f32, @exp(@as(f32, 2.2)), result[1], epsilon));
+ try expect(math.approxEqAbs(f32, @exp(@as(f32, 0.3)), result[2], epsilon));
+ try expect(math.approxEqAbs(f32, @exp(@as(f32, 0.4)), result[3], epsilon));
+ }
+}
+
+test "@exp2" {
+ comptime try testExp2();
+ try testExp2();
+}
+
+fn testExp2() !void {
+ // TODO test f128, and c_longdouble
+ // https://github.com/ziglang/zig/issues/4026
+ {
+ var a: f16 = 2;
+ try expect(@exp2(a) == 4);
+ }
+ {
+ var a: f32 = 2;
+ try expect(@exp2(a) == 4);
+ }
+ {
+ var a: f64 = 2;
+ try expect(@exp2(a) == 4);
+ }
+ {
+ var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 0.3, 0.4 };
+ var result = @exp2(v);
+ try expect(math.approxEqAbs(f32, @exp2(@as(f32, 1.1)), result[0], epsilon));
+ try expect(math.approxEqAbs(f32, @exp2(@as(f32, 2.2)), result[1], epsilon));
+ try expect(math.approxEqAbs(f32, @exp2(@as(f32, 0.3)), result[2], epsilon));
+ try expect(math.approxEqAbs(f32, @exp2(@as(f32, 0.4)), result[3], epsilon));
+ }
+}
+
+test "@log" {
+ // Old musl (and glibc?), and our current math.ln implementation do not return 1
+ // so also accept those values.
+ comptime try testLog();
+ try testLog();
+}
+
+fn testLog() !void {
+ // TODO test f128, and c_longdouble
+ // https://github.com/ziglang/zig/issues/4026
+ {
+ var a: f16 = e;
+ try expect(math.approxEqAbs(f16, @log(a), 1, epsilon));
+ }
+ {
+ var a: f32 = e;
+ try expect(@log(a) == 1 or @log(a) == @bitCast(f32, @as(u32, 0x3f7fffff)));
+ }
+ {
+ var a: f64 = e;
+ try expect(@log(a) == 1 or @log(a) == @bitCast(f64, @as(u64, 0x3ff0000000000000)));
+ }
+ {
+ var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 0.3, 0.4 };
+ var result = @log(v);
+ try expect(math.approxEqAbs(f32, @log(@as(f32, 1.1)), result[0], epsilon));
+ try expect(math.approxEqAbs(f32, @log(@as(f32, 2.2)), result[1], epsilon));
+ try expect(math.approxEqAbs(f32, @log(@as(f32, 0.3)), result[2], epsilon));
+ try expect(math.approxEqAbs(f32, @log(@as(f32, 0.4)), result[3], epsilon));
+ }
+}
+
+test "@log2" {
+ comptime try testLog2();
+ try testLog2();
+}
+
+fn testLog2() !void {
+ // TODO test f128, and c_longdouble
+ // https://github.com/ziglang/zig/issues/4026
+ {
+ var a: f16 = 4;
+ try expect(@log2(a) == 2);
+ }
+ {
+ var a: f32 = 4;
+ try expect(@log2(a) == 2);
+ }
+ {
+ var a: f64 = 4;
+ try expect(@log2(a) == 2);
+ }
+ {
+ var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 0.3, 0.4 };
+ var result = @log2(v);
+ try expect(math.approxEqAbs(f32, @log2(@as(f32, 1.1)), result[0], epsilon));
+ try expect(math.approxEqAbs(f32, @log2(@as(f32, 2.2)), result[1], epsilon));
+ try expect(math.approxEqAbs(f32, @log2(@as(f32, 0.3)), result[2], epsilon));
+ try expect(math.approxEqAbs(f32, @log2(@as(f32, 0.4)), result[3], epsilon));
+ }
+}
+
+test "@log10" {
+ comptime try testLog10();
+ try testLog10();
+}
+
+fn testLog10() !void {
+ // TODO test f128, and c_longdouble
+ // https://github.com/ziglang/zig/issues/4026
+ {
+ var a: f16 = 100;
+ try expect(@log10(a) == 2);
+ }
+ {
+ var a: f32 = 100;
+ try expect(@log10(a) == 2);
+ }
+ {
+ var a: f64 = 1000;
+ try expect(@log10(a) == 3);
+ }
+ {
+ var v: Vector(4, f32) = [_]f32{ 1.1, 2.2, 0.3, 0.4 };
+ var result = @log10(v);
+ try expect(math.approxEqAbs(f32, @log10(@as(f32, 1.1)), result[0], epsilon));
+ try expect(math.approxEqAbs(f32, @log10(@as(f32, 2.2)), result[1], epsilon));
+ try expect(math.approxEqAbs(f32, @log10(@as(f32, 0.3)), result[2], epsilon));
+ try expect(math.approxEqAbs(f32, @log10(@as(f32, 0.4)), result[3], epsilon));
+ }
+}
+
+test "@fabs" {
+ comptime try testFabs();
+ try testFabs();
+}
+
+fn testFabs() !void {
+ // TODO test f128, and c_longdouble
+ // https://github.com/ziglang/zig/issues/4026
+ {
+ var a: f16 = -2.5;
+ var b: f16 = 2.5;
+ try expect(@fabs(a) == 2.5);
+ try expect(@fabs(b) == 2.5);
+ }
+ {
+ var a: f32 = -2.5;
+ var b: f32 = 2.5;
+ try expect(@fabs(a) == 2.5);
+ try expect(@fabs(b) == 2.5);
+ }
+ {
+ var a: f64 = -2.5;
+ var b: f64 = 2.5;
+ try expect(@fabs(a) == 2.5);
+ try expect(@fabs(b) == 2.5);
+ }
+ {
+ var v: Vector(4, f32) = [_]f32{ 1.1, -2.2, 0.3, -0.4 };
+ var result = @fabs(v);
+ try expect(math.approxEqAbs(f32, @fabs(@as(f32, 1.1)), result[0], epsilon));
+ try expect(math.approxEqAbs(f32, @fabs(@as(f32, -2.2)), result[1], epsilon));
+ try expect(math.approxEqAbs(f32, @fabs(@as(f32, 0.3)), result[2], epsilon));
+ try expect(math.approxEqAbs(f32, @fabs(@as(f32, -0.4)), result[3], epsilon));
+ }
+}
+
+test "@floor" {
+ comptime try testFloor();
+ try testFloor();
+}
+
+fn testFloor() !void {
+ // TODO test f128, and c_longdouble
+ // https://github.com/ziglang/zig/issues/4026
+ {
+ var a: f16 = 2.1;
+ try expect(@floor(a) == 2);
+ }
+ {
+ var a: f32 = 2.1;
+ try expect(@floor(a) == 2);
+ }
+ {
+ var a: f64 = 3.5;
+ try expect(@floor(a) == 3);
+ }
+ {
+ var v: Vector(4, f32) = [_]f32{ 1.1, -2.2, 0.3, -0.4 };
+ var result = @floor(v);
+ try expect(math.approxEqAbs(f32, @floor(@as(f32, 1.1)), result[0], epsilon));
+ try expect(math.approxEqAbs(f32, @floor(@as(f32, -2.2)), result[1], epsilon));
+ try expect(math.approxEqAbs(f32, @floor(@as(f32, 0.3)), result[2], epsilon));
+ try expect(math.approxEqAbs(f32, @floor(@as(f32, -0.4)), result[3], epsilon));
+ }
+}
+
+test "@ceil" {
+ comptime try testCeil();
+ try testCeil();
+}
+
+fn testCeil() !void {
+ // TODO test f128, and c_longdouble
+ // https://github.com/ziglang/zig/issues/4026
+ {
+ var a: f16 = 2.1;
+ try expect(@ceil(a) == 3);
+ }
+ {
+ var a: f32 = 2.1;
+ try expect(@ceil(a) == 3);
+ }
+ {
+ var a: f64 = 3.5;
+ try expect(@ceil(a) == 4);
+ }
+ {
+ var v: Vector(4, f32) = [_]f32{ 1.1, -2.2, 0.3, -0.4 };
+ var result = @ceil(v);
+ try expect(math.approxEqAbs(f32, @ceil(@as(f32, 1.1)), result[0], epsilon));
+ try expect(math.approxEqAbs(f32, @ceil(@as(f32, -2.2)), result[1], epsilon));
+ try expect(math.approxEqAbs(f32, @ceil(@as(f32, 0.3)), result[2], epsilon));
+ try expect(math.approxEqAbs(f32, @ceil(@as(f32, -0.4)), result[3], epsilon));
+ }
+}
+
+test "@trunc" {
+ comptime try testTrunc();
+ try testTrunc();
+}
+
+fn testTrunc() !void {
+ // TODO test f128, and c_longdouble
+ // https://github.com/ziglang/zig/issues/4026
+ {
+ var a: f16 = 2.1;
+ try expect(@trunc(a) == 2);
+ }
+ {
+ var a: f32 = 2.1;
+ try expect(@trunc(a) == 2);
+ }
+ {
+ var a: f64 = -3.5;
+ try expect(@trunc(a) == -3);
+ }
+ {
+ var v: Vector(4, f32) = [_]f32{ 1.1, -2.2, 0.3, -0.4 };
+ var result = @trunc(v);
+ try expect(math.approxEqAbs(f32, @trunc(@as(f32, 1.1)), result[0], epsilon));
+ try expect(math.approxEqAbs(f32, @trunc(@as(f32, -2.2)), result[1], epsilon));
+ try expect(math.approxEqAbs(f32, @trunc(@as(f32, 0.3)), result[2], epsilon));
+ try expect(math.approxEqAbs(f32, @trunc(@as(f32, -0.4)), result[3], epsilon));
+ }
+}
diff --git a/test/behavior/fn.zig b/test/behavior/fn.zig
index 2567582cd3..b00c2f7a59 100644
--- a/test/behavior/fn.zig
+++ b/test/behavior/fn.zig
@@ -19,17 +19,6 @@ fn testLocVars(b: i32) void {
if (a + b != 3) unreachable;
}
-test "void parameters" {
- try voidFun(1, void{}, 2, {});
-}
-fn voidFun(a: i32, b: void, c: i32, d: void) !void {
- _ = d;
- const v = b;
- const vv: void = if (a == 1) v else {};
- try expect(a + c == 3);
- return vv;
-}
-
test "mutable local variables" {
var zero: i32 = 0;
try expect(zero == 0);
@@ -54,14 +43,6 @@ test "separate block scopes" {
try expect(c == 10);
}
-test "call function with empty string" {
- acceptsString("");
-}
-
-fn acceptsString(foo: []u8) void {
- _ = foo;
-}
-
fn @"weird function name"() i32 {
return 1234;
}
@@ -69,51 +50,6 @@ test "weird function name" {
try expect(@"weird function name"() == 1234);
}
-test "implicit cast function unreachable return" {
- wantsFnWithVoid(fnWithUnreachable);
-}
-
-fn wantsFnWithVoid(f: fn () void) void {
- _ = f;
-}
-
-fn fnWithUnreachable() noreturn {
- unreachable;
-}
-
-test "function pointers" {
- const fns = [_]@TypeOf(fn1){
- fn1,
- fn2,
- fn3,
- fn4,
- };
- for (fns) |f, i| {
- try expect(f() == @intCast(u32, i) + 5);
- }
-}
-fn fn1() u32 {
- return 5;
-}
-fn fn2() u32 {
- return 6;
-}
-fn fn3() u32 {
- return 7;
-}
-fn fn4() u32 {
- return 8;
-}
-
-test "number literal as an argument" {
- try numberLiteralArg(3);
- comptime try numberLiteralArg(3);
-}
-
-fn numberLiteralArg(a: anytype) !void {
- try expect(a == 3);
-}
-
test "assign inline fn to const variable" {
const a = inlineFn;
a();
@@ -121,64 +57,6 @@ test "assign inline fn to const variable" {
inline fn inlineFn() void {}
-test "pass by non-copying value" {
- try expect(addPointCoords(Point{ .x = 1, .y = 2 }) == 3);
-}
-
-const Point = struct {
- x: i32,
- y: i32,
-};
-
-fn addPointCoords(pt: Point) i32 {
- return pt.x + pt.y;
-}
-
-test "pass by non-copying value through var arg" {
- try expect((try addPointCoordsVar(Point{ .x = 1, .y = 2 })) == 3);
-}
-
-fn addPointCoordsVar(pt: anytype) !i32 {
- comptime try expect(@TypeOf(pt) == Point);
- return pt.x + pt.y;
-}
-
-test "pass by non-copying value as method" {
- var pt = Point2{ .x = 1, .y = 2 };
- try expect(pt.addPointCoords() == 3);
-}
-
-const Point2 = struct {
- x: i32,
- y: i32,
-
- fn addPointCoords(self: Point2) i32 {
- return self.x + self.y;
- }
-};
-
-test "pass by non-copying value as method, which is generic" {
- var pt = Point3{ .x = 1, .y = 2 };
- try expect(pt.addPointCoords(i32) == 3);
-}
-
-const Point3 = struct {
- x: i32,
- y: i32,
-
- fn addPointCoords(self: Point3, comptime T: type) i32 {
- _ = T;
- return self.x + self.y;
- }
-};
-
-test "pass by non-copying value as method, at comptime" {
- comptime {
- var pt = Point2{ .x = 1, .y = 2 };
- try expect(pt.addPointCoords() == 3);
- }
-}
-
fn outer(y: u32) fn (u32) u32 {
const Y = @TypeOf(y);
const st = struct {
@@ -194,43 +72,6 @@ test "return inner function which references comptime variable of outer function
try expect(func(3) == 7);
}
-test "extern struct with stdcallcc fn pointer" {
- const S = extern struct {
- ptr: fn () callconv(if (builtin.target.cpu.arch == .i386) .Stdcall else .C) i32,
-
- fn foo() callconv(if (builtin.target.cpu.arch == .i386) .Stdcall else .C) i32 {
- return 1234;
- }
- };
-
- var s: S = undefined;
- s.ptr = S.foo;
- try expect(s.ptr() == 1234);
-}
-
-test "implicit cast fn call result to optional in field result" {
- const S = struct {
- fn entry() !void {
- var x = Foo{
- .field = optionalPtr(),
- };
- try expect(x.field.?.* == 999);
- }
-
- const glob: i32 = 999;
-
- fn optionalPtr() *const i32 {
- return &glob;
- }
-
- const Foo = struct {
- field: ?*const i32,
- };
- };
- try S.entry();
- comptime try S.entry();
-}
-
test "discard the result of a function that returns a struct" {
const S = struct {
fn entry() void {
@@ -249,45 +90,3 @@ test "discard the result of a function that returns a struct" {
S.entry();
comptime S.entry();
}
-
-test "function call with anon list literal" {
- const S = struct {
- fn doTheTest() !void {
- try consumeVec(.{ 9, 8, 7 });
- }
-
- fn consumeVec(vec: [3]f32) !void {
- try expect(vec[0] == 9);
- try expect(vec[1] == 8);
- try expect(vec[2] == 7);
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "ability to give comptime types and non comptime types to same parameter" {
- const S = struct {
- fn doTheTest() !void {
- var x: i32 = 1;
- try expect(foo(x) == 10);
- try expect(foo(i32) == 20);
- }
-
- fn foo(arg: anytype) i32 {
- if (@typeInfo(@TypeOf(arg)) == .Type and arg == i32) return 20;
- return 9 + arg;
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
-test "function with inferred error set but returning no error" {
- const S = struct {
- fn foo() !void {}
- };
-
- const return_ty = @typeInfo(@TypeOf(S.foo)).Fn.return_type.?;
- try expectEqual(0, @typeInfo(@typeInfo(return_ty).ErrorUnion.error_set).ErrorSet.?.len);
-}
diff --git a/test/behavior/fn_stage1.zig b/test/behavior/fn_stage1.zig
new file mode 100644
index 0000000000..9368b39a46
--- /dev/null
+++ b/test/behavior/fn_stage1.zig
@@ -0,0 +1,222 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const testing = std.testing;
+const expect = testing.expect;
+const expectEqual = testing.expectEqual;
+
+test "void parameters" {
+ try voidFun(1, void{}, 2, {});
+}
+fn voidFun(a: i32, b: void, c: i32, d: void) !void {
+ _ = d;
+ const v = b;
+ const vv: void = if (a == 1) v else {};
+ try expect(a + c == 3);
+ return vv;
+}
+
+test "call function with empty string" {
+ acceptsString("");
+}
+
+fn acceptsString(foo: []u8) void {
+ _ = foo;
+}
+
+test "implicit cast function unreachable return" {
+ wantsFnWithVoid(fnWithUnreachable);
+}
+
+fn wantsFnWithVoid(f: fn () void) void {
+ _ = f;
+}
+
+fn fnWithUnreachable() noreturn {
+ unreachable;
+}
+
+test "function pointers" {
+ const fns = [_]@TypeOf(fn1){
+ fn1,
+ fn2,
+ fn3,
+ fn4,
+ };
+ for (fns) |f, i| {
+ try expect(f() == @intCast(u32, i) + 5);
+ }
+}
+fn fn1() u32 {
+ return 5;
+}
+fn fn2() u32 {
+ return 6;
+}
+fn fn3() u32 {
+ return 7;
+}
+fn fn4() u32 {
+ return 8;
+}
+
+test "number literal as an argument" {
+ try numberLiteralArg(3);
+ comptime try numberLiteralArg(3);
+}
+
+fn numberLiteralArg(a: anytype) !void {
+ try expect(a == 3);
+}
+
+test "pass by non-copying value" {
+ try expect(addPointCoords(Point{ .x = 1, .y = 2 }) == 3);
+}
+
+const Point = struct {
+ x: i32,
+ y: i32,
+};
+
+fn addPointCoords(pt: Point) i32 {
+ return pt.x + pt.y;
+}
+
+test "pass by non-copying value through var arg" {
+ try expect((try addPointCoordsVar(Point{ .x = 1, .y = 2 })) == 3);
+}
+
+fn addPointCoordsVar(pt: anytype) !i32 {
+ comptime try expect(@TypeOf(pt) == Point);
+ return pt.x + pt.y;
+}
+
+test "pass by non-copying value as method" {
+ var pt = Point2{ .x = 1, .y = 2 };
+ try expect(pt.addPointCoords() == 3);
+}
+
+const Point2 = struct {
+ x: i32,
+ y: i32,
+
+ fn addPointCoords(self: Point2) i32 {
+ return self.x + self.y;
+ }
+};
+
+test "pass by non-copying value as method, which is generic" {
+ var pt = Point3{ .x = 1, .y = 2 };
+ try expect(pt.addPointCoords(i32) == 3);
+}
+
+const Point3 = struct {
+ x: i32,
+ y: i32,
+
+ fn addPointCoords(self: Point3, comptime T: type) i32 {
+ _ = T;
+ return self.x + self.y;
+ }
+};
+
+test "pass by non-copying value as method, at comptime" {
+ comptime {
+ var pt = Point2{ .x = 1, .y = 2 };
+ try expect(pt.addPointCoords() == 3);
+ }
+}
+
+test "extern struct with stdcallcc fn pointer" {
+ const S = extern struct {
+ ptr: fn () callconv(if (builtin.target.cpu.arch == .i386) .Stdcall else .C) i32,
+
+ fn foo() callconv(if (builtin.target.cpu.arch == .i386) .Stdcall else .C) i32 {
+ return 1234;
+ }
+ };
+
+ var s: S = undefined;
+ s.ptr = S.foo;
+ try expect(s.ptr() == 1234);
+}
+
+test "implicit cast fn call result to optional in field result" {
+ const S = struct {
+ fn entry() !void {
+ var x = Foo{
+ .field = optionalPtr(),
+ };
+ try expect(x.field.?.* == 999);
+ }
+
+ const glob: i32 = 999;
+
+ fn optionalPtr() *const i32 {
+ return &glob;
+ }
+
+ const Foo = struct {
+ field: ?*const i32,
+ };
+ };
+ try S.entry();
+ comptime try S.entry();
+}
+
+test "function call with anon list literal" {
+ const S = struct {
+ fn doTheTest() !void {
+ try consumeVec(.{ 9, 8, 7 });
+ }
+
+ fn consumeVec(vec: [3]f32) !void {
+ try expect(vec[0] == 9);
+ try expect(vec[1] == 8);
+ try expect(vec[2] == 7);
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "ability to give comptime types and non comptime types to same parameter" {
+ const S = struct {
+ fn doTheTest() !void {
+ var x: i32 = 1;
+ try expect(foo(x) == 10);
+ try expect(foo(i32) == 20);
+ }
+
+ fn foo(arg: anytype) i32 {
+ if (@typeInfo(@TypeOf(arg)) == .Type and arg == i32) return 20;
+ return 9 + arg;
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "function with inferred error set but returning no error" {
+ const S = struct {
+ fn foo() !void {}
+ };
+
+ const return_ty = @typeInfo(@TypeOf(S.foo)).Fn.return_type.?;
+ try expectEqual(0, @typeInfo(@typeInfo(return_ty).ErrorUnion.error_set).ErrorSet.?.len);
+}
+
+const nComplexCallconv = 100;
+fn fComplexCallconvRet(x: u32) callconv(blk: {
+ const s: struct { n: u32 } = .{ .n = nComplexCallconv };
+ break :blk switch (s.n) {
+ 0 => .C,
+ 1 => .Inline,
+ else => .Unspecified,
+ };
+}) struct { x: u32 } {
+ return .{ .x = x * x };
+}
+
+test "function with complex callconv and return type expressions" {
+ try expect(fComplexCallconvRet(3).x == 9);
+}
diff --git a/test/behavior/for.zig b/test/behavior/for.zig
index 0361451138..47babe7125 100644
--- a/test/behavior/for.zig
+++ b/test/behavior/for.zig
@@ -2,3 +2,61 @@ const std = @import("std");
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const mem = std.mem;
+
+test "continue in for loop" {
+ const array = [_]i32{ 1, 2, 3, 4, 5 };
+ var sum: i32 = 0;
+ for (array) |x| {
+ sum += x;
+ if (x < 3) {
+ continue;
+ }
+ break;
+ }
+ if (sum != 6) unreachable;
+}
+
+test "break from outer for loop" {
+ try testBreakOuter();
+ comptime try testBreakOuter();
+}
+
+fn testBreakOuter() !void {
+ var array = "aoeu";
+ var count: usize = 0;
+ outer: for (array) |_| {
+ for (array) |_| {
+ count += 1;
+ break :outer;
+ }
+ }
+ try expect(count == 1);
+}
+
+test "continue outer for loop" {
+ try testContinueOuter();
+ comptime try testContinueOuter();
+}
+
+fn testContinueOuter() !void {
+ var array = "aoeu";
+ var counter: usize = 0;
+ outer: for (array) |_| {
+ for (array) |_| {
+ counter += 1;
+ continue :outer;
+ }
+ }
+ try expect(counter == array.len);
+}
+
+test "ignore lval with underscore (for loop)" {
+ for ([_]void{}) |_, i| {
+ _ = i;
+ for ([_]void{}) |_, j| {
+ _ = j;
+ break;
+ }
+ break;
+ }
+}
diff --git a/test/behavior/for_stage1.zig b/test/behavior/for_stage1.zig
index 13b50892f9..898be21d2a 100644
--- a/test/behavior/for_stage1.zig
+++ b/test/behavior/for_stage1.zig
@@ -3,19 +3,6 @@ const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const mem = std.mem;
-test "continue in for loop" {
- const array = [_]i32{ 1, 2, 3, 4, 5 };
- var sum: i32 = 0;
- for (array) |x| {
- sum += x;
- if (x < 3) {
- continue;
- }
- break;
- }
- if (sum != 6) unreachable;
-}
-
test "for loop with pointer elem var" {
const source = "abcdefg";
var target: [source.len]u8 = undefined;
@@ -78,40 +65,6 @@ test "basic for loop" {
try expect(mem.eql(u8, buffer[0..buf_index], &expected_result));
}
-test "break from outer for loop" {
- try testBreakOuter();
- comptime try testBreakOuter();
-}
-
-fn testBreakOuter() !void {
- var array = "aoeu";
- var count: usize = 0;
- outer: for (array) |_| {
- for (array) |_| {
- count += 1;
- break :outer;
- }
- }
- try expect(count == 1);
-}
-
-test "continue outer for loop" {
- try testContinueOuter();
- comptime try testContinueOuter();
-}
-
-fn testContinueOuter() !void {
- var array = "aoeu";
- var counter: usize = 0;
- outer: for (array) |_| {
- for (array) |_| {
- counter += 1;
- continue :outer;
- }
- }
- try expect(counter == array.len);
-}
-
test "2 break statements and an else" {
const S = struct {
fn entry(t: bool, f: bool) !void {
@@ -172,14 +125,3 @@ test "for on slice with allowzero ptr" {
try S.doTheTest(&[_]u8{ 1, 2, 3, 4 });
comptime try S.doTheTest(&[_]u8{ 1, 2, 3, 4 });
}
-
-test "ignore lval with underscore (for loop)" {
- for ([_]void{}) |_, i| {
- _ = i;
- for ([_]void{}) |_, j| {
- _ = j;
- break;
- }
- break;
- }
-}
diff --git a/test/behavior/math.zig b/test/behavior/math.zig
index 56fbdc124d..1073183a3c 100644
--- a/test/behavior/math.zig
+++ b/test/behavior/math.zig
@@ -235,3 +235,212 @@ test "comptime_int param and return" {
fn comptimeAdd(comptime a: comptime_int, comptime b: comptime_int) comptime_int {
return a + b;
}
+
+test "binary not" {
+ try expect(comptime x: {
+ break :x ~@as(u16, 0b1010101010101010) == 0b0101010101010101;
+ });
+ try expect(comptime x: {
+ break :x ~@as(u64, 2147483647) == 18446744071562067968;
+ });
+ try testBinaryNot(0b1010101010101010);
+}
+
+fn testBinaryNot(x: u16) !void {
+ try expect(~x == 0b0101010101010101);
+}
+
+test "division" {
+ try testDivision();
+ comptime try testDivision();
+}
+fn testDivision() !void {
+ try expect(div(u32, 13, 3) == 4);
+ try expect(div(f16, 1.0, 2.0) == 0.5);
+ try expect(div(f32, 1.0, 2.0) == 0.5);
+
+ try expect(divExact(u32, 55, 11) == 5);
+ try expect(divExact(i32, -55, 11) == -5);
+ try expect(divExact(f16, 55.0, 11.0) == 5.0);
+ try expect(divExact(f16, -55.0, 11.0) == -5.0);
+ try expect(divExact(f32, 55.0, 11.0) == 5.0);
+ try expect(divExact(f32, -55.0, 11.0) == -5.0);
+
+ try expect(divFloor(i32, 5, 3) == 1);
+ try expect(divFloor(i32, -5, 3) == -2);
+ try expect(divFloor(f16, 5.0, 3.0) == 1.0);
+ try expect(divFloor(f16, -5.0, 3.0) == -2.0);
+ try expect(divFloor(f32, 5.0, 3.0) == 1.0);
+ try expect(divFloor(f32, -5.0, 3.0) == -2.0);
+ try expect(divFloor(i32, -0x80000000, -2) == 0x40000000);
+ try expect(divFloor(i32, 0, -0x80000000) == 0);
+ try expect(divFloor(i32, -0x40000001, 0x40000000) == -2);
+ try expect(divFloor(i32, -0x80000000, 1) == -0x80000000);
+ try expect(divFloor(i32, 10, 12) == 0);
+ try expect(divFloor(i32, -14, 12) == -2);
+ try expect(divFloor(i32, -2, 12) == -1);
+
+ try expect(divTrunc(i32, 5, 3) == 1);
+ try expect(divTrunc(i32, -5, 3) == -1);
+ try expect(divTrunc(i32, 9, -10) == 0);
+ try expect(divTrunc(i32, -9, 10) == 0);
+ try expect(divTrunc(f16, 5.0, 3.0) == 1.0);
+ try expect(divTrunc(f16, -5.0, 3.0) == -1.0);
+ try expect(divTrunc(f16, 9.0, -10.0) == 0.0);
+ try expect(divTrunc(f16, -9.0, 10.0) == 0.0);
+ try expect(divTrunc(f32, 5.0, 3.0) == 1.0);
+ try expect(divTrunc(f32, -5.0, 3.0) == -1.0);
+ try expect(divTrunc(f32, 9.0, -10.0) == 0.0);
+ try expect(divTrunc(f32, -9.0, 10.0) == 0.0);
+ try expect(divTrunc(f64, 5.0, 3.0) == 1.0);
+ try expect(divTrunc(f64, -5.0, 3.0) == -1.0);
+ try expect(divTrunc(f64, 9.0, -10.0) == 0.0);
+ try expect(divTrunc(f64, -9.0, 10.0) == 0.0);
+ try expect(divTrunc(i32, 10, 12) == 0);
+ try expect(divTrunc(i32, -14, 12) == -1);
+ try expect(divTrunc(i32, -2, 12) == 0);
+
+ try expect(mod(i32, 10, 12) == 10);
+ try expect(mod(i32, -14, 12) == 10);
+ try expect(mod(i32, -2, 12) == 10);
+
+ comptime {
+ try expect(
+ 1194735857077236777412821811143690633098347576 % 508740759824825164163191790951174292733114988 == 177254337427586449086438229241342047632117600,
+ );
+ try expect(
+ @rem(-1194735857077236777412821811143690633098347576, 508740759824825164163191790951174292733114988) == -177254337427586449086438229241342047632117600,
+ );
+ try expect(
+ 1194735857077236777412821811143690633098347576 / 508740759824825164163191790951174292733114988 == 2,
+ );
+ try expect(
+ @divTrunc(-1194735857077236777412821811143690633098347576, 508740759824825164163191790951174292733114988) == -2,
+ );
+ try expect(
+ @divTrunc(1194735857077236777412821811143690633098347576, -508740759824825164163191790951174292733114988) == -2,
+ );
+ try expect(
+ @divTrunc(-1194735857077236777412821811143690633098347576, -508740759824825164163191790951174292733114988) == 2,
+ );
+ try expect(
+ 4126227191251978491697987544882340798050766755606969681711 % 10 == 1,
+ );
+ }
+}
+fn div(comptime T: type, a: T, b: T) T {
+ return a / b;
+}
+fn divExact(comptime T: type, a: T, b: T) T {
+ return @divExact(a, b);
+}
+fn divFloor(comptime T: type, a: T, b: T) T {
+ return @divFloor(a, b);
+}
+fn divTrunc(comptime T: type, a: T, b: T) T {
+ return @divTrunc(a, b);
+}
+fn mod(comptime T: type, a: T, b: T) T {
+ return @mod(a, b);
+}
+
+test "unsigned wrapping" {
+ try testUnsignedWrappingEval(maxInt(u32));
+ comptime try testUnsignedWrappingEval(maxInt(u32));
+}
+fn testUnsignedWrappingEval(x: u32) !void {
+ const zero = x +% 1;
+ try expect(zero == 0);
+ const orig = zero -% 1;
+ try expect(orig == maxInt(u32));
+}
+
+test "signed wrapping" {
+ try testSignedWrappingEval(maxInt(i32));
+ comptime try testSignedWrappingEval(maxInt(i32));
+}
+fn testSignedWrappingEval(x: i32) !void {
+ const min_val = x +% 1;
+ try expect(min_val == minInt(i32));
+ const max_val = min_val -% 1;
+ try expect(max_val == maxInt(i32));
+}
+
+test "signed negation wrapping" {
+ try testSignedNegationWrappingEval(minInt(i16));
+ comptime try testSignedNegationWrappingEval(minInt(i16));
+}
+fn testSignedNegationWrappingEval(x: i16) !void {
+ try expect(x == -32768);
+ const neg = -%x;
+ try expect(neg == -32768);
+}
+
+test "unsigned negation wrapping" {
+ try testUnsignedNegationWrappingEval(1);
+ comptime try testUnsignedNegationWrappingEval(1);
+}
+fn testUnsignedNegationWrappingEval(x: u16) !void {
+ try expect(x == 1);
+ const neg = -%x;
+ try expect(neg == maxInt(u16));
+}
+
+test "unsigned 64-bit division" {
+ try test_u64_div();
+ comptime try test_u64_div();
+}
+fn test_u64_div() !void {
+ const result = divWithResult(1152921504606846976, 34359738365);
+ try expect(result.quotient == 33554432);
+ try expect(result.remainder == 100663296);
+}
+fn divWithResult(a: u64, b: u64) DivResult {
+ return DivResult{
+ .quotient = a / b,
+ .remainder = a % b,
+ };
+}
+const DivResult = struct {
+ quotient: u64,
+ remainder: u64,
+};
+
+test "truncating shift right" {
+ try testShrTrunc(maxInt(u16));
+ comptime try testShrTrunc(maxInt(u16));
+}
+fn testShrTrunc(x: u16) !void {
+ const shifted = x >> 1;
+ try expect(shifted == 32767);
+}
+
+test "f128" {
+ try test_f128();
+ comptime try test_f128();
+}
+
+fn make_f128(x: f128) f128 {
+ return x;
+}
+
+fn test_f128() !void {
+ try expect(@sizeOf(f128) == 16);
+ try expect(make_f128(1.0) == 1.0);
+ try expect(make_f128(1.0) != 1.1);
+ try expect(make_f128(1.0) > 0.9);
+ try expect(make_f128(1.0) >= 0.9);
+ try expect(make_f128(1.0) >= 1.0);
+ try should_not_be_zero(1.0);
+}
+
+fn should_not_be_zero(x: f128) !void {
+ try expect(x != 0.0);
+}
+
+test "128-bit multiplication" {
+ var a: i128 = 3;
+ var b: i128 = 2;
+ var c = a * b;
+ try expect(c == 6);
+}
diff --git a/test/behavior/math_stage1.zig b/test/behavior/math_stage1.zig
index f0c160ebc4..a316c0f3dd 100644
--- a/test/behavior/math_stage1.zig
+++ b/test/behavior/math_stage1.zig
@@ -6,92 +6,6 @@ const maxInt = std.math.maxInt;
const minInt = std.math.minInt;
const mem = std.mem;
-test "division" {
- try testDivision();
- comptime try testDivision();
-}
-fn testDivision() !void {
- try expect(div(u32, 13, 3) == 4);
- try expect(div(f16, 1.0, 2.0) == 0.5);
- try expect(div(f32, 1.0, 2.0) == 0.5);
-
- try expect(divExact(u32, 55, 11) == 5);
- try expect(divExact(i32, -55, 11) == -5);
- try expect(divExact(f16, 55.0, 11.0) == 5.0);
- try expect(divExact(f16, -55.0, 11.0) == -5.0);
- try expect(divExact(f32, 55.0, 11.0) == 5.0);
- try expect(divExact(f32, -55.0, 11.0) == -5.0);
-
- try expect(divFloor(i32, 5, 3) == 1);
- try expect(divFloor(i32, -5, 3) == -2);
- try expect(divFloor(f16, 5.0, 3.0) == 1.0);
- try expect(divFloor(f16, -5.0, 3.0) == -2.0);
- try expect(divFloor(f32, 5.0, 3.0) == 1.0);
- try expect(divFloor(f32, -5.0, 3.0) == -2.0);
- try expect(divFloor(i32, -0x80000000, -2) == 0x40000000);
- try expect(divFloor(i32, 0, -0x80000000) == 0);
- try expect(divFloor(i32, -0x40000001, 0x40000000) == -2);
- try expect(divFloor(i32, -0x80000000, 1) == -0x80000000);
- try expect(divFloor(i32, 10, 12) == 0);
- try expect(divFloor(i32, -14, 12) == -2);
- try expect(divFloor(i32, -2, 12) == -1);
-
- try expect(divTrunc(i32, 5, 3) == 1);
- try expect(divTrunc(i32, -5, 3) == -1);
- try expect(divTrunc(f16, 5.0, 3.0) == 1.0);
- try expect(divTrunc(f16, -5.0, 3.0) == -1.0);
- try expect(divTrunc(f32, 5.0, 3.0) == 1.0);
- try expect(divTrunc(f32, -5.0, 3.0) == -1.0);
- try expect(divTrunc(f64, 5.0, 3.0) == 1.0);
- try expect(divTrunc(f64, -5.0, 3.0) == -1.0);
- try expect(divTrunc(i32, 10, 12) == 0);
- try expect(divTrunc(i32, -14, 12) == -1);
- try expect(divTrunc(i32, -2, 12) == 0);
-
- try expect(mod(i32, 10, 12) == 10);
- try expect(mod(i32, -14, 12) == 10);
- try expect(mod(i32, -2, 12) == 10);
-
- comptime {
- try expect(
- 1194735857077236777412821811143690633098347576 % 508740759824825164163191790951174292733114988 == 177254337427586449086438229241342047632117600,
- );
- try expect(
- @rem(-1194735857077236777412821811143690633098347576, 508740759824825164163191790951174292733114988) == -177254337427586449086438229241342047632117600,
- );
- try expect(
- 1194735857077236777412821811143690633098347576 / 508740759824825164163191790951174292733114988 == 2,
- );
- try expect(
- @divTrunc(-1194735857077236777412821811143690633098347576, 508740759824825164163191790951174292733114988) == -2,
- );
- try expect(
- @divTrunc(1194735857077236777412821811143690633098347576, -508740759824825164163191790951174292733114988) == -2,
- );
- try expect(
- @divTrunc(-1194735857077236777412821811143690633098347576, -508740759824825164163191790951174292733114988) == 2,
- );
- try expect(
- 4126227191251978491697987544882340798050766755606969681711 % 10 == 1,
- );
- }
-}
-fn div(comptime T: type, a: T, b: T) T {
- return a / b;
-}
-fn divExact(comptime T: type, a: T, b: T) T {
- return @divExact(a, b);
-}
-fn divFloor(comptime T: type, a: T, b: T) T {
- return @divFloor(a, b);
-}
-fn divTrunc(comptime T: type, a: T, b: T) T {
- return @divTrunc(a, b);
-}
-fn mod(comptime T: type, a: T, b: T) T {
- return @mod(a, b);
-}
-
test "@addWithOverflow" {
var result: u8 = undefined;
try expect(@addWithOverflow(u8, 250, 100, &result));
@@ -157,82 +71,6 @@ fn testCtzVectors() !void {
try expectEqual(@ctz(u16, @splat(64, @as(u16, 0b00000000))), @splat(64, @as(u5, 16)));
}
-test "unsigned wrapping" {
- try testUnsignedWrappingEval(maxInt(u32));
- comptime try testUnsignedWrappingEval(maxInt(u32));
-}
-fn testUnsignedWrappingEval(x: u32) !void {
- const zero = x +% 1;
- try expect(zero == 0);
- const orig = zero -% 1;
- try expect(orig == maxInt(u32));
-}
-
-test "signed wrapping" {
- try testSignedWrappingEval(maxInt(i32));
- comptime try testSignedWrappingEval(maxInt(i32));
-}
-fn testSignedWrappingEval(x: i32) !void {
- const min_val = x +% 1;
- try expect(min_val == minInt(i32));
- const max_val = min_val -% 1;
- try expect(max_val == maxInt(i32));
-}
-
-test "signed negation wrapping" {
- try testSignedNegationWrappingEval(minInt(i16));
- comptime try testSignedNegationWrappingEval(minInt(i16));
-}
-fn testSignedNegationWrappingEval(x: i16) !void {
- try expect(x == -32768);
- const neg = -%x;
- try expect(neg == -32768);
-}
-
-test "unsigned negation wrapping" {
- try testUnsignedNegationWrappingEval(1);
- comptime try testUnsignedNegationWrappingEval(1);
-}
-fn testUnsignedNegationWrappingEval(x: u16) !void {
- try expect(x == 1);
- const neg = -%x;
- try expect(neg == maxInt(u16));
-}
-
-test "unsigned 64-bit division" {
- try test_u64_div();
- comptime try test_u64_div();
-}
-fn test_u64_div() !void {
- const result = divWithResult(1152921504606846976, 34359738365);
- try expect(result.quotient == 33554432);
- try expect(result.remainder == 100663296);
-}
-fn divWithResult(a: u64, b: u64) DivResult {
- return DivResult{
- .quotient = a / b,
- .remainder = a % b,
- };
-}
-const DivResult = struct {
- quotient: u64,
- remainder: u64,
-};
-
-test "binary not" {
- try expect(comptime x: {
- break :x ~@as(u16, 0b1010101010101010) == 0b0101010101010101;
- });
- try expect(comptime x: {
- break :x ~@as(u64, 2147483647) == 18446744071562067968;
- });
- try testBinaryNot(0b1010101010101010);
-}
-
-fn testBinaryNot(x: u16) !void {
- try expect(~x == 0b0101010101010101);
-}
-
test "small int addition" {
var x: u2 = 0;
try expect(x == 0);
@@ -360,15 +198,6 @@ fn testShlTrunc(x: u16) !void {
try expect(shifted == 65534);
}
-test "truncating shift right" {
- try testShrTrunc(maxInt(u16));
- comptime try testShrTrunc(maxInt(u16));
-}
-fn testShrTrunc(x: u16) !void {
- const shifted = x >> 1;
- try expect(shifted == 32767);
-}
-
test "exact shift left" {
try testShlExact(0b00110101);
comptime try testShlExact(0b00110101);
@@ -406,29 +235,6 @@ test "shift left/right on u0 operand" {
comptime try S.doTheTest();
}
-test "f128" {
- try test_f128();
- comptime try test_f128();
-}
-
-fn make_f128(x: f128) f128 {
- return x;
-}
-
-fn test_f128() !void {
- try expect(@sizeOf(f128) == 16);
- try expect(make_f128(1.0) == 1.0);
- try expect(make_f128(1.0) != 1.1);
- try expect(make_f128(1.0) > 0.9);
- try expect(make_f128(1.0) >= 0.9);
- try expect(make_f128(1.0) >= 1.0);
- try should_not_be_zero(1.0);
-}
-
-fn should_not_be_zero(x: f128) !void {
- try expect(x != 0.0);
-}
-
test "comptime float rem int" {
comptime {
var x = @as(f32, 1) % 2;
@@ -628,13 +434,6 @@ fn testNanEqNan(comptime F: type) !void {
try expect(!(nan1 <= nan2));
}
-test "128-bit multiplication" {
- var a: i128 = 3;
- var b: i128 = 2;
- var c = a * b;
- try expect(c == 6);
-}
-
test "vector comparison" {
const S = struct {
fn doTheTest() !void {
diff --git a/test/behavior/misc.zig b/test/behavior/misc.zig
index 83414f49b2..7a248ed320 100644
--- a/test/behavior/misc.zig
+++ b/test/behavior/misc.zig
@@ -100,18 +100,6 @@ test "string concatenation" {
try expect(b[len] == 0);
}
-test "global variable initialized to global variable array element" {
- try expect(global_ptr == &gdt[0]);
-}
-const GDTEntry = struct {
- field: i32,
-};
-var gdt = [_]GDTEntry{
- GDTEntry{ .field = 1 },
- GDTEntry{ .field = 2 },
-};
-var global_ptr = &gdt[0];
-
// can't really run this test but we can make sure it has no compile error
// and generates code
const vram = @intToPtr([*]volatile u8, 0x20000000)[0..0x8000];
diff --git a/test/behavior/null.zig b/test/behavior/null.zig
index 3bd652772b..98018c79bc 100644
--- a/test/behavior/null.zig
+++ b/test/behavior/null.zig
@@ -1,4 +1,5 @@
-const expect = @import("std").testing.expect;
+const std = @import("std");
+const expect = std.testing.expect;
test "optional type" {
const x: ?bool = true;
@@ -58,31 +59,6 @@ fn foo(x: ?i32) ?bool {
return value > 1234;
}
-test "if var maybe pointer" {
- try expect(shouldBeAPlus1(Particle{
- .a = 14,
- .b = 1,
- .c = 1,
- .d = 1,
- }) == 15);
-}
-fn shouldBeAPlus1(p: Particle) u64 {
- var maybe_particle: ?Particle = p;
- if (maybe_particle) |*particle| {
- particle.a += 1;
- }
- if (maybe_particle) |particle| {
- return particle.a;
- }
- return 0;
-}
-const Particle = struct {
- a: u64,
- b: u64,
- c: u64,
- d: u64,
-};
-
test "null literal outside function" {
const is_null = here_is_a_null_literal.context == null;
try expect(is_null);
@@ -146,17 +122,6 @@ test "null with default unwrap" {
try expect(x == 1);
}
-test "optional types" {
- comptime {
- const opt_type_struct = StructWithOptionalType{ .t = u8 };
- try expect(opt_type_struct.t != null and opt_type_struct.t.? == u8);
- }
-}
-
-const StructWithOptionalType = struct {
- t: ?type,
-};
-
test "optional pointer to 0 bit type null value at runtime" {
const EmptyStruct = struct {};
var x: ?*EmptyStruct = null;
diff --git a/test/behavior/null_stage1.zig b/test/behavior/null_stage1.zig
new file mode 100644
index 0000000000..2b8feea242
--- /dev/null
+++ b/test/behavior/null_stage1.zig
@@ -0,0 +1,37 @@
+const expect = @import("std").testing.expect;
+
+test "if var maybe pointer" {
+ try expect(shouldBeAPlus1(Particle{
+ .a = 14,
+ .b = 1,
+ .c = 1,
+ .d = 1,
+ }) == 15);
+}
+fn shouldBeAPlus1(p: Particle) u64 {
+ var maybe_particle: ?Particle = p;
+ if (maybe_particle) |*particle| {
+ particle.a += 1;
+ }
+ if (maybe_particle) |particle| {
+ return particle.a;
+ }
+ return 0;
+}
+const Particle = struct {
+ a: u64,
+ b: u64,
+ c: u64,
+ d: u64,
+};
+
+test "optional types" {
+ comptime {
+ const opt_type_struct = StructWithOptionalType{ .t = u8 };
+ try expect(opt_type_struct.t != null and opt_type_struct.t.? == u8);
+ }
+}
+
+const StructWithOptionalType = struct {
+ t: ?type,
+};
diff --git a/test/behavior/pointers.zig b/test/behavior/pointers.zig
index 4fcd78b1d6..69f9e2af2a 100644
--- a/test/behavior/pointers.zig
+++ b/test/behavior/pointers.zig
@@ -44,3 +44,52 @@ test "double pointer parsing" {
fn PtrOf(comptime T: type) type {
return *T;
}
+
+test "implicit cast single item pointer to C pointer and back" {
+ var y: u8 = 11;
+ var x: [*c]u8 = &y;
+ var z: *u8 = x;
+ z.* += 1;
+ try expect(y == 12);
+}
+
+test "initialize const optional C pointer to null" {
+ const a: ?[*c]i32 = null;
+ try expect(a == null);
+ comptime try expect(a == null);
+}
+
+test "assigning integer to C pointer" {
+ var x: i32 = 0;
+ var ptr: [*c]u8 = 0;
+ var ptr2: [*c]u8 = x;
+ if (false) {
+ ptr;
+ ptr2;
+ }
+}
+
+test "C pointer comparison and arithmetic" {
+ const S = struct {
+ fn doTheTest() !void {
+ var ptr1: [*c]u32 = 0;
+ var ptr2 = ptr1 + 10;
+ try expect(ptr1 == 0);
+ try expect(ptr1 >= 0);
+ try expect(ptr1 <= 0);
+ // expect(ptr1 < 1);
+ // expect(ptr1 < one);
+ // expect(1 > ptr1);
+ // expect(one > ptr1);
+ try expect(ptr1 < ptr2);
+ try expect(ptr2 > ptr1);
+ try expect(ptr2 >= 40);
+ try expect(ptr2 == 40);
+ try expect(ptr2 <= 40);
+ ptr2 -= 10;
+ try expect(ptr1 == ptr2);
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
diff --git a/test/behavior/pointers_stage1.zig b/test/behavior/pointers_stage1.zig
index aea123a5c3..3600c3470a 100644
--- a/test/behavior/pointers_stage1.zig
+++ b/test/behavior/pointers_stage1.zig
@@ -19,49 +19,6 @@ fn testDerefPtrOneVal() !void {
try expect(@TypeOf(y.x) == void);
}
-test "assigning integer to C pointer" {
- var x: i32 = 0;
- var ptr: [*c]u8 = 0;
- var ptr2: [*c]u8 = x;
- if (false) {
- ptr;
- ptr2;
- }
-}
-
-test "implicit cast single item pointer to C pointer and back" {
- var y: u8 = 11;
- var x: [*c]u8 = &y;
- var z: *u8 = x;
- z.* += 1;
- try expect(y == 12);
-}
-
-test "C pointer comparison and arithmetic" {
- const S = struct {
- fn doTheTest() !void {
- var ptr1: [*c]u32 = 0;
- var ptr2 = ptr1 + 10;
- try expect(ptr1 == 0);
- try expect(ptr1 >= 0);
- try expect(ptr1 <= 0);
- // expect(ptr1 < 1);
- // expect(ptr1 < one);
- // expect(1 > ptr1);
- // expect(one > ptr1);
- try expect(ptr1 < ptr2);
- try expect(ptr2 > ptr1);
- try expect(ptr2 >= 40);
- try expect(ptr2 == 40);
- try expect(ptr2 <= 40);
- ptr2 -= 10;
- try expect(ptr1 == ptr2);
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
test "peer type resolution with C pointers" {
var ptr_one: *u8 = undefined;
var ptr_many: [*]u8 = undefined;
@@ -103,12 +60,6 @@ test "implicit cast error unions with non-optional to optional pointer" {
comptime try S.doTheTest();
}
-test "initialize const optional C pointer to null" {
- const a: ?[*c]i32 = null;
- try expect(a == null);
- comptime try expect(a == null);
-}
-
test "compare equality of optional and non-optional pointer" {
const a = @intToPtr(*const usize, 0x12345678);
const b = @intToPtr(?*usize, 0x12345678);
diff --git a/test/behavior/ptrcast.zig b/test/behavior/ptrcast.zig
index 666b547875..1a03964179 100644
--- a/test/behavior/ptrcast.zig
+++ b/test/behavior/ptrcast.zig
@@ -2,72 +2,3 @@ const std = @import("std");
const builtin = @import("builtin");
const expect = std.testing.expect;
const native_endian = builtin.target.cpu.arch.endian();
-
-test "reinterpret bytes as integer with nonzero offset" {
- try testReinterpretBytesAsInteger();
- comptime try testReinterpretBytesAsInteger();
-}
-
-fn testReinterpretBytesAsInteger() !void {
- const bytes = "\x12\x34\x56\x78\xab";
- const expected = switch (native_endian) {
- .Little => 0xab785634,
- .Big => 0x345678ab,
- };
- try expect(@ptrCast(*align(1) const u32, bytes[1..5]).* == expected);
-}
-
-test "reinterpret bytes of an array into an extern struct" {
- try testReinterpretBytesAsExternStruct();
- comptime try testReinterpretBytesAsExternStruct();
-}
-
-fn testReinterpretBytesAsExternStruct() !void {
- var bytes align(2) = [_]u8{ 1, 2, 3, 4, 5, 6 };
-
- const S = extern struct {
- a: u8,
- b: u16,
- c: u8,
- };
-
- var ptr = @ptrCast(*const S, &bytes);
- var val = ptr.c;
- try expect(val == 5);
-}
-
-test "reinterpret struct field at comptime" {
- const numNative = comptime Bytes.init(0x12345678);
- if (native_endian != .Little) {
- try expect(std.mem.eql(u8, &[_]u8{ 0x12, 0x34, 0x56, 0x78 }, &numNative.bytes));
- } else {
- try expect(std.mem.eql(u8, &[_]u8{ 0x78, 0x56, 0x34, 0x12 }, &numNative.bytes));
- }
-}
-
-const Bytes = struct {
- bytes: [4]u8,
-
- pub fn init(v: u32) Bytes {
- var res: Bytes = undefined;
- @ptrCast(*align(1) u32, &res.bytes).* = v;
-
- return res;
- }
-};
-
-test "comptime ptrcast keeps larger alignment" {
- comptime {
- const a: u32 = 1234;
- const p = @ptrCast([*]const u8, &a);
- try expect(@TypeOf(p) == [*]align(@alignOf(u32)) const u8);
- }
-}
-
-test "implicit optional pointer to optional c_void pointer" {
- var buf: [4]u8 = "aoeu".*;
- var x: ?[*]u8 = &buf;
- var y: ?*c_void = x;
- var z = @ptrCast(*[4]u8, y);
- try expect(std.mem.eql(u8, z, "aoeu"));
-}
diff --git a/test/behavior/ptrcast_stage1.zig b/test/behavior/ptrcast_stage1.zig
new file mode 100644
index 0000000000..666b547875
--- /dev/null
+++ b/test/behavior/ptrcast_stage1.zig
@@ -0,0 +1,73 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const expect = std.testing.expect;
+const native_endian = builtin.target.cpu.arch.endian();
+
+test "reinterpret bytes as integer with nonzero offset" {
+ try testReinterpretBytesAsInteger();
+ comptime try testReinterpretBytesAsInteger();
+}
+
+fn testReinterpretBytesAsInteger() !void {
+ const bytes = "\x12\x34\x56\x78\xab";
+ const expected = switch (native_endian) {
+ .Little => 0xab785634,
+ .Big => 0x345678ab,
+ };
+ try expect(@ptrCast(*align(1) const u32, bytes[1..5]).* == expected);
+}
+
+test "reinterpret bytes of an array into an extern struct" {
+ try testReinterpretBytesAsExternStruct();
+ comptime try testReinterpretBytesAsExternStruct();
+}
+
+fn testReinterpretBytesAsExternStruct() !void {
+ var bytes align(2) = [_]u8{ 1, 2, 3, 4, 5, 6 };
+
+ const S = extern struct {
+ a: u8,
+ b: u16,
+ c: u8,
+ };
+
+ var ptr = @ptrCast(*const S, &bytes);
+ var val = ptr.c;
+ try expect(val == 5);
+}
+
+test "reinterpret struct field at comptime" {
+ const numNative = comptime Bytes.init(0x12345678);
+ if (native_endian != .Little) {
+ try expect(std.mem.eql(u8, &[_]u8{ 0x12, 0x34, 0x56, 0x78 }, &numNative.bytes));
+ } else {
+ try expect(std.mem.eql(u8, &[_]u8{ 0x78, 0x56, 0x34, 0x12 }, &numNative.bytes));
+ }
+}
+
+const Bytes = struct {
+ bytes: [4]u8,
+
+ pub fn init(v: u32) Bytes {
+ var res: Bytes = undefined;
+ @ptrCast(*align(1) u32, &res.bytes).* = v;
+
+ return res;
+ }
+};
+
+test "comptime ptrcast keeps larger alignment" {
+ comptime {
+ const a: u32 = 1234;
+ const p = @ptrCast([*]const u8, &a);
+ try expect(@TypeOf(p) == [*]align(@alignOf(u32)) const u8);
+ }
+}
+
+test "implicit optional pointer to optional c_void pointer" {
+ var buf: [4]u8 = "aoeu".*;
+ var x: ?[*]u8 = &buf;
+ var y: ?*c_void = x;
+ var z = @ptrCast(*[4]u8, y);
+ try expect(std.mem.eql(u8, z, "aoeu"));
+}
diff --git a/test/behavior/saturating_arithmetic.zig b/test/behavior/saturating_arithmetic.zig
index 7749dc9559..c0f29892a1 100644
--- a/test/behavior/saturating_arithmetic.zig
+++ b/test/behavior/saturating_arithmetic.zig
@@ -62,7 +62,7 @@ test "saturating subtraction" {
test "saturating multiplication" {
// TODO: once #9660 has been solved, remove this line
- if (builtin.stage2_arch == .wasm32) return error.SkipZigTest;
+ if (builtin.cpu.arch == .wasm32) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -99,7 +99,7 @@ test "saturating shift-left" {
try testSatShl(i8, 127, 1, 127);
try testSatShl(i8, -128, 1, -128);
// TODO: remove this check once #9668 is completed
- if (builtin.stage2_arch != .wasm32) {
+ if (builtin.cpu.arch != .wasm32) {
// skip testing ints > 64 bits on wasm due to miscompilation / wasmtime ci error
try testSatShl(i128, maxInt(i128), 64, maxInt(i128));
try testSatShl(u128, maxInt(u128), 64, maxInt(u128));
diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig
index 5dc652d678..19c9e7e773 100644
--- a/test/behavior/slice.zig
+++ b/test/behavior/slice.zig
@@ -24,3 +24,88 @@ comptime {
var pos = S.indexOfScalar(type, list, c_ulong).?;
if (pos != 1) @compileError("bad pos");
}
+
+test "slicing" {
+ var array: [20]i32 = undefined;
+
+ array[5] = 1234;
+
+ var slice = array[5..10];
+
+ if (slice.len != 5) unreachable;
+
+ const ptr = &slice[0];
+ if (ptr.* != 1234) unreachable;
+
+ var slice_rest = array[10..];
+ if (slice_rest.len != 10) unreachable;
+}
+
+test "const slice" {
+ comptime {
+ const a = "1234567890";
+ try expect(a.len == 10);
+ const b = a[1..2];
+ try expect(b.len == 1);
+ try expect(b[0] == '2');
+ }
+}
+
+test "comptime slice of undefined pointer of length 0" {
+ const slice1 = @as([*]i32, undefined)[0..0];
+ try expect(slice1.len == 0);
+ const slice2 = @as([*]i32, undefined)[100..100];
+ try expect(slice2.len == 0);
+}
+
+test "implicitly cast array of size 0 to slice" {
+ var msg = [_]u8{};
+ try assertLenIsZero(&msg);
+}
+
+fn assertLenIsZero(msg: []const u8) !void {
+ try expect(msg.len == 0);
+}
+
+test "access len index of sentinel-terminated slice" {
+ const S = struct {
+ fn doTheTest() !void {
+ var slice: [:0]const u8 = "hello";
+
+ try expect(slice.len == 5);
+ try expect(slice[5] == 0);
+ }
+ };
+ try S.doTheTest();
+ comptime try S.doTheTest();
+}
+
+test "comptime slice of slice preserves comptime var" {
+ comptime {
+ var buff: [10]u8 = undefined;
+ buff[0..][0..][0] = 1;
+ try expect(buff[0..][0..][0] == 1);
+ }
+}
+
+test "slice of type" {
+ comptime {
+ var types_array = [_]type{ i32, f64, type };
+ for (types_array) |T, i| {
+ switch (i) {
+ 0 => try expect(T == i32),
+ 1 => try expect(T == f64),
+ 2 => try expect(T == type),
+ else => unreachable,
+ }
+ }
+ for (types_array[0..]) |T, i| {
+ switch (i) {
+ 0 => try expect(T == i32),
+ 1 => try expect(T == f64),
+ 2 => try expect(T == type),
+ else => unreachable,
+ }
+ }
+ }
+}
diff --git a/test/behavior/slice_stage1.zig b/test/behavior/slice_stage1.zig
index ab45d14543..cc472a4ff6 100644
--- a/test/behavior/slice_stage1.zig
+++ b/test/behavior/slice_stage1.zig
@@ -4,39 +4,6 @@ const expectEqualSlices = std.testing.expectEqualSlices;
const expectEqual = std.testing.expectEqual;
const mem = std.mem;
-test "slicing" {
- var array: [20]i32 = undefined;
-
- array[5] = 1234;
-
- var slice = array[5..10];
-
- if (slice.len != 5) unreachable;
-
- const ptr = &slice[0];
- if (ptr.* != 1234) unreachable;
-
- var slice_rest = array[10..];
- if (slice_rest.len != 10) unreachable;
-}
-
-test "const slice" {
- comptime {
- const a = "1234567890";
- try expect(a.len == 10);
- const b = a[1..2];
- try expect(b.len == 1);
- try expect(b[0] == '2');
- }
-}
-
-test "comptime slice of undefined pointer of length 0" {
- const slice1 = @as([*]i32, undefined)[0..0];
- try expect(slice1.len == 0);
- const slice2 = @as([*]i32, undefined)[100..100];
- try expect(slice2.len == 0);
-}
-
test "slicing zero length array" {
const s1 = ""[0..];
const s2 = ([_]u32{})[0..];
@@ -97,15 +64,6 @@ fn sliceFromLenToLen(a_slice: []u8, start: usize, end: usize) []u8 {
return a_slice[start..end];
}
-test "implicitly cast array of size 0 to slice" {
- var msg = [_]u8{};
- try assertLenIsZero(&msg);
-}
-
-fn assertLenIsZero(msg: []const u8) !void {
- try expect(msg.len == 0);
-}
-
test "C pointer" {
var buf: [*c]const u8 = "kjdhfkjdhfdkjhfkfjhdfkjdhfkdjhfdkjhf";
var len: u32 = 10;
@@ -150,19 +108,6 @@ test "slice type with custom alignment" {
try expect(array[1].anything == 42);
}
-test "access len index of sentinel-terminated slice" {
- const S = struct {
- fn doTheTest() !void {
- var slice: [:0]const u8 = "hello";
-
- try expect(slice.len == 5);
- try expect(slice[5] == 0);
- }
- };
- try S.doTheTest();
- comptime try S.doTheTest();
-}
-
test "obtaining a null terminated slice" {
// here we have a normal array
var buf: [50]u8 = undefined;
@@ -407,14 +352,6 @@ test "type coercion of pointer to anon struct literal to pointer to slice" {
comptime try S.doTheTest();
}
-test "comptime slice of slice preserves comptime var" {
- comptime {
- var buff: [10]u8 = undefined;
- buff[0..][0..][0] = 1;
- try expect(buff[0..][0..][0] == 1);
- }
-}
-
test "comptime slice of pointer preserves comptime var" {
comptime {
var buff: [10]u8 = undefined;
@@ -433,28 +370,6 @@ test "array concat of slices gives slice" {
}
}
-test "slice of type" {
- comptime {
- var types_array = [_]type{ i32, f64, type };
- for (types_array) |T, i| {
- switch (i) {
- 0 => try expect(T == i32),
- 1 => try expect(T == f64),
- 2 => try expect(T == type),
- else => unreachable,
- }
- }
- for (types_array[0..]) |T, i| {
- switch (i) {
- 0 => try expect(T == i32),
- 1 => try expect(T == f64),
- 2 => try expect(T == type),
- else => unreachable,
- }
- }
- }
-}
-
test "comptime pointer cast array and then slice" {
const array = [_]u8{ 1, 2, 3, 4, 5, 6, 7, 8 };
diff --git a/test/behavior/switch.zig b/test/behavior/switch.zig
index 9524e69540..6df3656760 100644
--- a/test/behavior/switch.zig
+++ b/test/behavior/switch.zig
@@ -2,3 +2,300 @@ const std = @import("std");
const expect = std.testing.expect;
const expectError = std.testing.expectError;
const expectEqual = std.testing.expectEqual;
+
+test "switch with numbers" {
+ try testSwitchWithNumbers(13);
+}
+
+fn testSwitchWithNumbers(x: u32) !void {
+ const result = switch (x) {
+ 1, 2, 3, 4...8 => false,
+ 13 => true,
+ else => false,
+ };
+ try expect(result);
+}
+
+test "switch with all ranges" {
+ try expect(testSwitchWithAllRanges(50, 3) == 1);
+ try expect(testSwitchWithAllRanges(101, 0) == 2);
+ try expect(testSwitchWithAllRanges(300, 5) == 3);
+ try expect(testSwitchWithAllRanges(301, 6) == 6);
+}
+
+fn testSwitchWithAllRanges(x: u32, y: u32) u32 {
+ return switch (x) {
+ 0...100 => 1,
+ 101...200 => 2,
+ 201...300 => 3,
+ else => y,
+ };
+}
+
+test "implicit comptime switch" {
+ const x = 3 + 4;
+ const result = switch (x) {
+ 3 => 10,
+ 4 => 11,
+ 5, 6 => 12,
+ 7, 8 => 13,
+ else => 14,
+ };
+
+ comptime {
+ try expect(result + 1 == 14);
+ }
+}
+
+test "switch on enum" {
+ const fruit = Fruit.Orange;
+ nonConstSwitchOnEnum(fruit);
+}
+const Fruit = enum {
+ Apple,
+ Orange,
+ Banana,
+};
+fn nonConstSwitchOnEnum(fruit: Fruit) void {
+ switch (fruit) {
+ Fruit.Apple => unreachable,
+ Fruit.Orange => {},
+ Fruit.Banana => unreachable,
+ }
+}
+
+test "switch statement" {
+ try nonConstSwitch(SwitchStatementFoo.C);
+}
+fn nonConstSwitch(foo: SwitchStatementFoo) !void {
+ const val = switch (foo) {
+ SwitchStatementFoo.A => @as(i32, 1),
+ SwitchStatementFoo.B => 2,
+ SwitchStatementFoo.C => 3,
+ SwitchStatementFoo.D => 4,
+ };
+ try expect(val == 3);
+}
+const SwitchStatementFoo = enum { A, B, C, D };
+
+test "switch with multiple expressions" {
+ const x = switch (returnsFive()) {
+ 1, 2, 3 => 1,
+ 4, 5, 6 => 2,
+ else => @as(i32, 3),
+ };
+ try expect(x == 2);
+}
+fn returnsFive() i32 {
+ return 5;
+}
+
+const Number = union(enum) {
+ One: u64,
+ Two: u8,
+ Three: f32,
+};
+
+const number = Number{ .Three = 1.23 };
+
+fn returnsFalse() bool {
+ switch (number) {
+ Number.One => |x| return x > 1234,
+ Number.Two => |x| return x == 'a',
+ Number.Three => |x| return x > 12.34,
+ }
+}
+test "switch on const enum with var" {
+ try expect(!returnsFalse());
+}
+
+test "switch on type" {
+ try expect(trueIfBoolFalseOtherwise(bool));
+ try expect(!trueIfBoolFalseOtherwise(i32));
+}
+
+fn trueIfBoolFalseOtherwise(comptime T: type) bool {
+ return switch (T) {
+ bool => true,
+ else => false,
+ };
+}
+
+test "switching on booleans" {
+ try testSwitchOnBools();
+ comptime try testSwitchOnBools();
+}
+
+fn testSwitchOnBools() !void {
+ try expect(testSwitchOnBoolsTrueAndFalse(true) == false);
+ try expect(testSwitchOnBoolsTrueAndFalse(false) == true);
+
+ try expect(testSwitchOnBoolsTrueWithElse(true) == false);
+ try expect(testSwitchOnBoolsTrueWithElse(false) == true);
+
+ try expect(testSwitchOnBoolsFalseWithElse(true) == false);
+ try expect(testSwitchOnBoolsFalseWithElse(false) == true);
+}
+
+fn testSwitchOnBoolsTrueAndFalse(x: bool) bool {
+ return switch (x) {
+ true => false,
+ false => true,
+ };
+}
+
+fn testSwitchOnBoolsTrueWithElse(x: bool) bool {
+ return switch (x) {
+ true => false,
+ else => true,
+ };
+}
+
+fn testSwitchOnBoolsFalseWithElse(x: bool) bool {
+ return switch (x) {
+ false => true,
+ else => false,
+ };
+}
+
+test "u0" {
+ var val: u0 = 0;
+ switch (val) {
+ 0 => try expect(val == 0),
+ }
+}
+
+test "undefined.u0" {
+ var val: u0 = undefined;
+ switch (val) {
+ 0 => try expect(val == 0),
+ }
+}
+
+test "switch with disjoint range" {
+ var q: u8 = 0;
+ switch (q) {
+ 0...125 => {},
+ 127...255 => {},
+ 126...126 => {},
+ }
+}
+
+test "switch variable for range and multiple prongs" {
+ const S = struct {
+ fn doTheTest() !void {
+ var u: u8 = 16;
+ try doTheSwitch(u);
+ comptime try doTheSwitch(u);
+ var v: u8 = 42;
+ try doTheSwitch(v);
+ comptime try doTheSwitch(v);
+ }
+ fn doTheSwitch(q: u8) !void {
+ switch (q) {
+ 0...40 => |x| try expect(x == 16),
+ 41, 42, 43 => |x| try expect(x == 42),
+ else => try expect(false),
+ }
+ }
+ };
+ _ = S;
+}
+
+var state: u32 = 0;
+fn poll() void {
+ switch (state) {
+ 0 => {
+ state = 1;
+ },
+ else => {
+ state += 1;
+ },
+ }
+}
+
+test "switch on global mutable var isn't constant-folded" {
+ while (state < 2) {
+ poll();
+ }
+}
+
+const SwitchProngWithVarEnum = union(enum) {
+ One: i32,
+ Two: f32,
+ Meh: void,
+};
+
+test "switch prong with variable" {
+ try switchProngWithVarFn(SwitchProngWithVarEnum{ .One = 13 });
+ try switchProngWithVarFn(SwitchProngWithVarEnum{ .Two = 13.0 });
+ try switchProngWithVarFn(SwitchProngWithVarEnum{ .Meh = {} });
+}
+fn switchProngWithVarFn(a: SwitchProngWithVarEnum) !void {
+ switch (a) {
+ SwitchProngWithVarEnum.One => |x| {
+ try expect(x == 13);
+ },
+ SwitchProngWithVarEnum.Two => |x| {
+ try expect(x == 13.0);
+ },
+ SwitchProngWithVarEnum.Meh => |x| {
+ const v: void = x;
+ _ = v;
+ },
+ }
+}
+
+test "switch on enum using pointer capture" {
+ try testSwitchEnumPtrCapture();
+ comptime try testSwitchEnumPtrCapture();
+}
+
+fn testSwitchEnumPtrCapture() !void {
+ var value = SwitchProngWithVarEnum{ .One = 1234 };
+ switch (value) {
+ SwitchProngWithVarEnum.One => |*x| x.* += 1,
+ else => unreachable,
+ }
+ switch (value) {
+ SwitchProngWithVarEnum.One => |x| try expect(x == 1235),
+ else => unreachable,
+ }
+}
+
+test "switch handles all cases of number" {
+ try testSwitchHandleAllCases();
+ comptime try testSwitchHandleAllCases();
+}
+
+fn testSwitchHandleAllCases() !void {
+ try expect(testSwitchHandleAllCasesExhaustive(0) == 3);
+ try expect(testSwitchHandleAllCasesExhaustive(1) == 2);
+ try expect(testSwitchHandleAllCasesExhaustive(2) == 1);
+ try expect(testSwitchHandleAllCasesExhaustive(3) == 0);
+
+ try expect(testSwitchHandleAllCasesRange(100) == 0);
+ try expect(testSwitchHandleAllCasesRange(200) == 1);
+ try expect(testSwitchHandleAllCasesRange(201) == 2);
+ try expect(testSwitchHandleAllCasesRange(202) == 4);
+ try expect(testSwitchHandleAllCasesRange(230) == 3);
+}
+
+fn testSwitchHandleAllCasesExhaustive(x: u2) u2 {
+ return switch (x) {
+ 0 => @as(u2, 3),
+ 1 => 2,
+ 2 => 1,
+ 3 => 0,
+ };
+}
+
+fn testSwitchHandleAllCasesRange(x: u8) u8 {
+ return switch (x) {
+ 0...100 => @as(u8, 0),
+ 101...200 => 1,
+ 201, 203 => 2,
+ 202 => 4,
+ 204...255 => 3,
+ };
+}
diff --git a/test/behavior/switch_stage1.zig b/test/behavior/switch_stage1.zig
index 62afc74d83..1b85d767d5 100644
--- a/test/behavior/switch_stage1.zig
+++ b/test/behavior/switch_stage1.zig
@@ -3,208 +3,6 @@ const expect = std.testing.expect;
const expectError = std.testing.expectError;
const expectEqual = std.testing.expectEqual;
-test "switch with numbers" {
- try testSwitchWithNumbers(13);
-}
-
-fn testSwitchWithNumbers(x: u32) !void {
- const result = switch (x) {
- 1, 2, 3, 4...8 => false,
- 13 => true,
- else => false,
- };
- try expect(result);
-}
-
-test "switch with all ranges" {
- try expect(testSwitchWithAllRanges(50, 3) == 1);
- try expect(testSwitchWithAllRanges(101, 0) == 2);
- try expect(testSwitchWithAllRanges(300, 5) == 3);
- try expect(testSwitchWithAllRanges(301, 6) == 6);
-}
-
-fn testSwitchWithAllRanges(x: u32, y: u32) u32 {
- return switch (x) {
- 0...100 => 1,
- 101...200 => 2,
- 201...300 => 3,
- else => y,
- };
-}
-
-test "implicit comptime switch" {
- const x = 3 + 4;
- const result = switch (x) {
- 3 => 10,
- 4 => 11,
- 5, 6 => 12,
- 7, 8 => 13,
- else => 14,
- };
-
- comptime {
- try expect(result + 1 == 14);
- }
-}
-
-test "switch on enum" {
- const fruit = Fruit.Orange;
- nonConstSwitchOnEnum(fruit);
-}
-const Fruit = enum {
- Apple,
- Orange,
- Banana,
-};
-fn nonConstSwitchOnEnum(fruit: Fruit) void {
- switch (fruit) {
- Fruit.Apple => unreachable,
- Fruit.Orange => {},
- Fruit.Banana => unreachable,
- }
-}
-
-test "switch statement" {
- try nonConstSwitch(SwitchStatementFoo.C);
-}
-fn nonConstSwitch(foo: SwitchStatementFoo) !void {
- const val = switch (foo) {
- SwitchStatementFoo.A => @as(i32, 1),
- SwitchStatementFoo.B => 2,
- SwitchStatementFoo.C => 3,
- SwitchStatementFoo.D => 4,
- };
- try expect(val == 3);
-}
-const SwitchStatementFoo = enum {
- A,
- B,
- C,
- D,
-};
-
-test "switch prong with variable" {
- try switchProngWithVarFn(SwitchProngWithVarEnum{ .One = 13 });
- try switchProngWithVarFn(SwitchProngWithVarEnum{ .Two = 13.0 });
- try switchProngWithVarFn(SwitchProngWithVarEnum{ .Meh = {} });
-}
-const SwitchProngWithVarEnum = union(enum) {
- One: i32,
- Two: f32,
- Meh: void,
-};
-fn switchProngWithVarFn(a: SwitchProngWithVarEnum) !void {
- switch (a) {
- SwitchProngWithVarEnum.One => |x| {
- try expect(x == 13);
- },
- SwitchProngWithVarEnum.Two => |x| {
- try expect(x == 13.0);
- },
- SwitchProngWithVarEnum.Meh => |x| {
- const v: void = x;
- _ = v;
- },
- }
-}
-
-test "switch on enum using pointer capture" {
- try testSwitchEnumPtrCapture();
- comptime try testSwitchEnumPtrCapture();
-}
-
-fn testSwitchEnumPtrCapture() !void {
- var value = SwitchProngWithVarEnum{ .One = 1234 };
- switch (value) {
- SwitchProngWithVarEnum.One => |*x| x.* += 1,
- else => unreachable,
- }
- switch (value) {
- SwitchProngWithVarEnum.One => |x| try expect(x == 1235),
- else => unreachable,
- }
-}
-
-test "switch with multiple expressions" {
- const x = switch (returnsFive()) {
- 1, 2, 3 => 1,
- 4, 5, 6 => 2,
- else => @as(i32, 3),
- };
- try expect(x == 2);
-}
-fn returnsFive() i32 {
- return 5;
-}
-
-const Number = union(enum) {
- One: u64,
- Two: u8,
- Three: f32,
-};
-
-const number = Number{ .Three = 1.23 };
-
-fn returnsFalse() bool {
- switch (number) {
- Number.One => |x| return x > 1234,
- Number.Two => |x| return x == 'a',
- Number.Three => |x| return x > 12.34,
- }
-}
-test "switch on const enum with var" {
- try expect(!returnsFalse());
-}
-
-test "switch on type" {
- try expect(trueIfBoolFalseOtherwise(bool));
- try expect(!trueIfBoolFalseOtherwise(i32));
-}
-
-fn trueIfBoolFalseOtherwise(comptime T: type) bool {
- return switch (T) {
- bool => true,
- else => false,
- };
-}
-
-test "switch handles all cases of number" {
- try testSwitchHandleAllCases();
- comptime try testSwitchHandleAllCases();
-}
-
-fn testSwitchHandleAllCases() !void {
- try expect(testSwitchHandleAllCasesExhaustive(0) == 3);
- try expect(testSwitchHandleAllCasesExhaustive(1) == 2);
- try expect(testSwitchHandleAllCasesExhaustive(2) == 1);
- try expect(testSwitchHandleAllCasesExhaustive(3) == 0);
-
- try expect(testSwitchHandleAllCasesRange(100) == 0);
- try expect(testSwitchHandleAllCasesRange(200) == 1);
- try expect(testSwitchHandleAllCasesRange(201) == 2);
- try expect(testSwitchHandleAllCasesRange(202) == 4);
- try expect(testSwitchHandleAllCasesRange(230) == 3);
-}
-
-fn testSwitchHandleAllCasesExhaustive(x: u2) u2 {
- return switch (x) {
- 0 => @as(u2, 3),
- 1 => 2,
- 2 => 1,
- 3 => 0,
- };
-}
-
-fn testSwitchHandleAllCasesRange(x: u8) u8 {
- return switch (x) {
- 0...100 => @as(u8, 0),
- 101...200 => 1,
- 201, 203 => 2,
- 202 => 4,
- 204...255 => 3,
- };
-}
-
test "switch all prongs unreachable" {
try testAllProngsUnreachable();
comptime try testAllProngsUnreachable();
@@ -237,57 +35,6 @@ test "capture value of switch with all unreachable prongs" {
try expect(x == 1);
}
-test "switching on booleans" {
- try testSwitchOnBools();
- comptime try testSwitchOnBools();
-}
-
-fn testSwitchOnBools() !void {
- try expect(testSwitchOnBoolsTrueAndFalse(true) == false);
- try expect(testSwitchOnBoolsTrueAndFalse(false) == true);
-
- try expect(testSwitchOnBoolsTrueWithElse(true) == false);
- try expect(testSwitchOnBoolsTrueWithElse(false) == true);
-
- try expect(testSwitchOnBoolsFalseWithElse(true) == false);
- try expect(testSwitchOnBoolsFalseWithElse(false) == true);
-}
-
-fn testSwitchOnBoolsTrueAndFalse(x: bool) bool {
- return switch (x) {
- true => false,
- false => true,
- };
-}
-
-fn testSwitchOnBoolsTrueWithElse(x: bool) bool {
- return switch (x) {
- true => false,
- else => true,
- };
-}
-
-fn testSwitchOnBoolsFalseWithElse(x: bool) bool {
- return switch (x) {
- false => true,
- else => false,
- };
-}
-
-test "u0" {
- var val: u0 = 0;
- switch (val) {
- 0 => try expect(val == 0),
- }
-}
-
-test "undefined.u0" {
- var val: u0 = undefined;
- switch (val) {
- 0 => try expect(val == 0),
- }
-}
-
test "anon enum literal used in switch on union enum" {
const Foo = union(enum) {
a: i32,
@@ -435,54 +182,6 @@ test "switch prongs with cases with identical payload types" {
comptime try S.doTheTest();
}
-test "switch with disjoint range" {
- var q: u8 = 0;
- switch (q) {
- 0...125 => {},
- 127...255 => {},
- 126...126 => {},
- }
-}
-
-test "switch variable for range and multiple prongs" {
- const S = struct {
- fn doTheTest() !void {
- var u: u8 = 16;
- try doTheSwitch(u);
- comptime try doTheSwitch(u);
- var v: u8 = 42;
- try doTheSwitch(v);
- comptime try doTheSwitch(v);
- }
- fn doTheSwitch(q: u8) !void {
- switch (q) {
- 0...40 => |x| try expect(x == 16),
- 41, 42, 43 => |x| try expect(x == 42),
- else => try expect(false),
- }
- }
- };
- _ = S;
-}
-
-var state: u32 = 0;
-fn poll() void {
- switch (state) {
- 0 => {
- state = 1;
- },
- else => {
- state += 1;
- },
- }
-}
-
-test "switch on global mutable var isn't constant-folded" {
- while (state < 2) {
- poll();
- }
-}
-
test "switch on pointer type" {
const S = struct {
const X = struct {
@@ -527,7 +226,7 @@ test "switch on error set with single else" {
comptime try S.doTheTest();
}
-test "while copies its payload" {
+test "switch capture copies its payload" {
const S = struct {
fn doTheTest() !void {
var tmp: union(enum) {
diff --git a/test/behavior/union.zig b/test/behavior/union.zig
index e296f6bbb8..b528dc8730 100644
--- a/test/behavior/union.zig
+++ b/test/behavior/union.zig
@@ -71,3 +71,34 @@ test "0-sized extern union definition" {
try expect(U.f == 1);
}
+
+const Value = union(enum) {
+ Int: u64,
+ Array: [9]u8,
+};
+
+const Agg = struct {
+ val1: Value,
+ val2: Value,
+};
+
+const v1 = Value{ .Int = 1234 };
+const v2 = Value{ .Array = [_]u8{3} ** 9 };
+
+const err = @as(anyerror!Agg, Agg{
+ .val1 = v1,
+ .val2 = v2,
+});
+
+const array = [_]Value{ v1, v2, v1, v2 };
+
+test "unions embedded in aggregate types" {
+ switch (array[1]) {
+ Value.Array => |arr| try expect(arr[4] == 3),
+ else => unreachable,
+ }
+ switch ((err catch unreachable).val1) {
+ Value.Int => |x| try expect(x == 1234),
+ else => unreachable,
+ }
+}
diff --git a/test/behavior/union_stage1.zig b/test/behavior/union_stage1.zig
index 6a68737ecf..7a2aa96e75 100644
--- a/test/behavior/union_stage1.zig
+++ b/test/behavior/union_stage1.zig
@@ -3,37 +3,6 @@ const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const Tag = std.meta.Tag;
-const Value = union(enum) {
- Int: u64,
- Array: [9]u8,
-};
-
-const Agg = struct {
- val1: Value,
- val2: Value,
-};
-
-const v1 = Value{ .Int = 1234 };
-const v2 = Value{ .Array = [_]u8{3} ** 9 };
-
-const err = @as(anyerror!Agg, Agg{
- .val1 = v1,
- .val2 = v2,
-});
-
-const array = [_]Value{ v1, v2, v1, v2 };
-
-test "unions embedded in aggregate types" {
- switch (array[1]) {
- Value.Array => |arr| try expect(arr[4] == 3),
- else => unreachable,
- }
- switch ((err catch unreachable).val1) {
- Value.Int => |x| try expect(x == 1234),
- else => unreachable,
- }
-}
-
const Letter = enum { A, B, C };
const Payload = union(Letter) {
A: i32,
diff --git a/test/compare_output.zig b/test/compare_output.zig
index 35400e2e2a..68d8f2a807 100644
--- a/test/compare_output.zig
+++ b/test/compare_output.zig
@@ -435,8 +435,8 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\pub const log_level: std.log.Level = .debug;
\\
\\pub const scope_levels = [_]std.log.ScopeLevel{
- \\ .{ .scope = .a, .level = .alert },
- \\ .{ .scope = .c, .level = .emerg },
+ \\ .{ .scope = .a, .level = .warn },
+ \\ .{ .scope = .c, .level = .err },
\\};
\\
\\const loga = std.log.scoped(.a);
@@ -452,10 +452,6 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\ logb.info("", .{});
\\ logc.info("", .{});
\\
- \\ loga.notice("", .{});
- \\ logb.notice("", .{});
- \\ logc.notice("", .{});
- \\
\\ loga.warn("", .{});
\\ logb.warn("", .{});
\\ logc.warn("", .{});
@@ -463,18 +459,6 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\ loga.err("", .{});
\\ logb.err("", .{});
\\ logc.err("", .{});
- \\
- \\ loga.crit("", .{});
- \\ logb.crit("", .{});
- \\ logc.crit("", .{});
- \\
- \\ loga.alert("", .{});
- \\ logb.alert("", .{});
- \\ logc.alert("", .{});
- \\
- \\ loga.emerg("", .{});
- \\ logb.emerg("", .{});
- \\ logc.emerg("", .{});
\\}
\\pub fn log(
\\ comptime level: std.log.Level,
@@ -483,22 +467,18 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\ args: anytype,
\\) void {
\\ const level_txt = comptime level.asText();
- \\ const prefix2 = if (scope == .default) ": " else "(" ++ @tagName(scope) ++ "): ";
+ \\ const prefix2 = if (scope == .default) ": " else "(" ++ @tagName(scope) ++ "):";
\\ const stdout = std.io.getStdOut().writer();
\\ nosuspend stdout.print(level_txt ++ prefix2 ++ format ++ "\n", args) catch return;
\\}
,
- \\debug(b):
- \\info(b):
- \\notice(b):
- \\warning(b):
- \\error(b):
- \\critical(b):
- \\alert(a):
- \\alert(b):
- \\emergency(a):
- \\emergency(b):
- \\emergency(c):
+ \\debug(b):
+ \\info(b):
+ \\warning(a):
+ \\warning(b):
+ \\error(a):
+ \\error(b):
+ \\error(c):
\\
);
@@ -534,7 +514,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
,
\\debug: alloc - success - len: 10, ptr_align: 1, len_align: 0
\\debug: shrink - success - 10 to 5, len_align: 0, buf_align: 1
- \\critical: expand - failure: OutOfMemory - 5 to 20, len_align: 0, buf_align: 1
+ \\error: expand - failure: OutOfMemory - 5 to 20, len_align: 0, buf_align: 1
\\debug: free - success - len: 5
\\
);
diff --git a/test/run_translated_c.zig b/test/run_translated_c.zig
index c222a00eb7..91e6cc9cfd 100644
--- a/test/run_translated_c.zig
+++ b/test/run_translated_c.zig
@@ -1767,4 +1767,21 @@ pub fn addCases(cases: *tests.RunTranslatedCContext) void {
\\ return 0;
\\}
, "");
+
+ cases.add("Ensure while loop under an if doesn't steal the else. Issue #9953",
+ \\#include <stdio.h>
+ \\void doWork(int id) { }
+ \\int reallyDelete(int id) { printf("deleted %d\n", id); return 1; }
+ \\int process(int id, int n, int delete) {
+ \\ if(!delete)
+ \\ while(n-- > 0) doWork(id);
+ \\ else
+ \\ return reallyDelete(id);
+ \\ return 0;
+ \\}
+ \\int main(void) {
+ \\ process(99, 3, 0);
+ \\ return 0;
+ \\}
+ , "");
}
diff --git a/test/stage2/cbe.zig b/test/stage2/cbe.zig
index a6e81a5f5c..cfb9831e40 100644
--- a/test/stage2/cbe.zig
+++ b/test/stage2/cbe.zig
@@ -501,6 +501,19 @@ pub fn addCases(ctx: *TestContext) !void {
\\ return 69 - i;
\\}
, "");
+ case.addCompareOutput(
+ \\const E = error{e};
+ \\const S = struct { x: u32 };
+ \\fn f() E!u32 {
+ \\ const x = (try @as(E!S, S{ .x = 1 })).x;
+ \\ return x;
+ \\}
+ \\pub export fn main() c_int {
+ \\ const x = f() catch @as(u32, 0);
+ \\ if (x != 1) unreachable;
+ \\ return 0;
+ \\}
+ , "");
}
{
@@ -839,7 +852,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\ _ = E.d;
\\}
, &.{
- ":3:10: error: enum 'tmp.E' has no member named 'd'",
+ ":3:11: error: enum 'tmp.E' has no member named 'd'",
":1:11: note: enum declared here",
});
diff --git a/test/standalone/link_common_symbols/b.c b/test/standalone/link_common_symbols/b.c
index d3789c0fdf..18e8a8c23b 100644
--- a/test/standalone/link_common_symbols/b.c
+++ b/test/standalone/link_common_symbols/b.c
@@ -1,5 +1,6 @@
long i;
int j = 2;
+int k;
void incr_i() {
i++;
diff --git a/test/standalone/link_common_symbols/build.zig b/test/standalone/link_common_symbols/build.zig
index 43bb41fe32..2f9f892e86 100644
--- a/test/standalone/link_common_symbols/build.zig
+++ b/test/standalone/link_common_symbols/build.zig
@@ -4,7 +4,7 @@ pub fn build(b: *Builder) void {
const mode = b.standardReleaseOptions();
const lib_a = b.addStaticLibrary("a", null);
- lib_a.addCSourceFiles(&.{ "a.c", "b.c" }, &.{"-fcommon"});
+ lib_a.addCSourceFiles(&.{ "c.c", "a.c", "b.c" }, &.{"-fcommon"});
lib_a.setBuildMode(mode);
const test_exe = b.addTest("main.zig");
diff --git a/test/standalone/link_common_symbols/c.c b/test/standalone/link_common_symbols/c.c
new file mode 100644
index 0000000000..fdf60b9ca8
--- /dev/null
+++ b/test/standalone/link_common_symbols/c.c
@@ -0,0 +1,5 @@
+extern int k;
+
+int common_defined_externally() {
+ return k;
+}
diff --git a/test/standalone/link_common_symbols/main.zig b/test/standalone/link_common_symbols/main.zig
index 9d00d0d4fb..255b5aa621 100644
--- a/test/standalone/link_common_symbols/main.zig
+++ b/test/standalone/link_common_symbols/main.zig
@@ -1,9 +1,14 @@
const std = @import("std");
const expect = std.testing.expect;
+extern fn common_defined_externally() c_int;
extern fn incr_i() void;
extern fn add_to_i_and_j(x: c_int) c_int;
+test "undef shadows common symbol: issue #9937" {
+ try expect(common_defined_externally() == 0);
+}
+
test "import C common symbols" {
incr_i();
const res = add_to_i_and_j(2);
diff --git a/test/translate_c.zig b/test/translate_c.zig
index eb38e4e011..b63bbfc57d 100644
--- a/test/translate_c.zig
+++ b/test/translate_c.zig
@@ -849,6 +849,16 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\pub extern fn foo() noreturn;
});
+ cases.add("always_inline attribute",
+ \\__attribute__((always_inline)) int foo() {
+ \\ return 5;
+ \\}
+ , &[_][]const u8{
+ \\pub inline fn foo() c_int {
+ \\ return 5;
+ \\}
+ });
+
cases.add("add, sub, mul, div, rem",
\\int s() {
\\ int a, b, c;