aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--lib/std/fs.zig2
-rw-r--r--lib/zig.h178
-rw-r--r--src/AstGen.zig48
-rw-r--r--src/Module.zig2
-rw-r--r--src/Sema.zig133
-rw-r--r--src/Zir.zig8
-rw-r--r--src/arch/aarch64/CodeGen.zig2
-rw-r--r--src/arch/arm/CodeGen.zig2
-rw-r--r--src/arch/sparc64/CodeGen.zig2
-rw-r--r--src/arch/wasm/CodeGen.zig10
-rw-r--r--src/arch/x86_64/CodeGen.zig2
-rw-r--r--src/codegen.zig14
-rw-r--r--src/codegen/c.zig36
-rw-r--r--src/codegen/llvm.zig2
-rw-r--r--src/codegen/spirv.zig4
-rw-r--r--src/link/Dwarf.zig2
-rw-r--r--src/print_air.zig25
-rw-r--r--src/print_zir.zig2
-rw-r--r--src/type.zig54
-rw-r--r--src/value.zig23
-rw-r--r--test/behavior.zig1
-rw-r--r--test/behavior/bugs/12498.zig8
-rw-r--r--test/behavior/struct.zig12
-rw-r--r--test/cases/aarch64-macos/hello_world_with_updates.1.zig4
-rw-r--r--test/cases/compile_errors/calling_var_args_extern_function_passing_array_instead_of_pointer.zig1
-rw-r--r--test/cases/compile_errors/casting_bit_offset_pointer_to_regular_pointer.zig1
-rw-r--r--test/cases/compile_errors/closure_get_in_param_ty_instantiate_incorrectly.zig1
-rw-r--r--test/cases/compile_errors/control_reaches_end_of_non-void_function.zig9
-rw-r--r--test/cases/compile_errors/disallow_coercion_from_non-null-terminated_pointer_to_null-terminated_pointer.zig1
-rw-r--r--test/cases/compile_errors/double_pointer_to_anyopaque_pointer.zig1
-rw-r--r--test/cases/compile_errors/implicitly_increasing_pointer_alignment.zig1
-rw-r--r--test/cases/compile_errors/invalid_compare_string.zig29
-rw-r--r--test/cases/compile_errors/invalid_dependency_on_struct_size.zig19
-rw-r--r--test/cases/compile_errors/missing_parameter_name.zig19
-rw-r--r--test/cases/compile_errors/pass_const_ptr_to_mutable_ptr_fn.zig1
-rw-r--r--test/cases/compile_errors/struct_init_passed_to_type_param.zig1
-rw-r--r--test/cases/compile_errors/struct_type_mismatch_in_arg.zig18
-rw-r--r--test/cases/compile_errors/switch_on_slice.zig2
-rw-r--r--test/cases/compile_errors/type_error_in_implicit_return.zig17
-rw-r--r--test/cases/compile_errors/wrong_pointer_coerced_to_pointer_to_opaque_{}.zig1
-rw-r--r--test/cases/x86_64-linux/hello_world_with_updates.1.zig6
-rw-r--r--test/cases/x86_64-macos/hello_world_with_updates.1.zig4
-rw-r--r--test/cases/x86_64-windows/hello_world_with_updates.1.zig4
-rw-r--r--test/stage2/cbe.zig2
44 files changed, 518 insertions, 196 deletions
diff --git a/lib/std/fs.zig b/lib/std/fs.zig
index 3ef8e5319c..8ae21259ae 100644
--- a/lib/std/fs.zig
+++ b/lib/std/fs.zig
@@ -809,8 +809,6 @@ pub const IterableDir = struct {
// and we avoid the code complexity here.
const w = os.wasi;
start_over: while (true) {
- // TODO https://github.com/ziglang/zig/issues/12498
- _ = @sizeOf(w.dirent_t) + 1;
// According to the WASI spec, the last entry might be truncated,
// so we need to check if the left buffer contains the whole dirent.
if (self.end_index - self.index < @sizeOf(w.dirent_t)) {
diff --git a/lib/zig.h b/lib/zig.h
index acb2255e74..5a57bbda04 100644
--- a/lib/zig.h
+++ b/lib/zig.h
@@ -6,6 +6,16 @@
#include <stddef.h>
#include <stdint.h>
+#if !defined(__cplusplus) && __STDC_VERSION__ <= 201710L
+#if __STDC_VERSION__ >= 199901L
+#include <stdbool.h>
+#else
+typedef char bool;
+#define false 0
+#define true 1
+#endif
+#endif
+
#if defined(__has_builtin)
#define zig_has_builtin(builtin) __has_builtin(__builtin_##builtin)
#else
@@ -136,8 +146,8 @@
#define memory_order_acq_rel __ATOMIC_ACQ_REL
#define memory_order_seq_cst __ATOMIC_SEQ_CST
#define zig_atomic(type) type
-#define zig_cmpxchg_strong(obj, expected, desired, succ, fail) __atomic_compare_exchange_n(obj, &(expected), desired, zig_false, succ, fail)
-#define zig_cmpxchg_weak(obj, expected, desired, succ, fail) __atomic_compare_exchange_n(obj, &(expected), desired, zig_true , succ, fail)
+#define zig_cmpxchg_strong(obj, expected, desired, succ, fail) __atomic_compare_exchange_n(obj, &(expected), desired, false, succ, fail)
+#define zig_cmpxchg_weak(obj, expected, desired, succ, fail) __atomic_compare_exchange_n(obj, &(expected), desired, true , succ, fail)
#define zig_atomicrmw_xchg(obj, arg, order) __atomic_exchange_n(obj, arg, order)
#define zig_atomicrmw_add(obj, arg, order) __atomic_fetch_add (obj, arg, order)
#define zig_atomicrmw_sub(obj, arg, order) __atomic_fetch_sub (obj, arg, order)
@@ -189,20 +199,6 @@
#define zig_bitSizeOf(T) (CHAR_BIT * sizeof(T))
-#if defined(__cplusplus)
-typedef bool zig_bool;
-#define zig_false false
-#define zig_true true
-#else
-#if __STDC_VERSION__ >= 199901L
-typedef _Bool zig_bool;
-#else
-typedef char zig_bool;
-#endif
-#define zig_false ((zig_bool)0)
-#define zig_true ((zig_bool)1)
-#endif
-
typedef uintptr_t zig_usize;
typedef intptr_t zig_isize;
typedef signed short int zig_c_short;
@@ -330,10 +326,10 @@ zig_int_helpers(16)
zig_int_helpers(32)
zig_int_helpers(64)
-static inline zig_bool zig_addo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) {
+static inline bool zig_addo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) {
#if zig_has_builtin(add_overflow)
zig_u32 full_res;
- zig_bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
+ bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u32(full_res, bits);
return overflow || full_res < zig_minInt(u32, bits) || full_res > zig_maxInt(u32, bits);
#else
@@ -349,14 +345,14 @@ static inline void zig_vaddo_u32(zig_u8 *ov, zig_u32 *res, int n,
}
zig_extern zig_i32 __addosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow);
-static inline zig_bool zig_addo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) {
+static inline bool zig_addo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) {
#if zig_has_builtin(add_overflow)
zig_i32 full_res;
- zig_bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
+ bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
#else
zig_c_int overflow_int;
zig_u32 full_res = __addosi4(lhs, rhs, &overflow_int);
- zig_bool overflow = overflow_int != 0;
+ bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i32(full_res, bits);
return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits);
@@ -368,10 +364,10 @@ static inline void zig_vaddo_i32(zig_u8 *ov, zig_i32 *res, int n,
for (int i = 0; i < n; ++i) ov[i] = zig_addo_i32(&res[i], lhs[i], rhs[i], bits);
}
-static inline zig_bool zig_addo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) {
+static inline bool zig_addo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) {
#if zig_has_builtin(add_overflow)
zig_u64 full_res;
- zig_bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
+ bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u64(full_res, bits);
return overflow || full_res < zig_minInt(u64, bits) || full_res > zig_maxInt(u64, bits);
#else
@@ -387,14 +383,14 @@ static inline void zig_vaddo_u64(zig_u8 *ov, zig_u64 *res, int n,
}
zig_extern zig_i64 __addodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow);
-static inline zig_bool zig_addo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) {
+static inline bool zig_addo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) {
#if zig_has_builtin(add_overflow)
zig_i64 full_res;
- zig_bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
+ bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
#else
zig_c_int overflow_int;
zig_u64 full_res = __addodi4(lhs, rhs, &overflow_int);
- zig_bool overflow = overflow_int != 0;
+ bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i64(full_res, bits);
return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits);
@@ -406,10 +402,10 @@ static inline void zig_vaddo_i64(zig_u8 *ov, zig_i64 *res, int n,
for (int i = 0; i < n; ++i) ov[i] = zig_addo_i64(&res[i], lhs[i], rhs[i], bits);
}
-static inline zig_bool zig_addo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) {
+static inline bool zig_addo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) {
#if zig_has_builtin(add_overflow)
zig_u8 full_res;
- zig_bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
+ bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u8(full_res, bits);
return overflow || full_res < zig_minInt(u8, bits) || full_res > zig_maxInt(u8, bits);
#else
@@ -423,10 +419,10 @@ static inline void zig_vaddo_u8(zig_u8 *ov, zig_u8 *res, int n,
for (int i = 0; i < n; ++i) ov[i] = zig_addo_u8(&res[i], lhs[i], rhs[i], bits);
}
-static inline zig_bool zig_addo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) {
+static inline bool zig_addo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) {
#if zig_has_builtin(add_overflow)
zig_i8 full_res;
- zig_bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
+ bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i8(full_res, bits);
return overflow || full_res < zig_minInt(i8, bits) || full_res > zig_maxInt(i8, bits);
#else
@@ -440,10 +436,10 @@ static inline void zig_vaddo_i8(zig_u8 *ov, zig_i8 *res, int n,
for (int i = 0; i < n; ++i) ov[i] = zig_addo_i8(&res[i], lhs[i], rhs[i], bits);
}
-static inline zig_bool zig_addo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) {
+static inline bool zig_addo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) {
#if zig_has_builtin(add_overflow)
zig_u16 full_res;
- zig_bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
+ bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u16(full_res, bits);
return overflow || full_res < zig_minInt(u16, bits) || full_res > zig_maxInt(u16, bits);
#else
@@ -457,10 +453,10 @@ static inline void zig_vaddo_u16(zig_u8 *ov, zig_u16 *res, int n,
for (int i = 0; i < n; ++i) ov[i] = zig_addo_u16(&res[i], lhs[i], rhs[i], bits);
}
-static inline zig_bool zig_addo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) {
+static inline bool zig_addo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) {
#if zig_has_builtin(add_overflow)
zig_i16 full_res;
- zig_bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
+ bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i16(full_res, bits);
return overflow || full_res < zig_minInt(i16, bits) || full_res > zig_maxInt(i16, bits);
#else
@@ -474,10 +470,10 @@ static inline void zig_vaddo_i16(zig_u8 *ov, zig_i16 *res, int n,
for (int i = 0; i < n; ++i) ov[i] = zig_addo_i16(&res[i], lhs[i], rhs[i], bits);
}
-static inline zig_bool zig_subo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) {
+static inline bool zig_subo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) {
#if zig_has_builtin(sub_overflow)
zig_u32 full_res;
- zig_bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
+ bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u32(full_res, bits);
return overflow || full_res < zig_minInt(u32, bits) || full_res > zig_maxInt(u32, bits);
#else
@@ -493,14 +489,14 @@ static inline void zig_vsubo_u32(zig_u8 *ov, zig_u32 *res, int n,
}
zig_extern zig_i32 __subosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow);
-static inline zig_bool zig_subo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) {
+static inline bool zig_subo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) {
#if zig_has_builtin(sub_overflow)
zig_i32 full_res;
- zig_bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
+ bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
#else
zig_c_int overflow_int;
zig_u32 full_res = __subosi4(lhs, rhs, &overflow_int);
- zig_bool overflow = overflow_int != 0;
+ bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i32(full_res, bits);
return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits);
@@ -512,10 +508,10 @@ static inline void zig_vsubo_i32(zig_u8 *ov, zig_i32 *res, int n,
for (int i = 0; i < n; ++i) ov[i] = zig_subo_i32(&res[i], lhs[i], rhs[i], bits);
}
-static inline zig_bool zig_subo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) {
+static inline bool zig_subo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) {
#if zig_has_builtin(sub_overflow)
zig_u64 full_res;
- zig_bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
+ bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u64(full_res, bits);
return overflow || full_res < zig_minInt(u64, bits) || full_res > zig_maxInt(u64, bits);
#else
@@ -531,14 +527,14 @@ static inline void zig_vsubo_u64(zig_u8 *ov, zig_u64 *res, int n,
}
zig_extern zig_i64 __subodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow);
-static inline zig_bool zig_subo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) {
+static inline bool zig_subo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) {
#if zig_has_builtin(sub_overflow)
zig_i64 full_res;
- zig_bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
+ bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
#else
zig_c_int overflow_int;
zig_u64 full_res = __subodi4(lhs, rhs, &overflow_int);
- zig_bool overflow = overflow_int != 0;
+ bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i64(full_res, bits);
return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits);
@@ -550,10 +546,10 @@ static inline void zig_vsubo_i64(zig_u8 *ov, zig_i64 *res, int n,
for (int i = 0; i < n; ++i) ov[i] = zig_subo_i64(&res[i], lhs[i], rhs[i], bits);
}
-static inline zig_bool zig_subo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) {
+static inline bool zig_subo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) {
#if zig_has_builtin(sub_overflow)
zig_u8 full_res;
- zig_bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
+ bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u8(full_res, bits);
return overflow || full_res < zig_minInt(u8, bits) || full_res > zig_maxInt(u8, bits);
#else
@@ -567,10 +563,10 @@ static inline void zig_vsubo_u8(zig_u8 *ov, zig_u8 *res, int n,
for (int i = 0; i < n; ++i) ov[i] = zig_subo_u8(&res[i], lhs[i], rhs[i], bits);
}
-static inline zig_bool zig_subo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) {
+static inline bool zig_subo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) {
#if zig_has_builtin(sub_overflow)
zig_i8 full_res;
- zig_bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
+ bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i8(full_res, bits);
return overflow || full_res < zig_minInt(i8, bits) || full_res > zig_maxInt(i8, bits);
#else
@@ -585,10 +581,10 @@ static inline void zig_vsubo_i8(zig_u8 *ov, zig_i8 *res, int n,
}
-static inline zig_bool zig_subo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) {
+static inline bool zig_subo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) {
#if zig_has_builtin(sub_overflow)
zig_u16 full_res;
- zig_bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
+ bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u16(full_res, bits);
return overflow || full_res < zig_minInt(u16, bits) || full_res > zig_maxInt(u16, bits);
#else
@@ -603,10 +599,10 @@ static inline void zig_vsubo_u16(zig_u8 *ov, zig_u16 *res, int n,
}
-static inline zig_bool zig_subo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) {
+static inline bool zig_subo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) {
#if zig_has_builtin(sub_overflow)
zig_i16 full_res;
- zig_bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
+ bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i16(full_res, bits);
return overflow || full_res < zig_minInt(i16, bits) || full_res > zig_maxInt(i16, bits);
#else
@@ -620,10 +616,10 @@ static inline void zig_vsubo_i16(zig_u8 *ov, zig_i16 *res, int n,
for (int i = 0; i < n; ++i) ov[i] = zig_subo_i16(&res[i], lhs[i], rhs[i], bits);
}
-static inline zig_bool zig_mulo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) {
+static inline bool zig_mulo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) {
#if zig_has_builtin(mul_overflow)
zig_u32 full_res;
- zig_bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
+ bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u32(full_res, bits);
return overflow || full_res < zig_minInt(u32, bits) || full_res > zig_maxInt(u32, bits);
#else
@@ -639,14 +635,14 @@ static inline void zig_vmulo_u32(zig_u8 *ov, zig_u32 *res, int n,
}
zig_extern zig_i32 __mulosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow);
-static inline zig_bool zig_mulo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) {
+static inline bool zig_mulo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) {
#if zig_has_builtin(mul_overflow)
zig_i32 full_res;
- zig_bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
+ bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
#else
zig_c_int overflow_int;
zig_u32 full_res = __mulosi4(lhs, rhs, &overflow_int);
- zig_bool overflow = overflow_int != 0;
+ bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i32(full_res, bits);
return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits);
@@ -658,10 +654,10 @@ static inline void zig_vmulo_i32(zig_u8 *ov, zig_i32 *res, int n,
for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i32(&res[i], lhs[i], rhs[i], bits);
}
-static inline zig_bool zig_mulo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) {
+static inline bool zig_mulo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) {
#if zig_has_builtin(mul_overflow)
zig_u64 full_res;
- zig_bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
+ bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u64(full_res, bits);
return overflow || full_res < zig_minInt(u64, bits) || full_res > zig_maxInt(u64, bits);
#else
@@ -677,14 +673,14 @@ static inline void zig_vmulo_u64(zig_u8 *ov, zig_u64 *res, int n,
}
zig_extern zig_i64 __mulodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow);
-static inline zig_bool zig_mulo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) {
+static inline bool zig_mulo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) {
#if zig_has_builtin(mul_overflow)
zig_i64 full_res;
- zig_bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
+ bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
#else
zig_c_int overflow_int;
zig_u64 full_res = __mulodi4(lhs, rhs, &overflow_int);
- zig_bool overflow = overflow_int != 0;
+ bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i64(full_res, bits);
return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits);
@@ -696,10 +692,10 @@ static inline void zig_vmulo_i64(zig_u8 *ov, zig_i64 *res, int n,
for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i64(&res[i], lhs[i], rhs[i], bits);
}
-static inline zig_bool zig_mulo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) {
+static inline bool zig_mulo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) {
#if zig_has_builtin(mul_overflow)
zig_u8 full_res;
- zig_bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
+ bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u8(full_res, bits);
return overflow || full_res < zig_minInt(u8, bits) || full_res > zig_maxInt(u8, bits);
#else
@@ -713,10 +709,10 @@ static inline void zig_vmulo_u8(zig_u8 *ov, zig_u8 *res, int n,
for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u8(&res[i], lhs[i], rhs[i], bits);
}
-static inline zig_bool zig_mulo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) {
+static inline bool zig_mulo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) {
#if zig_has_builtin(mul_overflow)
zig_i8 full_res;
- zig_bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
+ bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i8(full_res, bits);
return overflow || full_res < zig_minInt(i8, bits) || full_res > zig_maxInt(i8, bits);
#else
@@ -730,10 +726,10 @@ static inline void zig_vmulo_i8(zig_u8 *ov, zig_i8 *res, int n,
for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i8(&res[i], lhs[i], rhs[i], bits);
}
-static inline zig_bool zig_mulo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) {
+static inline bool zig_mulo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) {
#if zig_has_builtin(mul_overflow)
zig_u16 full_res;
- zig_bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
+ bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u16(full_res, bits);
return overflow || full_res < zig_minInt(u16, bits) || full_res > zig_maxInt(u16, bits);
#else
@@ -747,10 +743,10 @@ static inline void zig_vmulo_u16(zig_u8 *ov, zig_u16 *res, int n,
for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u16(&res[i], lhs[i], rhs[i], bits);
}
-static inline zig_bool zig_mulo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) {
+static inline bool zig_mulo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) {
#if zig_has_builtin(mul_overflow)
zig_i16 full_res;
- zig_bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
+ bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_i16(full_res, bits);
return overflow || full_res < zig_minInt(i16, bits) || full_res > zig_maxInt(i16, bits);
#else
@@ -797,12 +793,12 @@ static inline void zig_vmulo_i16(zig_u8 *ov, zig_i16 *res, int n,
return zig_wrap_i##w((zig_i##w)((zig_u##w)lhs * (zig_u##w)rhs), bits); \
} \
\
- static inline zig_bool zig_shlo_u##w(zig_u##w *res, zig_u##w lhs, zig_u8 rhs, zig_u8 bits) { \
+ static inline bool zig_shlo_u##w(zig_u##w *res, zig_u##w lhs, zig_u8 rhs, zig_u8 bits) { \
*res = zig_shlw_u##w(lhs, rhs, bits); \
return (lhs & zig_maxInt_u##w << (bits - rhs)) != zig_as_u##w(0); \
} \
\
- static inline zig_bool zig_shlo_i##w(zig_i##w *res, zig_i##w lhs, zig_u8 rhs, zig_u8 bits) { \
+ static inline bool zig_shlo_i##w(zig_i##w *res, zig_i##w lhs, zig_u8 rhs, zig_u8 bits) { \
*res = zig_shlw_i##w(lhs, rhs, bits); \
zig_i##w mask = (zig_i##w)(zig_maxInt_u##w << (bits - rhs - 1)); \
return (lhs & mask) != zig_as_i##w(0) && (lhs & mask) != mask; \
@@ -1327,22 +1323,22 @@ static inline zig_i128 zig_mulw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
#if zig_has_int128
-static inline zig_bool zig_shlo_u128(zig_u128 *res, zig_u128 lhs, zig_u8 rhs, zig_u8 bits) {
+static inline bool zig_shlo_u128(zig_u128 *res, zig_u128 lhs, zig_u8 rhs, zig_u8 bits) {
*res = zig_shlw_u128(lhs, rhs, bits);
return zig_and_u128(lhs, zig_shl_u128(zig_maxInt_u128, bits - rhs)) != zig_as_u128(0, 0);
}
-static inline zig_bool zig_shlo_i128(zig_i128 *res, zig_i128 lhs, zig_u8 rhs, zig_u8 bits) {
+static inline bool zig_shlo_i128(zig_i128 *res, zig_i128 lhs, zig_u8 rhs, zig_u8 bits) {
*res = zig_shlw_i128(lhs, rhs, bits);
zig_i128 mask = zig_bitcast_i128(zig_shl_u128(zig_maxInt_u128, bits - rhs - zig_as_u8(1)));
return zig_cmp_i128(zig_and_i128(lhs, mask), zig_as_i128(0, 0)) != zig_as_i32(0) &&
zig_cmp_i128(zig_and_i128(lhs, mask), mask) != zig_as_i32(0);
}
-static inline zig_bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
#if zig_has_builtin(add_overflow)
zig_u128 full_res;
- zig_bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
+ bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u128(full_res, bits);
return overflow || full_res < zig_minInt(u128, bits) || full_res > zig_maxInt(u128, bits);
#else
@@ -1352,23 +1348,23 @@ static inline zig_bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs,
}
zig_extern zig_i128 __addoti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
-static inline zig_bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
#if zig_has_builtin(add_overflow)
zig_i128 full_res;
- zig_bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
+ bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
#else
zig_c_int overflow_int;
zig_i128 full_res = __addoti4(lhs, rhs, &overflow_int);
- zig_bool overflow = overflow_int != 0;
+ bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i128(full_res, bits);
return overflow || full_res < zig_minInt(i128, bits) || full_res > zig_maxInt(i128, bits);
}
-static inline zig_bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
#if zig_has_builtin(sub_overflow)
zig_u128 full_res;
- zig_bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
+ bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u128(full_res, bits);
return overflow || full_res < zig_minInt(u128, bits) || full_res > zig_maxInt(u128, bits);
#else
@@ -1378,23 +1374,23 @@ static inline zig_bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs,
}
zig_extern zig_i128 __suboti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
-static inline zig_bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
#if zig_has_builtin(sub_overflow)
zig_i128 full_res;
- zig_bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
+ bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
#else
zig_c_int overflow_int;
zig_i128 full_res = __suboti4(lhs, rhs, &overflow_int);
- zig_bool overflow = overflow_int != 0;
+ bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i128(full_res, bits);
return overflow || full_res < zig_minInt(i128, bits) || full_res > zig_maxInt(i128, bits);
}
-static inline zig_bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+static inline bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
#if zig_has_builtin(mul_overflow)
zig_u128 full_res;
- zig_bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
+ bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
*res = zig_wrap_u128(full_res, bits);
return overflow || full_res < zig_minInt(u128, bits) || full_res > zig_maxInt(u128, bits);
#else
@@ -1404,14 +1400,14 @@ static inline zig_bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs,
}
zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
-static inline zig_bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
#if zig_has_builtin(mul_overflow)
zig_i128 full_res;
- zig_bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
+ bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
#else
zig_c_int overflow_int;
    zig_i128 full_res = __muloti4(lhs, rhs, &overflow_int);
- zig_bool overflow = overflow_int != 0;
+ bool overflow = overflow_int != 0;
#endif
*res = zig_wrap_i128(full_res, bits);
return overflow || full_res < zig_minInt(i128, bits) || full_res > zig_maxInt(i128, bits);
@@ -1419,12 +1415,12 @@ static inline zig_bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs,
#else /* zig_has_int128 */
-static inline zig_bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs) {
+static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs) {
return zig_addo_u64(&res->hi, lhs.hi, rhs.hi, UINT64_MAX) |
zig_addo_u64(&res->hi, res->hi, zig_addo_u64(&res->lo, lhs.lo, rhs.lo, UINT64_MAX));
}
-static inline zig_bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs) {
+static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs) {
return zig_subo_u64(&res->hi, lhs.hi, rhs.hi, UINT64_MAX) |
zig_subo_u64(&res->hi, res->hi, zig_subo_u64(&res->lo, lhs.lo, rhs.lo, UINT64_MAX));
}
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 009d05e5ed..4e571ffda9 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -2632,7 +2632,7 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.compile_error,
.ret_node,
.ret_load,
- .ret_tok,
+ .ret_implicit,
.ret_err_value,
.@"unreachable",
.repeat,
@@ -3696,6 +3696,29 @@ fn fnDecl(
if (param.anytype_ellipsis3) |tok| {
return astgen.failTok(tok, "missing parameter name", .{});
} else {
+ ambiguous: {
+ if (tree.nodes.items(.tag)[param.type_expr] != .identifier) break :ambiguous;
+ const main_token = tree.nodes.items(.main_token)[param.type_expr];
+ const identifier_str = tree.tokenSlice(main_token);
+ if (isPrimitive(identifier_str)) break :ambiguous;
+ return astgen.failNodeNotes(
+ param.type_expr,
+ "missing parameter name or type",
+ .{},
+ &[_]u32{
+ try astgen.errNoteNode(
+ param.type_expr,
+ "if this is a name, annotate its type '{s}: T'",
+ .{identifier_str},
+ ),
+ try astgen.errNoteNode(
+ param.type_expr,
+ "if this is a type, give it a name '<name>: {s}'",
+ .{identifier_str},
+ ),
+ },
+ );
+ }
return astgen.failNode(param.type_expr, "missing parameter name", .{});
}
} else 0;
@@ -3891,9 +3914,8 @@ fn fnDecl(
// As our last action before the return, "pop" the error trace if needed
_ = try gz.addRestoreErrRetIndex(.ret, .always);
- // Since we are adding the return instruction here, we must handle the coercion.
- // We do this by using the `ret_tok` instruction.
- _ = try fn_gz.addUnTok(.ret_tok, .void_value, tree.lastToken(body_node));
+ // Add implicit return at end of function.
+ _ = try fn_gz.addUnTok(.ret_implicit, .void_value, tree.lastToken(body_node));
}
break :func try decl_gz.addFunc(.{
@@ -4311,9 +4333,8 @@ fn testDecl(
// As our last action before the return, "pop" the error trace if needed
_ = try gz.addRestoreErrRetIndex(.ret, .always);
- // Since we are adding the return instruction here, we must handle the coercion.
- // We do this by using the `ret_tok` instruction.
- _ = try fn_block.addUnTok(.ret_tok, .void_value, tree.lastToken(body_node));
+ // Add implicit return at end of function.
+ _ = try fn_block.addUnTok(.ret_implicit, .void_value, tree.lastToken(body_node));
}
const func_inst = try decl_block.addFunc(.{
@@ -5605,6 +5626,14 @@ fn simpleBinOp(
const tree = astgen.tree;
const node_datas = tree.nodes.items(.data);
+ if (op_inst_tag == .cmp_neq or op_inst_tag == .cmp_eq) {
+ const node_tags = tree.nodes.items(.tag);
+ const str = if (op_inst_tag == .cmp_eq) "==" else "!=";
+ if (node_tags[node_datas[node].lhs] == .string_literal or
+ node_tags[node_datas[node].rhs] == .string_literal)
+ return astgen.failNode(node, "cannot compare strings with {s}", .{str});
+ }
+
const lhs = try reachableExpr(gz, scope, .{ .rl = .none }, node_datas[node].lhs, node);
var line: u32 = undefined;
var column: u32 = undefined;
@@ -6602,6 +6631,11 @@ fn switchExpr(
continue;
}
+ for (case.ast.values) |val| {
+ if (node_tags[val] == .string_literal)
+ return astgen.failNode(val, "cannot switch on strings", .{});
+ }
+
if (case.ast.values.len == 1 and node_tags[case.ast.values[0]] != .switch_range) {
scalar_cases_len += 1;
} else {
diff --git a/src/Module.zig b/src/Module.zig
index a18297c8ff..68d0ac8af5 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -940,6 +940,7 @@ pub const Struct = struct {
requires_comptime: PropertyBoolean = .unknown,
have_field_inits: bool = false,
is_tuple: bool,
+ assumed_runtime_bits: bool = false,
pub const Fields = std.StringArrayHashMapUnmanaged(Field);
@@ -1205,6 +1206,7 @@ pub const Union = struct {
fully_resolved,
},
requires_comptime: PropertyBoolean = .unknown,
+ assumed_runtime_bits: bool = false,
pub const Field = struct {
/// undefined until `status` is `have_field_types` or `have_layout`.
diff --git a/src/Sema.zig b/src/Sema.zig
index c384605e1b..09fd184320 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -291,8 +291,8 @@ pub const Block = struct {
try sema.errNote(ci.block, ci.src, parent, prefix ++ "it is inside a @cImport", .{});
},
.comptime_ret_ty => |rt| {
- const src_loc = if (try sema.funcDeclSrc(rt.func)) |capture| blk: {
- var src_loc = capture;
+ const src_loc = if (try sema.funcDeclSrc(rt.func)) |fn_decl| blk: {
+ var src_loc = fn_decl.srcLoc();
src_loc.lazy = .{ .node_offset_fn_type_ret_ty = 0 };
break :blk src_loc;
} else blk: {
@@ -1098,7 +1098,7 @@ fn analyzeBodyInner(
// These functions match the return type of analyzeBody so that we can
// tail call them here.
.compile_error => break sema.zirCompileError(block, inst),
- .ret_tok => break sema.zirRetTok(block, inst),
+ .ret_implicit => break sema.zirRetImplicit(block, inst),
.ret_node => break sema.zirRetNode(block, inst),
.ret_load => break sema.zirRetLoad(block, inst),
.ret_err_value => break sema.zirRetErrValue(block, inst),
@@ -5843,7 +5843,7 @@ fn lookupInNamespace(
return null;
}
-fn funcDeclSrc(sema: *Sema, func_inst: Air.Inst.Ref) !?Module.SrcLoc {
+fn funcDeclSrc(sema: *Sema, func_inst: Air.Inst.Ref) !?*Decl {
const func_val = (try sema.resolveMaybeUndefVal(func_inst)) orelse return null;
if (func_val.isUndef()) return null;
const owner_decl_index = switch (func_val.tag()) {
@@ -5852,8 +5852,7 @@ fn funcDeclSrc(sema: *Sema, func_inst: Air.Inst.Ref) !?Module.SrcLoc {
.decl_ref => sema.mod.declPtr(func_val.castTag(.decl_ref).?.data).val.castTag(.function).?.data.owner_decl,
else => return null,
};
- const owner_decl = sema.mod.declPtr(owner_decl_index);
- return owner_decl.srcLoc();
+ return sema.mod.declPtr(owner_decl_index);
}
pub fn analyzeSaveErrRetIndex(sema: *Sema, block: *Block) SemaError!Air.Inst.Ref {
@@ -6031,7 +6030,7 @@ fn zirCall(
break :check_args;
}
- const decl_src = try sema.funcDeclSrc(func);
+ const maybe_decl = try sema.funcDeclSrc(func);
const member_str = if (bound_arg_src != null) "member function " else "";
const variadic_str = if (func_ty_info.is_var_args) "at least " else "";
const msg = msg: {
@@ -6048,7 +6047,7 @@ fn zirCall(
);
errdefer msg.destroy(sema.gpa);
- if (decl_src) |some| try sema.mod.errNoteNonLazy(some, msg, "function declared here", .{});
+ if (maybe_decl) |fn_decl| try sema.mod.errNoteNonLazy(fn_decl.srcLoc(), msg, "function declared here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
@@ -6242,7 +6241,7 @@ fn analyzeCall(
const func_ty_info = func_ty.fnInfo();
const cc = func_ty_info.cc;
if (cc == .Naked) {
- const decl_src = try sema.funcDeclSrc(func);
+ const maybe_decl = try sema.funcDeclSrc(func);
const msg = msg: {
const msg = try sema.errMsg(
block,
@@ -6252,7 +6251,7 @@ fn analyzeCall(
);
errdefer msg.destroy(sema.gpa);
- if (decl_src) |some| try sema.mod.errNoteNonLazy(some, msg, "function declared here", .{});
+ if (maybe_decl) |fn_decl| try sema.mod.errNoteNonLazy(fn_decl.srcLoc(), msg, "function declared here", .{});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
@@ -6488,6 +6487,7 @@ fn analyzeCall(
&should_memoize,
memoized_call_key,
func_ty_info.param_types,
+ func,
) catch |err| switch (err) {
error.NeededSourceLocation => {
_ = sema.inst_map.remove(inst);
@@ -6504,6 +6504,7 @@ fn analyzeCall(
&should_memoize,
memoized_call_key,
func_ty_info.param_types,
+ func,
);
return error.AnalysisFail;
},
@@ -6646,12 +6647,17 @@ fn analyzeCall(
const args = try sema.arena.alloc(Air.Inst.Ref, uncasted_args.len);
for (uncasted_args) |uncasted_arg, i| {
if (i < fn_params_len) {
+ const opts: CoerceOpts = .{ .param_src = .{
+ .func_inst = func,
+ .param_i = @intCast(u32, i),
+ } };
const param_ty = func_ty.fnParamType(i);
args[i] = sema.analyzeCallArg(
block,
.unneeded,
param_ty,
uncasted_arg,
+ opts,
) catch |err| switch (err) {
error.NeededSourceLocation => {
const decl = sema.mod.declPtr(block.src_decl);
@@ -6660,6 +6666,7 @@ fn analyzeCall(
Module.argSrc(call_src.node_offset.x, sema.gpa, decl, i, bound_arg_src),
param_ty,
uncasted_arg,
+ opts,
);
return error.AnalysisFail;
},
@@ -6741,6 +6748,7 @@ fn analyzeInlineCallArg(
should_memoize: *bool,
memoized_call_key: Module.MemoizedCall.Key,
raw_param_types: []const Type,
+ func_inst: Air.Inst.Ref,
) !void {
const zir_tags = sema.code.instructions.items(.tag);
switch (zir_tags[inst]) {
@@ -6765,7 +6773,13 @@ fn analyzeInlineCallArg(
return err;
};
}
- const casted_arg = try sema.coerce(arg_block, param_ty, uncasted_arg, arg_src);
+ const casted_arg = sema.coerceExtra(arg_block, param_ty, uncasted_arg, arg_src, .{ .param_src = .{
+ .func_inst = func_inst,
+ .param_i = @intCast(u32, arg_i.*),
+ } }) catch |err| switch (err) {
+ error.NotCoercible => unreachable,
+ else => |e| return e,
+ };
if (is_comptime_call) {
sema.inst_map.putAssumeCapacityNoClobber(inst, casted_arg);
@@ -6855,9 +6869,13 @@ fn analyzeCallArg(
arg_src: LazySrcLoc,
param_ty: Type,
uncasted_arg: Air.Inst.Ref,
+ opts: CoerceOpts,
) !Air.Inst.Ref {
try sema.resolveTypeFully(param_ty);
- return sema.coerce(block, param_ty, uncasted_arg, arg_src);
+ return sema.coerceExtra(block, param_ty, uncasted_arg, arg_src, opts) catch |err| switch (err) {
+ error.NotCoercible => unreachable,
+ else => |e| return e,
+ };
}
fn analyzeGenericCallArg(
@@ -16546,7 +16564,7 @@ fn zirRetErrValue(
return sema.analyzeRet(block, result_inst, src);
}
-fn zirRetTok(
+fn zirRetImplicit(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
@@ -16556,9 +16574,33 @@ fn zirRetTok(
const inst_data = sema.code.instructions.items(.data)[inst].un_tok;
const operand = try sema.resolveInst(inst_data.operand);
- const src = inst_data.src();
- return sema.analyzeRet(block, operand, src);
+ const r_brace_src = inst_data.src();
+ const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 };
+ const base_tag = sema.fn_ret_ty.baseZigTypeTag();
+ if (base_tag == .NoReturn) {
+ const msg = msg: {
+ const msg = try sema.errMsg(block, ret_ty_src, "function declared '{}' implicitly returns", .{
+ sema.fn_ret_ty.fmt(sema.mod),
+ });
+ errdefer msg.destroy(sema.gpa);
+ try sema.errNote(block, r_brace_src, msg, "control flow reaches end of body here", .{});
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
+ } else if (base_tag != .Void) {
+ const msg = msg: {
+ const msg = try sema.errMsg(block, ret_ty_src, "function with non-void return type '{}' implicitly returns", .{
+ sema.fn_ret_ty.fmt(sema.mod),
+ });
+ errdefer msg.destroy(sema.gpa);
+ try sema.errNote(block, r_brace_src, msg, "control flow reaches end of body here", .{});
+ break :msg msg;
+ };
+ return sema.failWithOwnedErrorMsg(msg);
+ }
+
+ return sema.analyzeRet(block, operand, .unneeded);
}
fn zirRetNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index {
@@ -16825,7 +16867,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const bitoffset_src: LazySrcLoc = .{ .node_offset_ptr_bitoffset = extra.data.src_node };
const hostsize_src: LazySrcLoc = .{ .node_offset_ptr_hostsize = extra.data.src_node };
- const unresolved_elem_ty = blk: {
+ const elem_ty = blk: {
const air_inst = try sema.resolveInst(extra.data.elem_type);
const ty = sema.analyzeAsType(block, elem_ty_src, air_inst) catch |err| {
if (err == error.AnalysisFail and sema.err != null and sema.typeOf(air_inst).isSinglePointer()) {
@@ -16854,7 +16896,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
// Check if this happens to be the lazy alignment of our element type, in
// which case we can make this 0 without resolving it.
if (val.castTag(.lazy_align)) |payload| {
- if (payload.data.eql(unresolved_elem_ty, sema.mod)) {
+ if (payload.data.eql(elem_ty, sema.mod)) {
break :blk 0;
}
}
@@ -16887,14 +16929,6 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
return sema.fail(block, bitoffset_src, "bit offset starts after end of host integer", .{});
}
- const elem_ty = if (abi_align == 0)
- unresolved_elem_ty
- else t: {
- const elem_ty = try sema.resolveTypeFields(unresolved_elem_ty);
- try sema.resolveTypeLayout(elem_ty);
- break :t elem_ty;
- };
-
if (elem_ty.zigTypeTag() == .NoReturn) {
return sema.fail(block, elem_ty_src, "pointer to noreturn not allowed", .{});
} else if (elem_ty.zigTypeTag() == .Fn) {
@@ -20270,7 +20304,7 @@ fn analyzeShuffle(
var buf: Value.ElemValueBuffer = undefined;
const elem = mask.elemValueBuffer(sema.mod, i, &buf);
if (elem.isUndef()) continue;
- const int = elem.toSignedInt();
+ const int = elem.toSignedInt(sema.mod.getTarget());
var unsigned: u32 = undefined;
var chosen: u32 = undefined;
if (int >= 0) {
@@ -20312,7 +20346,7 @@ fn analyzeShuffle(
values[i] = Value.undef;
continue;
}
- const int = mask_elem_val.toSignedInt();
+ const int = mask_elem_val.toSignedInt(sema.mod.getTarget());
const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int);
if (int >= 0) {
values[i] = try a_val.elemValue(sema.mod, sema.arena, unsigned);
@@ -24040,6 +24074,25 @@ const CoerceOpts = struct {
is_ret: bool = false,
/// Should coercion to comptime_int ermit an error message.
no_cast_to_comptime_int: bool = false,
+
+ param_src: struct {
+ func_inst: Air.Inst.Ref = .none,
+ param_i: u32 = undefined,
+
+ fn get(info: @This(), sema: *Sema) !?Module.SrcLoc {
+ if (info.func_inst == .none) return null;
+ const fn_decl = (try sema.funcDeclSrc(info.func_inst)) orelse return null;
+ const param_src = Module.paramSrc(0, sema.gpa, fn_decl, info.param_i);
+ if (param_src == .node_offset_param) {
+ return Module.SrcLoc{
+ .file_scope = fn_decl.getFileScope(),
+ .parent_decl_node = fn_decl.src_node,
+ .lazy = LazySrcLoc.nodeOffset(param_src.node_offset_param),
+ };
+ }
+ return param_src.toSrcLoc(fn_decl);
+ }
+ } = .{},
};
fn coerceExtra(
@@ -24699,6 +24752,10 @@ fn coerceExtra(
}
}
+ if (try opts.param_src.get(sema)) |param_src| {
+ try sema.mod.errNoteNonLazy(param_src, msg, "parameter type declared here", .{});
+ }
+
// TODO maybe add "cannot store an error in type '{}'" note
break :msg msg;
@@ -28307,6 +28364,7 @@ fn cmpNumeric(
var lhs_bits: usize = undefined;
if (try sema.resolveMaybeUndefVal(lhs)) |lhs_val| {
+ try sema.resolveLazyValue(lhs_val);
if (lhs_val.isUndef())
return sema.addConstUndef(Type.bool);
if (lhs_val.isNan()) switch (op) {
@@ -28365,6 +28423,7 @@ fn cmpNumeric(
var rhs_bits: usize = undefined;
if (try sema.resolveMaybeUndefVal(rhs)) |rhs_val| {
+ try sema.resolveLazyValue(rhs_val);
if (rhs_val.isUndef())
return sema.addConstUndef(Type.bool);
if (rhs_val.isNan()) switch (op) {
@@ -29237,6 +29296,16 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
struct_obj.status = .have_layout;
_ = try sema.resolveTypeRequiresComptime(resolved_ty);
+
+ if (struct_obj.assumed_runtime_bits and !resolved_ty.hasRuntimeBits()) {
+ const msg = try Module.ErrorMsg.create(
+ sema.gpa,
+ struct_obj.srcLoc(sema.mod),
+ "struct layout depends on it having runtime bits",
+ .{},
+ );
+ return sema.failWithOwnedErrorMsg(msg);
+ }
}
// otherwise it's a tuple; no need to resolve anything
}
@@ -29401,6 +29470,16 @@ fn resolveUnionLayout(sema: *Sema, ty: Type) CompileError!void {
}
union_obj.status = .have_layout;
_ = try sema.resolveTypeRequiresComptime(resolved_ty);
+
+ if (union_obj.assumed_runtime_bits and !resolved_ty.hasRuntimeBits()) {
+ const msg = try Module.ErrorMsg.create(
+ sema.gpa,
+ union_obj.srcLoc(sema.mod),
+ "union layout depends on it having runtime bits",
+ .{},
+ );
+ return sema.failWithOwnedErrorMsg(msg);
+ }
}
// In case of querying the ABI alignment of this struct, we will ask
diff --git a/src/Zir.zig b/src/Zir.zig
index c7e1d22d22..ed425ea73e 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -519,7 +519,7 @@ pub const Inst = struct {
/// Includes an operand as the return value.
/// Includes a token source location.
/// Uses the `un_tok` union field.
- ret_tok,
+ ret_implicit,
/// Sends control flow back to the function's callee.
/// The return operand is `error.foo` where `foo` is given by the string.
/// If the current function has an inferred error set, the error given by the
@@ -1256,7 +1256,7 @@ pub const Inst = struct {
.compile_error,
.ret_node,
.ret_load,
- .ret_tok,
+ .ret_implicit,
.ret_err_value,
.@"unreachable",
.repeat,
@@ -1530,7 +1530,7 @@ pub const Inst = struct {
.compile_error,
.ret_node,
.ret_load,
- .ret_tok,
+ .ret_implicit,
.ret_err_value,
.ret_ptr,
.ret_type,
@@ -1659,7 +1659,7 @@ pub const Inst = struct {
.ref = .un_tok,
.ret_node = .un_node,
.ret_load = .un_node,
- .ret_tok = .un_tok,
+ .ret_implicit = .un_tok,
.ret_err_value = .str_tok,
.ret_err_value_code = .str_tok,
.ret_ptr = .node,
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 9975e08ea9..4d541c4da7 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -6113,7 +6113,7 @@ fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue {
if (info.bits <= 64) {
const unsigned = switch (info.signedness) {
.signed => blk: {
- const signed = typed_value.val.toSignedInt();
+ const signed = typed_value.val.toSignedInt(target);
break :blk @bitCast(u64, signed);
},
.unsigned => typed_value.val.toUnsignedInt(target),
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 7a9e00b83f..257255e00c 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -6070,7 +6070,7 @@ fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue {
if (info.bits <= ptr_bits) {
const unsigned = switch (info.signedness) {
.signed => blk: {
- const signed = @intCast(i32, typed_value.val.toSignedInt());
+ const signed = @intCast(i32, typed_value.val.toSignedInt(target));
break :blk @bitCast(u32, signed);
},
.unsigned => @intCast(u32, typed_value.val.toUnsignedInt(target)),
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index 9f345767de..cb5df697f5 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -3751,7 +3751,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
if (info.bits <= 64) {
const unsigned = switch (info.signedness) {
.signed => blk: {
- const signed = typed_value.val.toSignedInt();
+ const signed = typed_value.val.toSignedInt(target);
break :blk @bitCast(u64, signed);
},
.unsigned => typed_value.val.toUnsignedInt(target),
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index c424c7b59d..1d24076154 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -2670,11 +2670,11 @@ fn lowerConstant(func: *CodeGen, arg_val: Value, ty: Type) InnerError!WValue {
switch (int_info.signedness) {
.signed => switch (int_info.bits) {
0...32 => return WValue{ .imm32 = @intCast(u32, toTwosComplement(
- val.toSignedInt(),
+ val.toSignedInt(target),
@intCast(u6, int_info.bits),
)) },
33...64 => return WValue{ .imm64 = toTwosComplement(
- val.toSignedInt(),
+ val.toSignedInt(target),
@intCast(u7, int_info.bits),
) },
else => unreachable,
@@ -2841,15 +2841,15 @@ fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 {
}
},
.Int => switch (ty.intInfo(func.target).signedness) {
- .signed => return @truncate(i32, val.toSignedInt()),
+ .signed => return @truncate(i32, val.toSignedInt(target)),
.unsigned => return @bitCast(i32, @truncate(u32, val.toUnsignedInt(target))),
},
.ErrorSet => {
const kv = func.bin_file.base.options.module.?.getErrorValue(val.getError().?) catch unreachable; // passed invalid `Value` to function
return @bitCast(i32, kv.value);
},
- .Bool => return @intCast(i32, val.toSignedInt()),
- .Pointer => return @intCast(i32, val.toSignedInt()),
+ .Bool => return @intCast(i32, val.toSignedInt(target)),
+ .Pointer => return @intCast(i32, val.toSignedInt(target)),
else => unreachable, // Programmer called this function for an illegal type
}
}
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index ae7cbc762d..baafdc9d1f 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -6862,7 +6862,7 @@ fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue {
.Int => {
const info = typed_value.ty.intInfo(self.target.*);
if (info.bits <= ptr_bits and info.signedness == .signed) {
- return MCValue{ .immediate = @bitCast(u64, typed_value.val.toSignedInt()) };
+ return MCValue{ .immediate = @bitCast(u64, typed_value.val.toSignedInt(target)) };
}
if (!(info.bits > ptr_bits or info.signedness == .signed)) {
return MCValue{ .immediate = typed_value.val.toUnsignedInt(target) };
diff --git a/src/codegen.zig b/src/codegen.zig
index d6b2ed7d93..2261ba3b94 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -472,7 +472,7 @@ pub fn generateSymbol(
if (info.bits <= 8) {
const x: u8 = switch (info.signedness) {
.unsigned => @intCast(u8, typed_value.val.toUnsignedInt(target)),
- .signed => @bitCast(u8, @intCast(i8, typed_value.val.toSignedInt())),
+ .signed => @bitCast(u8, @intCast(i8, typed_value.val.toSignedInt(target))),
};
try code.append(x);
return Result{ .appended = {} };
@@ -501,13 +501,13 @@ pub fn generateSymbol(
},
.signed => {
if (info.bits <= 16) {
- const x = @intCast(i16, typed_value.val.toSignedInt());
+ const x = @intCast(i16, typed_value.val.toSignedInt(target));
mem.writeInt(i16, try code.addManyAsArray(2), x, endian);
} else if (info.bits <= 32) {
- const x = @intCast(i32, typed_value.val.toSignedInt());
+ const x = @intCast(i32, typed_value.val.toSignedInt(target));
mem.writeInt(i32, try code.addManyAsArray(4), x, endian);
} else {
- const x = typed_value.val.toSignedInt();
+ const x = typed_value.val.toSignedInt(target);
mem.writeInt(i64, try code.addManyAsArray(8), x, endian);
}
},
@@ -549,13 +549,13 @@ pub fn generateSymbol(
},
.signed => {
if (info.bits <= 16) {
- const x = @intCast(i16, int_val.toSignedInt());
+ const x = @intCast(i16, int_val.toSignedInt(target));
mem.writeInt(i16, try code.addManyAsArray(2), x, endian);
} else if (info.bits <= 32) {
- const x = @intCast(i32, int_val.toSignedInt());
+ const x = @intCast(i32, int_val.toSignedInt(target));
mem.writeInt(i32, try code.addManyAsArray(4), x, endian);
} else {
- const x = int_val.toSignedInt();
+ const x = int_val.toSignedInt(target);
mem.writeInt(i64, try code.addManyAsArray(8), x, endian);
}
},
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 59a42c721b..5b8b6433bc 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -717,10 +717,21 @@ pub const DeclGen = struct {
val = rt.data;
}
const target = dg.module.getTarget();
+
+ const safety_on = switch (dg.module.optimizeMode()) {
+ .Debug, .ReleaseSafe => true,
+ .ReleaseFast, .ReleaseSmall => false,
+ };
+
if (val.isUndefDeep()) {
switch (ty.zigTypeTag()) {
- // bool b = 0xaa; evals to true, but memcpy(&b, 0xaa, 1); evals to false.
- .Bool => return dg.renderValue(writer, ty, Value.false, location),
+ .Bool => {
+ if (safety_on) {
+ return writer.writeAll("0xaa");
+ } else {
+ return writer.writeAll("false");
+ }
+ },
.Int, .Enum, .ErrorSet => return writer.print("{x}", .{try dg.fmtIntLiteral(ty, val)}),
.Float => {
const bits = ty.floatBits(target);
@@ -1099,7 +1110,13 @@ pub const DeclGen = struct {
},
}
},
- .Bool => return writer.print("zig_{}", .{val.toBool()}),
+ .Bool => {
+ if (val.toBool()) {
+ return writer.writeAll("true");
+ } else {
+ return writer.writeAll("false");
+ }
+ },
.Optional => {
var opt_buf: Type.Payload.ElemType = undefined;
const payload_ty = ty.optionalChild(&opt_buf);
@@ -1141,8 +1158,10 @@ pub const DeclGen = struct {
if (!payload_ty.hasRuntimeBits()) {
// We use the error type directly as the type.
- const err_val = if (val.errorUnionIsPayload()) Value.initTag(.zero) else val;
- return dg.renderValue(writer, error_ty, err_val, location);
+ if (val.errorUnionIsPayload()) {
+ return try writer.writeByte('0');
+ }
+ return dg.renderValue(writer, error_ty, val, location);
}
if (location != .Initializer) {
@@ -1802,10 +1821,9 @@ pub const DeclGen = struct {
const target = dg.module.getTarget();
switch (t.zigTypeTag()) {
- .Void => {
- try w.writeAll("void");
- },
- .NoReturn, .Bool, .Float => {
+ .Void => try w.writeAll("void"),
+ .Bool => try w.writeAll("bool"),
+ .NoReturn, .Float => {
try w.writeAll("zig_");
try t.print(w, dg.module);
},
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 6215a11e6f..3fd1effc21 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -8932,7 +8932,7 @@ pub const FuncGen = struct {
if (elem.isUndef()) {
val.* = llvm_i32.getUndef();
} else {
- const int = elem.toSignedInt();
+ const int = elem.toSignedInt(self.dg.module.getTarget());
const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int + a_len);
val.* = llvm_i32.constInt(unsigned, .False);
}
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index ada3918baf..bd0c8bc53c 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -360,7 +360,7 @@ pub const DeclGen = struct {
// Note, value is required to be sign-extended, so we don't need to mask off the upper bits.
// See https://www.khronos.org/registry/SPIR-V/specs/unified1/SPIRV.html#Literal
- var int_bits = if (ty.isSignedInt()) @bitCast(u64, val.toSignedInt()) else val.toUnsignedInt(target);
+ var int_bits = if (ty.isSignedInt()) @bitCast(u64, val.toSignedInt(target)) else val.toUnsignedInt(target);
const value: spec.LiteralContextDependentNumber = switch (backing_bits) {
1...32 => .{ .uint32 = @truncate(u32, int_bits) },
@@ -763,7 +763,7 @@ pub const DeclGen = struct {
if (elem.isUndef()) {
self.func.body.writeOperand(spec.LiteralInteger, 0xFFFF_FFFF);
} else {
- const int = elem.toSignedInt();
+ const int = elem.toSignedInt(self.getTarget());
const unsigned = if (int >= 0) @intCast(u32, int) else @intCast(u32, ~int + a_len);
self.func.body.writeOperand(spec.LiteralInteger, unsigned);
}
diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig
index 63ccb57e77..54f4300098 100644
--- a/src/link/Dwarf.zig
+++ b/src/link/Dwarf.zig
@@ -410,7 +410,7 @@ pub const DeclState = struct {
// See https://github.com/ziglang/zig/issues/645
var int_buffer: Value.Payload.U64 = undefined;
const field_int_val = value.enumToInt(ty, &int_buffer);
- break :value @bitCast(u64, field_int_val.toSignedInt());
+ break :value @bitCast(u64, field_int_val.toSignedInt(target));
} else @intCast(u64, field_i);
mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), value, target_endian);
}
diff --git a/src/print_air.zig b/src/print_air.zig
index bed6f029b1..86ba81e110 100644
--- a/src/print_air.zig
+++ b/src/print_air.zig
@@ -750,6 +750,9 @@ const Writer = struct {
fn writeSwitchBr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const pl_op = w.air.instructions.items(.data)[inst].pl_op;
const switch_br = w.air.extraData(Air.SwitchBr, pl_op.payload);
+ const liveness = w.liveness.getSwitchBr(w.gpa, inst, switch_br.data.cases_len + 1) catch
+ @panic("out of memory");
+ defer w.gpa.free(liveness.deaths);
var extra_index: usize = switch_br.end;
var case_i: u32 = 0;
@@ -770,6 +773,17 @@ const Writer = struct {
}
try s.writeAll("] => {\n");
w.indent += 2;
+
+ const deaths = liveness.deaths[case_i];
+ if (deaths.len != 0) {
+ try s.writeByteNTimes(' ', w.indent);
+ for (deaths) |operand, i| {
+ if (i != 0) try s.writeAll(" ");
+ try s.print("%{d}!", .{operand});
+ }
+ try s.writeAll("\n");
+ }
+
try w.writeBody(s, case_body);
w.indent -= 2;
try s.writeByteNTimes(' ', w.indent);
@@ -780,6 +794,17 @@ const Writer = struct {
if (else_body.len != 0) {
try s.writeAll(", else => {\n");
w.indent += 2;
+
+ const deaths = liveness.deaths[liveness.deaths.len - 1];
+ if (deaths.len != 0) {
+ try s.writeByteNTimes(' ', w.indent);
+ for (deaths) |operand, i| {
+ if (i != 0) try s.writeAll(" ");
+ try s.print("%{d}!", .{operand});
+ }
+ try s.writeAll("\n");
+ }
+
try w.writeBody(s, else_body);
w.indent -= 2;
try s.writeByteNTimes(' ', w.indent);
diff --git a/src/print_zir.zig b/src/print_zir.zig
index 9135c22010..542f0e977d 100644
--- a/src/print_zir.zig
+++ b/src/print_zir.zig
@@ -235,7 +235,7 @@ const Writer = struct {
=> try self.writeUnNode(stream, inst),
.ref,
- .ret_tok,
+ .ret_implicit,
.closure_capture,
.switch_capture_tag,
=> try self.writeUnTok(stream, inst),
diff --git a/src/type.zig b/src/type.zig
index 21cfdf9d73..d36bd285f8 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -160,6 +160,17 @@ pub const Type = extern union {
}
}
+ pub fn baseZigTypeTag(self: Type) std.builtin.TypeId {
+ return switch (self.zigTypeTag()) {
+ .ErrorUnion => self.errorUnionPayload().baseZigTypeTag(),
+ .Optional => {
+ var buf: Payload.ElemType = undefined;
+ return self.optionalChild(&buf).baseZigTypeTag();
+ },
+ else => |t| t,
+ };
+ }
+
pub fn isSelfComparable(ty: Type, is_equality_cmp: bool) bool {
return switch (ty.zigTypeTag()) {
.Int,
@@ -2459,6 +2470,7 @@ pub const Type = extern union {
if (struct_obj.status == .field_types_wip) {
// In this case, we guess that hasRuntimeBits() for this type is true,
// and then later if our guess was incorrect, we emit a compile error.
+ struct_obj.assumed_runtime_bits = true;
return true;
}
switch (strat) {
@@ -2491,6 +2503,12 @@ pub const Type = extern union {
.@"union" => {
const union_obj = ty.castTag(.@"union").?.data;
+ if (union_obj.status == .field_types_wip) {
+ // In this case, we guess that hasRuntimeBits() for this type is true,
+ // and then later if our guess was incorrect, we emit a compile error.
+ union_obj.assumed_runtime_bits = true;
+ return true;
+ }
switch (strat) {
.sema => |sema| _ = try sema.resolveTypeFields(ty),
.eager => assert(union_obj.haveFieldTypes()),
@@ -3027,8 +3045,9 @@ pub const Type = extern union {
const struct_obj = ty.castTag(.@"struct").?.data;
if (opt_sema) |sema| {
if (struct_obj.status == .field_types_wip) {
- // We'll guess "pointer-aligned" and if we guess wrong, emit
- // a compile error later.
+ // We'll guess "pointer-aligned"; if the struct has an
+ // underaligned pointer field then some allocations
+ // might require explicit alignment.
return AbiAlignmentAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) };
}
_ = try sema.resolveTypeFields(ty);
@@ -3153,8 +3172,9 @@ pub const Type = extern union {
};
if (opt_sema) |sema| {
if (union_obj.status == .field_types_wip) {
- // We'll guess "pointer-aligned" and if we guess wrong, emit
- // a compile error later.
+ // We'll guess "pointer-aligned"; if the union has an
+ // underaligned pointer field then some allocations
+ // might require explicit alignment.
return AbiAlignmentAdvanced{ .scalar = @divExact(target.cpu.arch.ptrBitWidth(), 8) };
}
_ = try sema.resolveTypeFields(ty);
@@ -5234,7 +5254,12 @@ pub const Type = extern union {
.@"struct" => {
const struct_obj = ty.castTag(.@"struct").?.data;
switch (struct_obj.requires_comptime) {
- .wip, .unknown => unreachable, // This function asserts types already resolved.
+ .wip, .unknown => {
+ // Return false to avoid incorrect dependency loops.
+ // This will be handled correctly once merged with
+ // `Sema.typeRequiresComptime`.
+ return false;
+ },
.no => return false,
.yes => return true,
}
@@ -5243,7 +5268,12 @@ pub const Type = extern union {
.@"union", .union_safety_tagged, .union_tagged => {
const union_obj = ty.cast(Type.Payload.Union).?.data;
switch (union_obj.requires_comptime) {
- .wip, .unknown => unreachable, // This function asserts types already resolved.
+ .wip, .unknown => {
+ // Return false to avoid incorrect dependency loops.
+ // This will be handled correctly once merged with
+ // `Sema.typeRequiresComptime`.
+ return false;
+ },
.no => return false,
.yes => return true,
}
@@ -6472,8 +6502,16 @@ pub const Type = extern union {
// type, we change it to 0 here. If this causes an assertion trip because the
// pointee type needs to be resolved more, that needs to be done before calling
// this ptr() function.
- if (d.@"align" != 0 and d.@"align" == d.pointee_type.abiAlignment(target)) {
- d.@"align" = 0;
+ if (d.@"align" != 0) canonicalize: {
+ if (d.pointee_type.castTag(.@"struct")) |struct_ty| {
+ if (!struct_ty.data.haveLayout()) break :canonicalize;
+ }
+ if (d.pointee_type.cast(Payload.Union)) |union_ty| {
+ if (!union_ty.data.haveLayout()) break :canonicalize;
+ }
+ if (d.@"align" == d.pointee_type.abiAlignment(target)) {
+ d.@"align" = 0;
+ }
}
// Canonicalize host_size. If it matches the bit size of the pointee type,
diff --git a/src/value.zig b/src/value.zig
index be643f65a3..d3035946f9 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -187,7 +187,7 @@ pub const Value = extern union {
bound_fn,
/// The ABI alignment of the payload type.
lazy_align,
- /// The ABI alignment of the payload type.
+ /// The ABI size of the payload type.
lazy_size,
pub const last_no_payload_tag = Tag.empty_array;
@@ -1201,8 +1201,8 @@ pub const Value = extern union {
}
/// Asserts the value is an integer and it fits in a i64
- pub fn toSignedInt(self: Value) i64 {
- switch (self.tag()) {
+ pub fn toSignedInt(val: Value, target: Target) i64 {
+ switch (val.tag()) {
.zero,
.bool_false,
.the_only_possible_value, // i0, u0
@@ -1212,10 +1212,19 @@ pub const Value = extern union {
.bool_true,
=> return 1,
- .int_u64 => return @intCast(i64, self.castTag(.int_u64).?.data),
- .int_i64 => return self.castTag(.int_i64).?.data,
- .int_big_positive => return self.castTag(.int_big_positive).?.asBigInt().to(i64) catch unreachable,
- .int_big_negative => return self.castTag(.int_big_negative).?.asBigInt().to(i64) catch unreachable,
+ .int_u64 => return @intCast(i64, val.castTag(.int_u64).?.data),
+ .int_i64 => return val.castTag(.int_i64).?.data,
+ .int_big_positive => return val.castTag(.int_big_positive).?.asBigInt().to(i64) catch unreachable,
+ .int_big_negative => return val.castTag(.int_big_negative).?.asBigInt().to(i64) catch unreachable,
+
+ .lazy_align => {
+ const ty = val.castTag(.lazy_align).?.data;
+ return @intCast(i64, ty.abiAlignment(target));
+ },
+ .lazy_size => {
+ const ty = val.castTag(.lazy_size).?.data;
+ return @intCast(i64, ty.abiSize(target));
+ },
.undef => unreachable,
else => unreachable,
diff --git a/test/behavior.zig b/test/behavior.zig
index 4e959fcf7d..3b6eb9c6ef 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -90,6 +90,7 @@ test {
_ = @import("behavior/bugs/12430.zig");
_ = @import("behavior/bugs/12486.zig");
_ = @import("behavior/bugs/12488.zig");
+ _ = @import("behavior/bugs/12498.zig");
_ = @import("behavior/bugs/12551.zig");
_ = @import("behavior/bugs/12644.zig");
_ = @import("behavior/bugs/12680.zig");
diff --git a/test/behavior/bugs/12498.zig b/test/behavior/bugs/12498.zig
new file mode 100644
index 0000000000..3e4bafc2db
--- /dev/null
+++ b/test/behavior/bugs/12498.zig
@@ -0,0 +1,8 @@
+const std = @import("std");
+const expect = std.testing.expect;
+
+const S = struct { a: usize };
+test "lazy abi size used in comparison" {
+ var rhs: i32 = 100;
+ try expect(@sizeOf(S) < rhs);
+}
diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig
index 2340e364d1..39e1bd10bb 100644
--- a/test/behavior/struct.zig
+++ b/test/behavior/struct.zig
@@ -1405,3 +1405,15 @@ test "address of zero-bit field is equal to address of only field" {
try std.testing.expectEqual(&a, a_ptr);
}
}
+
+test "struct field has a pointer to an aligned version of itself" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+
+ const E = struct {
+ next: *align(1) @This(),
+ };
+ var e: E = undefined;
+ e = .{ .next = &e };
+
+ try expect(&e == e.next);
+}
diff --git a/test/cases/aarch64-macos/hello_world_with_updates.1.zig b/test/cases/aarch64-macos/hello_world_with_updates.1.zig
index e18a4c6a1e..dcf18bbf87 100644
--- a/test/cases/aarch64-macos/hello_world_with_updates.1.zig
+++ b/test/cases/aarch64-macos/hello_world_with_updates.1.zig
@@ -2,5 +2,5 @@ pub export fn main() noreturn {}
// error
//
-// :1:32: error: function declared 'noreturn' returns
-// :1:22: note: 'noreturn' declared here
+// :1:22: error: function declared 'noreturn' implicitly returns
+// :1:32: note: control flow reaches end of body here
diff --git a/test/cases/compile_errors/calling_var_args_extern_function_passing_array_instead_of_pointer.zig b/test/cases/compile_errors/calling_var_args_extern_function_passing_array_instead_of_pointer.zig
index 846a5ff81a..c9e2e2e5eb 100644
--- a/test/cases/compile_errors/calling_var_args_extern_function_passing_array_instead_of_pointer.zig
+++ b/test/cases/compile_errors/calling_var_args_extern_function_passing_array_instead_of_pointer.zig
@@ -8,3 +8,4 @@ pub extern fn foo(format: *const u8, ...) void;
// target=native
//
// :2:16: error: expected type '*const u8', found '[5:0]u8'
+// :4:27: note: parameter type declared here
diff --git a/test/cases/compile_errors/casting_bit_offset_pointer_to_regular_pointer.zig b/test/cases/compile_errors/casting_bit_offset_pointer_to_regular_pointer.zig
index 132627557e..7fd38280c6 100644
--- a/test/cases/compile_errors/casting_bit_offset_pointer_to_regular_pointer.zig
+++ b/test/cases/compile_errors/casting_bit_offset_pointer_to_regular_pointer.zig
@@ -21,3 +21,4 @@ export fn entry() usize { return @sizeOf(@TypeOf(&foo)); }
// :8:16: error: expected type '*const u3', found '*align(0:3:1) const u3'
// :8:16: note: pointer host size '1' cannot cast into pointer host size '0'
// :8:16: note: pointer bit offset '3' cannot cast into pointer bit offset '0'
+// :11:11: note: parameter type declared here
diff --git a/test/cases/compile_errors/closure_get_in_param_ty_instantiate_incorrectly.zig b/test/cases/compile_errors/closure_get_in_param_ty_instantiate_incorrectly.zig
index dc533442fb..8855755ec1 100644
--- a/test/cases/compile_errors/closure_get_in_param_ty_instantiate_incorrectly.zig
+++ b/test/cases/compile_errors/closure_get_in_param_ty_instantiate_incorrectly.zig
@@ -22,3 +22,4 @@ pub export fn entry() void {
// target=native
//
// :17:25: error: expected type 'u32', found 'type'
+// :3:21: note: parameter type declared here
diff --git a/test/cases/compile_errors/control_reaches_end_of_non-void_function.zig b/test/cases/compile_errors/control_reaches_end_of_non-void_function.zig
deleted file mode 100644
index c92b6b0927..0000000000
--- a/test/cases/compile_errors/control_reaches_end_of_non-void_function.zig
+++ /dev/null
@@ -1,9 +0,0 @@
-fn a() i32 {}
-export fn entry() void { _ = a(); }
-
-// error
-// backend=stage2
-// target=native
-//
-// :1:13: error: expected type 'i32', found 'void'
-// :1:8: note: function return type declared here
diff --git a/test/cases/compile_errors/disallow_coercion_from_non-null-terminated_pointer_to_null-terminated_pointer.zig b/test/cases/compile_errors/disallow_coercion_from_non-null-terminated_pointer_to_null-terminated_pointer.zig
index 0074c26de7..3670def4ee 100644
--- a/test/cases/compile_errors/disallow_coercion_from_non-null-terminated_pointer_to_null-terminated_pointer.zig
+++ b/test/cases/compile_errors/disallow_coercion_from_non-null-terminated_pointer_to_null-terminated_pointer.zig
@@ -11,3 +11,4 @@ pub export fn entry() void {
//
// :5:14: error: expected type '[*:0]const u8', found '[*]const u8'
// :5:14: note: destination pointer requires '0' sentinel
+// :1:20: note: parameter type declared here
diff --git a/test/cases/compile_errors/double_pointer_to_anyopaque_pointer.zig b/test/cases/compile_errors/double_pointer_to_anyopaque_pointer.zig
index 6aa9618dbd..c7e54738d1 100644
--- a/test/cases/compile_errors/double_pointer_to_anyopaque_pointer.zig
+++ b/test/cases/compile_errors/double_pointer_to_anyopaque_pointer.zig
@@ -24,5 +24,6 @@ pub export fn entry3() void {
// :4:35: note: cannot implicitly cast double pointer '*const *const usize' to anyopaque pointer '*const anyopaque'
// :9:10: error: expected type '?*anyopaque', found '*[*:0]u8'
// :9:10: note: cannot implicitly cast double pointer '*[*:0]u8' to anyopaque pointer '?*anyopaque'
+// :11:12: note: parameter type declared here
// :15:35: error: expected type '*const anyopaque', found '*?*usize'
// :15:35: note: cannot implicitly cast double pointer '*?*usize' to anyopaque pointer '*const anyopaque'
diff --git a/test/cases/compile_errors/implicitly_increasing_pointer_alignment.zig b/test/cases/compile_errors/implicitly_increasing_pointer_alignment.zig
index b9a9fb2faf..13adba1b91 100644
--- a/test/cases/compile_errors/implicitly_increasing_pointer_alignment.zig
+++ b/test/cases/compile_errors/implicitly_increasing_pointer_alignment.zig
@@ -18,3 +18,4 @@ fn bar(x: *u32) void {
//
// :8:9: error: expected type '*u32', found '*align(1) u32'
// :8:9: note: pointer alignment '1' cannot cast into pointer alignment '4'
+// :11:11: note: parameter type declared here
diff --git a/test/cases/compile_errors/invalid_compare_string.zig b/test/cases/compile_errors/invalid_compare_string.zig
new file mode 100644
index 0000000000..a5c7f041a5
--- /dev/null
+++ b/test/cases/compile_errors/invalid_compare_string.zig
@@ -0,0 +1,29 @@
+comptime {
+ var a = "foo";
+ if (a == "foo") unreachable;
+}
+comptime {
+ var a = "foo";
+ if (a == ("foo")) unreachable; // intentionally allow
+}
+comptime {
+ var a = "foo";
+ switch (a) {
+ "foo" => unreachable,
+ else => {},
+ }
+}
+comptime {
+ var a = "foo";
+ switch (a) {
+ ("foo") => unreachable, // intentionally allow
+ else => {},
+ }
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :3:11: error: cannot compare strings with ==
+// :12:9: error: cannot switch on strings
diff --git a/test/cases/compile_errors/invalid_dependency_on_struct_size.zig b/test/cases/compile_errors/invalid_dependency_on_struct_size.zig
new file mode 100644
index 0000000000..02ea7e2710
--- /dev/null
+++ b/test/cases/compile_errors/invalid_dependency_on_struct_size.zig
@@ -0,0 +1,19 @@
+comptime {
+ const S = struct {
+ const Foo = struct {
+ y: Bar,
+ };
+ const Bar = struct {
+ y: if (@sizeOf(Foo) == 0) u64 else void,
+ };
+ };
+
+ _ = @sizeOf(S.Foo) + 1;
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :6:21: error: struct layout depends on it having runtime bits
+// :4:13: note: while checking this field
diff --git a/test/cases/compile_errors/missing_parameter_name.zig b/test/cases/compile_errors/missing_parameter_name.zig
new file mode 100644
index 0000000000..3e576a1a6d
--- /dev/null
+++ b/test/cases/compile_errors/missing_parameter_name.zig
@@ -0,0 +1,19 @@
+fn f2(u64) u64 {
+ return x;
+}
+fn f3(*x) u64 {
+ return x;
+}
+fn f1(x) u64 {
+ return x;
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :1:7: error: missing parameter name
+// :4:7: error: missing parameter name
+// :7:7: error: missing parameter name or type
+// :7:7: note: if this is a name, annotate its type 'x: T'
+// :7:7: note: if this is a type, give it a name '<name>: x'
diff --git a/test/cases/compile_errors/pass_const_ptr_to_mutable_ptr_fn.zig b/test/cases/compile_errors/pass_const_ptr_to_mutable_ptr_fn.zig
index 39c55870f1..fd24b58f55 100644
--- a/test/cases/compile_errors/pass_const_ptr_to_mutable_ptr_fn.zig
+++ b/test/cases/compile_errors/pass_const_ptr_to_mutable_ptr_fn.zig
@@ -16,3 +16,4 @@ export fn entry() usize { return @sizeOf(@TypeOf(&foo)); }
//
// :4:19: error: expected type '*[]const u8', found '*const []const u8'
// :4:19: note: cast discards const qualifier
+// :6:14: note: parameter type declared here
diff --git a/test/cases/compile_errors/struct_init_passed_to_type_param.zig b/test/cases/compile_errors/struct_init_passed_to_type_param.zig
index b00c27986f..01906ac613 100644
--- a/test/cases/compile_errors/struct_init_passed_to_type_param.zig
+++ b/test/cases/compile_errors/struct_init_passed_to_type_param.zig
@@ -12,3 +12,4 @@ export const value = hi(MyStruct{ .x = 12 });
//
// :7:33: error: expected type 'type', found 'tmp.MyStruct'
// :1:18: note: struct declared here
+// :3:19: note: parameter type declared here
diff --git a/test/cases/compile_errors/struct_type_mismatch_in_arg.zig b/test/cases/compile_errors/struct_type_mismatch_in_arg.zig
new file mode 100644
index 0000000000..a52bdfab6c
--- /dev/null
+++ b/test/cases/compile_errors/struct_type_mismatch_in_arg.zig
@@ -0,0 +1,18 @@
+const Foo = struct { i: i32 };
+const Bar = struct { j: i32 };
+
+pub fn helper(_: Foo, _: Bar) void { }
+
+comptime {
+ helper(Bar { .j = 10 }, Bar { .j = 10 });
+ helper(Bar { .i = 10 }, Bar { .j = 10 });
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :7:16: error: expected type 'tmp.Foo', found 'tmp.Bar'
+// :1:13: note: struct declared here
+// :2:13: note: struct declared here
+// :4:18: note: parameter type declared here
diff --git a/test/cases/compile_errors/switch_on_slice.zig b/test/cases/compile_errors/switch_on_slice.zig
index b4644b132c..c2b28f67ab 100644
--- a/test/cases/compile_errors/switch_on_slice.zig
+++ b/test/cases/compile_errors/switch_on_slice.zig
@@ -1,7 +1,7 @@
pub export fn entry() void {
var a: [:0]const u8 = "foo";
switch (a) {
- "--version", "version" => unreachable,
+ ("--version"), ("version") => unreachable,
else => {},
}
}
diff --git a/test/cases/compile_errors/type_error_in_implicit_return.zig b/test/cases/compile_errors/type_error_in_implicit_return.zig
new file mode 100644
index 0000000000..8c8d498c97
--- /dev/null
+++ b/test/cases/compile_errors/type_error_in_implicit_return.zig
@@ -0,0 +1,17 @@
+fn f1(x: bool) u32 {
+ if (x) return 1;
+}
+fn f2() noreturn {}
+pub export fn entry() void {
+ _ = f1(true);
+ _ = f2();
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :1:16: error: function with non-void return type 'u32' implicitly returns
+// :3:1: note: control flow reaches end of body here
+// :4:9: error: function declared 'noreturn' implicitly returns
+// :4:19: note: control flow reaches end of body here
diff --git a/test/cases/compile_errors/wrong_pointer_coerced_to_pointer_to_opaque_{}.zig b/test/cases/compile_errors/wrong_pointer_coerced_to_pointer_to_opaque_{}.zig
index e22c285cb0..a050eb6a4c 100644
--- a/test/cases/compile_errors/wrong_pointer_coerced_to_pointer_to_opaque_{}.zig
+++ b/test/cases/compile_errors/wrong_pointer_coerced_to_pointer_to_opaque_{}.zig
@@ -12,3 +12,4 @@ export fn foo() void {
// :5:9: error: expected type '*tmp.Derp', found '*anyopaque'
// :5:9: note: pointer type child 'anyopaque' cannot cast into pointer type child 'tmp.Derp'
// :1:14: note: opaque declared here
+// :2:18: note: parameter type declared here
diff --git a/test/cases/x86_64-linux/hello_world_with_updates.1.zig b/test/cases/x86_64-linux/hello_world_with_updates.1.zig
index 1f1a6a9682..dcf18bbf87 100644
--- a/test/cases/x86_64-linux/hello_world_with_updates.1.zig
+++ b/test/cases/x86_64-linux/hello_world_with_updates.1.zig
@@ -1,6 +1,6 @@
-pub export fn _start() noreturn {}
+pub export fn main() noreturn {}
// error
//
-// :1:34: error: function declared 'noreturn' returns
-// :1:24: note: 'noreturn' declared here
+// :1:22: error: function declared 'noreturn' implicitly returns
+// :1:32: note: control flow reaches end of body here
diff --git a/test/cases/x86_64-macos/hello_world_with_updates.1.zig b/test/cases/x86_64-macos/hello_world_with_updates.1.zig
index e18a4c6a1e..dcf18bbf87 100644
--- a/test/cases/x86_64-macos/hello_world_with_updates.1.zig
+++ b/test/cases/x86_64-macos/hello_world_with_updates.1.zig
@@ -2,5 +2,5 @@ pub export fn main() noreturn {}
// error
//
-// :1:32: error: function declared 'noreturn' returns
-// :1:22: note: 'noreturn' declared here
+// :1:22: error: function declared 'noreturn' implicitly returns
+// :1:32: note: control flow reaches end of body here
diff --git a/test/cases/x86_64-windows/hello_world_with_updates.1.zig b/test/cases/x86_64-windows/hello_world_with_updates.1.zig
index e18a4c6a1e..dcf18bbf87 100644
--- a/test/cases/x86_64-windows/hello_world_with_updates.1.zig
+++ b/test/cases/x86_64-windows/hello_world_with_updates.1.zig
@@ -2,5 +2,5 @@ pub export fn main() noreturn {}
// error
//
-// :1:32: error: function declared 'noreturn' returns
-// :1:22: note: 'noreturn' declared here
+// :1:22: error: function declared 'noreturn' implicitly returns
+// :1:32: note: control flow reaches end of body here
diff --git a/test/stage2/cbe.zig b/test/stage2/cbe.zig
index 441b8b23d3..6c0c5e03cf 100644
--- a/test/stage2/cbe.zig
+++ b/test/stage2/cbe.zig
@@ -985,7 +985,7 @@ pub fn addCases(ctx: *TestContext) !void {
ctx.h("header with bool param function", linux_x64,
\\export fn start(a: bool) void{_ = a;}
,
- \\zig_extern void start(zig_bool const a0);
+ \\zig_extern void start(bool const a0);
\\
);
ctx.h("header with noreturn function", linux_x64,