From d171279d7965137ac835f2ed6d48517478508eae Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 2 Dec 2022 15:42:20 -0700 Subject: CBE: use a 0 literal instead of `error.@"(no error)"` This saves bytes and is easier to read too. --- src/codegen/c.zig | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/codegen/c.zig b/src/codegen/c.zig index d7aec355e9..e38aedb125 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1141,8 +1141,10 @@ pub const DeclGen = struct { if (!payload_ty.hasRuntimeBits()) { // We use the error type directly as the type. - const err_val = if (val.errorUnionIsPayload()) Value.initTag(.zero) else val; - return dg.renderValue(writer, error_ty, err_val, location); + if (val.errorUnionIsPayload()) { + return try writer.writeByte('0'); + } + return dg.renderValue(writer, error_ty, val, location); } if (location != .Initializer) { -- cgit v1.2.3 From 29e8e67a7ec767aed0b9b689bdcad7355c032751 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 2 Dec 2022 16:06:28 -0700 Subject: CBE: use bool, true, false, instead of `zig_` prefixes In general the C backend should lower to human-maintainable C code whenever possible. Directly using C types that one would use when writing C code is one part of the strategy. The concern with including stdbool.h is C89 compatibility. Well, we can just check the C std lib version before deciding to include that header. --- lib/zig.h | 178 +++++++++++++++++++++++++--------------------------- src/codegen/c.zig | 30 ++++++--- test/stage2/cbe.zig | 2 +- 3 files changed, 111 insertions(+), 99 deletions(-) diff --git a/lib/zig.h b/lib/zig.h index acb2255e74..5a57bbda04 100644 --- a/lib/zig.h +++ b/lib/zig.h @@ -6,6 +6,16 @@ #include #include +#if !defined(__cplusplus) && __STDC_VERSION__ <= 201710L +#if __STDC_VERSION__ >= 199901L +#include <stdbool.h> +#else +typedef char bool; +#define false 0 +#define true 1 +#endif +#endif + #if defined(__has_builtin) #define zig_has_builtin(builtin) __has_builtin(__builtin_##builtin) #else @@ -136,8 +146,8 @@ #define memory_order_acq_rel __ATOMIC_ACQ_REL #define memory_order_seq_cst __ATOMIC_SEQ_CST #define zig_atomic(type) type -#define zig_cmpxchg_strong(obj, expected, desired, succ, fail) __atomic_compare_exchange_n(obj, &(expected), desired, zig_false, succ, fail) -#define zig_cmpxchg_weak(obj, expected, desired, succ, fail) __atomic_compare_exchange_n(obj, &(expected), desired, zig_true , succ, fail) +#define zig_cmpxchg_strong(obj, expected, desired, succ, fail) __atomic_compare_exchange_n(obj, &(expected), desired, false, succ, fail) +#define zig_cmpxchg_weak(obj, expected, desired, succ, fail) __atomic_compare_exchange_n(obj, &(expected), desired, true , succ, fail) #define zig_atomicrmw_xchg(obj, arg, order) __atomic_exchange_n(obj, arg, order) #define zig_atomicrmw_add(obj, arg, order) __atomic_fetch_add (obj, arg, order) #define zig_atomicrmw_sub(obj, arg, order) __atomic_fetch_sub (obj, arg, order) @@ -189,20 +199,6 @@ #define zig_bitSizeOf(T) (CHAR_BIT * sizeof(T)) -#if defined(__cplusplus) -typedef bool zig_bool; -#define zig_false false -#define zig_true true -#else -#if __STDC_VERSION__ >= 199901L -typedef _Bool zig_bool; -#else -typedef char zig_bool; -#endif -#define zig_false ((zig_bool)0) -#define zig_true ((zig_bool)1) -#endif - typedef uintptr_t zig_usize; typedef intptr_t zig_isize; typedef signed short int zig_c_short; @@ -330,10 +326,10 @@ zig_int_helpers(16) zig_int_helpers(32) zig_int_helpers(64) -static 
inline zig_bool zig_addo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) { +static inline bool zig_addo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) { #if zig_has_builtin(add_overflow) zig_u32 full_res; - zig_bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); *res = zig_wrap_u32(full_res, bits); return overflow || full_res < zig_minInt(u32, bits) || full_res > zig_maxInt(u32, bits); #else @@ -349,14 +345,14 @@ static inline void zig_vaddo_u32(zig_u8 *ov, zig_u32 *res, int n, } zig_extern zig_i32 __addosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow); -static inline zig_bool zig_addo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) { +static inline bool zig_addo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) { #if zig_has_builtin(add_overflow) zig_i32 full_res; - zig_bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); #else zig_c_int overflow_int; zig_u32 full_res = __addosi4(lhs, rhs, &overflow_int); - zig_bool overflow = overflow_int != 0; + bool overflow = overflow_int != 0; #endif *res = zig_wrap_i32(full_res, bits); return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits); @@ -368,10 +364,10 @@ static inline void zig_vaddo_i32(zig_u8 *ov, zig_i32 *res, int n, for (int i = 0; i < n; ++i) ov[i] = zig_addo_i32(&res[i], lhs[i], rhs[i], bits); } -static inline zig_bool zig_addo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) { +static inline bool zig_addo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) { #if zig_has_builtin(add_overflow) zig_u64 full_res; - zig_bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); *res = zig_wrap_u64(full_res, bits); return overflow || full_res < zig_minInt(u64, bits) || full_res > zig_maxInt(u64, bits); #else @@ -387,14 +383,14 @@ static inline void zig_vaddo_u64(zig_u8 *ov, zig_u64 *res, int n, } zig_extern zig_i64 __addodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow); -static inline zig_bool zig_addo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) { +static inline bool zig_addo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) { #if zig_has_builtin(add_overflow) zig_i64 full_res; - zig_bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); #else zig_c_int overflow_int; zig_u64 full_res = __addodi4(lhs, rhs, &overflow_int); - zig_bool overflow = overflow_int != 0; + bool overflow = overflow_int != 0; #endif *res = zig_wrap_i64(full_res, bits); return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits); @@ -406,10 +402,10 @@ static inline void zig_vaddo_i64(zig_u8 *ov, zig_i64 *res, int n, for (int i = 0; i < n; ++i) ov[i] = zig_addo_i64(&res[i], lhs[i], rhs[i], bits); } -static inline zig_bool zig_addo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) { +static inline bool zig_addo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) { #if zig_has_builtin(add_overflow) zig_u8 full_res; - zig_bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); *res = zig_wrap_u8(full_res, bits); return overflow || full_res < zig_minInt(u8, bits) || full_res > zig_maxInt(u8, bits); #else @@ -423,10 +419,10 @@ static inline void zig_vaddo_u8(zig_u8 *ov, zig_u8 *res, int n, for (int i = 
0; i < n; ++i) ov[i] = zig_addo_u8(&res[i], lhs[i], rhs[i], bits); } -static inline zig_bool zig_addo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) { +static inline bool zig_addo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) { #if zig_has_builtin(add_overflow) zig_i8 full_res; - zig_bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); *res = zig_wrap_i8(full_res, bits); return overflow || full_res < zig_minInt(i8, bits) || full_res > zig_maxInt(i8, bits); #else @@ -440,10 +436,10 @@ static inline void zig_vaddo_i8(zig_u8 *ov, zig_i8 *res, int n, for (int i = 0; i < n; ++i) ov[i] = zig_addo_i8(&res[i], lhs[i], rhs[i], bits); } -static inline zig_bool zig_addo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) { +static inline bool zig_addo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) { #if zig_has_builtin(add_overflow) zig_u16 full_res; - zig_bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); *res = zig_wrap_u16(full_res, bits); return overflow || full_res < zig_minInt(u16, bits) || full_res > zig_maxInt(u16, bits); #else @@ -457,10 +453,10 @@ static inline void zig_vaddo_u16(zig_u8 *ov, zig_u16 *res, int n, for (int i = 0; i < n; ++i) ov[i] = zig_addo_u16(&res[i], lhs[i], rhs[i], bits); } -static inline zig_bool zig_addo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) { +static inline bool zig_addo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) { #if zig_has_builtin(add_overflow) zig_i16 full_res; - zig_bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); *res = zig_wrap_i16(full_res, bits); return overflow || full_res < zig_minInt(i16, bits) || full_res > zig_maxInt(i16, bits); #else @@ -474,10 +470,10 @@ static inline void zig_vaddo_i16(zig_u8 *ov, zig_i16 *res, int n, for (int i = 0; i < n; ++i) ov[i] = zig_addo_i16(&res[i], lhs[i], rhs[i], bits); } -static inline zig_bool zig_subo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) { +static inline bool zig_subo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) { #if zig_has_builtin(sub_overflow) zig_u32 full_res; - zig_bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); *res = zig_wrap_u32(full_res, bits); return overflow || full_res < zig_minInt(u32, bits) || full_res > zig_maxInt(u32, bits); #else @@ -493,14 +489,14 @@ static inline void zig_vsubo_u32(zig_u8 *ov, zig_u32 *res, int n, } zig_extern zig_i32 __subosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow); -static inline zig_bool zig_subo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) { +static inline bool zig_subo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) { #if zig_has_builtin(sub_overflow) zig_i32 full_res; - zig_bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); #else zig_c_int overflow_int; zig_u32 full_res = __subosi4(lhs, rhs, &overflow_int); - zig_bool overflow = overflow_int != 0; + bool overflow = overflow_int != 0; #endif *res = zig_wrap_i32(full_res, bits); return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits); @@ -512,10 +508,10 @@ static inline void zig_vsubo_i32(zig_u8 *ov, zig_i32 *res, int n, for (int i = 0; i < n; ++i) ov[i] = zig_subo_i32(&res[i], lhs[i], rhs[i], bits); } -static inline 
zig_bool zig_subo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) { +static inline bool zig_subo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) { #if zig_has_builtin(sub_overflow) zig_u64 full_res; - zig_bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); *res = zig_wrap_u64(full_res, bits); return overflow || full_res < zig_minInt(u64, bits) || full_res > zig_maxInt(u64, bits); #else @@ -531,14 +527,14 @@ static inline void zig_vsubo_u64(zig_u8 *ov, zig_u64 *res, int n, } zig_extern zig_i64 __subodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow); -static inline zig_bool zig_subo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) { +static inline bool zig_subo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) { #if zig_has_builtin(sub_overflow) zig_i64 full_res; - zig_bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); #else zig_c_int overflow_int; zig_u64 full_res = __subodi4(lhs, rhs, &overflow_int); - zig_bool overflow = overflow_int != 0; + bool overflow = overflow_int != 0; #endif *res = zig_wrap_i64(full_res, bits); return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits); @@ -550,10 +546,10 @@ static inline void zig_vsubo_i64(zig_u8 *ov, zig_i64 *res, int n, for (int i = 0; i < n; ++i) ov[i] = zig_subo_i64(&res[i], lhs[i], rhs[i], bits); } -static inline zig_bool zig_subo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) { +static inline bool zig_subo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) { #if zig_has_builtin(sub_overflow) zig_u8 full_res; - zig_bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); *res = zig_wrap_u8(full_res, bits); return overflow || full_res < zig_minInt(u8, bits) || full_res > zig_maxInt(u8, bits); #else @@ -567,10 +563,10 @@ static inline void zig_vsubo_u8(zig_u8 *ov, zig_u8 *res, int n, for (int i = 0; i < n; ++i) ov[i] = zig_subo_u8(&res[i], lhs[i], rhs[i], bits); } -static inline zig_bool zig_subo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) { +static inline bool zig_subo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) { #if zig_has_builtin(sub_overflow) zig_i8 full_res; - zig_bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); *res = zig_wrap_i8(full_res, bits); return overflow || full_res < zig_minInt(i8, bits) || full_res > zig_maxInt(i8, bits); #else @@ -585,10 +581,10 @@ static inline void zig_vsubo_i8(zig_u8 *ov, zig_i8 *res, int n, } -static inline zig_bool zig_subo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) { +static inline bool zig_subo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) { #if zig_has_builtin(sub_overflow) zig_u16 full_res; - zig_bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); *res = zig_wrap_u16(full_res, bits); return overflow || full_res < zig_minInt(u16, bits) || full_res > zig_maxInt(u16, bits); #else @@ -603,10 +599,10 @@ static inline void zig_vsubo_u16(zig_u8 *ov, zig_u16 *res, int n, } -static inline zig_bool zig_subo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) { +static inline bool zig_subo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) { #if zig_has_builtin(sub_overflow) zig_i16 full_res; - zig_bool overflow = 
__builtin_sub_overflow(lhs, rhs, &full_res); + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); *res = zig_wrap_i16(full_res, bits); return overflow || full_res < zig_minInt(i16, bits) || full_res > zig_maxInt(i16, bits); #else @@ -620,10 +616,10 @@ static inline void zig_vsubo_i16(zig_u8 *ov, zig_i16 *res, int n, for (int i = 0; i < n; ++i) ov[i] = zig_subo_i16(&res[i], lhs[i], rhs[i], bits); } -static inline zig_bool zig_mulo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) { +static inline bool zig_mulo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) { #if zig_has_builtin(mul_overflow) zig_u32 full_res; - zig_bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); *res = zig_wrap_u32(full_res, bits); return overflow || full_res < zig_minInt(u32, bits) || full_res > zig_maxInt(u32, bits); #else @@ -639,14 +635,14 @@ static inline void zig_vmulo_u32(zig_u8 *ov, zig_u32 *res, int n, } zig_extern zig_i32 __mulosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow); -static inline zig_bool zig_mulo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) { +static inline bool zig_mulo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) { #if zig_has_builtin(mul_overflow) zig_i32 full_res; - zig_bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); #else zig_c_int overflow_int; zig_u32 full_res = __mulosi4(lhs, rhs, &overflow_int); - zig_bool overflow = overflow_int != 0; + bool overflow = overflow_int != 0; #endif *res = zig_wrap_i32(full_res, bits); return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits); @@ -658,10 +654,10 @@ static inline void zig_vmulo_i32(zig_u8 *ov, zig_i32 *res, int n, for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i32(&res[i], lhs[i], rhs[i], bits); } -static inline zig_bool zig_mulo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) { +static inline bool zig_mulo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) { #if zig_has_builtin(mul_overflow) zig_u64 full_res; - zig_bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); *res = zig_wrap_u64(full_res, bits); return overflow || full_res < zig_minInt(u64, bits) || full_res > zig_maxInt(u64, bits); #else @@ -677,14 +673,14 @@ static inline void zig_vmulo_u64(zig_u8 *ov, zig_u64 *res, int n, } zig_extern zig_i64 __mulodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow); -static inline zig_bool zig_mulo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) { +static inline bool zig_mulo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) { #if zig_has_builtin(mul_overflow) zig_i64 full_res; - zig_bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); #else zig_c_int overflow_int; zig_u64 full_res = __mulodi4(lhs, rhs, &overflow_int); - zig_bool overflow = overflow_int != 0; + bool overflow = overflow_int != 0; #endif *res = zig_wrap_i64(full_res, bits); return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits); @@ -696,10 +692,10 @@ static inline void zig_vmulo_i64(zig_u8 *ov, zig_i64 *res, int n, for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i64(&res[i], lhs[i], rhs[i], bits); } -static inline zig_bool zig_mulo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) { +static inline bool zig_mulo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 
bits) { #if zig_has_builtin(mul_overflow) zig_u8 full_res; - zig_bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); *res = zig_wrap_u8(full_res, bits); return overflow || full_res < zig_minInt(u8, bits) || full_res > zig_maxInt(u8, bits); #else @@ -713,10 +709,10 @@ static inline void zig_vmulo_u8(zig_u8 *ov, zig_u8 *res, int n, for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u8(&res[i], lhs[i], rhs[i], bits); } -static inline zig_bool zig_mulo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) { +static inline bool zig_mulo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) { #if zig_has_builtin(mul_overflow) zig_i8 full_res; - zig_bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); *res = zig_wrap_i8(full_res, bits); return overflow || full_res < zig_minInt(i8, bits) || full_res > zig_maxInt(i8, bits); #else @@ -730,10 +726,10 @@ static inline void zig_vmulo_i8(zig_u8 *ov, zig_i8 *res, int n, for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i8(&res[i], lhs[i], rhs[i], bits); } -static inline zig_bool zig_mulo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) { +static inline bool zig_mulo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) { #if zig_has_builtin(mul_overflow) zig_u16 full_res; - zig_bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); *res = zig_wrap_u16(full_res, bits); return overflow || full_res < zig_minInt(u16, bits) || full_res > zig_maxInt(u16, bits); #else @@ -747,10 +743,10 @@ static inline void zig_vmulo_u16(zig_u8 *ov, zig_u16 *res, int n, for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u16(&res[i], lhs[i], rhs[i], bits); } -static inline zig_bool zig_mulo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) { +static inline bool zig_mulo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) { #if zig_has_builtin(mul_overflow) zig_i16 full_res; - zig_bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); *res = zig_wrap_i16(full_res, bits); return overflow || full_res < zig_minInt(i16, bits) || full_res > zig_maxInt(i16, bits); #else @@ -797,12 +793,12 @@ static inline void zig_vmulo_i16(zig_u8 *ov, zig_i16 *res, int n, return zig_wrap_i##w((zig_i##w)((zig_u##w)lhs * (zig_u##w)rhs), bits); \ } \ \ - static inline zig_bool zig_shlo_u##w(zig_u##w *res, zig_u##w lhs, zig_u8 rhs, zig_u8 bits) { \ + static inline bool zig_shlo_u##w(zig_u##w *res, zig_u##w lhs, zig_u8 rhs, zig_u8 bits) { \ *res = zig_shlw_u##w(lhs, rhs, bits); \ return (lhs & zig_maxInt_u##w << (bits - rhs)) != zig_as_u##w(0); \ } \ \ - static inline zig_bool zig_shlo_i##w(zig_i##w *res, zig_i##w lhs, zig_u8 rhs, zig_u8 bits) { \ + static inline bool zig_shlo_i##w(zig_i##w *res, zig_i##w lhs, zig_u8 rhs, zig_u8 bits) { \ *res = zig_shlw_i##w(lhs, rhs, bits); \ zig_i##w mask = (zig_i##w)(zig_maxInt_u##w << (bits - rhs - 1)); \ return (lhs & mask) != zig_as_i##w(0) && (lhs & mask) != mask; \ @@ -1327,22 +1323,22 @@ static inline zig_i128 zig_mulw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { #if zig_has_int128 -static inline zig_bool zig_shlo_u128(zig_u128 *res, zig_u128 lhs, zig_u8 rhs, zig_u8 bits) { +static inline bool zig_shlo_u128(zig_u128 *res, zig_u128 lhs, zig_u8 rhs, zig_u8 bits) { *res = zig_shlw_u128(lhs, rhs, bits); return zig_and_u128(lhs, zig_shl_u128(zig_maxInt_u128, bits - rhs)) != 
zig_as_u128(0, 0); } -static inline zig_bool zig_shlo_i128(zig_i128 *res, zig_i128 lhs, zig_u8 rhs, zig_u8 bits) { +static inline bool zig_shlo_i128(zig_i128 *res, zig_i128 lhs, zig_u8 rhs, zig_u8 bits) { *res = zig_shlw_i128(lhs, rhs, bits); zig_i128 mask = zig_bitcast_i128(zig_shl_u128(zig_maxInt_u128, bits - rhs - zig_as_u8(1))); return zig_cmp_i128(zig_and_i128(lhs, mask), zig_as_i128(0, 0)) != zig_as_i32(0) && zig_cmp_i128(zig_and_i128(lhs, mask), mask) != zig_as_i32(0); } -static inline zig_bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { +static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { #if zig_has_builtin(add_overflow) zig_u128 full_res; - zig_bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); *res = zig_wrap_u128(full_res, bits); return overflow || full_res < zig_minInt(u128, bits) || full_res > zig_maxInt(u128, bits); #else @@ -1352,23 +1348,23 @@ static inline zig_bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, } zig_extern zig_i128 __addoti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow); -static inline zig_bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { +static inline bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { #if zig_has_builtin(add_overflow) zig_i128 full_res; - zig_bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); + bool overflow = __builtin_add_overflow(lhs, rhs, &full_res); #else zig_c_int overflow_int; zig_i128 full_res = __addoti4(lhs, rhs, &overflow_int); - zig_bool overflow = overflow_int != 0; + bool overflow = overflow_int != 0; #endif *res = zig_wrap_i128(full_res, bits); return overflow || full_res < zig_minInt(i128, bits) || full_res > zig_maxInt(i128, bits); } -static inline zig_bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { +static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { #if zig_has_builtin(sub_overflow) zig_u128 full_res; - zig_bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); *res = zig_wrap_u128(full_res, bits); return overflow || full_res < zig_minInt(u128, bits) || full_res > zig_maxInt(u128, bits); #else @@ -1378,23 +1374,23 @@ static inline zig_bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, } zig_extern zig_i128 __suboti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow); -static inline zig_bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { +static inline bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { #if zig_has_builtin(sub_overflow) zig_i128 full_res; - zig_bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); + bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res); #else zig_c_int overflow_int; zig_i128 full_res = __suboti4(lhs, rhs, &overflow_int); - zig_bool overflow = overflow_int != 0; + bool overflow = overflow_int != 0; #endif *res = zig_wrap_i128(full_res, bits); return overflow || full_res < zig_minInt(i128, bits) || full_res > zig_maxInt(i128, bits); } -static inline zig_bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { +static inline bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) { #if zig_has_builtin(mul_overflow) zig_u128 full_res; - zig_bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); + bool overflow = 
__builtin_mul_overflow(lhs, rhs, &full_res); *res = zig_wrap_u128(full_res, bits); return overflow || full_res < zig_minInt(u128, bits) || full_res > zig_maxInt(u128, bits); #else @@ -1404,14 +1400,14 @@ static inline zig_bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, } zig_extern zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow); -static inline zig_bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { +static inline bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) { #if zig_has_builtin(mul_overflow) zig_i128 full_res; - zig_bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); + bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res); #else zig_c_int overflow_int; zig_i128 full_res = __muloti4(lhs, rhs, &overflow); - zig_bool overflow = overflow_int != 0; + bool overflow = overflow_int != 0; #endif *res = zig_wrap_i128(full_res, bits); return overflow || full_res < zig_minInt(i128, bits) || full_res > zig_maxInt(i128, bits); @@ -1419,12 +1415,12 @@ static inline zig_bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, #else /* zig_has_int128 */ -static inline zig_bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs) { +static inline bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs) { return zig_addo_u64(&res->hi, lhs.hi, rhs.hi, UINT64_MAX) | zig_addo_u64(&res->hi, res->hi, zig_addo_u64(&res->lo, lhs.lo, rhs.lo, UINT64_MAX)); } -static inline zig_bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs) { +static inline bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs) { return zig_subo_u64(&res->hi, lhs.hi, rhs.hi, UINT64_MAX) | zig_subo_u64(&res->hi, res->hi, zig_subo_u64(&res->lo, lhs.lo, rhs.lo, UINT64_MAX)); } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index e38aedb125..f4b54cd3aa 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -717,10 +717,21 @@ pub const DeclGen = struct { val = rt.data; } const target = dg.module.getTarget(); + + const safety_on = switch (dg.module.optimizeMode()) { + .Debug, .ReleaseSafe => true, + .ReleaseFast, .ReleaseSmall => false, + }; + if (val.isUndefDeep()) { switch (ty.zigTypeTag()) { - // bool b = 0xaa; evals to true, but memcpy(&b, 0xaa, 1); evals to false. 
- .Bool => return dg.renderValue(writer, ty, Value.false, location), + .Bool => { + if (safety_on) { + return writer.writeAll("0xaa"); + } else { + return writer.writeAll("false"); + } + }, .Int, .Enum, .ErrorSet => return writer.print("{x}", .{try dg.fmtIntLiteral(ty, val)}), .Float => { const bits = ty.floatBits(target); @@ -1099,7 +1110,13 @@ pub const DeclGen = struct { }, } }, - .Bool => return writer.print("zig_{}", .{val.toBool()}), + .Bool => { + if (val.toBool()) { + return writer.writeAll("true"); + } else { + return writer.writeAll("false"); + } + }, .Optional => { var opt_buf: Type.Payload.ElemType = undefined; const payload_ty = ty.optionalChild(&opt_buf); @@ -1804,10 +1821,9 @@ pub const DeclGen = struct { const target = dg.module.getTarget(); switch (t.zigTypeTag()) { - .Void => { - try w.writeAll("void"); - }, - .NoReturn, .Bool, .Float => { + .Void => try w.writeAll("void"), + .Bool => try w.writeAll("bool"), + .NoReturn, .Float => { try w.writeAll("zig_"); try t.print(w, dg.module); }, diff --git a/test/stage2/cbe.zig b/test/stage2/cbe.zig index 441b8b23d3..6c0c5e03cf 100644 --- a/test/stage2/cbe.zig +++ b/test/stage2/cbe.zig @@ -985,7 +985,7 @@ pub fn addCases(ctx: *TestContext) !void { ctx.h("header with bool param function", linux_x64, \\export fn start(a: bool) void{_ = a;} , - \\zig_extern void start(zig_bool const a0); + \\zig_extern void start(bool const a0); \\ ); ctx.h("header with noreturn function", linux_x64, -- cgit v1.2.3
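For reference, a minimal sketch (not actual compiler output; the zig_err typedef, variable names, and main are invented for illustration) of what the first patch changes in the emitted C: when an error union's payload type has no runtime bits, the value lowers to just the error integer, and the "no error" case is now written as a plain 0 literal instead of a named constant for error.@"(no error)".

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: Zig error sets lower to an unsigned integer type in
 * the emitted C, with 0 reserved for "no error". */
typedef uint16_t zig_err;

int main(void) {
    /* Before the patch the initializer was spelled via a named
     * "(no error)" constant; now the backend simply writes 0. */
    zig_err result = 0;
    printf("error code: %u\n", (unsigned)result); /* prints: error code: 0 */
    return 0;
}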
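The version gate the second patch adds near the top of lib/zig.h can be seen in isolation in the following minimal sketch (the preprocessor guard mirrors the patch; is_even and main are invented for the demo): C99 through C17 pull bool, true, and false from <stdbool.h>, pre-C99 translation units fall back to a char typedef, and C++ or C23 compilers, where bool is built in, skip the block entirely. This is what lets the generated code spell booleans the way hand-written C would.

#include <stdio.h>

/* Same shape as the guard added to lib/zig.h in the second patch. */
#if !defined(__cplusplus) && __STDC_VERSION__ <= 201710L
#if __STDC_VERSION__ >= 199901L
#include <stdbool.h> /* C99..C17: bool, true, false come from the standard header */
#else
typedef char bool;   /* C89 fallback */
#define false 0
#define true 1
#endif
#endif

/* Generated code can now use plain bool/true/false instead of
 * zig_bool/zig_true/zig_false. */
static bool is_even(int x) {
    return x % 2 == 0;
}

int main(void) {
    printf("%s\n", is_even(4) ? "true" : "false"); /* prints: true */
    return 0;
}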
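A short aside on the C subtlety behind the comment the second patch deletes from src/codegen/c.zig ("bool b = 0xaa; evals to true, but memcpy(&b, 0xaa, 1); evals to false"): assigning a nonzero value to a bool normalizes it to 1, whereas copying a raw byte into the bool's storage does not, and the patch now emits 0xaa for an undefined bool only in the safety-checked modes (Debug/ReleaseSafe), falling back to false otherwise. The sketch below is purely illustrative and deliberately stops short of reading the raw-stored value, since that read is not portable.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    bool b = 0xaa;           /* assignment converts any nonzero value to 1 */
    printf("%d\n", (int)b);  /* prints: 1 */

    unsigned char byte = 0xaa;
    bool c = false;
    memcpy(&c, &byte, 1);    /* raw store: c's object representation is now 0xaa */
    /* Reading c at this point is not portable, which is what the removed
     * comment was warning about. */
    return 0;
}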