Diffstat (limited to 'src')
| -rw-r--r-- | src/Sema.zig | 62 |
| -rw-r--r-- | src/codegen/llvm.zig | 6 |
| -rw-r--r-- | src/type.zig | 9 |
| -rw-r--r-- | src/value.zig | 19 |
4 files changed, 69 insertions, 27 deletions
diff --git a/src/Sema.zig b/src/Sema.zig
index 1a760be4ef..99519cd562 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -4195,7 +4195,15 @@ fn zirDbgVar(
     const str_op = sema.code.instructions.items(.data)[inst].str_op;
     const operand = sema.resolveInst(str_op.operand);
     const operand_ty = sema.typeOf(operand);
-    if (!(try sema.typeHasRuntimeBits(block, sema.src, operand_ty))) return;
+    switch (air_tag) {
+        .dbg_var_ptr => {
+            if (!(try sema.typeHasRuntimeBits(block, sema.src, operand_ty.childType()))) return;
+        },
+        .dbg_var_val => {
+            if (!(try sema.typeHasRuntimeBits(block, sema.src, operand_ty))) return;
+        },
+        else => unreachable,
+    }
     const name = str_op.getStr(sema.code);
 
     // Add the name to the AIR.
@@ -13268,7 +13276,7 @@ fn checkFloatType(
     ty: Type,
 ) CompileError!void {
     switch (ty.zigTypeTag()) {
-        .ComptimeFloat, .Float => {},
+        .ComptimeInt, .ComptimeFloat, .Float => {},
         else => return sema.fail(block, ty_src, "expected float type, found '{}'", .{ty}),
     }
 }
@@ -17169,10 +17177,25 @@ fn storePtr2(
         return;
     }
 
+    // TODO do the same thing for anon structs as for tuples above.
+
+    // Detect if we are storing an array operand to a bitcasted vector pointer.
+    // If so, we instead reach through the bitcasted pointer to the vector pointer,
+    // bitcast the array operand to a vector, and then lower this as a store of
+    // a vector value to a vector pointer. This generally results in better code,
+    // as well as working around an LLVM bug:
+    // https://github.com/ziglang/zig/issues/11154
+    if (sema.obtainBitCastedVectorPtr(ptr)) |vector_ptr| {
+        const vector_ty = sema.typeOf(vector_ptr).childType();
+        const vector = try sema.coerce(block, vector_ty, uncasted_operand, operand_src);
+        try sema.storePtr2(block, src, vector_ptr, ptr_src, vector, operand_src, .store);
+        return;
+    }
+
     const operand = try sema.coerce(block, elem_ty, uncasted_operand, operand_src);
+    const maybe_operand_val = try sema.resolveMaybeUndefVal(block, operand_src, operand);
 
     const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: {
-        const maybe_operand_val = try sema.resolveMaybeUndefVal(block, operand_src, operand);
         const operand_val = maybe_operand_val orelse {
             try sema.checkPtrIsNotComptimeMutable(block, ptr_val, ptr_src, operand_src);
             break :rs operand_src;
@@ -17195,6 +17218,39 @@ fn storePtr2(
     _ = try block.addBinOp(air_tag, ptr, operand);
 }
 
+/// Traverse an arbitrary number of bitcasted pointers and return the underlying vector
+/// pointer. Only if the final element type matches the vector element type, and the
+/// lengths match.
+fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref {
+    const array_ty = sema.typeOf(ptr).childType();
+    if (array_ty.zigTypeTag() != .Array) return null;
+    var ptr_inst = Air.refToIndex(ptr) orelse return null;
+    const air_datas = sema.air_instructions.items(.data);
+    const air_tags = sema.air_instructions.items(.tag);
+    const prev_ptr = while (air_tags[ptr_inst] == .bitcast) {
+        const prev_ptr = air_datas[ptr_inst].ty_op.operand;
+        const prev_ptr_ty = sema.typeOf(prev_ptr);
+        const prev_ptr_child_ty = switch (prev_ptr_ty.tag()) {
+            .single_mut_pointer => prev_ptr_ty.castTag(.single_mut_pointer).?.data,
+            .pointer => prev_ptr_ty.castTag(.pointer).?.data.pointee_type,
+            else => return null,
+        };
+        if (prev_ptr_child_ty.zigTypeTag() == .Vector) break prev_ptr;
+        ptr_inst = Air.refToIndex(prev_ptr) orelse return null;
+    } else return null;
+
+    // We have a pointer-to-array and a pointer-to-vector. If the elements and
+    // lengths match, return the result.
+    const vector_ty = sema.typeOf(prev_ptr).childType();
+    if (array_ty.childType().eql(vector_ty.childType()) and
+        array_ty.arrayLen() == vector_ty.vectorLen())
+    {
+        return prev_ptr;
+    } else {
+        return null;
+    }
+}
+
 /// Call when you have Value objects rather than Air instructions, and you want to
 /// assert the store must be done at comptime.
 fn storePtrVal(
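The new storePtr2 fast path and the obtainBitCastedVectorPtr helper apply when an array value is stored through a pointer that was bitcasted from a vector pointer. The sketch below is a hypothetical illustration of user code that plausibly exercises this path; the explicit @ptrCast and the test itself are assumptions added for illustration (Sema also produces such bitcasts internally), not part of the commit.

```zig
const std = @import("std");

test "store an array through a bitcasted vector pointer" {
    var v: @Vector(4, f32) = @splat(4, @as(f32, 0));
    // Hypothetical: reinterpret the vector storage as a pointer-to-array with a
    // matching element type and length.
    const arr_ptr = @ptrCast(*[4]f32, &v);
    // Storing an array operand through arr_ptr is the case the new code targets:
    // Sema can reach through the bitcast, coerce the array to @Vector(4, f32),
    // and lower the store as a vector store.
    arr_ptr.* = [4]f32{ 5, 6, 7, 8 };
    try std.testing.expectEqual(@as(f32, 5), v[0]);
    try std.testing.expectEqual(@as(f32, 8), v[3]);
}
```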
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index c1c7ac06f0..42e94a3528 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -3709,10 +3709,11 @@ pub const FuncGen = struct {
 
         const ty_op = self.air.instructions.items(.data)[inst].ty_op;
         const operand = try self.resolveInst(ty_op.operand);
+        const operand_ty = self.air.typeOf(ty_op.operand);
         const dest_ty = self.air.typeOfIndex(inst);
         const dest_llvm_ty = try self.dg.llvmType(dest_ty);
 
-        if (dest_ty.isSignedInt()) {
+        if (operand_ty.isSignedInt()) {
             return self.builder.buildSIToFP(operand, dest_llvm_ty, "");
         } else {
             return self.builder.buildUIToFP(operand, dest_llvm_ty, "");
@@ -3984,13 +3985,14 @@ pub const FuncGen = struct {
         const pl_op = self.air.instructions.items(.data)[inst].pl_op;
         const operand = try self.resolveInst(pl_op.operand);
         const name = self.air.nullTerminatedString(pl_op.payload);
+        const ptr_ty = self.air.typeOf(pl_op.operand);
 
         const di_local_var = dib.createAutoVariable(
             self.di_scope.?,
             name.ptr,
             self.di_file.?,
             self.prev_dbg_line,
-            try self.dg.lowerDebugType(self.air.typeOf(pl_op.operand)),
+            try self.dg.lowerDebugType(ptr_ty.childType()),
             true, // always preserve
             0, // flags
         );
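The first llvm.zig hunk above selects between sitofp and uitofp from the integer operand's signedness; the previous check called isSignedInt() on the destination type, which is a float and therefore always answered false, so negative signed operands went through an unsigned conversion. A speculative regression test for the user-visible behavior (not taken from the commit), assuming a runtime-known operand so the backend actually emits the conversion:

```zig
const std = @import("std");

test "intToFloat of a negative signed integer" {
    // Runtime-known value, so the conversion is emitted by the backend rather
    // than folded at comptime.
    var x: i32 = -42;
    const f = @intToFloat(f64, x);
    try std.testing.expectEqual(@as(f64, -42), f);
}
```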
diff --git a/src/type.zig b/src/type.zig
index 30c8838109..10f0b0325d 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -2573,15 +2573,14 @@ pub const Type = extern union {
             .array_u8_sentinel_0 => self.castTag(.array_u8_sentinel_0).?.data + 1,
             .array, .vector => {
                 const payload = self.cast(Payload.Array).?.data;
-                const elem_size = @maximum(payload.elem_type.abiAlignment(target), payload.elem_type.abiSize(target));
+                const elem_size = payload.elem_type.abiSize(target);
+                assert(elem_size >= payload.elem_type.abiAlignment(target));
                 return payload.len * elem_size;
             },
             .array_sentinel => {
                 const payload = self.castTag(.array_sentinel).?.data;
-                const elem_size = std.math.max(
-                    payload.elem_type.abiAlignment(target),
-                    payload.elem_type.abiSize(target),
-                );
+                const elem_size = payload.elem_type.abiSize(target);
+                assert(elem_size >= payload.elem_type.abiAlignment(target));
                 return (payload.len + 1) * elem_size;
             },
             .i16, .u16 => return 2,
diff --git a/src/value.zig b/src/value.zig
index 9117ef78df..6ab3adba36 100644
--- a/src/value.zig
+++ b/src/value.zig
@@ -3931,15 +3931,12 @@ pub const Value = extern union {
             },
             80 => {
                 if (true) {
-                    @panic("TODO implement compiler_rt fabs for f80");
+                    @panic("TODO implement compiler_rt fabs for f80 (__fabsx)");
                 }
                 const f = val.toFloat(f80);
                 return Value.Tag.float_80.create(arena, @fabs(f));
             },
             128 => {
-                if (true) {
-                    @panic("TODO implement compiler_rt fabs for f128");
-                }
                 const f = val.toFloat(f128);
                 return Value.Tag.float_128.create(arena, @fabs(f));
             },
@@ -3963,15 +3960,12 @@ pub const Value = extern union {
             },
             80 => {
                 if (true) {
-                    @panic("TODO implement compiler_rt floor for f80");
+                    @panic("TODO implement compiler_rt floor for f80 (__floorx)");
                 }
                 const f = val.toFloat(f80);
                 return Value.Tag.float_80.create(arena, @floor(f));
             },
             128 => {
-                if (true) {
-                    @panic("TODO implement compiler_rt floor for f128");
-                }
                 const f = val.toFloat(f128);
                 return Value.Tag.float_128.create(arena, @floor(f));
             },
@@ -4001,9 +3995,6 @@ pub const Value = extern union {
                 return Value.Tag.float_80.create(arena, @ceil(f));
             },
             128 => {
-                if (true) {
-                    @panic("TODO implement compiler_rt ceil for f128");
-                }
                 const f = val.toFloat(f128);
                 return Value.Tag.float_128.create(arena, @ceil(f));
             },
@@ -4033,9 +4024,6 @@ pub const Value = extern union {
                 return Value.Tag.float_80.create(arena, @round(f));
             },
             128 => {
-                if (true) {
-                    @panic("TODO implement compiler_rt round for f128");
-                }
                 const f = val.toFloat(f128);
                 return Value.Tag.float_128.create(arena, @round(f));
             },
@@ -4065,9 +4053,6 @@ pub const Value = extern union {
                 return Value.Tag.float_80.create(arena, @trunc(f));
             },
             128 => {
-                if (true) {
-                    @panic("TODO implement compiler_rt trunc for f128");
-                }
                 const f = val.toFloat(f128);
                 return Value.Tag.float_128.create(arena, @trunc(f));
             },
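With the f128 panics removed in value.zig, the corresponding float operations can be evaluated at comptime on f128 operands through these Value helpers. A speculative sketch of the user-visible effect (assumed, not a test from the commit):

```zig
const std = @import("std");

test "comptime float builtins on f128" {
    comptime {
        const x: f128 = -3.75;
        // Each of these comptime evaluations is assumed to go through the Value
        // float helpers that previously panicked for 128-bit floats.
        std.debug.assert(@fabs(x) == 3.75);
        std.debug.assert(@floor(x) == -4.0);
        std.debug.assert(@ceil(x) == -3.0);
        std.debug.assert(@trunc(x) == -3.0);
        std.debug.assert(@round(x) == -4.0);
    }
}
```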
