| field | value | date |
|---|---|---|
| author | Andrew Kelley <andrew@ziglang.org> | 2022-07-07 18:25:01 -0700 |
| committer | Andrew Kelley <andrew@ziglang.org> | 2022-07-07 18:25:01 -0700 |
| commit | 6a3a0fe7ae20e620ec6ad80bf87c72b284d2aafa (patch) | |
| tree | cb977f8239695b217b49ecd68e1be1bf77f085ce /src | |
| parent | 8e07b0c4b97817b89b0cd59c8e558348cfb5005b (diff) | |
| parent | 3a03872af76652515e467c1f33d918ead2c0a6b0 (diff) | |
Merge remote-tracking branch 'origin/master' into llvm14
Diffstat (limited to 'src')

| mode | path | lines |
|---|---|---|
| -rw-r--r-- | src/Sema.zig | 15 |
| -rw-r--r-- | src/codegen/llvm.zig | 240 |
| -rw-r--r-- | src/stage1/ir.cpp | 5 |
3 files changed, 67 insertions, 193 deletions
```diff
diff --git a/src/Sema.zig b/src/Sema.zig
index 054f645230..0f504c6c1d 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -18427,7 +18427,20 @@ fn safetyPanic(
 
 fn emitBackwardBranch(sema: *Sema, block: *Block, src: LazySrcLoc) !void {
     sema.branch_count += 1;
     if (sema.branch_count > sema.branch_quota) {
-        return sema.fail(block, src, "evaluation exceeded {d} backwards branches", .{sema.branch_quota});
+        const msg = try sema.errMsg(
+            block,
+            src,
+            "evaluation exceeded {d} backwards branches",
+            .{sema.branch_quota},
+        );
+        try sema.errNote(
+            block,
+            src,
+            msg,
+            "use @setEvalBranchQuota() to raise the branch limit from {d}",
+            .{sema.branch_quota},
+        );
+        return sema.failWithOwnedErrorMsg(block, msg);
     }
 }
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 1b7e33a3a4..acd571a58b 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -858,69 +858,34 @@ pub const Object = struct {
                     try args.append(aggregate);
                 },
                 .multiple_llvm_ints => {
-                    const param_ty = fn_info.param_types[it.zig_index - 1];
                     const llvm_ints = it.llvm_types_buffer[0..it.llvm_types_len];
-                    const is_by_ref = isByRef(param_ty);
-                    switch (param_ty.zigTypeTag()) {
-                        .Struct => {
-                            const fields = param_ty.structFields().values();
-                            if (is_by_ref) {
-                                const param_llvm_ty = try dg.lowerType(param_ty);
-                                const arg_ptr = buildAllocaInner(builder, llvm_func, false, param_llvm_ty);
-                                arg_ptr.setAlignment(param_ty.abiAlignment(target));
-
-                                var field_i: u32 = 0;
-                                var field_offset: u32 = 0;
-                                for (llvm_ints) |int_bits| {
-                                    const param = llvm_func.getParam(llvm_arg_i);
-                                    llvm_arg_i += 1;
-
-                                    const big_int_ty = dg.context.intType(int_bits);
-                                    var bits_used: u32 = 0;
-                                    while (bits_used < int_bits) {
-                                        const field = fields[field_i];
-                                        const field_alignment = field.normalAlignment(target);
-                                        const prev_offset = field_offset;
-                                        field_offset = std.mem.alignForwardGeneric(u32, field_offset, field_alignment);
-                                        if (field_offset > prev_offset) {
-                                            // Padding counts as bits used.
-                                            bits_used += (field_offset - prev_offset) * 8;
-                                            if (bits_used >= int_bits) break;
-                                        }
-                                        const field_size = @intCast(u16, field.ty.abiSize(target));
-                                        const field_abi_bits = field_size * 8;
-                                        const field_int_ty = dg.context.intType(field_abi_bits);
-                                        const shifted = if (bits_used == 0) param else s: {
-                                            const shift_amt = big_int_ty.constInt(bits_used, .False);
-                                            break :s builder.buildLShr(param, shift_amt, "");
-                                        };
-                                        const field_as_int = builder.buildTrunc(shifted, field_int_ty, "");
-                                        var ty_buf: Type.Payload.Pointer = undefined;
-                                        const llvm_i = llvmFieldIndex(param_ty, field_i, target, &ty_buf).?;
-                                        const field_ptr = builder.buildStructGEP(arg_ptr, llvm_i, "");
-                                        const casted_ptr = builder.buildBitCast(field_ptr, field_int_ty.pointerType(0), "");
-                                        const store_inst = builder.buildStore(field_as_int, casted_ptr);
-                                        store_inst.setAlignment(field_alignment);
-
-                                        field_i += 1;
-                                        if (field_i >= fields.len) break;
-
-                                        bits_used += field_abi_bits;
-                                        field_offset += field_size;
-                                    }
-                                    if (field_i >= fields.len) break;
-                                }
-
-                                try args.append(arg_ptr);
-                            } else {
-                                @panic("TODO: LLVM backend: implement C calling convention on x86_64 with byval struct parameter");
-                            }
-                        },
-                        .Union => {
-                            @panic("TODO: LLVM backend: implement C calling convention on x86_64 with union parameter");
-                        },
-                        else => unreachable,
+                    const param_ty = fn_info.param_types[it.zig_index - 1];
+                    const param_llvm_ty = try dg.lowerType(param_ty);
+                    const param_alignment = param_ty.abiAlignment(target);
+                    const arg_ptr = buildAllocaInner(builder, llvm_func, false, param_llvm_ty);
+                    arg_ptr.setAlignment(param_alignment);
+                    var field_types_buf: [8]*const llvm.Type = undefined;
+                    const field_types = field_types_buf[0..llvm_ints.len];
+                    for (llvm_ints) |int_bits, i| {
+                        field_types[i] = dg.context.intType(int_bits);
                     }
+                    const ints_llvm_ty = dg.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False);
+                    const casted_ptr = builder.buildBitCast(arg_ptr, ints_llvm_ty.pointerType(0), "");
+                    for (llvm_ints) |_, i_usize| {
+                        const i = @intCast(c_uint, i_usize);
+                        const param = llvm_func.getParam(i);
+                        const field_ptr = builder.buildStructGEP(casted_ptr, i, "");
+                        const store_inst = builder.buildStore(param, field_ptr);
+                        store_inst.setAlignment(target.cpu.arch.ptrBitWidth() / 8);
+                    }
+
+                    const is_by_ref = isByRef(param_ty);
+                    const loaded = if (is_by_ref) arg_ptr else l: {
+                        const load_inst = builder.buildLoad(arg_ptr, "");
+                        load_inst.setAlignment(param_alignment);
+                        break :l load_inst;
+                    };
+                    try args.append(loaded);
                 },
             };
         }
@@ -2822,65 +2787,11 @@ pub const DeclGen = struct {
                     llvm_params.appendAssumeCapacity(len_llvm_ty);
                 },
                 .multiple_llvm_ints => {
-                    const param_ty = fn_info.param_types[it.zig_index - 1];
                     const llvm_ints = it.llvm_types_buffer[0..it.llvm_types_len];
                     try llvm_params.ensureUnusedCapacity(it.llvm_types_len);
-
-                    // The reason we have all this logic instead of simply appending
-                    // big_int_ty is for the special case of a pointer type;
-                    // we want to use a pointer type instead of inttoptr at the callsites,
-                    // which may prevent optimization.
-                    switch (param_ty.zigTypeTag()) {
-                        .Struct => {
-                            const fields = param_ty.structFields().values();
-                            var field_i: u32 = 0;
-                            var field_offset: u32 = 0;
-                            llvm_arg: for (llvm_ints) |int_bits| {
-                                const big_int_ty = dg.context.intType(int_bits);
-                                var bits_used: u32 = 0;
-                                while (bits_used < int_bits) {
-                                    const field = fields[field_i];
-                                    const field_alignment = field.normalAlignment(target);
-                                    const prev_offset = field_offset;
-                                    field_offset = std.mem.alignForwardGeneric(u32, field_offset, field_alignment);
-                                    if (field_offset > prev_offset) {
-                                        // Padding counts as bits used.
-                                        bits_used += (field_offset - prev_offset) * 8;
-                                        if (bits_used >= int_bits) break;
-                                    }
-                                    const field_size = @intCast(u16, field.ty.abiSize(target));
-                                    const field_abi_bits = field_size * 8;
-
-                                    // Special case for when the entire LLVM integer represents
-                                    // one field; in this case keep the type information
-                                    // to avoid the potentially costly ptrtoint/bitcast.
-                                    if (bits_used == 0 and field_abi_bits == int_bits) {
-                                        const llvm_field_ty = try dg.lowerType(field.ty);
-                                        llvm_params.appendAssumeCapacity(llvm_field_ty);
-                                        field_i += 1;
-                                        if (field_i >= fields.len) {
-                                            break :llvm_arg;
-                                        } else {
-                                            continue :llvm_arg;
-                                        }
-                                    }
-
-                                    field_i += 1;
-                                    if (field_i >= fields.len) break;
-
-                                    bits_used += field_abi_bits;
-                                    field_offset += field_size;
-                                }
-                                llvm_params.appendAssumeCapacity(big_int_ty);
-                                if (field_i >= fields.len) break;
-                            }
-                        },
-                        else => {
-                            for (llvm_ints) |int_bits| {
-                                const big_int_ty = dg.context.intType(int_bits);
-                                llvm_params.appendAssumeCapacity(big_int_ty);
-                            }
-                        },
+                    for (llvm_ints) |int_bits| {
+                        const big_int_ty = dg.context.intType(int_bits);
+                        llvm_params.appendAssumeCapacity(big_int_ty);
                     }
                 },
             };
@@ -4300,80 +4211,27 @@ pub const FuncGen = struct {
                     const llvm_ints = it.llvm_types_buffer[0..it.llvm_types_len];
                     const llvm_arg = try self.resolveInst(arg);
                     const is_by_ref = isByRef(param_ty);
-                    try llvm_args.ensureUnusedCapacity(it.llvm_types_len);
-                    switch (param_ty.zigTypeTag()) {
-                        .Struct => {
-                            const fields = param_ty.structFields().values();
-                            var field_i: u32 = 0;
-                            var field_offset: u32 = 0;
-                            for (llvm_ints) |int_bits| {
-                                const big_int_ty = self.dg.context.intType(int_bits);
-                                var int_arg: *const llvm.Value = undefined;
-                                var bits_used: u32 = 0;
-                                while (bits_used < int_bits) {
-                                    const field = fields[field_i];
-                                    const field_alignment = field.normalAlignment(target);
-                                    const prev_offset = field_offset;
-                                    field_offset = std.mem.alignForwardGeneric(u32, field_offset, field_alignment);
-                                    if (field_offset > prev_offset) {
-                                        // Padding counts as bits used.
-                                        bits_used += (field_offset - prev_offset) * 8;
-                                        if (bits_used >= int_bits) break;
-                                    }
-                                    var ty_buf: Type.Payload.Pointer = undefined;
-                                    const llvm_i = llvmFieldIndex(param_ty, field_i, target, &ty_buf).?;
-                                    const field_size = @intCast(u16, field.ty.abiSize(target));
-                                    const field_abi_bits = field_size * 8;
-
-                                    // Special case for when the entire LLVM integer represents
-                                    // one field; in this case keep the type information
-                                    // to avoid the potentially costly ptrtoint/bitcast.
-                                    if (bits_used == 0 and field_abi_bits == int_bits) {
-                                        int_arg = if (is_by_ref) f: {
-                                            const field_ptr = self.builder.buildStructGEP(llvm_arg, llvm_i, "");
-                                            const load_inst = self.builder.buildLoad(field_ptr, "");
-                                            load_inst.setAlignment(field_alignment);
-                                            break :f load_inst;
-                                        } else self.builder.buildExtractValue(llvm_arg, llvm_i, "");
-                                        field_i += 1;
-                                        break;
-                                    }
-
-                                    const field_int_ty = self.dg.context.intType(field_abi_bits);
-                                    const llvm_field = if (is_by_ref) f: {
-                                        const field_ptr = self.builder.buildStructGEP(llvm_arg, llvm_i, "");
-                                        const casted_ptr = self.builder.buildBitCast(field_ptr, field_int_ty.pointerType(0), "");
-                                        const load_inst = self.builder.buildLoad(casted_ptr, "");
-                                        load_inst.setAlignment(field_alignment);
-                                        break :f load_inst;
-                                    } else f: {
-                                        const llvm_field = self.builder.buildExtractValue(llvm_arg, llvm_i, "");
-                                        break :f self.builder.buildBitCast(llvm_field, field_int_ty, "");
-                                    };
-
-                                    const extended = self.builder.buildZExt(llvm_field, big_int_ty, "");
-                                    if (bits_used == 0) {
-                                        int_arg = extended;
-                                    } else {
-                                        const shift_amt = big_int_ty.constInt(bits_used, .False);
-                                        const shifted = self.builder.buildShl(extended, shift_amt, "");
-                                        int_arg = self.builder.buildOr(int_arg, shifted, "");
-                                    }
-
-                                    field_i += 1;
-                                    if (field_i >= fields.len) break;
+                    const arg_ptr = if (is_by_ref) llvm_arg else p: {
+                        const p = self.buildAlloca(llvm_arg.typeOf());
+                        const store_inst = self.builder.buildStore(llvm_arg, p);
+                        store_inst.setAlignment(param_ty.abiAlignment(target));
+                        break :p p;
+                    };
 
-                                    bits_used += field_abi_bits;
-                                    field_offset += field_size;
-                                }
-                                llvm_args.appendAssumeCapacity(int_arg);
-                                if (field_i >= fields.len) break;
-                            }
-                        },
-                        .Union => {
-                            return self.todo("airCall C calling convention on x86_64 with union argument ", .{});
-                        },
-                        else => unreachable,
+                    var field_types_buf: [8]*const llvm.Type = undefined;
+                    const field_types = field_types_buf[0..llvm_ints.len];
+                    for (llvm_ints) |int_bits, i| {
+                        field_types[i] = self.dg.context.intType(int_bits);
+                    }
+                    const ints_llvm_ty = self.dg.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False);
+                    const casted_ptr = self.builder.buildBitCast(arg_ptr, ints_llvm_ty.pointerType(0), "");
+                    try llvm_args.ensureUnusedCapacity(it.llvm_types_len);
+                    for (llvm_ints) |_, i_usize| {
+                        const i = @intCast(c_uint, i_usize);
+                        const field_ptr = self.builder.buildStructGEP(casted_ptr, i, "");
+                        const load_inst = self.builder.buildLoad(field_ptr, "");
+                        load_inst.setAlignment(target.cpu.arch.ptrBitWidth() / 8);
+                        llvm_args.appendAssumeCapacity(load_inst);
                     }
                 },
             };
diff --git a/src/stage1/ir.cpp b/src/stage1/ir.cpp
index 52044e9dce..5a3952dc67 100644
--- a/src/stage1/ir.cpp
+++ b/src/stage1/ir.cpp
@@ -5769,8 +5769,10 @@ static bool ir_emit_backward_branch(IrAnalyze *ira, AstNode* source_node) {
     *bbc += 1;
     if (*bbc > *quota) {
-        ir_add_error_node(ira, source_node,
+        ErrorMsg *msg = ir_add_error_node(ira, source_node,
                 buf_sprintf("evaluation exceeded %" ZIG_PRI_usize " backwards branches", *quota));
+        add_error_note(ira->codegen, msg, source_node,
+                buf_sprintf("use @setEvalBranchQuota to raise branch limit from %" ZIG_PRI_usize, *quota));
         return false;
     }
     return true;
 }
@@ -21573,6 +21575,7 @@ done_with_return_type:
             // handle `[N]T`
             target_len = target->type->data.array.len;
             target_sentinel = target->type->data.array.sentinel;
+            expand_undef_array(ira->codegen, target);
             target_elements = target->data.x_array.data.s_none.elements;
             break;
         } else if (target->type->id == ZigTypeIdPointer &&
                 target->type->data.pointer.child_type->id == ZigTypeIdArray) {
```
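Both frontend hunks above (src/Sema.zig for the self-hosted compiler, src/stage1/ir.cpp for stage1) attach the same hint to the existing branch-quota error. As an illustration of the diagnostic this improves, not part of the commit itself, here is the kind of comptime loop that would exceed the default quota of 1000 backward branches, together with the builtin the new note recommends:

```zig
test "raise the comptime branch quota" {
    comptime {
        // Without the call below, analysis stops with:
        //   error: evaluation exceeded 1000 backwards branches
        //   note: use @setEvalBranchQuota() to raise the branch limit from 1000
        @setEvalBranchQuota(2000);
        var i: usize = 0;
        while (i < 1500) : (i += 1) {}
    }
}
```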

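For context on the src/codegen/llvm.zig hunks: the `.multiple_llvm_ints` lowering covers C-ABI aggregates that x86_64 SysV classification splits across integer registers, and the commit replaces the per-field shift/mask bookkeeping with a single alloca bitcast to a struct of the classified integers. A hypothetical example of a parameter that takes this path (the declarations here are invented for illustration, not taken from the commit):

```zig
// Hypothetical 16-byte extern struct. Under the x86_64 SysV C ABI it is
// classified as two INTEGER eightbytes, so the backend lowers it as two
// i64 parameters; with this commit the callee spills both into one alloca
// viewed as { i64, i64 } rather than reassembling fields bit by bit.
const Pair = extern struct {
    a: u32,
    b: u32,
    c: u64,
};

export fn sumPair(p: Pair) u64 {
    return @as(u64, p.a) + p.b + p.c;
}
```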