From a6ae45145f5814963cfdff4e18c1f984729588b9 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 12 May 2018 17:35:15 -0400 Subject: add @newStackCall builtin function See #1006 --- src/all_types.hpp | 7 ++++ src/codegen.cpp | 99 +++++++++++++++++++++++++++++++++++++++++++++++++++++-- src/ir.cpp | 66 ++++++++++++++++++++++++++++++++----- src/target.cpp | 62 ++++++++++++++++++++++++++++++++++ src/target.hpp | 2 ++ 5 files changed, 225 insertions(+), 11 deletions(-) (limited to 'src') diff --git a/src/all_types.hpp b/src/all_types.hpp index c1c6c9a1a5..dc61c8235f 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1340,6 +1340,7 @@ enum BuiltinFnId { BuiltinFnIdOffsetOf, BuiltinFnIdInlineCall, BuiltinFnIdNoInlineCall, + BuiltinFnIdNewStackCall, BuiltinFnIdTypeId, BuiltinFnIdShlExact, BuiltinFnIdShrExact, @@ -1656,8 +1657,13 @@ struct CodeGen { LLVMValueRef coro_alloc_helper_fn_val; LLVMValueRef merge_err_ret_traces_fn_val; LLVMValueRef add_error_return_trace_addr_fn_val; + LLVMValueRef stacksave_fn_val; + LLVMValueRef stackrestore_fn_val; + LLVMValueRef write_register_fn_val; bool error_during_imports; + LLVMValueRef sp_md_node; + const char **clang_argv; size_t clang_argv_len; ZigList<const char *> lib_dirs; @@ -2280,6 +2286,7 @@ struct IrInstructionCall { bool is_async; IrInstruction *async_allocator; + IrInstruction *new_stack; }; struct IrInstructionConst { diff --git a/src/codegen.cpp b/src/codegen.cpp index 4e58f86d4b..f1e102392a 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -938,6 +938,53 @@ static LLVMValueRef get_memcpy_fn_val(CodeGen *g) { return g->memcpy_fn_val; } +static LLVMValueRef get_stacksave_fn_val(CodeGen *g) { + if (g->stacksave_fn_val) + return g->stacksave_fn_val; + + // declare i8* @llvm.stacksave() + + LLVMTypeRef fn_type = LLVMFunctionType(LLVMPointerType(LLVMInt8Type(), 0), nullptr, 0, false); + g->stacksave_fn_val = LLVMAddFunction(g->module, "llvm.stacksave", fn_type); + assert(LLVMGetIntrinsicID(g->stacksave_fn_val)); + + return 
g->stacksave_fn_val; +} + +static LLVMValueRef get_stackrestore_fn_val(CodeGen *g) { + if (g->stackrestore_fn_val) + return g->stackrestore_fn_val; + + // declare void @llvm.stackrestore(i8* %ptr) + + LLVMTypeRef param_type = LLVMPointerType(LLVMInt8Type(), 0); + LLVMTypeRef fn_type = LLVMFunctionType(LLVMVoidType(), &param_type, 1, false); + g->stackrestore_fn_val = LLVMAddFunction(g->module, "llvm.stackrestore", fn_type); + assert(LLVMGetIntrinsicID(g->stackrestore_fn_val)); + + return g->stackrestore_fn_val; +} + +static LLVMValueRef get_write_register_fn_val(CodeGen *g) { + if (g->write_register_fn_val) + return g->write_register_fn_val; + + // declare void @llvm.write_register.i64(metadata, i64 @value) + // !0 = !{!"sp\00"} + + LLVMTypeRef param_types[] = { + LLVMMetadataTypeInContext(LLVMGetGlobalContext()), + LLVMIntType(g->pointer_size_bytes * 8), + }; + + LLVMTypeRef fn_type = LLVMFunctionType(LLVMVoidType(), param_types, 2, false); + Buf *name = buf_sprintf("llvm.write_register.i%d", g->pointer_size_bytes * 8); + g->write_register_fn_val = LLVMAddFunction(g->module, buf_ptr(name), fn_type); + assert(LLVMGetIntrinsicID(g->write_register_fn_val)); + + return g->write_register_fn_val; +} + static LLVMValueRef get_coro_destroy_fn_val(CodeGen *g) { if (g->coro_destroy_fn_val) return g->coro_destroy_fn_val; @@ -2901,6 +2948,38 @@ static size_t get_async_err_code_arg_index(CodeGen *g, FnTypeId *fn_type_id) { return 1 + get_async_allocator_arg_index(g, fn_type_id); } + +static LLVMValueRef get_new_stack_addr(CodeGen *g, LLVMValueRef new_stack) { + LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, new_stack, (unsigned)slice_ptr_index, ""); + LLVMValueRef len_field_ptr = LLVMBuildStructGEP(g->builder, new_stack, (unsigned)slice_len_index, ""); + + LLVMValueRef ptr_value = gen_load_untyped(g, ptr_field_ptr, 0, false, ""); + LLVMValueRef len_value = gen_load_untyped(g, len_field_ptr, 0, false, ""); + + LLVMValueRef ptr_addr = LLVMBuildPtrToInt(g->builder, 
ptr_value, LLVMTypeOf(len_value), ""); + LLVMValueRef end_addr = LLVMBuildNUWAdd(g->builder, ptr_addr, len_value, ""); + LLVMValueRef align_amt = LLVMConstInt(LLVMTypeOf(end_addr), get_abi_alignment(g, g->builtin_types.entry_usize), false); + LLVMValueRef align_adj = LLVMBuildURem(g->builder, end_addr, align_amt, ""); + return LLVMBuildNUWSub(g->builder, end_addr, align_adj, ""); +} + +static void gen_set_stack_pointer(CodeGen *g, LLVMValueRef aligned_end_addr) { + LLVMValueRef write_register_fn_val = get_write_register_fn_val(g); + + if (g->sp_md_node == nullptr) { + Buf *sp_reg_name = buf_create_from_str(arch_stack_pointer_register_name(&g->zig_target.arch)); + LLVMValueRef str_node = LLVMMDString(buf_ptr(sp_reg_name), buf_len(sp_reg_name) + 1); + g->sp_md_node = LLVMMDNode(&str_node, 1); + } + + LLVMValueRef params[] = { + g->sp_md_node, + aligned_end_addr, + }; + + LLVMBuildCall(g->builder, write_register_fn_val, params, 2, ""); +} + static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstructionCall *instruction) { LLVMValueRef fn_val; TypeTableEntry *fn_type; @@ -2967,8 +3046,23 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr } LLVMCallConv llvm_cc = get_llvm_cc(g, fn_type->data.fn.fn_type_id.cc); - LLVMValueRef result = ZigLLVMBuildCall(g->builder, fn_val, - gen_param_values, (unsigned)gen_param_index, llvm_cc, fn_inline, ""); + LLVMValueRef result; + + if (instruction->new_stack == nullptr) { + result = ZigLLVMBuildCall(g->builder, fn_val, + gen_param_values, (unsigned)gen_param_index, llvm_cc, fn_inline, ""); + } else { + LLVMValueRef stacksave_fn_val = get_stacksave_fn_val(g); + LLVMValueRef stackrestore_fn_val = get_stackrestore_fn_val(g); + + LLVMValueRef new_stack_addr = get_new_stack_addr(g, ir_llvm_value(g, instruction->new_stack)); + LLVMValueRef old_stack_ref = LLVMBuildCall(g->builder, stacksave_fn_val, nullptr, 0, ""); + gen_set_stack_pointer(g, new_stack_addr); + result = 
ZigLLVMBuildCall(g->builder, fn_val, + gen_param_values, (unsigned)gen_param_index, llvm_cc, fn_inline, ""); + LLVMBuildCall(g->builder, stackrestore_fn_val, &old_stack_ref, 1, ""); + } + for (size_t param_i = 0; param_i < fn_type_id->param_count; param_i += 1) { FnGenParamInfo *gen_info = &fn_type->data.fn.gen_param_info[param_i]; @@ -6171,6 +6265,7 @@ static void define_builtin_fns(CodeGen *g) { create_builtin_fn(g, BuiltinFnIdSqrt, "sqrt", 2); create_builtin_fn(g, BuiltinFnIdInlineCall, "inlineCall", SIZE_MAX); create_builtin_fn(g, BuiltinFnIdNoInlineCall, "noInlineCall", SIZE_MAX); + create_builtin_fn(g, BuiltinFnIdNewStackCall, "newStackCall", SIZE_MAX); create_builtin_fn(g, BuiltinFnIdTypeId, "typeId", 1); create_builtin_fn(g, BuiltinFnIdShlExact, "shlExact", 2); create_builtin_fn(g, BuiltinFnIdShrExact, "shrExact", 2); diff --git a/src/ir.cpp b/src/ir.cpp index c251f30320..7bc837d908 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -1102,7 +1102,8 @@ static IrInstruction *ir_build_union_field_ptr_from(IrBuilder *irb, IrInstructio static IrInstruction *ir_build_call(IrBuilder *irb, Scope *scope, AstNode *source_node, FnTableEntry *fn_entry, IrInstruction *fn_ref, size_t arg_count, IrInstruction **args, - bool is_comptime, FnInline fn_inline, bool is_async, IrInstruction *async_allocator) + bool is_comptime, FnInline fn_inline, bool is_async, IrInstruction *async_allocator, + IrInstruction *new_stack) { IrInstructionCall *call_instruction = ir_build_instruction<IrInstructionCall>(irb, scope, source_node); call_instruction->fn_entry = fn_entry; @@ -1113,6 +1114,7 @@ static IrInstruction *ir_build_call(IrBuilder *irb, Scope *scope, AstNode *sourc call_instruction->arg_count = arg_count; call_instruction->is_async = is_async; call_instruction->async_allocator = async_allocator; + call_instruction->new_stack = new_stack; if (fn_ref) ir_ref_instruction(fn_ref, irb->current_basic_block); @@ -1120,16 +1122,19 @@ static IrInstruction *ir_build_call(IrBuilder *irb, Scope *scope, AstNode 
*sourc ir_ref_instruction(args[i], irb->current_basic_block); if (async_allocator) ir_ref_instruction(async_allocator, irb->current_basic_block); + if (new_stack != nullptr) + ir_ref_instruction(new_stack, irb->current_basic_block); return &call_instruction->base; } static IrInstruction *ir_build_call_from(IrBuilder *irb, IrInstruction *old_instruction, FnTableEntry *fn_entry, IrInstruction *fn_ref, size_t arg_count, IrInstruction **args, - bool is_comptime, FnInline fn_inline, bool is_async, IrInstruction *async_allocator) + bool is_comptime, FnInline fn_inline, bool is_async, IrInstruction *async_allocator, + IrInstruction *new_stack) { IrInstruction *new_instruction = ir_build_call(irb, old_instruction->scope, - old_instruction->source_node, fn_entry, fn_ref, arg_count, args, is_comptime, fn_inline, is_async, async_allocator); + old_instruction->source_node, fn_entry, fn_ref, arg_count, args, is_comptime, fn_inline, is_async, async_allocator, new_stack); ir_link_new_instruction(new_instruction, old_instruction); return new_instruction; } @@ -4303,7 +4308,37 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo } FnInline fn_inline = (builtin_fn->id == BuiltinFnIdInlineCall) ? 
FnInlineAlways : FnInlineNever; - IrInstruction *call = ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, fn_inline, false, nullptr); + IrInstruction *call = ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, fn_inline, false, nullptr, nullptr); + return ir_lval_wrap(irb, scope, call, lval); + } + case BuiltinFnIdNewStackCall: + { + if (node->data.fn_call_expr.params.length == 0) { + add_node_error(irb->codegen, node, buf_sprintf("expected at least 1 argument, found 0")); + return irb->codegen->invalid_instruction; + } + + AstNode *new_stack_node = node->data.fn_call_expr.params.at(0); + IrInstruction *new_stack = ir_gen_node(irb, new_stack_node, scope); + if (new_stack == irb->codegen->invalid_instruction) + return new_stack; + + AstNode *fn_ref_node = node->data.fn_call_expr.params.at(1); + IrInstruction *fn_ref = ir_gen_node(irb, fn_ref_node, scope); + if (fn_ref == irb->codegen->invalid_instruction) + return fn_ref; + + size_t arg_count = node->data.fn_call_expr.params.length - 2; + + IrInstruction **args = allocate<IrInstruction *>(arg_count); + for (size_t i = 0; i < arg_count; i += 1) { + AstNode *arg_node = node->data.fn_call_expr.params.at(i + 2); + args[i] = ir_gen_node(irb, arg_node, scope); + if (args[i] == irb->codegen->invalid_instruction) + return args[i]; + } + + IrInstruction *call = ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, FnInlineAuto, false, nullptr, new_stack); return ir_lval_wrap(irb, scope, call, lval); } case BuiltinFnIdTypeId: @@ -4513,7 +4548,7 @@ static IrInstruction *ir_gen_fn_call(IrBuilder *irb, Scope *scope, AstNode *node } } - IrInstruction *fn_call = ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, FnInlineAuto, is_async, async_allocator); + IrInstruction *fn_call = ir_build_call(irb, scope, node, nullptr, fn_ref, arg_count, args, false, FnInlineAuto, is_async, async_allocator, nullptr); return ir_lval_wrap(irb, scope, fn_call, lval); } @@ -6825,7 
+6860,7 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec IrInstruction **args = allocate<IrInstruction *>(arg_count); args[0] = implicit_allocator_ptr; // self args[1] = mem_slice; // old_mem - ir_build_call(irb, scope, node, nullptr, free_fn, arg_count, args, false, FnInlineAuto, false, nullptr); + ir_build_call(irb, scope, node, nullptr, free_fn, arg_count, args, false, FnInlineAuto, false, nullptr, nullptr); IrBasicBlock *resume_block = ir_create_basic_block(irb, scope, "Resume"); ir_build_cond_br(irb, scope, node, resume_awaiter, resume_block, irb->exec->coro_suspend_block, const_bool_false); @@ -11992,7 +12027,7 @@ static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCall *c TypeTableEntry *async_return_type = get_error_union_type(ira->codegen, alloc_fn_error_set_type, promise_type); IrInstruction *result = ir_build_call(&ira->new_irb, call_instruction->base.scope, call_instruction->base.source_node, - fn_entry, fn_ref, arg_count, casted_args, false, FnInlineAuto, true, async_allocator_inst); + fn_entry, fn_ref, arg_count, casted_args, false, FnInlineAuto, true, async_allocator_inst, nullptr); result->value.type = async_return_type; return result; } @@ -12362,6 +12397,19 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal return ir_finish_anal(ira, return_type); } + IrInstruction *casted_new_stack = nullptr; + if (call_instruction->new_stack != nullptr) { + TypeTableEntry *u8_ptr = get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, false); + TypeTableEntry *u8_slice = get_slice_type(ira->codegen, u8_ptr); + IrInstruction *new_stack = call_instruction->new_stack->other; + if (type_is_invalid(new_stack->value.type)) + return ira->codegen->builtin_types.entry_invalid; + + casted_new_stack = ir_implicit_cast(ira, new_stack, u8_slice); + if (type_is_invalid(casted_new_stack->value.type)) + return ira->codegen->builtin_types.entry_invalid; + } + if 
(fn_type->data.fn.is_generic) { if (!fn_entry) { ir_add_error(ira, call_instruction->fn_ref, @@ -12588,7 +12636,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal assert(async_allocator_inst == nullptr); IrInstruction *new_call_instruction = ir_build_call_from(&ira->new_irb, &call_instruction->base, impl_fn, nullptr, impl_param_count, casted_args, false, fn_inline, - call_instruction->is_async, nullptr); + call_instruction->is_async, nullptr, casted_new_stack); ir_add_alloca(ira, new_call_instruction, return_type); @@ -12679,7 +12727,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal IrInstruction *new_call_instruction = ir_build_call_from(&ira->new_irb, &call_instruction->base, - fn_entry, fn_ref, call_param_count, casted_args, false, fn_inline, false, nullptr); + fn_entry, fn_ref, call_param_count, casted_args, false, fn_inline, false, nullptr, casted_new_stack); ir_add_alloca(ira, new_call_instruction, return_type); return ir_finish_anal(ira, return_type); diff --git a/src/target.cpp b/src/target.cpp index 5008b51a09..57970888fc 100644 --- a/src/target.cpp +++ b/src/target.cpp @@ -896,3 +896,65 @@ bool target_can_exec(const ZigTarget *host_target, const ZigTarget *guest_target return false; } + +const char *arch_stack_pointer_register_name(const ArchType *arch) { + switch (arch->arch) { + case ZigLLVM_UnknownArch: + zig_unreachable(); + case ZigLLVM_x86: + return "sp"; + case ZigLLVM_x86_64: + return "rsp"; + + case ZigLLVM_aarch64: + case ZigLLVM_arm: + case ZigLLVM_thumb: + case ZigLLVM_aarch64_be: + case ZigLLVM_amdgcn: + case ZigLLVM_amdil: + case ZigLLVM_amdil64: + case ZigLLVM_armeb: + case ZigLLVM_arc: + case ZigLLVM_avr: + case ZigLLVM_bpfeb: + case ZigLLVM_bpfel: + case ZigLLVM_hexagon: + case ZigLLVM_lanai: + case ZigLLVM_hsail: + case ZigLLVM_hsail64: + case ZigLLVM_kalimba: + case ZigLLVM_le32: + case ZigLLVM_le64: + case ZigLLVM_mips: + case ZigLLVM_mips64: + case 
ZigLLVM_mips64el: + case ZigLLVM_mipsel: + case ZigLLVM_msp430: + case ZigLLVM_nios2: + case ZigLLVM_nvptx: + case ZigLLVM_nvptx64: + case ZigLLVM_ppc64le: + case ZigLLVM_r600: + case ZigLLVM_renderscript32: + case ZigLLVM_renderscript64: + case ZigLLVM_riscv32: + case ZigLLVM_riscv64: + case ZigLLVM_shave: + case ZigLLVM_sparc: + case ZigLLVM_sparcel: + case ZigLLVM_sparcv9: + case ZigLLVM_spir: + case ZigLLVM_spir64: + case ZigLLVM_systemz: + case ZigLLVM_tce: + case ZigLLVM_tcele: + case ZigLLVM_thumbeb: + case ZigLLVM_wasm32: + case ZigLLVM_wasm64: + case ZigLLVM_xcore: + case ZigLLVM_ppc: + case ZigLLVM_ppc64: + zig_panic("TODO populate this table with stack pointer register name for this CPU architecture"); + } + zig_unreachable(); +} diff --git a/src/target.hpp b/src/target.hpp index 614b0627d5..5a118f6d8d 100644 --- a/src/target.hpp +++ b/src/target.hpp @@ -77,6 +77,8 @@ size_t target_arch_count(void); const ArchType *get_target_arch(size_t index); void get_arch_name(char *out_str, const ArchType *arch); +const char *arch_stack_pointer_register_name(const ArchType *arch); + size_t target_vendor_count(void); ZigLLVM_VendorType get_target_vendor(size_t index); -- cgit v1.2.3