author    Andrew Kelley <andrew@ziglang.org>  2022-03-11 00:04:42 -0700
committer Andrew Kelley <andrew@ziglang.org>  2022-03-11 00:04:42 -0700
commit    078037ab9b410fa13a86eabdfc30918fc83cdcf3
tree      b5e80992e82d015393872fcc57911b03ca7211fd
parent    b28b3f6f7b1dd4c3c8a0f3d3a6305a84daed8ead
stage2: passing threadlocal tests for x86_64-linux
* use the real start code for the LLVM backend with x86_64-linux
  - there is still a check for zig_backend after initializing the TLS area
    to skip some stuff.
* introduce new AIR instructions and implement them for the LLVM backend.
  They are the same as `call` except with a modifier (a usage sketch follows
  this message):
  - call_always_tail
  - call_never_tail
  - call_never_inline
* LLVM backend: call hasRuntimeBitsIgnoreComptime in more places to avoid
  unnecessarily depending on comptimeOnly being resolved for some types.
* LLVM backend: remove duplicate code for setting linkage and value name.
  The canonical place for this is in `updateDeclExports`.
* LLVM backend: massage the inline assembly template so that `%%` is
  rendered as `%`. More hacks will be needed to make inline assembly catch
  up with stage1.
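
For context, the three new AIR instructions correspond to the `modifier`
field of std.builtin.CallOptions as passed to `@call`. A minimal usage
sketch, assuming the `@call` builtin as it existed around this commit (not
part of the commit itself):

const std = @import("std");

fn add(a: u32, b: u32) u32 {
    return a + b;
}

test "call modifiers lower to distinct AIR instructions" {
    // Default modifier (.auto): lowers to the existing `call` instruction.
    const x = @call(.{}, add, .{ 1, 2 });
    // Lowers to the new `call_never_inline` instruction.
    const y = @call(.{ .modifier = .never_inline }, add, .{ 3, 4 });
    try std.testing.expect(x == 3 and y == 7);
}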
Diffstat (limited to 'src/codegen')
-rw-r--r--  src/codegen/c.zig    | 25
-rw-r--r--  src/codegen/llvm.zig | 94
2 files changed, 84 insertions(+), 35 deletions(-)
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 2a10a8094a..4a30bf023b 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -1685,7 +1685,6 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
.assembly => try airAsm(f, inst),
.block => try airBlock(f, inst),
.bitcast => try airBitcast(f, inst),
- .call => try airCall(f, inst),
.dbg_stmt => try airDbgStmt(f, inst),
.intcast => try airIntCast(f, inst),
.trunc => try airTrunc(f, inst),
@@ -1721,6 +1720,11 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
.union_init => try airUnionInit(f, inst),
.prefetch => try airPrefetch(f, inst),
+ .call => try airCall(f, inst, .auto),
+ .call_always_tail => try airCall(f, inst, .always_tail),
+ .call_never_tail => try airCall(f, inst, .never_tail),
+ .call_never_inline => try airCall(f, inst, .never_inline),
+
.int_to_float,
.float_to_int,
.fptrunc,
@@ -1904,7 +1908,7 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue {
const elem_type = inst_ty.elemType();
const mutability: Mutability = if (inst_ty.isConstPtr()) .Const else .Mut;
- if (!elem_type.isFnOrHasRuntimeBits()) {
+ if (!elem_type.isFnOrHasRuntimeBitsIgnoreComptime()) {
return CValue.undefined_ptr;
}
@@ -1979,7 +1983,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
fn airRet(f: *Function, inst: Air.Inst.Index) !CValue {
const un_op = f.air.instructions.items(.data)[inst].un_op;
const writer = f.object.writer();
- if (f.air.typeOf(un_op).isFnOrHasRuntimeBits()) {
+ if (f.air.typeOf(un_op).isFnOrHasRuntimeBitsIgnoreComptime()) {
const operand = try f.resolveInst(un_op);
try writer.writeAll("return ");
try f.writeCValue(writer, operand);
@@ -1995,7 +1999,7 @@ fn airRetLoad(f: *Function, inst: Air.Inst.Index) !CValue {
const writer = f.object.writer();
const ptr_ty = f.air.typeOf(un_op);
const ret_ty = ptr_ty.childType();
- if (!ret_ty.isFnOrHasRuntimeBits()) {
+ if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime()) {
try writer.writeAll("return;\n");
}
const ptr = try f.resolveInst(un_op);
@@ -2561,7 +2565,18 @@ fn airSlice(f: *Function, inst: Air.Inst.Index) !CValue {
return local;
}
-fn airCall(f: *Function, inst: Air.Inst.Index) !CValue {
+fn airCall(
+ f: *Function,
+ inst: Air.Inst.Index,
+ modifier: std.builtin.CallOptions.Modifier,
+) !CValue {
+ switch (modifier) {
+ .auto => {},
+ .always_tail => return f.fail("TODO: C backend: call with always_tail attribute", .{}),
+ .never_tail => return f.fail("TODO: C backend: call with never_tail attribute", .{}),
+ .never_inline => return f.fail("TODO: C backend: call with never_inline attribute", .{}),
+ else => unreachable,
+ }
const pl_op = f.air.instructions.items(.data)[inst].pl_op;
const extra = f.air.extraData(Air.Call, pl_op.payload);
const args = @bitCast([]const Air.Inst.Ref, f.air.extra[extra.end..][0..extra.data.args_len]);
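
The `else => unreachable` prong in airCall above is sound because the
remaining modifiers never reach codegen: they are resolved during semantic
analysis. For reference, std.builtin.CallOptions around this time looked
roughly like the following (paraphrased from that era's std, not taken from
this commit):

pub const CallOptions = struct {
    modifier: Modifier = .auto,
    stack: ?[]align(std.Target.stack_align) u8 = null,

    pub const Modifier = enum {
        auto,          // lowers to `call`
        async_kw,      // resolved before codegen
        never_tail,    // lowers to `call_never_tail`
        never_inline,  // lowers to `call_never_inline`
        no_async,      // resolved before codegen
        always_tail,   // lowers to `call_always_tail`
        always_inline, // inlined during Sema, never reaches codegen
        compile_time,  // evaluated at comptime, never reaches codegen
    };
};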
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 44c7e75629..d7df161b00 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -661,14 +661,19 @@ pub const Object = struct {
// If the module does not already have the function, we ignore this function call
// because we call `updateDeclExports` at the end of `updateFunc` and `updateDecl`.
const llvm_global = self.decl_map.get(decl) orelse return;
- const is_extern = decl.isExtern();
- if (is_extern) {
+ if (decl.isExtern()) {
llvm_global.setValueName(decl.name);
llvm_global.setUnnamedAddr(.False);
llvm_global.setLinkage(.External);
if (decl.val.castTag(.variable)) |variable| {
- if (variable.data.is_threadlocal) llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel);
- if (variable.data.is_weak_linkage) llvm_global.setLinkage(.ExternalWeak);
+ if (variable.data.is_threadlocal) {
+ llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel);
+ } else {
+ llvm_global.setThreadLocalMode(.NotThreadLocal);
+ }
+ if (variable.data.is_weak_linkage) {
+ llvm_global.setLinkage(.ExternalWeak);
+ }
}
} else if (exports.len != 0) {
const exp_name = exports[0].options.name;
@@ -681,7 +686,9 @@ pub const Object = struct {
.LinkOnce => llvm_global.setLinkage(.LinkOnceODR),
}
if (decl.val.castTag(.variable)) |variable| {
- if (variable.data.is_threadlocal) llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel);
+ if (variable.data.is_threadlocal) {
+ llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel);
+ }
}
// If a Decl is exported more than one time (which is rare),
// we add aliases for all but the first export.
@@ -709,6 +716,14 @@ pub const Object = struct {
llvm_global.setValueName2(fqn.ptr, fqn.len);
llvm_global.setLinkage(.Internal);
llvm_global.setUnnamedAddr(.True);
+ if (decl.val.castTag(.variable)) |variable| {
+ const single_threaded = module.comp.bin_file.options.single_threaded;
+ if (variable.data.is_threadlocal and !single_threaded) {
+ llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel);
+ } else {
+ llvm_global.setThreadLocalMode(.NotThreadLocal);
+ }
+ }
}
}
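
The three branches above (extern, exported, and internal) can be exercised
with declarations like these (hypothetical examples, not from the commit's
test suite):

// Extern TLS: keeps its value name, external linkage, GeneralDynamic model.
extern threadlocal var errno: c_int;

// Exported TLS: linkage comes from the export options, GeneralDynamic model.
export threadlocal var counter: u32 = 0;

// Internal TLS: thread-local only when not building single-threaded; with
// -fsingle-threaded it is set back to NotThreadLocal.
threadlocal var scratch: [64]u8 = undefined;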
@@ -937,19 +952,6 @@ pub const DeclGen = struct {
const llvm_global = dg.object.llvm_module.addGlobalInAddressSpace(llvm_type, fqn, llvm_addrspace);
gop.value_ptr.* = llvm_global;
- if (decl.isExtern()) {
- llvm_global.setValueName(decl.name);
- llvm_global.setUnnamedAddr(.False);
- llvm_global.setLinkage(.External);
- if (decl.val.castTag(.variable)) |variable| {
- if (variable.data.is_threadlocal) llvm_global.setThreadLocalMode(.GeneralDynamicTLSModel);
- if (variable.data.is_weak_linkage) llvm_global.setLinkage(.ExternalWeak);
- }
- } else {
- llvm_global.setLinkage(.Internal);
- llvm_global.setUnnamedAddr(.True);
- }
-
return llvm_global;
}
@@ -1033,8 +1035,8 @@ pub const DeclGen = struct {
const elem_ty = ptr_info.pointee_type;
const lower_elem_ty = switch (elem_ty.zigTypeTag()) {
.Opaque, .Fn => true,
- .Array => elem_ty.childType().hasRuntimeBits(),
- else => elem_ty.hasRuntimeBits(),
+ .Array => elem_ty.childType().hasRuntimeBitsIgnoreComptime(),
+ else => elem_ty.hasRuntimeBitsIgnoreComptime(),
};
const llvm_elem_ty = if (lower_elem_ty)
try dg.llvmType(elem_ty)
@@ -3158,7 +3160,6 @@ pub const FuncGen = struct {
.breakpoint => try self.airBreakpoint(inst),
.ret_addr => try self.airRetAddr(inst),
.frame_addr => try self.airFrameAddress(inst),
- .call => try self.airCall(inst),
.cond_br => try self.airCondBr(inst),
.intcast => try self.airIntCast(inst),
.trunc => try self.airTrunc(inst),
@@ -3175,6 +3176,11 @@ pub const FuncGen = struct {
.slice_ptr => try self.airSliceField(inst, 0),
.slice_len => try self.airSliceField(inst, 1),
+ .call => try self.airCall(inst, .Auto),
+ .call_always_tail => try self.airCall(inst, .AlwaysTail),
+ .call_never_tail => try self.airCall(inst, .NeverTail),
+ .call_never_inline => try self.airCall(inst, .NeverInline),
+
.ptr_slice_ptr_ptr => try self.airPtrSliceFieldPtr(inst, 0),
.ptr_slice_len_ptr => try self.airPtrSliceFieldPtr(inst, 1),
@@ -3253,7 +3259,7 @@ pub const FuncGen = struct {
}
}
- fn airCall(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airCall(self: *FuncGen, inst: Air.Inst.Index, attr: llvm.CallAttr) !?*const llvm.Value {
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const extra = self.air.extraData(Air.Call, pl_op.payload);
const args = @bitCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
@@ -3298,7 +3304,7 @@ pub const FuncGen = struct {
llvm_args.items.ptr,
@intCast(c_uint, llvm_args.items.len),
toLlvmCallConv(zig_fn_ty.fnCallingConvention(), target),
- .Auto,
+ attr,
"",
);
@@ -4063,6 +4069,34 @@ pub const FuncGen = struct {
}
const asm_source = std.mem.sliceAsBytes(self.air.extra[extra_i..])[0..extra.data.source_len];
+ // hackety hacks until stage2 has proper inline asm in the frontend.
+ var rendered_template = std.ArrayList(u8).init(self.gpa);
+ defer rendered_template.deinit();
+
+ const State = enum { start, percent };
+
+ var state: State = .start;
+
+ for (asm_source) |byte| {
+ switch (state) {
+ .start => switch (byte) {
+ '%' => state = .percent,
+ else => try rendered_template.append(byte),
+ },
+ .percent => switch (byte) {
+ '%' => {
+ try rendered_template.append('%');
+ state = .start;
+ },
+ else => {
+ try rendered_template.append('%');
+ try rendered_template.append(byte);
+ state = .start;
+ },
+ },
+ }
+ }
+
const ret_ty = self.air.typeOfIndex(inst);
const ret_llvm_ty = try self.dg.llvmType(ret_ty);
const llvm_fn_ty = llvm.functionType(
@@ -4073,8 +4107,8 @@ pub const FuncGen = struct {
);
const asm_fn = llvm.getInlineAsm(
llvm_fn_ty,
- asm_source.ptr,
- asm_source.len,
+ rendered_template.items.ptr,
+ rendered_template.items.len,
llvm_constraints.items.ptr,
llvm_constraints.items.len,
llvm.Bool.fromBool(is_volatile),
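
The template massaging above can be reproduced in isolation. A standalone
sketch of the same idea (hypothetical helper, not in the commit; note that,
unlike the state machine above, it also emits a trailing lone '%' instead of
dropping it):

const std = @import("std");

/// Collapse GCC-style "%%" escapes into a single '%' for LLVM's
/// inline assembly template syntax.
fn renderAsmTemplate(allocator: std.mem.Allocator, source: []const u8) ![]u8 {
    var out = std.ArrayList(u8).init(allocator);
    errdefer out.deinit();
    var i: usize = 0;
    while (i < source.len) : (i += 1) {
        if (source[i] == '%' and i + 1 < source.len and source[i + 1] == '%') {
            // "%%" -> "%": emit one '%' and skip the second.
            try out.append('%');
            i += 1;
        } else {
            try out.append(source[i]);
        }
    }
    return out.toOwnedSlice();
}

test "double percent collapses to single percent" {
    const rendered = try renderAsmTemplate(std.testing.allocator, "movq %%rax, %%rdi");
    defer std.testing.allocator.free(rendered);
    try std.testing.expectEqualStrings("movq %rax, %rdi", rendered);
}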
@@ -5206,7 +5240,7 @@ pub const FuncGen = struct {
if (self.liveness.isUnused(inst)) return null;
const ptr_ty = self.air.typeOfIndex(inst);
const pointee_type = ptr_ty.childType();
- if (!pointee_type.isFnOrHasRuntimeBits()) return self.dg.lowerPtrToVoid(ptr_ty);
+ if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime()) return self.dg.lowerPtrToVoid(ptr_ty);
const pointee_llvm_ty = try self.dg.llvmType(pointee_type);
const alloca_inst = self.buildAlloca(pointee_llvm_ty);
@@ -5220,7 +5254,7 @@ pub const FuncGen = struct {
if (self.liveness.isUnused(inst)) return null;
const ptr_ty = self.air.typeOfIndex(inst);
const ret_ty = ptr_ty.childType();
- if (!ret_ty.isFnOrHasRuntimeBits()) return self.dg.lowerPtrToVoid(ptr_ty);
+ if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime()) return self.dg.lowerPtrToVoid(ptr_ty);
if (self.ret_ptr) |ret_ptr| return ret_ptr;
const ret_llvm_ty = try self.dg.llvmType(ret_ty);
const target = self.dg.module.getTarget();
@@ -5457,7 +5491,7 @@ pub const FuncGen = struct {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ptr_ty = self.air.typeOf(bin_op.lhs);
const operand_ty = ptr_ty.childType();
- if (!operand_ty.isFnOrHasRuntimeBits()) return null;
+ if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime()) return null;
var ptr = try self.resolveInst(bin_op.lhs);
var element = try self.resolveInst(bin_op.rhs);
const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false);
@@ -6329,7 +6363,7 @@ pub const FuncGen = struct {
fn load(self: *FuncGen, ptr: *const llvm.Value, ptr_ty: Type) !?*const llvm.Value {
const info = ptr_ty.ptrInfo().data;
- if (!info.pointee_type.hasRuntimeBits()) return null;
+ if (!info.pointee_type.hasRuntimeBitsIgnoreComptime()) return null;
const target = self.dg.module.getTarget();
const ptr_alignment = ptr_ty.ptrAlignment(target);
@@ -6384,7 +6418,7 @@ pub const FuncGen = struct {
) void {
const info = ptr_ty.ptrInfo().data;
const elem_ty = info.pointee_type;
- if (!elem_ty.isFnOrHasRuntimeBits()) {
+ if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime()) {
return;
}
const target = self.dg.module.getTarget();
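
Finally, for the IgnoreComptime renames: the point is that the backend can
ask whether a pointee has runtime bits without first resolving whether its
type is comptime-only. An illustrative sketch of the kinds of types involved
(not from the commit):

const std = @import("std");

test "zero-runtime-bit pointees lower to nothing" {
    // `void` has no runtime bits: loads and stores through a `*void` are
    // elided by the `load`/`store` paths above.
    var nothing: void = {};
    const ptr = &nothing;
    ptr.* = {};

    // `comptime_int` is comptime-only: it never has runtime bits, and the
    // IgnoreComptime variants report that without requiring comptime-only
    // resolution to have completed.
    const x: comptime_int = 42;
    comptime std.debug.assert(x == 42);
}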