Diffstat (limited to 'src/codegen/llvm.zig')
 -rw-r--r--  src/codegen/llvm.zig | 692 ++++----
 1 file changed, 346 insertions(+), 346 deletions(-)
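
What follows is a mechanical sweep over the LLVM bindings' pointer types: every `*const llvm.Value`, `*const llvm.Type`, `*const llvm.Module`, `*const llvm.Context`, and so on becomes the mutable `*llvm.Value`, `*llvm.Type`, etc. These are opaque handles into the LLVM C API, which mutates the objects behind them, so the `const` qualifier was misleading. A minimal sketch of the pattern at the extern boundary (illustrative, condensed declaration; `LLVMDeleteFunction` is a real C API entry point, but the real bindings live elsewhere in the repository):

    // Before: the opaque handle was declared pointer-to-const, even though
    // the C API mutates or destroys the object behind it.
    //     extern fn LLVMDeleteFunction(Fn: *const llvm.Value) void;
    // After: the const qualifier is dropped throughout.
    extern fn LLVMDeleteFunction(Fn: *llvm.Value) void;
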
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index d20b6f2ab6..b2b811beba 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -316,7 +316,7 @@ pub fn supportsTailCall(target: std.Target) bool {
}
/// TODO can this be done with simpler logic / different API binding?
-fn deleteLlvmGlobal(llvm_global: *const llvm.Value) void {
+fn deleteLlvmGlobal(llvm_global: *llvm.Value) void {
if (llvm_global.globalGetValueType().getTypeKind() == .Function) {
llvm_global.deleteFunction();
return;
@@ -327,7 +327,7 @@ fn deleteLlvmGlobal(llvm_global: *const llvm.Value) void {
pub const Object = struct {
gpa: Allocator,
module: *Module,
- llvm_module: *const llvm.Module,
+ llvm_module: *llvm.Module,
di_builder: ?*llvm.DIBuilder,
/// One of these mappings:
/// - *Module.File => *DIFile
@@ -335,9 +335,9 @@ pub const Object = struct {
/// - *Module.Decl (Non-Fn) => *DIGlobalVariable
di_map: std.AutoHashMapUnmanaged(*const anyopaque, *llvm.DINode),
di_compile_unit: ?*llvm.DICompileUnit,
- context: *const llvm.Context,
- target_machine: *const llvm.TargetMachine,
- target_data: *const llvm.TargetData,
+ context: *llvm.Context,
+ target_machine: *llvm.TargetMachine,
+ target_data: *llvm.TargetData,
target: std.Target,
/// Ideally we would use `llvm_module.getNamedFunction` to go from *Decl to LLVM function,
/// but that has some downsides:
@@ -347,9 +347,9 @@ pub const Object = struct {
/// version of the name and incorrectly get function not found in the llvm module.
/// * it works for functions, not all globals.
/// Therefore, this table keeps track of the mapping.
- decl_map: std.AutoHashMapUnmanaged(Module.Decl.Index, *const llvm.Value),
+ decl_map: std.AutoHashMapUnmanaged(Module.Decl.Index, *llvm.Value),
/// Serves the same purpose as `decl_map` but only used for the `is_named_enum_value` instruction.
- named_enum_map: std.AutoHashMapUnmanaged(Module.Decl.Index, *const llvm.Value),
+ named_enum_map: std.AutoHashMapUnmanaged(Module.Decl.Index, *llvm.Value),
/// Maps Zig types to LLVM types. The table memory itself is backed by the GPA of
/// the compiler, but the Type/Value memory here is backed by `type_map_arena`.
/// TODO we need to remove entries from this map in response to incremental compilation
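
The `decl_map` above is consulted with a get-or-put pattern; a condensed sketch of how `resolveLlvmFunction` and `resolveGlobalDecl` (both visible later in this diff) use it:

    const gop = try dg.object.decl_map.getOrPut(dg.gpa, decl_index);
    if (gop.found_existing) return gop.value_ptr.*; // Decl already lowered.
    errdefer assert(dg.object.decl_map.remove(decl_index));
    // ... create the LLVM function or global for this Decl, then store it
    // back into gop.value_ptr.* so later lookups hit the fast path.
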
@@ -363,7 +363,7 @@ pub const Object = struct {
/// The LLVM global table which holds the names corresponding to Zig errors.
/// Note that the values are not added until flushModule, when all errors in
/// the compilation are known.
- error_name_table: ?*const llvm.Value,
+ error_name_table: ?*llvm.Value,
/// This map is usually very close to empty. It tracks only the cases when a
/// second extern Decl could not be emitted with the correct name due to a
/// name collision.
@@ -371,7 +371,7 @@ pub const Object = struct {
pub const TypeMap = std.HashMapUnmanaged(
Type,
- *const llvm.Type,
+ *llvm.Type,
Type.HashContext64,
std.hash_map.default_max_load_percentage,
);
@@ -405,7 +405,7 @@ pub const Object = struct {
defer gpa.free(llvm_target_triple);
var error_message: [*:0]const u8 = undefined;
- var target: *const llvm.Target = undefined;
+ var target: *llvm.Target = undefined;
if (llvm.Target.getFromTriple(llvm_target_triple.ptr, &target, &error_message).toBool()) {
defer llvm.disposeMessage(error_message);
@@ -578,7 +578,7 @@ pub const Object = struct {
const llvm_ptr_ty = self.context.intType(8).pointerType(0); // TODO: Address space
const llvm_usize_ty = self.context.intType(target.cpu.arch.ptrBitWidth());
- const type_fields = [_]*const llvm.Type{
+ const type_fields = [_]*llvm.Type{
llvm_ptr_ty,
llvm_usize_ty,
};
@@ -587,7 +587,7 @@ pub const Object = struct {
const slice_alignment = slice_ty.abiAlignment(target);
const error_name_list = mod.error_name_list.items;
- const llvm_errors = try mod.gpa.alloc(*const llvm.Value, error_name_list.len);
+ const llvm_errors = try mod.gpa.alloc(*llvm.Value, error_name_list.len);
defer mod.gpa.free(llvm_errors);
llvm_errors[0] = llvm_slice_ty.getUndef();
@@ -601,7 +601,7 @@ pub const Object = struct {
str_global.setUnnamedAddr(.True);
str_global.setAlignment(1);
- const slice_fields = [_]*const llvm.Value{
+ const slice_fields = [_]*llvm.Value{
str_global.constBitCast(llvm_ptr_ty),
llvm_usize_ty.constInt(name.len, .False),
};
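
For context, the hunk above is the flushModule step that fills `error_name_table`: each error name becomes a private string global wrapped in a `{ ptr, usize }` constant, i.e. the layout of a Zig slice. A condensed sketch of the per-name step, with `i` as the error's index and `str_global`, `name`, and the types taken from above (the real code may build a named-struct constant; this uses the anonymous `constStruct` binding that appears elsewhere in the diff):

    const slice_fields = [_]*llvm.Value{
        str_global.constBitCast(llvm_ptr_ty), // pointer to the name bytes
        llvm_usize_ty.constInt(name.len, .False), // slice length
    };
    llvm_errors[i] = self.context.constStruct(&slice_fields, slice_fields.len, .False);
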
@@ -911,7 +911,7 @@ pub const Object = struct {
// This is the list of args we will use that correspond directly to the AIR arg
// instructions. Depending on the calling convention, this list is not necessarily
// a bijection with the actual LLVM parameters of the function.
- var args = std.ArrayList(*const llvm.Value).init(gpa);
+ var args = std.ArrayList(*llvm.Value).init(gpa);
defer args.deinit();
{
@@ -1028,7 +1028,7 @@ pub const Object = struct {
const param_alignment = param_ty.abiAlignment(target);
const arg_ptr = buildAllocaInner(builder, llvm_func, false, param_llvm_ty);
arg_ptr.setAlignment(param_alignment);
- var field_types_buf: [8]*const llvm.Type = undefined;
+ var field_types_buf: [8]*llvm.Type = undefined;
const field_types = field_types_buf[0..llvm_ints.len];
for (llvm_ints) |int_bits, i| {
field_types[i] = dg.context.intType(int_bits);
@@ -1059,7 +1059,7 @@ pub const Object = struct {
const param_alignment = param_ty.abiAlignment(target);
const arg_ptr = buildAllocaInner(builder, llvm_func, false, param_llvm_ty);
arg_ptr.setAlignment(param_alignment);
- var field_types_buf: [8]*const llvm.Type = undefined;
+ var field_types_buf: [8]*llvm.Type = undefined;
const field_types = field_types_buf[0..llvm_floats.len];
for (llvm_floats) |float_bits, i| {
switch (float_bits) {
@@ -1215,7 +1215,7 @@ pub const Object = struct {
/// TODO replace this with a call to `Module::getNamedValue`. This will require adding
/// a new wrapper in zig_llvm.h/zig_llvm.cpp.
- fn getLlvmGlobal(o: Object, name: [*:0]const u8) ?*const llvm.Value {
+ fn getLlvmGlobal(o: Object, name: [*:0]const u8) ?*llvm.Value {
if (o.llvm_module.getNamedFunction(name)) |x| return x;
if (o.llvm_module.getNamedGlobal(name)) |x| return x;
return null;
@@ -2338,7 +2338,7 @@ pub const Object = struct {
};
pub const DeclGen = struct {
- context: *const llvm.Context,
+ context: *llvm.Context,
object: *Object,
module: *Module,
decl: *Module.Decl,
@@ -2354,7 +2354,7 @@ pub const DeclGen = struct {
return error.CodegenFail;
}
- fn llvmModule(self: *DeclGen) *const llvm.Module {
+ fn llvmModule(self: *DeclGen) *llvm.Module {
return self.object.llvm_module;
}
@@ -2444,7 +2444,7 @@ pub const DeclGen = struct {
/// If the llvm function does not exist, create it.
/// Note that this can be called before the function's semantic analysis has
/// completed, so if any attributes rely on that, they must be done in updateFunc, not here.
- fn resolveLlvmFunction(dg: *DeclGen, decl_index: Module.Decl.Index) !*const llvm.Value {
+ fn resolveLlvmFunction(dg: *DeclGen, decl_index: Module.Decl.Index) !*llvm.Value {
const decl = dg.module.declPtr(decl_index);
const zig_fn_type = decl.ty;
const gop = try dg.object.decl_map.getOrPut(dg.gpa, decl_index);
@@ -2559,7 +2559,7 @@ pub const DeclGen = struct {
return llvm_fn;
}
- fn addCommonFnAttributes(dg: *DeclGen, llvm_fn: *const llvm.Value) void {
+ fn addCommonFnAttributes(dg: *DeclGen, llvm_fn: *llvm.Value) void {
const comp = dg.module.comp;
if (!comp.bin_file.options.red_zone) {
@@ -2599,7 +2599,7 @@ pub const DeclGen = struct {
}
}
- fn resolveGlobalDecl(dg: *DeclGen, decl_index: Module.Decl.Index) Error!*const llvm.Value {
+ fn resolveGlobalDecl(dg: *DeclGen, decl_index: Module.Decl.Index) Error!*llvm.Value {
const gop = try dg.object.decl_map.getOrPut(dg.gpa, decl_index);
if (gop.found_existing) return gop.value_ptr.*;
errdefer assert(dg.object.decl_map.remove(decl_index));
@@ -2661,7 +2661,7 @@ pub const DeclGen = struct {
};
}
- fn isUnnamedType(dg: *DeclGen, ty: Type, val: *const llvm.Value) bool {
+ fn isUnnamedType(dg: *DeclGen, ty: Type, val: *llvm.Value) bool {
// Once `lowerType` succeeds, successive calls to it with the same Zig type
// are guaranteed to succeed. So if a call to `lowerType` fails here it means
// it is the first time lowering the type, which means the value can't possibly
@@ -2670,7 +2670,7 @@ pub const DeclGen = struct {
return val.typeOf() != llvm_ty;
}
- fn lowerType(dg: *DeclGen, t: Type) Allocator.Error!*const llvm.Type {
+ fn lowerType(dg: *DeclGen, t: Type) Allocator.Error!*llvm.Type {
const llvm_ty = try lowerTypeInner(dg, t);
if (std.debug.runtime_safety and false) check: {
if (t.zigTypeTag() == .Opaque) break :check;
@@ -2688,7 +2688,7 @@ pub const DeclGen = struct {
return llvm_ty;
}
- fn lowerTypeInner(dg: *DeclGen, t: Type) Allocator.Error!*const llvm.Type {
+ fn lowerTypeInner(dg: *DeclGen, t: Type) Allocator.Error!*llvm.Type {
const gpa = dg.gpa;
const target = dg.module.getTarget();
switch (t.zigTypeTag()) {
@@ -2719,7 +2719,7 @@ pub const DeclGen = struct {
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const ptr_type = t.slicePtrFieldType(&buf);
- const fields: [2]*const llvm.Type = .{
+ const fields: [2]*llvm.Type = .{
try dg.lowerType(ptr_type),
try dg.lowerType(Type.usize),
};
@@ -2776,7 +2776,7 @@ pub const DeclGen = struct {
}
comptime assert(optional_layout_version == 3);
- var fields_buf: [3]*const llvm.Type = .{
+ var fields_buf: [3]*llvm.Type = .{
payload_llvm_ty, dg.context.intType(8), undefined,
};
const offset = child_ty.abiSize(target) + 1;
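
The `fields_buf` pattern above is optional layout version 3: payload first, then an i8 non-null flag, then padding only when the ABI size demands it. A sketch of how the buffer likely becomes the final type, assuming an anonymous-struct `structType` binding on the context (name hypothetical if the bindings differ):

    const abi_size = t.abiSize(target);
    const padding = @intCast(c_uint, abi_size - offset);
    if (padding == 0) return dg.context.structType(&fields_buf, 2, .False);
    fields_buf[2] = dg.context.intType(8).arrayType(padding); // trailing padding bytes
    return dg.context.structType(&fields_buf, 3, .False);
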
@@ -2802,7 +2802,7 @@ pub const DeclGen = struct {
const payload_size = payload_ty.abiSize(target);
const error_size = Type.anyerror.abiSize(target);
- var fields_buf: [3]*const llvm.Type = undefined;
+ var fields_buf: [3]*llvm.Type = undefined;
if (error_align > payload_align) {
fields_buf[0] = llvm_error_type;
fields_buf[1] = llvm_payload_type;
@@ -2845,7 +2845,7 @@ pub const DeclGen = struct {
const llvm_struct_ty = dg.context.structCreateNamed("");
gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls
- var llvm_field_types: std.ArrayListUnmanaged(*const llvm.Type) = .{};
+ var llvm_field_types: std.ArrayListUnmanaged(*llvm.Type) = .{};
defer llvm_field_types.deinit(gpa);
try llvm_field_types.ensureUnusedCapacity(gpa, tuple.types.len);
@@ -2908,7 +2908,7 @@ pub const DeclGen = struct {
assert(struct_obj.haveFieldTypes());
- var llvm_field_types: std.ArrayListUnmanaged(*const llvm.Type) = .{};
+ var llvm_field_types: std.ArrayListUnmanaged(*llvm.Type) = .{};
defer llvm_field_types.deinit(gpa);
try llvm_field_types.ensureUnusedCapacity(gpa, struct_obj.fields.count());
@@ -2991,7 +2991,7 @@ pub const DeclGen = struct {
@intCast(c_uint, layout.abi_size - layout.most_aligned_field_size)
else
@intCast(c_uint, layout.payload_size - layout.most_aligned_field_size);
- const fields: [2]*const llvm.Type = .{
+ const fields: [2]*llvm.Type = .{
llvm_aligned_field_ty,
dg.context.intType(8).arrayType(padding_len),
};
@@ -2999,7 +2999,7 @@ pub const DeclGen = struct {
};
if (layout.tag_size == 0) {
- var llvm_fields: [1]*const llvm.Type = .{llvm_payload_ty};
+ var llvm_fields: [1]*llvm.Type = .{llvm_payload_ty};
llvm_union_ty.structSetBody(&llvm_fields, llvm_fields.len, .False);
return llvm_union_ty;
}
@@ -3007,7 +3007,7 @@ pub const DeclGen = struct {
// Put the tag before or after the payload depending on which one's
// alignment is greater.
- var llvm_fields: [3]*const llvm.Type = undefined;
+ var llvm_fields: [3]*llvm.Type = undefined;
var llvm_fields_len: c_uint = 2;
if (layout.tag_align >= layout.payload_align) {
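
For reference, the two orderings this branch selects between, with `llvm_tag_ty` and `llvm_payload_ty` as illustrative names for the already-computed field types (the value-lowering code later in this diff makes the same alignment-based choice):

    if (layout.tag_align >= layout.payload_align) {
        llvm_fields = .{ llvm_tag_ty, llvm_payload_ty, undefined }; // tag first
    } else {
        llvm_fields = .{ llvm_payload_ty, llvm_tag_ty, undefined }; // payload first
    }
    // A padding array may be appended as a third field, bumping llvm_fields_len to 3.
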
@@ -3040,12 +3040,12 @@ pub const DeclGen = struct {
}
}
- fn lowerTypeFn(dg: *DeclGen, fn_ty: Type) Allocator.Error!*const llvm.Type {
+ fn lowerTypeFn(dg: *DeclGen, fn_ty: Type) Allocator.Error!*llvm.Type {
const target = dg.module.getTarget();
const fn_info = fn_ty.fnInfo();
const llvm_ret_ty = try lowerFnRetTy(dg, fn_info);
- var llvm_params = std.ArrayList(*const llvm.Type).init(dg.gpa);
+ var llvm_params = std.ArrayList(*llvm.Type).init(dg.gpa);
defer llvm_params.deinit();
if (firstParamSRet(fn_info, target)) {
@@ -3135,7 +3135,7 @@ pub const DeclGen = struct {
/// Use this instead of lowerType when you want to correctly handle the case of elem_ty
/// being a zero-bit type, which should nonetheless be lowered as an i8.
/// There are other similar cases handled here as well.
- fn lowerPtrElemTy(dg: *DeclGen, elem_ty: Type) Allocator.Error!*const llvm.Type {
+ fn lowerPtrElemTy(dg: *DeclGen, elem_ty: Type) Allocator.Error!*llvm.Type {
const lower_elem_ty = switch (elem_ty.zigTypeTag()) {
.Opaque, .Fn => true,
.Array => elem_ty.childType().hasRuntimeBitsIgnoreComptime(),
@@ -3149,7 +3149,7 @@ pub const DeclGen = struct {
return llvm_elem_ty;
}
- fn lowerValue(dg: *DeclGen, tv: TypedValue) Error!*const llvm.Value {
+ fn lowerValue(dg: *DeclGen, tv: TypedValue) Error!*llvm.Value {
if (tv.val.isUndef()) {
const llvm_type = try dg.lowerType(tv.ty);
return llvm_type.getUndef();
@@ -3264,7 +3264,7 @@ pub const DeclGen = struct {
.slice => {
const slice = tv.val.castTag(.slice).?.data;
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
- const fields: [2]*const llvm.Value = .{
+ const fields: [2]*llvm.Value = .{
try dg.lowerValue(.{
.ty = tv.ty.slicePtrFieldType(&buf),
.val = slice.ptr,
@@ -3336,7 +3336,7 @@ pub const DeclGen = struct {
const elem_ty = tv.ty.elemType();
const gpa = dg.gpa;
const len = @intCast(usize, tv.ty.arrayLenIncludingSentinel());
- const llvm_elems = try gpa.alloc(*const llvm.Value, len);
+ const llvm_elems = try gpa.alloc(*llvm.Value, len);
defer gpa.free(llvm_elems);
var need_unnamed = false;
for (elem_vals[0..len]) |elem_val, i| {
@@ -3364,7 +3364,7 @@ pub const DeclGen = struct {
const len = @intCast(usize, tv.ty.arrayLen());
const len_including_sent = len + @boolToInt(sentinel != null);
const gpa = dg.gpa;
- const llvm_elems = try gpa.alloc(*const llvm.Value, len_including_sent);
+ const llvm_elems = try gpa.alloc(*llvm.Value, len_including_sent);
defer gpa.free(llvm_elems);
var need_unnamed = false;
@@ -3398,7 +3398,7 @@ pub const DeclGen = struct {
const elem_ty = tv.ty.elemType();
const sent_val = tv.ty.sentinel().?;
const sentinel = try dg.lowerValue(.{ .ty = elem_ty, .val = sent_val });
- const llvm_elems: [1]*const llvm.Value = .{sentinel};
+ const llvm_elems: [1]*llvm.Value = .{sentinel};
const need_unnamed = dg.isUnnamedType(elem_ty, llvm_elems[0]);
if (need_unnamed) {
return dg.context.constStruct(&llvm_elems, llvm_elems.len, .True);
@@ -3433,7 +3433,7 @@ pub const DeclGen = struct {
assert(payload_ty.zigTypeTag() != .Fn);
const llvm_field_count = llvm_ty.countStructElementTypes();
- var fields_buf: [3]*const llvm.Value = undefined;
+ var fields_buf: [3]*llvm.Value = undefined;
fields_buf[0] = try dg.lowerValue(.{
.ty = payload_ty,
.val = if (tv.val.castTag(.opt_payload)) |pl| pl.data else Value.initTag(.undef),
@@ -3489,7 +3489,7 @@ pub const DeclGen = struct {
.ty = payload_type,
.val = if (tv.val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef),
});
- var fields_buf: [3]*const llvm.Value = undefined;
+ var fields_buf: [3]*llvm.Value = undefined;
const llvm_ty = try dg.lowerType(tv.ty);
const llvm_field_count = llvm_ty.countStructElementTypes();
@@ -3515,7 +3515,7 @@ pub const DeclGen = struct {
if (tv.ty.isTupleOrAnonStruct()) {
const tuple = tv.ty.tupleFields();
- var llvm_fields: std.ArrayListUnmanaged(*const llvm.Value) = .{};
+ var llvm_fields: std.ArrayListUnmanaged(*llvm.Value) = .{};
defer llvm_fields.deinit(gpa);
try llvm_fields.ensureUnusedCapacity(gpa, tuple.types.len);
@@ -3584,7 +3584,7 @@ pub const DeclGen = struct {
const int_llvm_ty = dg.context.intType(@intCast(c_uint, big_bits));
const fields = struct_obj.fields.values();
comptime assert(Type.packed_struct_layout_version == 2);
- var running_int: *const llvm.Value = int_llvm_ty.constNull();
+ var running_int: *llvm.Value = int_llvm_ty.constNull();
var running_bits: u16 = 0;
for (field_vals) |field_val, i| {
const field = fields[i];
@@ -3613,7 +3613,7 @@ pub const DeclGen = struct {
}
const llvm_field_count = llvm_struct_ty.countStructElementTypes();
- var llvm_fields = try std.ArrayListUnmanaged(*const llvm.Value).initCapacity(gpa, llvm_field_count);
+ var llvm_fields = try std.ArrayListUnmanaged(*llvm.Value).initCapacity(gpa, llvm_field_count);
defer llvm_fields.deinit(gpa);
comptime assert(struct_layout_version == 2);
@@ -3706,14 +3706,14 @@ pub const DeclGen = struct {
break :p field;
}
const padding_len = @intCast(c_uint, layout.payload_size - field_size);
- const fields: [2]*const llvm.Value = .{
+ const fields: [2]*llvm.Value = .{
field, dg.context.intType(8).arrayType(padding_len).getUndef(),
};
break :p dg.context.constStruct(&fields, fields.len, .True);
};
if (layout.tag_size == 0) {
- const fields: [1]*const llvm.Value = .{payload};
+ const fields: [1]*llvm.Value = .{payload};
if (need_unnamed) {
return dg.context.constStruct(&fields, fields.len, .False);
} else {
@@ -3724,7 +3724,7 @@ pub const DeclGen = struct {
.ty = tv.ty.unionTagTypeSafety().?,
.val = tag_and_val.tag,
});
- var fields: [3]*const llvm.Value = undefined;
+ var fields: [3]*llvm.Value = undefined;
var fields_len: c_uint = 2;
if (layout.tag_align >= layout.payload_align) {
fields = .{ llvm_tag_value, payload, undefined };
@@ -3749,7 +3749,7 @@ pub const DeclGen = struct {
assert(vector_len == bytes.len or vector_len + 1 == bytes.len);
const elem_ty = tv.ty.elemType();
- const llvm_elems = try dg.gpa.alloc(*const llvm.Value, vector_len);
+ const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len);
defer dg.gpa.free(llvm_elems);
for (llvm_elems) |*elem, i| {
var byte_payload: Value.Payload.U64 = .{
@@ -3774,7 +3774,7 @@ pub const DeclGen = struct {
const vector_len = @intCast(usize, tv.ty.arrayLen());
assert(vector_len == elem_vals.len or vector_len + 1 == elem_vals.len);
const elem_ty = tv.ty.elemType();
- const llvm_elems = try dg.gpa.alloc(*const llvm.Value, vector_len);
+ const llvm_elems = try dg.gpa.alloc(*llvm.Value, vector_len);
defer dg.gpa.free(llvm_elems);
for (llvm_elems) |*elem, i| {
elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = elem_vals[i] });
@@ -3789,7 +3789,7 @@ pub const DeclGen = struct {
const val = tv.val.castTag(.repeated).?.data;
const elem_ty = tv.ty.elemType();
const len = @intCast(usize, tv.ty.arrayLen());
- const llvm_elems = try dg.gpa.alloc(*const llvm.Value, len);
+ const llvm_elems = try dg.gpa.alloc(*llvm.Value, len);
defer dg.gpa.free(llvm_elems);
for (llvm_elems) |*elem| {
elem.* = try dg.lowerValue(.{ .ty = elem_ty, .val = val });
@@ -3821,7 +3821,7 @@ pub const DeclGen = struct {
const ParentPtr = struct {
ty: Type,
- llvm_ptr: *const llvm.Value,
+ llvm_ptr: *llvm.Value,
};
fn lowerParentPtrDecl(
@@ -3829,7 +3829,7 @@ pub const DeclGen = struct {
ptr_val: Value,
decl_index: Module.Decl.Index,
ptr_child_ty: Type,
- ) Error!*const llvm.Value {
+ ) Error!*llvm.Value {
const decl = dg.module.declPtr(decl_index);
dg.module.markDeclAlive(decl);
var ptr_ty_payload: Type.Payload.ElemType = .{
@@ -3846,7 +3846,7 @@ pub const DeclGen = struct {
}
}
- fn lowerParentPtr(dg: *DeclGen, ptr_val: Value, ptr_child_ty: Type) Error!*const llvm.Value {
+ fn lowerParentPtr(dg: *DeclGen, ptr_val: Value, ptr_child_ty: Type) Error!*llvm.Value {
const target = dg.module.getTarget();
var bitcast_needed: bool = undefined;
const llvm_ptr = switch (ptr_val.tag()) {
@@ -3895,7 +3895,7 @@ pub const DeclGen = struct {
0
else
@boolToInt(layout.tag_align >= layout.payload_align);
- const indices: [2]*const llvm.Value = .{
+ const indices: [2]*llvm.Value = .{
llvm_u32.constInt(0, .False),
llvm_u32.constInt(llvm_pl_index, .False),
};
@@ -3926,7 +3926,7 @@ pub const DeclGen = struct {
var ty_buf: Type.Payload.Pointer = undefined;
const llvm_field_index = llvmFieldIndex(parent_ty, field_index, target, &ty_buf).?;
- const indices: [2]*const llvm.Value = .{
+ const indices: [2]*llvm.Value = .{
llvm_u32.constInt(0, .False),
llvm_u32.constInt(llvm_field_index, .False),
};
@@ -3942,7 +3942,7 @@ pub const DeclGen = struct {
bitcast_needed = !elem_ptr.elem_ty.eql(ptr_child_ty, dg.module);
const llvm_usize = try dg.lowerType(Type.usize);
- const indices: [1]*const llvm.Value = .{
+ const indices: [1]*llvm.Value = .{
llvm_usize.constInt(elem_ptr.index, .False),
};
const elem_llvm_ty = try dg.lowerType(elem_ptr.elem_ty);
@@ -3965,7 +3965,7 @@ pub const DeclGen = struct {
}
const llvm_u32 = dg.context.intType(32);
- const indices: [2]*const llvm.Value = .{
+ const indices: [2]*llvm.Value = .{
llvm_u32.constInt(0, .False),
llvm_u32.constInt(0, .False),
};
@@ -3987,7 +3987,7 @@ pub const DeclGen = struct {
const payload_offset: u8 = if (payload_ty.abiAlignment(target) > Type.anyerror.abiSize(target)) 2 else 1;
const llvm_u32 = dg.context.intType(32);
- const indices: [2]*const llvm.Value = .{
+ const indices: [2]*llvm.Value = .{
llvm_u32.constInt(0, .False),
llvm_u32.constInt(payload_offset, .False),
};
@@ -4007,7 +4007,7 @@ pub const DeclGen = struct {
self: *DeclGen,
tv: TypedValue,
decl_index: Module.Decl.Index,
- ) Error!*const llvm.Value {
+ ) Error!*llvm.Value {
if (tv.ty.isSlice()) {
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const ptr_ty = tv.ty.slicePtrFieldType(&buf);
@@ -4015,7 +4015,7 @@ pub const DeclGen = struct {
.base = .{ .tag = .int_u64 },
.data = tv.val.sliceLen(self.module),
};
- const fields: [2]*const llvm.Value = .{
+ const fields: [2]*llvm.Value = .{
try self.lowerValue(.{
.ty = ptr_ty,
.val = tv.val,
@@ -4060,7 +4060,7 @@ pub const DeclGen = struct {
}
}
- fn lowerPtrToVoid(dg: *DeclGen, ptr_ty: Type) !*const llvm.Value {
+ fn lowerPtrToVoid(dg: *DeclGen, ptr_ty: Type) !*llvm.Value {
const alignment = ptr_ty.ptrInfo().data.@"align";
// Even though we are pointing at something which has zero bits (e.g. `void`),
// pointers are defined to have bits. So we must return something here.
@@ -4086,19 +4086,19 @@ pub const DeclGen = struct {
return int.constIntToPtr(llvm_ptr_ty);
}
- fn addAttr(dg: DeclGen, val: *const llvm.Value, index: llvm.AttributeIndex, name: []const u8) void {
+ fn addAttr(dg: DeclGen, val: *llvm.Value, index: llvm.AttributeIndex, name: []const u8) void {
return dg.addAttrInt(val, index, name, 0);
}
- fn addArgAttr(dg: DeclGen, fn_val: *const llvm.Value, param_index: u32, attr_name: []const u8) void {
+ fn addArgAttr(dg: DeclGen, fn_val: *llvm.Value, param_index: u32, attr_name: []const u8) void {
return dg.addAttr(fn_val, param_index + 1, attr_name);
}
- fn addArgAttrInt(dg: DeclGen, fn_val: *const llvm.Value, param_index: u32, attr_name: []const u8, int: u64) void {
+ fn addArgAttrInt(dg: DeclGen, fn_val: *llvm.Value, param_index: u32, attr_name: []const u8, int: u64) void {
return dg.addAttrInt(fn_val, param_index + 1, attr_name, int);
}
- fn removeAttr(val: *const llvm.Value, index: llvm.AttributeIndex, name: []const u8) void {
+ fn removeAttr(val: *llvm.Value, index: llvm.AttributeIndex, name: []const u8) void {
const kind_id = llvm.getEnumAttributeKindForName(name.ptr, name.len);
assert(kind_id != 0);
val.removeEnumAttributeAtIndex(index, kind_id);
@@ -4106,7 +4106,7 @@ pub const DeclGen = struct {
fn addAttrInt(
dg: DeclGen,
- val: *const llvm.Value,
+ val: *llvm.Value,
index: llvm.AttributeIndex,
name: []const u8,
int: u64,
@@ -4119,7 +4119,7 @@ pub const DeclGen = struct {
fn addAttrString(
dg: *DeclGen,
- val: *const llvm.Value,
+ val: *llvm.Value,
index: llvm.AttributeIndex,
name: []const u8,
value: []const u8,
@@ -4133,19 +4133,19 @@ pub const DeclGen = struct {
val.addAttributeAtIndex(index, llvm_attr);
}
- fn addFnAttr(dg: DeclGen, val: *const llvm.Value, name: []const u8) void {
+ fn addFnAttr(dg: DeclGen, val: *llvm.Value, name: []const u8) void {
dg.addAttr(val, std.math.maxInt(llvm.AttributeIndex), name);
}
- fn addFnAttrString(dg: *DeclGen, val: *const llvm.Value, name: []const u8, value: []const u8) void {
+ fn addFnAttrString(dg: *DeclGen, val: *llvm.Value, name: []const u8, value: []const u8) void {
dg.addAttrString(val, std.math.maxInt(llvm.AttributeIndex), name, value);
}
- fn removeFnAttr(fn_val: *const llvm.Value, name: []const u8) void {
+ fn removeFnAttr(fn_val: *llvm.Value, name: []const u8) void {
removeAttr(fn_val, std.math.maxInt(llvm.AttributeIndex), name);
}
- fn addFnAttrInt(dg: DeclGen, fn_val: *const llvm.Value, name: []const u8, int: u64) void {
+ fn addFnAttrInt(dg: DeclGen, fn_val: *llvm.Value, name: []const u8, int: u64) void {
return dg.addAttrInt(fn_val, std.math.maxInt(llvm.AttributeIndex), name, int);
}
@@ -4153,7 +4153,7 @@ pub const DeclGen = struct {
/// widen it before using it and then truncate the result.
/// RMW exchange of floating-point values is bitcast to same-sized integer
/// types to work around an LLVM deficiency when targeting ARM/AArch64.
- fn getAtomicAbiType(dg: *DeclGen, ty: Type, is_rmw_xchg: bool) ?*const llvm.Type {
+ fn getAtomicAbiType(dg: *DeclGen, ty: Type, is_rmw_xchg: bool) ?*llvm.Type {
const target = dg.module.getTarget();
var buffer: Type.Payload.Bits = undefined;
const int_ty = switch (ty.zigTypeTag()) {
@@ -4176,7 +4176,7 @@ pub const DeclGen = struct {
fn addByValParamAttrs(
dg: DeclGen,
- llvm_fn: *const llvm.Value,
+ llvm_fn: *llvm.Value,
param_ty: Type,
param_index: u32,
fn_info: Type.Payload.Function.Data,
@@ -4213,11 +4213,11 @@ pub const DeclGen = struct {
fn addByRefParamAttrs(
dg: DeclGen,
- llvm_fn: *const llvm.Value,
+ llvm_fn: *llvm.Value,
llvm_arg_i: u32,
alignment: u32,
byval_attr: bool,
- param_llvm_ty: *const llvm.Type,
+ param_llvm_ty: *llvm.Type,
) void {
dg.addArgAttr(llvm_fn, llvm_arg_i, "nonnull");
dg.addArgAttr(llvm_fn, llvm_arg_i, "readonly");
@@ -4233,8 +4233,8 @@ pub const FuncGen = struct {
dg: *DeclGen,
air: Air,
liveness: Liveness,
- context: *const llvm.Context,
- builder: *const llvm.Builder,
+ context: *llvm.Context,
+ builder: *llvm.Builder,
di_scope: ?*llvm.DIScope,
di_file: ?*llvm.DIFile,
base_line: u32,
@@ -4250,30 +4250,30 @@ pub const FuncGen = struct {
/// This stores the LLVM values used in a function, such that they can be referred to
/// in other instructions. This table is cleared before every function is generated.
- func_inst_table: std.AutoHashMapUnmanaged(Air.Inst.Ref, *const llvm.Value),
+ func_inst_table: std.AutoHashMapUnmanaged(Air.Inst.Ref, *llvm.Value),
/// If the return type is sret, this is the result pointer. Otherwise null.
/// Note that this can disagree with isByRef for the return type in the case
/// of C ABI functions.
- ret_ptr: ?*const llvm.Value,
+ ret_ptr: ?*llvm.Value,
/// Any function that needs to perform Valgrind client requests needs an array alloca
/// instruction; however, at most one per function is needed.
- valgrind_client_request_array: ?*const llvm.Value = null,
+ valgrind_client_request_array: ?*llvm.Value = null,
/// These fields are used to refer to the LLVM value of the function parameters
/// in an Arg instruction.
/// This list may be shorter than the list according to the zig type system;
/// it omits 0-bit types. If the function uses sret as the first parameter,
/// this slice does not include it.
- args: []const *const llvm.Value,
+ args: []const *llvm.Value,
arg_index: c_uint,
- llvm_func: *const llvm.Value,
+ llvm_func: *llvm.Value,
- err_ret_trace: ?*const llvm.Value = null,
+ err_ret_trace: ?*llvm.Value = null,
/// This data structure is used to implement breaking to blocks.
blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, struct {
- parent_bb: *const llvm.BasicBlock,
+ parent_bb: *llvm.BasicBlock,
breaks: *BreakList,
}),
@@ -4281,8 +4281,8 @@ pub const FuncGen = struct {
const DbgState = struct { loc: *llvm.DILocation, scope: *llvm.DIScope, base_line: u32 };
const BreakList = std.MultiArrayList(struct {
- bb: *const llvm.BasicBlock,
- val: *const llvm.Value,
+ bb: *llvm.BasicBlock,
+ val: *llvm.Value,
});
fn deinit(self: *FuncGen) void {
@@ -4298,11 +4298,11 @@ pub const FuncGen = struct {
return self.dg.todo(format, args);
}
- fn llvmModule(self: *FuncGen) *const llvm.Module {
+ fn llvmModule(self: *FuncGen) *llvm.Module {
return self.dg.object.llvm_module;
}
- fn resolveInst(self: *FuncGen, inst: Air.Inst.Ref) !*const llvm.Value {
+ fn resolveInst(self: *FuncGen, inst: Air.Inst.Ref) !*llvm.Value {
const gop = try self.func_inst_table.getOrPut(self.dg.gpa, inst);
if (gop.found_existing) return gop.value_ptr.*;
@@ -4336,7 +4336,7 @@ pub const FuncGen = struct {
fn genBody(self: *FuncGen, body: []const Air.Inst.Index) Error!void {
const air_tags = self.air.instructions.items(.tag);
for (body) |inst, i| {
- const opt_value: ?*const llvm.Value = switch (air_tags[inst]) {
+ const opt_value: ?*llvm.Value = switch (air_tags[inst]) {
// zig fmt: off
.add => try self.airAdd(inst, false),
.addwrap => try self.airAddWrap(inst, false),
@@ -4563,7 +4563,7 @@ pub const FuncGen = struct {
}
}
- fn airCall(self: *FuncGen, inst: Air.Inst.Index, attr: llvm.CallAttr) !?*const llvm.Value {
+ fn airCall(self: *FuncGen, inst: Air.Inst.Index, attr: llvm.CallAttr) !?*llvm.Value {
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const extra = self.air.extraData(Air.Call, pl_op.payload);
const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
@@ -4579,7 +4579,7 @@ pub const FuncGen = struct {
const target = self.dg.module.getTarget();
const sret = firstParamSRet(fn_info, target);
- var llvm_args = std.ArrayList(*const llvm.Value).init(self.gpa);
+ var llvm_args = std.ArrayList(*llvm.Value).init(self.gpa);
defer llvm_args.deinit();
const ret_ptr = if (!sret) null else blk: {
@@ -4693,7 +4693,7 @@ pub const FuncGen = struct {
break :p p;
};
- var field_types_buf: [8]*const llvm.Type = undefined;
+ var field_types_buf: [8]*llvm.Type = undefined;
const field_types = field_types_buf[0..llvm_ints.len];
for (llvm_ints) |int_bits, i| {
field_types[i] = self.dg.context.intType(int_bits);
@@ -4722,7 +4722,7 @@ pub const FuncGen = struct {
break :p p;
};
- var field_types_buf: [8]*const llvm.Type = undefined;
+ var field_types_buf: [8]*llvm.Type = undefined;
const field_types = field_types_buf[0..llvm_floats.len];
for (llvm_floats) |float_bits, i| {
switch (float_bits) {
@@ -4840,7 +4840,7 @@ pub const FuncGen = struct {
}
}
- fn airRet(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airRet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const ret_ty = self.air.typeOf(un_op);
if (self.ret_ptr) |ret_ptr| {
@@ -4901,7 +4901,7 @@ pub const FuncGen = struct {
return null;
}
- fn airRetLoad(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airRetLoad(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const ptr_ty = self.air.typeOf(un_op);
const ret_ty = ptr_ty.childType();
@@ -4936,7 +4936,7 @@ pub const FuncGen = struct {
return null;
}
- fn airCmp(self: *FuncGen, inst: Air.Inst.Index, op: math.CompareOperator, want_fast_math: bool) !?*const llvm.Value {
+ fn airCmp(self: *FuncGen, inst: Air.Inst.Index, op: math.CompareOperator, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
@@ -4948,7 +4948,7 @@ pub const FuncGen = struct {
return self.cmp(lhs, rhs, operand_ty, op);
}
- fn airCmpVector(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*const llvm.Value {
+ fn airCmpVector(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
@@ -4963,23 +4963,23 @@ pub const FuncGen = struct {
return self.cmp(lhs, rhs, vec_ty, cmp_op);
}
- fn airCmpLtErrorsLen(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airCmpLtErrorsLen(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
const llvm_fn = try self.getCmpLtErrorsLenFunction();
- const args: [1]*const llvm.Value = .{operand};
+ const args: [1]*llvm.Value = .{operand};
return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, "");
}
fn cmp(
self: *FuncGen,
- lhs: *const llvm.Value,
- rhs: *const llvm.Value,
+ lhs: *llvm.Value,
+ rhs: *llvm.Value,
operand_ty: Type,
op: math.CompareOperator,
- ) Allocator.Error!*const llvm.Value {
+ ) Allocator.Error!*llvm.Value {
var int_buffer: Type.Payload.Bits = undefined;
var opt_buffer: Type.Payload.ElemType = undefined;
@@ -5029,7 +5029,7 @@ pub const FuncGen = struct {
const both_pl_block_end = self.builder.getInsertBlock();
self.builder.positionBuilderAtEnd(end_block);
- const incoming_blocks: [3]*const llvm.BasicBlock = .{
+ const incoming_blocks: [3]*llvm.BasicBlock = .{
both_null_block,
mixed_block,
both_pl_block_end,
@@ -5037,7 +5037,7 @@ pub const FuncGen = struct {
const llvm_i1 = self.context.intType(1);
const llvm_i1_0 = llvm_i1.constInt(0, .False);
const llvm_i1_1 = llvm_i1.constInt(1, .False);
- const incoming_values: [3]*const llvm.Value = .{
+ const incoming_values: [3]*llvm.Value = .{
switch (op) {
.eq => llvm_i1_1,
.neq => llvm_i1_0,
@@ -5075,7 +5075,7 @@ pub const FuncGen = struct {
return self.builder.buildICmp(operation, lhs, rhs, "");
}
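
For context, the three-way merge earlier in this hunk (both-null, mixed, both-payloads) resolves to a single i1 via a phi node. A minimal sketch, assuming the `buildPhi`/`addIncoming` bindings that the block-handling code in this file relies on:

    const phi_node = self.builder.buildPhi(llvm_i1, "");
    phi_node.addIncoming(&incoming_values, &incoming_blocks, incoming_blocks.len);
    return phi_node;
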
- fn airBlock(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airBlock(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Block, ty_pl.payload);
const body = self.air.extra[extra.end..][0..extra.data.body_len];
@@ -5127,7 +5127,7 @@ pub const FuncGen = struct {
return phi_node;
}
- fn airBr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airBr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const branch = self.air.instructions.items(.data)[inst].br;
const block = self.blocks.get(branch.block_inst).?;
@@ -5147,7 +5147,7 @@ pub const FuncGen = struct {
return null;
}
- fn airCondBr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airCondBr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const cond = try self.resolveInst(pl_op.operand);
const extra = self.air.extraData(Air.CondBr, pl_op.payload);
@@ -5168,7 +5168,7 @@ pub const FuncGen = struct {
return null;
}
- fn airTry(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airTry(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const err_union = try self.resolveInst(pl_op.operand);
const extra = self.air.extraData(Air.Try, pl_op.payload);
@@ -5178,7 +5178,7 @@ pub const FuncGen = struct {
return lowerTry(self, err_union, body, err_union_ty, false, result_ty);
}
- fn airTryPtr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airTryPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.TryPtr, ty_pl.payload);
const err_union_ptr = try self.resolveInst(extra.data.ptr);
@@ -5190,12 +5190,12 @@ pub const FuncGen = struct {
fn lowerTry(
fg: *FuncGen,
- err_union: *const llvm.Value,
+ err_union: *llvm.Value,
body: []const Air.Inst.Index,
err_union_ty: Type,
operand_is_ptr: bool,
result_ty: Type,
- ) !?*const llvm.Value {
+ ) !?*llvm.Value {
const payload_ty = err_union_ty.errorUnionPayload();
const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime();
const target = fg.dg.module.getTarget();
@@ -5256,7 +5256,7 @@ pub const FuncGen = struct {
return fg.builder.buildExtractValue(err_union, offset, "");
}
- fn airSwitchBr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airSwitchBr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const cond = try self.resolveInst(pl_op.operand);
const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload);
@@ -5305,7 +5305,7 @@ pub const FuncGen = struct {
return null;
}
- fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const loop = self.air.extraData(Air.Block, ty_pl.payload);
const body = self.air.extra[loop.end..][0..loop.data.body_len];
@@ -5327,7 +5327,7 @@ pub const FuncGen = struct {
return null;
}
- fn airArrayToSlice(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airArrayToSlice(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst))
return null;
@@ -5341,7 +5341,7 @@ pub const FuncGen = struct {
return self.builder.buildInsertValue(slice_llvm_ty.getUndef(), len, 1, "");
}
const operand = try self.resolveInst(ty_op.operand);
- const indices: [2]*const llvm.Value = .{
+ const indices: [2]*llvm.Value = .{
llvm_usize.constNull(), llvm_usize.constNull(),
};
const array_llvm_ty = try self.dg.lowerType(array_ty);
@@ -5350,7 +5350,7 @@ pub const FuncGen = struct {
return self.builder.buildInsertValue(partial, len, 1, "");
}
- fn airIntToFloat(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airIntToFloat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst))
return null;
@@ -5394,22 +5394,22 @@ pub const FuncGen = struct {
compiler_rt_dest_abbrev,
}) catch unreachable;
- var param_types = [1]*const llvm.Type{rt_int_ty};
+ var param_types = [1]*llvm.Type{rt_int_ty};
if (rt_int_bits == 128 and (target.os.tag == .windows and target.cpu.arch == .x86_64)) {
// On Windows x86-64, "ti" functions must use Vector(2, u64) instead of the standard
// i128 calling convention to adhere to the ABI that LLVM expects compiler-rt to have.
const v2i64 = self.context.intType(64).vectorType(2);
extended = self.builder.buildBitCast(extended, v2i64, "");
- param_types = [1]*const llvm.Type{v2i64};
+ param_types = [1]*llvm.Type{v2i64};
}
const libc_fn = self.getLibcFunction(fn_name, &param_types, dest_llvm_ty);
- const params = [1]*const llvm.Value{extended};
+ const params = [1]*llvm.Value{extended};
return self.builder.buildCall(libc_fn.globalGetValueType(), libc_fn, &params, params.len, .C, .Auto, "");
}
- fn airFloatToInt(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*const llvm.Value {
+ fn airFloatToInt(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst))
return null;
@@ -5457,9 +5457,9 @@ pub const FuncGen = struct {
}) catch unreachable;
const operand_llvm_ty = try self.dg.lowerType(operand_ty);
- const param_types = [1]*const llvm.Type{operand_llvm_ty};
+ const param_types = [1]*llvm.Type{operand_llvm_ty};
const libc_fn = self.getLibcFunction(fn_name, &param_types, libc_ret_ty);
- const params = [1]*const llvm.Value{operand};
+ const params = [1]*llvm.Value{operand};
var result = self.builder.buildCall(libc_fn.globalGetValueType(), libc_fn, &params, params.len, .C, .Auto, "");
@@ -5468,7 +5468,7 @@ pub const FuncGen = struct {
return result;
}
- fn airSliceField(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*const llvm.Value {
+ fn airSliceField(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
@@ -5476,7 +5476,7 @@ pub const FuncGen = struct {
return self.builder.buildExtractValue(operand, index, "");
}
- fn airPtrSliceFieldPtr(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*const llvm.Value {
+ fn airPtrSliceFieldPtr(self: *FuncGen, inst: Air.Inst.Index, index: c_uint) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
@@ -5487,7 +5487,7 @@ pub const FuncGen = struct {
return self.builder.buildStructGEP(slice_llvm_ty, slice_ptr, index, "");
}
- fn airSliceElemVal(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airSliceElemVal(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const slice_ty = self.air.typeOf(bin_op.lhs);
if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null;
@@ -5496,12 +5496,12 @@ pub const FuncGen = struct {
const index = try self.resolveInst(bin_op.rhs);
const llvm_elem_ty = try self.dg.lowerPtrElemTy(slice_ty.childType());
const base_ptr = self.builder.buildExtractValue(slice, 0, "");
- const indices: [1]*const llvm.Value = .{index};
+ const indices: [1]*llvm.Value = .{index};
const ptr = self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
return self.load(ptr, slice_ty);
}
- fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airSliceElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
@@ -5511,11 +5511,11 @@ pub const FuncGen = struct {
const index = try self.resolveInst(bin_op.rhs);
const llvm_elem_ty = try self.dg.lowerPtrElemTy(slice_ty.childType());
const base_ptr = self.builder.buildExtractValue(slice, 0, "");
- const indices: [1]*const llvm.Value = .{index};
+ const indices: [1]*llvm.Value = .{index};
return self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
}
- fn airArrayElemVal(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airArrayElemVal(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -5524,7 +5524,7 @@ pub const FuncGen = struct {
const rhs = try self.resolveInst(bin_op.rhs);
if (isByRef(array_ty)) {
const array_llvm_ty = try self.dg.lowerType(array_ty);
- const indices: [2]*const llvm.Value = .{ self.context.intType(32).constNull(), rhs };
+ const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), rhs };
const elem_ptr = self.builder.buildInBoundsGEP(array_llvm_ty, array_llvm_val, &indices, indices.len, "");
const elem_ty = array_ty.childType();
if (isByRef(elem_ty)) {
@@ -5539,7 +5539,7 @@ pub const FuncGen = struct {
return self.builder.buildExtractElement(array_llvm_val, rhs, "");
}
- fn airPtrElemVal(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airPtrElemVal(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ptr_ty = self.air.typeOf(bin_op.lhs);
if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) return null;
@@ -5550,16 +5550,16 @@ pub const FuncGen = struct {
// TODO: when we move fully to opaque pointers in LLVM 16 we can remove this branch
const ptr = if (ptr_ty.isSinglePointer()) ptr: {
// If this is a single-item pointer to an array, we need another index in the GEP.
- const indices: [2]*const llvm.Value = .{ self.context.intType(32).constNull(), rhs };
+ const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), rhs };
break :ptr self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
} else ptr: {
- const indices: [1]*const llvm.Value = .{rhs};
+ const indices: [1]*llvm.Value = .{rhs};
break :ptr self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
};
return self.load(ptr, ptr_ty);
}
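
The two GEP shapes above exist because a single-item pointer to an array points at the array object itself, while a many-item pointer indexes its element type directly. A condensed illustration, with `i` standing in for the resolved index value:

    const llvm_u32 = self.context.intType(32);
    // *[N]T uses { 0, i }: the leading 0 steps through the pointer to the
    // array object, then the second index selects the element.
    const arr_indices: [2]*llvm.Value = .{ llvm_u32.constNull(), i };
    // [*]T uses { i }: the element type is indexed directly.
    const many_indices: [1]*llvm.Value = .{i};
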
- fn airPtrElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airPtrElemPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
@@ -5573,15 +5573,15 @@ pub const FuncGen = struct {
const llvm_elem_ty = try self.dg.lowerPtrElemTy(elem_ty);
if (ptr_ty.isSinglePointer()) {
// If this is a single-item pointer to an array, we need another index in the GEP.
- const indices: [2]*const llvm.Value = .{ self.context.intType(32).constNull(), rhs };
+ const indices: [2]*llvm.Value = .{ self.context.intType(32).constNull(), rhs };
return self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
} else {
- const indices: [1]*const llvm.Value = .{rhs};
+ const indices: [1]*llvm.Value = .{rhs};
return self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
}
}
- fn airStructFieldPtr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airStructFieldPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst))
return null;
@@ -5596,7 +5596,7 @@ pub const FuncGen = struct {
self: *FuncGen,
inst: Air.Inst.Index,
field_index: u32,
- ) !?*const llvm.Value {
+ ) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
@@ -5605,7 +5605,7 @@ pub const FuncGen = struct {
return self.fieldPtr(inst, struct_ptr, struct_ptr_ty, field_index);
}
- fn airStructFieldVal(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airStructFieldVal(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
@@ -5683,7 +5683,7 @@ pub const FuncGen = struct {
}
}
- fn airFieldParentPtr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airFieldParentPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
@@ -5706,7 +5706,7 @@ pub const FuncGen = struct {
return self.builder.buildIntToPtr(base_ptr_int, res_ty, "");
}
- fn airNot(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airNot(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst))
return null;
@@ -5716,13 +5716,13 @@ pub const FuncGen = struct {
return self.builder.buildNot(operand, "");
}
- fn airUnreach(self: *FuncGen, inst: Air.Inst.Index) ?*const llvm.Value {
+ fn airUnreach(self: *FuncGen, inst: Air.Inst.Index) ?*llvm.Value {
_ = inst;
_ = self.builder.buildUnreachable();
return null;
}
- fn airDbgStmt(self: *FuncGen, inst: Air.Inst.Index) ?*const llvm.Value {
+ fn airDbgStmt(self: *FuncGen, inst: Air.Inst.Index) ?*llvm.Value {
const di_scope = self.di_scope orelse return null;
const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt;
self.prev_dbg_line = @intCast(c_uint, self.base_line + dbg_stmt.line + 1);
@@ -5735,7 +5735,7 @@ pub const FuncGen = struct {
return null;
}
- fn airDbgInlineBegin(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airDbgInlineBegin(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const dib = self.dg.object.di_builder orelse return null;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
@@ -5778,7 +5778,7 @@ pub const FuncGen = struct {
return null;
}
- fn airDbgInlineEnd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airDbgInlineEnd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.dg.object.di_builder == null) return null;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
@@ -5793,7 +5793,7 @@ pub const FuncGen = struct {
return null;
}
- fn airDbgBlockBegin(self: *FuncGen) !?*const llvm.Value {
+ fn airDbgBlockBegin(self: *FuncGen) !?*llvm.Value {
const dib = self.dg.object.di_builder orelse return null;
const old_scope = self.di_scope.?;
try self.dbg_block_stack.append(self.gpa, old_scope);
@@ -5802,13 +5802,13 @@ pub const FuncGen = struct {
return null;
}
- fn airDbgBlockEnd(self: *FuncGen) !?*const llvm.Value {
+ fn airDbgBlockEnd(self: *FuncGen) !?*llvm.Value {
if (self.dg.object.di_builder == null) return null;
self.di_scope = self.dbg_block_stack.pop();
return null;
}
- fn airDbgVarPtr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airDbgVarPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const dib = self.dg.object.di_builder orelse return null;
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const operand = try self.resolveInst(pl_op.operand);
@@ -5834,7 +5834,7 @@ pub const FuncGen = struct {
return null;
}
- fn airDbgVarVal(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airDbgVarVal(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const dib = self.dg.object.di_builder orelse return null;
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const operand = try self.resolveInst(pl_op.operand);
@@ -5868,7 +5868,7 @@ pub const FuncGen = struct {
return null;
}
- fn airAssembly(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airAssembly(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
// Eventually, the Zig compiler needs to be reworked to have inline assembly go
// through the same parsing code regardless of backend, and have LLVM-flavored
// inline assembly be *output* from that assembler.
@@ -5899,15 +5899,15 @@ pub const FuncGen = struct {
// The exact number of return / parameter values depends on which output values
// are passed by reference as indirect outputs (determined below).
const max_return_count = outputs.len;
- const llvm_ret_types = try arena.alloc(*const llvm.Type, max_return_count);
+ const llvm_ret_types = try arena.alloc(*llvm.Type, max_return_count);
const llvm_ret_indirect = try arena.alloc(bool, max_return_count);
const max_param_count = inputs.len + outputs.len;
- const llvm_param_types = try arena.alloc(*const llvm.Type, max_param_count);
- const llvm_param_values = try arena.alloc(*const llvm.Value, max_param_count);
+ const llvm_param_types = try arena.alloc(*llvm.Type, max_param_count);
+ const llvm_param_values = try arena.alloc(*llvm.Value, max_param_count);
// This stores whether we need to add an elementtype attribute and
// if so, the element type itself.
- const llvm_param_attrs = try arena.alloc(?*const llvm.Type, max_param_count);
+ const llvm_param_attrs = try arena.alloc(?*llvm.Type, max_param_count);
const target = self.dg.module.getTarget();
var llvm_ret_i: usize = 0;
@@ -5987,7 +5987,7 @@ pub const FuncGen = struct {
const arg_llvm_value = try self.resolveInst(input);
const arg_ty = self.air.typeOf(input);
- var llvm_elem_ty: ?*const llvm.Type = null;
+ var llvm_elem_ty: ?*llvm.Type = null;
if (isByRef(arg_ty)) {
llvm_elem_ty = try self.dg.lowerPtrElemTy(arg_ty);
if (constraintAllowsMemory(constraint)) {
@@ -6208,7 +6208,7 @@ pub const FuncGen = struct {
inst: Air.Inst.Index,
operand_is_ptr: bool,
pred: llvm.IntPredicate,
- ) !?*const llvm.Value {
+ ) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const un_op = self.air.instructions.items(.data)[inst].un_op;
@@ -6251,7 +6251,7 @@ pub const FuncGen = struct {
inst: Air.Inst.Index,
op: llvm.IntPredicate,
operand_is_ptr: bool,
- ) !?*const llvm.Value {
+ ) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const un_op = self.air.instructions.items(.data)[inst].un_op;
@@ -6293,7 +6293,7 @@ pub const FuncGen = struct {
return self.builder.buildICmp(op, loaded, zero, "");
}
- fn airOptionalPayloadPtr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airOptionalPayloadPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
@@ -6318,7 +6318,7 @@ pub const FuncGen = struct {
return self.builder.buildStructGEP(optional_llvm_ty, operand, 0, "");
}
- fn airOptionalPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airOptionalPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
comptime assert(optional_layout_version == 3);
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
@@ -6355,7 +6355,7 @@ pub const FuncGen = struct {
return self.builder.buildStructGEP(optional_llvm_ty, operand, 0, "");
}
- fn airOptionalPayload(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airOptionalPayload(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
@@ -6377,7 +6377,7 @@ pub const FuncGen = struct {
self: *FuncGen,
inst: Air.Inst.Index,
operand_is_ptr: bool,
- ) !?*const llvm.Value {
+ ) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
@@ -6415,7 +6415,7 @@ pub const FuncGen = struct {
self: *FuncGen,
inst: Air.Inst.Index,
operand_is_ptr: bool,
- ) !?*const llvm.Value {
+ ) !?*llvm.Value {
if (self.liveness.isUnused(inst))
return null;
@@ -6452,7 +6452,7 @@ pub const FuncGen = struct {
return self.builder.buildExtractValue(operand, offset, "");
}
- fn airErrUnionPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airErrUnionPayloadPtrSet(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const err_union_ty = self.air.typeOf(ty_op.operand).childType();
@@ -6480,18 +6480,18 @@ pub const FuncGen = struct {
return self.builder.buildStructGEP(err_union_llvm_ty, operand, payload_offset, "");
}
- fn airErrReturnTrace(self: *FuncGen, _: Air.Inst.Index) !?*const llvm.Value {
+ fn airErrReturnTrace(self: *FuncGen, _: Air.Inst.Index) !?*llvm.Value {
return self.err_ret_trace.?;
}
- fn airSetErrReturnTrace(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airSetErrReturnTrace(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
self.err_ret_trace = operand;
return null;
}
- fn airWrapOptional(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airWrapOptional(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
@@ -6524,7 +6524,7 @@ pub const FuncGen = struct {
return self.builder.buildInsertValue(partial, non_null_bit, 1, "");
}
- fn airWrapErrUnionPayload(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airWrapErrUnionPayload(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
@@ -6560,7 +6560,7 @@ pub const FuncGen = struct {
return self.builder.buildInsertValue(partial, operand, payload_offset, "");
}
- fn airWrapErrUnionErr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airWrapErrUnionErr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
@@ -6598,31 +6598,31 @@ pub const FuncGen = struct {
return partial;
}
- fn airWasmMemorySize(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airWasmMemorySize(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const index = pl_op.payload;
const llvm_u32 = self.context.intType(32);
const llvm_fn = self.getIntrinsic("llvm.wasm.memory.size", &.{llvm_u32});
- const args: [1]*const llvm.Value = .{llvm_u32.constInt(index, .False)};
+ const args: [1]*llvm.Value = .{llvm_u32.constInt(index, .False)};
return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, "");
}
- fn airWasmMemoryGrow(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airWasmMemoryGrow(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const index = pl_op.payload;
const operand = try self.resolveInst(pl_op.operand);
const llvm_u32 = self.context.intType(32);
const llvm_fn = self.getIntrinsic("llvm.wasm.memory.grow", &.{llvm_u32});
- const args: [2]*const llvm.Value = .{
+ const args: [2]*llvm.Value = .{
llvm_u32.constInt(index, .False),
operand,
};
return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .Fast, .Auto, "");
}
- fn airMin(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airMin(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -6635,7 +6635,7 @@ pub const FuncGen = struct {
return self.builder.buildUMin(lhs, rhs, "");
}
- fn airMax(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airMax(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -6648,7 +6648,7 @@ pub const FuncGen = struct {
return self.builder.buildUMax(lhs, rhs, "");
}
- fn airSlice(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airSlice(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
@@ -6670,7 +6670,7 @@ pub const FuncGen = struct {
return self.builder.buildInsertValue(partial, len, 1, "");
}
- fn airAdd(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*const llvm.Value {
+ fn airAdd(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
@@ -6685,7 +6685,7 @@ pub const FuncGen = struct {
return self.builder.buildNUWAdd(lhs, rhs, "");
}
- fn airAddWrap(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*const llvm.Value {
+ fn airAddWrap(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
@@ -6696,7 +6696,7 @@ pub const FuncGen = struct {
return self.builder.buildAdd(lhs, rhs, "");
}
- fn airAddSat(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airAddSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -6711,7 +6711,7 @@ pub const FuncGen = struct {
return self.builder.buildUAddSat(lhs, rhs, "");
}
- fn airSub(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*const llvm.Value {
+ fn airSub(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
@@ -6726,7 +6726,7 @@ pub const FuncGen = struct {
return self.builder.buildNUWSub(lhs, rhs, "");
}
- fn airSubWrap(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*const llvm.Value {
+ fn airSubWrap(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
@@ -6737,7 +6737,7 @@ pub const FuncGen = struct {
return self.builder.buildSub(lhs, rhs, "");
}
- fn airSubSat(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airSubSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -6751,7 +6751,7 @@ pub const FuncGen = struct {
return self.builder.buildUSubSat(lhs, rhs, "");
}
- fn airMul(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*const llvm.Value {
+ fn airMul(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
@@ -6766,7 +6766,7 @@ pub const FuncGen = struct {
return self.builder.buildNUWMul(lhs, rhs, "");
}
- fn airMulWrap(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*const llvm.Value {
+ fn airMulWrap(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
@@ -6777,7 +6777,7 @@ pub const FuncGen = struct {
return self.builder.buildMul(lhs, rhs, "");
}
- fn airMulSat(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airMulSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -6791,7 +6791,7 @@ pub const FuncGen = struct {
return self.builder.buildUMulFixSat(lhs, rhs, "");
}
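airAddSat and airSubSat lower to the llvm.sadd.sat family via buildSAddSat/buildUAddSat, while saturating multiply goes through LLVM's fixed-point mul-fix-sat intrinsics with a zero scale (buildUMulFixSat above). The surface operators that produce these AIR instructions, as a runnable sketch:

    const std = @import("std");

    test "saturating operators" {
        var a: u8 = 200;
        var b: u8 = 100;
        try std.testing.expectEqual(@as(u8, 255), a +| b); // clamps instead of wrapping
        try std.testing.expectEqual(@as(u8, 0), b -| 200);
        try std.testing.expectEqual(@as(u8, 255), a *| b);
    }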
- fn airDivFloat(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*const llvm.Value {
+ fn airDivFloat(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
@@ -6803,7 +6803,7 @@ pub const FuncGen = struct {
return self.buildFloatOp(.div, inst_ty, 2, .{ lhs, rhs });
}
- fn airDivTrunc(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*const llvm.Value {
+ fn airDivTrunc(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
@@ -6821,7 +6821,7 @@ pub const FuncGen = struct {
return self.builder.buildUDiv(lhs, rhs, "");
}
- fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*const llvm.Value {
+ fn airDivFloor(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
@@ -6854,7 +6854,7 @@ pub const FuncGen = struct {
return self.builder.buildUDiv(lhs, rhs, "");
}
- fn airDivExact(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*const llvm.Value {
+ fn airDivExact(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
@@ -6869,7 +6869,7 @@ pub const FuncGen = struct {
return self.builder.buildExactUDiv(lhs, rhs, "");
}
- fn airRem(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*const llvm.Value {
+ fn airRem(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
@@ -6884,7 +6884,7 @@ pub const FuncGen = struct {
return self.builder.buildURem(lhs, rhs, "");
}
- fn airMod(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*const llvm.Value {
+ fn airMod(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
@@ -6914,7 +6914,7 @@ pub const FuncGen = struct {
return self.builder.buildURem(lhs, rhs, "");
}
- fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airPtrAdd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
@@ -6925,17 +6925,17 @@ pub const FuncGen = struct {
const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType());
if (ptr_ty.ptrSize() == .One) {
// It's a pointer to an array, so according to LLVM we need an extra GEP index.
- const indices: [2]*const llvm.Value = .{
+ const indices: [2]*llvm.Value = .{
self.context.intType(32).constNull(), offset,
};
return self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
} else {
- const indices: [1]*const llvm.Value = .{offset};
+ const indices: [1]*llvm.Value = .{offset};
return self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
}
}
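The leading zero index exists because a single-item pointer to an array lowers to a pointer to an LLVM array type: the first index steps across the pointer itself, the second indexes into the array. An illustrative sketch (elemPtr is a hypothetical name; IR spelled in the typed-pointer style of this commit's LLVM version):

    fn elemPtr(p: *[4]u32, i: usize) *u32 {
        // airPtrAdd emits: getelementptr inbounds [4 x i32], [4 x i32]* %p, i32 0, i64 %i
        // For a many-item pointer ([*]u32), the single-index form in the else branch is used.
        return &p[i];
    }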
- fn airPtrSub(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airPtrSub(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
@@ -6947,12 +6947,12 @@ pub const FuncGen = struct {
const llvm_elem_ty = try self.dg.lowerPtrElemTy(ptr_ty.childType());
if (ptr_ty.ptrSize() == .One) {
// It's a pointer to an array, so according to LLVM we need an extra GEP index.
- const indices: [2]*const llvm.Value = .{
+ const indices: [2]*llvm.Value = .{
self.context.intType(32).constNull(), negative_offset,
};
return self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
} else {
- const indices: [1]*const llvm.Value = .{negative_offset};
+ const indices: [1]*llvm.Value = .{negative_offset};
return self.builder.buildInBoundsGEP(llvm_elem_ty, base_ptr, &indices, indices.len, "");
}
}
@@ -6962,7 +6962,7 @@ pub const FuncGen = struct {
inst: Air.Inst.Index,
signed_intrinsic: []const u8,
unsigned_intrinsic: []const u8,
- ) !?*const llvm.Value {
+ ) !?*llvm.Value {
if (self.liveness.isUnused(inst))
return null;
@@ -6984,7 +6984,7 @@ pub const FuncGen = struct {
const tg = self.dg.module.getTarget();
const llvm_fn = self.getIntrinsic(intrinsic_name, &.{llvm_lhs_ty});
- const result_struct = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &[_]*const llvm.Value{ lhs, rhs }, 2, .Fast, .Auto, "");
+ const result_struct = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &[_]*llvm.Value{ lhs, rhs }, 2, .Fast, .Auto, "");
const result = self.builder.buildExtractValue(result_struct, 0, "");
const overflow_bit = self.builder.buildExtractValue(result_struct, 1, "");
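The *.with.overflow intrinsics return a { result, i1 } pair, hence the two buildExtractValue calls. A hedged sketch of the Zig-side shape (addChecked is a hypothetical name, using the 0.10-era builtin signature):

    fn addChecked(a: i32, b: i32) !i32 {
        var sum: i32 = undefined;
        // Lowers to roughly:
        //   %pair = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
        //   %sum  = extractvalue { i32, i1 } %pair, 0
        //   %ov   = extractvalue { i32, i1 } %pair, 1
        if (@addWithOverflow(i32, a, b, &sum)) return error.Overflow;
        return sum;
    }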
@@ -7018,11 +7018,11 @@ pub const FuncGen = struct {
fn buildElementwiseCall(
self: *FuncGen,
- llvm_fn: *const llvm.Value,
- args_vectors: []const *const llvm.Value,
- result_vector: *const llvm.Value,
+ llvm_fn: *llvm.Value,
+ args_vectors: []const *llvm.Value,
+ result_vector: *llvm.Value,
vector_len: usize,
- ) !*const llvm.Value {
+ ) !*llvm.Value {
const args_len = @intCast(c_uint, args_vectors.len);
const llvm_i32 = self.context.intType(32);
assert(args_len <= 3);
@@ -7032,7 +7032,7 @@ pub const FuncGen = struct {
while (i < vector_len) : (i += 1) {
const index_i32 = llvm_i32.constInt(i, .False);
- var args: [3]*const llvm.Value = undefined;
+ var args: [3]*llvm.Value = undefined;
for (args_vectors) |arg_vector, k| {
args[k] = self.builder.buildExtractElement(arg_vector, index_i32, "");
}
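buildElementwiseCall scalarizes a vector operation: for each lane it extracts one scalar from every argument vector, calls the scalar (libc) routine, and inserts the result back into the result vector. The same shape in plain Zig, as a sketch (elementwise is a hypothetical name standing in for the emitted extract/call/insert loop):

    fn elementwise(comptime f: fn (f32) f32, v: [4]f32) [4]f32 {
        var result: [4]f32 = undefined;
        for (v) |x, i| {
            result[i] = f(x); // extractelement, scalar call, insertelement
        }
        return result;
    }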
@@ -7045,9 +7045,9 @@ pub const FuncGen = struct {
fn getLibcFunction(
self: *FuncGen,
fn_name: [:0]const u8,
- param_types: []const *const llvm.Type,
- return_type: *const llvm.Type,
- ) *const llvm.Value {
+ param_types: []const *llvm.Type,
+ return_type: *llvm.Type,
+ ) *llvm.Value {
return self.dg.object.llvm_module.getNamedFunction(fn_name.ptr) orelse b: {
const alias = self.dg.object.llvm_module.getNamedGlobalAlias(fn_name.ptr, fn_name.len);
break :b if (alias) |a| a.getAliasee() else null;
@@ -7105,8 +7105,8 @@ pub const FuncGen = struct {
self: *FuncGen,
pred: math.CompareOperator,
ty: Type,
- params: [2]*const llvm.Value,
- ) !*const llvm.Value {
+ params: [2]*llvm.Value,
+ ) !*llvm.Value {
const target = self.dg.module.getTarget();
const scalar_ty = ty.scalarType();
const scalar_llvm_ty = try self.dg.lowerType(scalar_ty);
@@ -7138,7 +7138,7 @@ pub const FuncGen = struct {
fn_base_name, compiler_rt_float_abbrev,
}) catch unreachable;
- const param_types = [2]*const llvm.Type{ scalar_llvm_ty, scalar_llvm_ty };
+ const param_types = [2]*llvm.Type{ scalar_llvm_ty, scalar_llvm_ty };
const llvm_i32 = self.context.intType(32);
const libc_fn = self.getLibcFunction(fn_name, param_types[0..], llvm_i32);
@@ -7206,8 +7206,8 @@ pub const FuncGen = struct {
comptime op: FloatOp,
ty: Type,
comptime params_len: usize,
- params: [params_len]*const llvm.Value,
- ) !*const llvm.Value {
+ params: [params_len]*llvm.Value,
+ ) !*llvm.Value {
const target = self.dg.module.getTarget();
const scalar_ty = ty.scalarType();
const llvm_ty = try self.dg.lowerType(ty);
@@ -7278,10 +7278,10 @@ pub const FuncGen = struct {
};
};
- const llvm_fn: *const llvm.Value = switch (strat) {
+ const llvm_fn: *llvm.Value = switch (strat) {
.intrinsic => |fn_name| self.getIntrinsic(fn_name, &.{llvm_ty}),
.libc => |fn_name| b: {
- const param_types = [3]*const llvm.Type{ scalar_llvm_ty, scalar_llvm_ty, scalar_llvm_ty };
+ const param_types = [3]*llvm.Type{ scalar_llvm_ty, scalar_llvm_ty, scalar_llvm_ty };
const libc_fn = self.getLibcFunction(fn_name, param_types[0..params.len], scalar_llvm_ty);
if (ty.zigTypeTag() == .Vector) {
const result = llvm_ty.getUndef();
@@ -7294,7 +7294,7 @@ pub const FuncGen = struct {
return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params_len, .C, .Auto, "");
}
- fn airMulAdd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airMulAdd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
@@ -7308,7 +7308,7 @@ pub const FuncGen = struct {
return self.buildFloatOp(.fma, ty, 3, .{ mulend1, mulend2, addend });
}
- fn airShlWithOverflow(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airShlWithOverflow(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst))
return null;
@@ -7368,7 +7368,7 @@ pub const FuncGen = struct {
return self.builder.buildInsertValue(partial, overflow_bit, overflow_index, "");
}
- fn airAnd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airAnd(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst))
return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -7377,7 +7377,7 @@ pub const FuncGen = struct {
return self.builder.buildAnd(lhs, rhs, "");
}
- fn airOr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airOr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst))
return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -7386,7 +7386,7 @@ pub const FuncGen = struct {
return self.builder.buildOr(lhs, rhs, "");
}
- fn airXor(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airXor(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst))
return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -7395,7 +7395,7 @@ pub const FuncGen = struct {
return self.builder.buildXor(lhs, rhs, "");
}
- fn airShlExact(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airShlExact(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -7418,7 +7418,7 @@ pub const FuncGen = struct {
return self.builder.buildNUWShl(lhs, casted_rhs, "");
}
- fn airShl(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airShl(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -7440,7 +7440,7 @@ pub const FuncGen = struct {
return self.builder.buildShl(lhs, casted_rhs, "");
}
- fn airShlSat(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airShlSat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -7485,7 +7485,7 @@ pub const FuncGen = struct {
}
}
- fn airShr(self: *FuncGen, inst: Air.Inst.Index, is_exact: bool) !?*const llvm.Value {
+ fn airShr(self: *FuncGen, inst: Air.Inst.Index, is_exact: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@@ -7521,7 +7521,7 @@ pub const FuncGen = struct {
}
}
- fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst))
return null;
@@ -7546,7 +7546,7 @@ pub const FuncGen = struct {
}
}
- fn airTrunc(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airTrunc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
@@ -7555,7 +7555,7 @@ pub const FuncGen = struct {
return self.builder.buildTrunc(operand, dest_llvm_ty, "");
}
- fn airFptrunc(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airFptrunc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst))
return null;
@@ -7573,7 +7573,7 @@ pub const FuncGen = struct {
return self.builder.buildFPTrunc(operand, dest_llvm_ty, "");
}
- fn airFpext(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airFpext(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst))
return null;
@@ -7591,7 +7591,7 @@ pub const FuncGen = struct {
return self.builder.buildFPExt(operand, dest_llvm_ty, "");
}
- fn airPtrToInt(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airPtrToInt(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst))
return null;
@@ -7601,7 +7601,7 @@ pub const FuncGen = struct {
return self.builder.buildPtrToInt(operand, dest_llvm_ty, "");
}
- fn airBitCast(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airBitCast(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
@@ -7645,7 +7645,7 @@ pub const FuncGen = struct {
while (i < vector_len) : (i += 1) {
const index_usize = llvm_usize.constInt(i, .False);
const index_u32 = llvm_u32.constInt(i, .False);
- const indexes: [2]*const llvm.Value = .{ zero, index_usize };
+ const indexes: [2]*llvm.Value = .{ zero, index_usize };
const elem_ptr = self.builder.buildInBoundsGEP(llvm_dest_ty, array_ptr, &indexes, indexes.len, "");
const elem = self.builder.buildExtractElement(operand, index_u32, "");
_ = self.builder.buildStore(elem, elem_ptr);
@@ -7682,7 +7682,7 @@ pub const FuncGen = struct {
while (i < vector_len) : (i += 1) {
const index_usize = llvm_usize.constInt(i, .False);
const index_u32 = llvm_u32.constInt(i, .False);
- const indexes: [2]*const llvm.Value = .{ zero, index_usize };
+ const indexes: [2]*llvm.Value = .{ zero, index_usize };
const elem_ptr = self.builder.buildInBoundsGEP(array_llvm_ty, operand, &indexes, indexes.len, "");
const elem = self.builder.buildLoad(elem_llvm_ty, elem_ptr, "");
vector = self.builder.buildInsertElement(vector, elem, index_u32, "");
@@ -7731,7 +7731,7 @@ pub const FuncGen = struct {
return self.builder.buildBitCast(operand, llvm_dest_ty, "");
}
- fn airBoolToInt(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airBoolToInt(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst))
return null;
@@ -7740,7 +7740,7 @@ pub const FuncGen = struct {
return operand;
}
- fn airArg(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airArg(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const arg_val = self.args[self.arg_index];
self.arg_index += 1;
@@ -7787,7 +7787,7 @@ pub const FuncGen = struct {
} else unreachable;
}
- fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ptr_ty = self.air.typeOfIndex(inst);
const pointee_type = ptr_ty.childType();
@@ -7801,7 +7801,7 @@ pub const FuncGen = struct {
return alloca_inst;
}
- fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ptr_ty = self.air.typeOfIndex(inst);
const ret_ty = ptr_ty.childType();
@@ -7816,11 +7816,11 @@ pub const FuncGen = struct {
/// Use this instead of builder.buildAlloca, because this function makes sure to
/// put the alloca instruction at the top of the function!
- fn buildAlloca(self: *FuncGen, llvm_ty: *const llvm.Type) *const llvm.Value {
+ fn buildAlloca(self: *FuncGen, llvm_ty: *llvm.Type) *llvm.Value {
return buildAllocaInner(self.builder, self.llvm_func, self.di_scope != null, llvm_ty);
}
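Keeping every alloca in the entry block is what lets LLVM treat them as static stack slots (and lets mem2reg/SROA promote them). buildAllocaInner, at the bottom of this file, does the bookkeeping: save the insert point and debug location, move the builder, then restore. A condensed, non-runnable sketch of that pattern, assuming builder, entry_block, and llvm_ty are in scope and using only binding calls that appear in this diff:

    const prev_block = builder.getInsertBlock();
    const prev_debug_location = builder.getCurrentDebugLocation2();
    builder.positionBuilderAtEnd(entry_block); // hop to the function entry
    const alloca = builder.buildAlloca(llvm_ty, "");
    builder.positionBuilderAtEnd(prev_block); // restore where codegen left off
    // (the saved debug location is restored the same way in the real code)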
- fn airStore(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airStore(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const dest_ptr = try self.resolveInst(bin_op.lhs);
const ptr_ty = self.air.typeOf(bin_op.lhs);
@@ -7873,7 +7873,7 @@ pub const FuncGen = struct {
inst: Air.Inst.Index,
body: []const Air.Inst.Index,
body_i: usize,
- ) !?*const llvm.Value {
+ ) !?*llvm.Value {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const ptr_ty = self.air.typeOf(ty_op.operand);
elide: {
@@ -7899,14 +7899,14 @@ pub const FuncGen = struct {
return self.load(ptr, ptr_ty);
}
- fn airBreakpoint(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airBreakpoint(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
_ = inst;
const llvm_fn = self.getIntrinsic("llvm.debugtrap", &.{});
_ = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, undefined, 0, .C, .Auto, "");
return null;
}
- fn airRetAddr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airRetAddr(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const llvm_usize = try self.dg.lowerType(Type.usize);
@@ -7918,30 +7918,30 @@ pub const FuncGen = struct {
const llvm_i32 = self.context.intType(32);
const llvm_fn = self.getIntrinsic("llvm.returnaddress", &.{});
- const params = [_]*const llvm.Value{llvm_i32.constNull()};
+ const params = [_]*llvm.Value{llvm_i32.constNull()};
const ptr_val = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params.len, .Fast, .Auto, "");
return self.builder.buildPtrToInt(ptr_val, llvm_usize, "");
}
- fn airFrameAddress(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airFrameAddress(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const llvm_i32 = self.context.intType(32);
const llvm_fn_name = "llvm.frameaddress.p0";
const llvm_fn = self.dg.object.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: {
const llvm_p0i8 = self.context.intType(8).pointerType(0);
- const param_types = [_]*const llvm.Type{llvm_i32};
+ const param_types = [_]*llvm.Type{llvm_i32};
const fn_type = llvm.functionType(llvm_p0i8, &param_types, param_types.len, .False);
break :blk self.dg.object.llvm_module.addFunction(llvm_fn_name, fn_type);
};
- const params = [_]*const llvm.Value{llvm_i32.constNull()};
+ const params = [_]*llvm.Value{llvm_i32.constNull()};
const ptr_val = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params.len, .Fast, .Auto, "");
const llvm_usize = try self.dg.lowerType(Type.usize);
return self.builder.buildPtrToInt(ptr_val, llvm_usize, "");
}
- fn airFence(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airFence(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const atomic_order = self.air.instructions.items(.data)[inst].fence;
const llvm_memory_order = toLlvmAtomicOrdering(atomic_order);
const single_threaded = llvm.Bool.fromBool(self.single_threaded);
@@ -7949,7 +7949,7 @@ pub const FuncGen = struct {
return null;
}
- fn airCmpxchg(self: *FuncGen, inst: Air.Inst.Index, is_weak: bool) !?*const llvm.Value {
+ fn airCmpxchg(self: *FuncGen, inst: Air.Inst.Index, is_weak: bool) !?*llvm.Value {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
var ptr = try self.resolveInst(extra.ptr);
@@ -7996,7 +7996,7 @@ pub const FuncGen = struct {
return buildOptional(self, optional_ty, payload, non_null_bit);
}
- fn airAtomicRmw(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airAtomicRmw(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const extra = self.air.extraData(Air.AtomicRmw, pl_op.payload).data;
const ptr = try self.resolveInst(pl_op.operand);
@@ -8053,7 +8053,7 @@ pub const FuncGen = struct {
return self.builder.buildIntToPtr(uncasted_result, operand_llvm_ty, "");
}
- fn airAtomicLoad(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airAtomicLoad(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const atomic_load = self.air.instructions.items(.data)[inst].atomic_load;
const ptr = try self.resolveInst(atomic_load.ptr);
const ptr_ty = self.air.typeOf(atomic_load.ptr);
@@ -8090,7 +8090,7 @@ pub const FuncGen = struct {
self: *FuncGen,
inst: Air.Inst.Index,
ordering: llvm.AtomicOrdering,
- ) !?*const llvm.Value {
+ ) !?*llvm.Value {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ptr_ty = self.air.typeOf(bin_op.lhs);
const operand_ty = ptr_ty.childType();
@@ -8112,7 +8112,7 @@ pub const FuncGen = struct {
return null;
}
- fn airMemset(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airMemset(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
const dest_ptr = try self.resolveInst(pl_op.operand);
@@ -8134,7 +8134,7 @@ pub const FuncGen = struct {
return null;
}
- fn airMemcpy(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airMemcpy(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
const dest_ptr = try self.resolveInst(pl_op.operand);
@@ -8158,7 +8158,7 @@ pub const FuncGen = struct {
return null;
}
- fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airSetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const un_ty = self.air.typeOf(bin_op.lhs).childType();
const target = self.dg.module.getTarget();
@@ -8179,7 +8179,7 @@ pub const FuncGen = struct {
return null;
}
- fn airGetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airGetUnionTag(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
@@ -8205,7 +8205,7 @@ pub const FuncGen = struct {
}
}
- fn airUnaryOp(self: *FuncGen, inst: Air.Inst.Index, comptime op: FloatOp) !?*const llvm.Value {
+ fn airUnaryOp(self: *FuncGen, inst: Air.Inst.Index, comptime op: FloatOp) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const un_op = self.air.instructions.items(.data)[inst].un_op;
@@ -8215,7 +8215,7 @@ pub const FuncGen = struct {
return self.buildFloatOp(op, operand_ty, 1, .{operand});
}
- fn airNeg(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*const llvm.Value {
+ fn airNeg(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
@@ -8226,7 +8226,7 @@ pub const FuncGen = struct {
return self.buildFloatOp(.neg, operand_ty, 1, .{operand});
}
- fn airClzCtz(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*const llvm.Value {
+ fn airClzCtz(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
@@ -8237,7 +8237,7 @@ pub const FuncGen = struct {
const operand_llvm_ty = try self.dg.lowerType(operand_ty);
const fn_val = self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty});
- const params = [_]*const llvm.Value{ operand, llvm_i1.constNull() };
+ const params = [_]*llvm.Value{ operand, llvm_i1.constNull() };
const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, &params, params.len, .C, .Auto, "");
const result_ty = self.air.typeOfIndex(inst);
const result_llvm_ty = try self.dg.lowerType(result_ty);
@@ -8254,14 +8254,14 @@ pub const FuncGen = struct {
}
}
- fn airBitOp(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*const llvm.Value {
+ fn airBitOp(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const operand_ty = self.air.typeOf(ty_op.operand);
const operand = try self.resolveInst(ty_op.operand);
- const params = [_]*const llvm.Value{operand};
+ const params = [_]*llvm.Value{operand};
const operand_llvm_ty = try self.dg.lowerType(operand_ty);
const fn_val = self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty});
@@ -8281,7 +8281,7 @@ pub const FuncGen = struct {
}
}
- fn airByteSwap(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*const llvm.Value {
+ fn airByteSwap(self: *FuncGen, inst: Air.Inst.Index, llvm_fn_name: []const u8) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const target = self.dg.module.getTarget();
@@ -8301,7 +8301,7 @@ pub const FuncGen = struct {
const vec_len = operand_ty.vectorLen();
operand_llvm_ty = scalar_llvm_ty.vectorType(vec_len);
- const shifts = try self.gpa.alloc(*const llvm.Value, vec_len);
+ const shifts = try self.gpa.alloc(*llvm.Value, vec_len);
defer self.gpa.free(shifts);
for (shifts) |*elem| {
@@ -8319,7 +8319,7 @@ pub const FuncGen = struct {
bits = bits + 8;
}
- const params = [_]*const llvm.Value{operand};
+ const params = [_]*llvm.Value{operand};
const fn_val = self.getIntrinsic(llvm_fn_name, &.{operand_llvm_ty});
const wrong_size_result = self.builder.buildCall(fn_val.globalGetValueType(), fn_val, &params, params.len, .C, .Auto, "");
@@ -8336,7 +8336,7 @@ pub const FuncGen = struct {
}
}
- fn airErrorSetHasValue(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airErrorSetHasValue(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
@@ -8372,10 +8372,10 @@ pub const FuncGen = struct {
self.builder.positionBuilderAtEnd(end_block);
const llvm_type = self.dg.context.intType(1);
- const incoming_values: [2]*const llvm.Value = .{
+ const incoming_values: [2]*llvm.Value = .{
llvm_type.constInt(1, .False), llvm_type.constInt(0, .False),
};
- const incoming_blocks: [2]*const llvm.BasicBlock = .{
+ const incoming_blocks: [2]*llvm.BasicBlock = .{
valid_block, invalid_block,
};
const phi_node = self.builder.buildPhi(llvm_type, "");
@@ -8383,7 +8383,7 @@ pub const FuncGen = struct {
return phi_node;
}
- fn airIsNamedEnumValue(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airIsNamedEnumValue(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const un_op = self.air.instructions.items(.data)[inst].un_op;
@@ -8391,11 +8391,11 @@ pub const FuncGen = struct {
const enum_ty = self.air.typeOf(un_op);
const llvm_fn = try self.getIsNamedEnumValueFunction(enum_ty);
- const params = [_]*const llvm.Value{operand};
+ const params = [_]*llvm.Value{operand};
return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params.len, .Fast, .Auto, "");
}
- fn getIsNamedEnumValueFunction(self: *FuncGen, enum_ty: Type) !*const llvm.Value {
+ fn getIsNamedEnumValueFunction(self: *FuncGen, enum_ty: Type) !*llvm.Value {
const enum_decl = enum_ty.getOwnerDecl();
// TODO: detect when the type changes and re-emit this function.
@@ -8414,7 +8414,7 @@ pub const FuncGen = struct {
var int_tag_type_buffer: Type.Payload.Bits = undefined;
const int_tag_ty = enum_ty.intTagType(&int_tag_type_buffer);
- const param_types = [_]*const llvm.Type{try self.dg.lowerType(int_tag_ty)};
+ const param_types = [_]*llvm.Type{try self.dg.lowerType(int_tag_ty)};
const llvm_ret_ty = try self.dg.lowerType(Type.bool);
const fn_type = llvm.functionType(llvm_ret_ty, &param_types, param_types.len, .False);
@@ -8464,7 +8464,7 @@ pub const FuncGen = struct {
return fn_val;
}
- fn airTagName(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airTagName(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const un_op = self.air.instructions.items(.data)[inst].un_op;
@@ -8472,11 +8472,11 @@ pub const FuncGen = struct {
const enum_ty = self.air.typeOf(un_op);
const llvm_fn = try self.getEnumTagNameFunction(enum_ty);
- const params = [_]*const llvm.Value{operand};
+ const params = [_]*llvm.Value{operand};
return self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &params, params.len, .Fast, .Auto, "");
}
- fn getEnumTagNameFunction(self: *FuncGen, enum_ty: Type) !*const llvm.Value {
+ fn getEnumTagNameFunction(self: *FuncGen, enum_ty: Type) !*llvm.Value {
const enum_decl = enum_ty.getOwnerDecl();
// TODO: detect when the type changes and re-emit this function.
@@ -8501,7 +8501,7 @@ pub const FuncGen = struct {
var int_tag_type_buffer: Type.Payload.Bits = undefined;
const int_tag_ty = enum_ty.intTagType(&int_tag_type_buffer);
- const param_types = [_]*const llvm.Type{try self.dg.lowerType(int_tag_ty)};
+ const param_types = [_]*llvm.Type{try self.dg.lowerType(int_tag_ty)};
const fn_type = llvm.functionType(llvm_ret_ty, &param_types, param_types.len, .False);
const fn_val = self.dg.object.llvm_module.addFunction(llvm_fn_name, fn_type);
@@ -8528,7 +8528,7 @@ pub const FuncGen = struct {
const tag_int_value = fn_val.getParam(0);
const switch_instr = self.builder.buildSwitch(tag_int_value, bad_value_block, @intCast(c_uint, fields.count()));
- const array_ptr_indices = [_]*const llvm.Value{
+ const array_ptr_indices = [_]*llvm.Value{
usize_llvm_ty.constNull(), usize_llvm_ty.constNull(),
};
@@ -8542,7 +8542,7 @@ pub const FuncGen = struct {
str_global.setUnnamedAddr(.True);
str_global.setAlignment(1);
- const slice_fields = [_]*const llvm.Value{
+ const slice_fields = [_]*llvm.Value{
str_init_llvm_ty.constInBoundsGEP(str_global, &array_ptr_indices, array_ptr_indices.len),
usize_llvm_ty.constInt(name.len, .False),
};
@@ -8578,7 +8578,7 @@ pub const FuncGen = struct {
return fn_val;
}
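getEnumTagNameFunction emits one helper per enum type: a switch over the integer tag whose arms return []const u8 slices into per-field name globals, plus a bad-value block for safety. This backs @tagName on runtime values; a minimal example:

    const Color = enum { red, green, blue };

    fn name(c: Color) []const u8 {
        return @tagName(c); // runtime value -> call into the generated helper
    }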
- fn getCmpLtErrorsLenFunction(self: *FuncGen) !*const llvm.Value {
+ fn getCmpLtErrorsLenFunction(self: *FuncGen) !*llvm.Value {
if (self.dg.object.llvm_module.getNamedFunction(lt_errors_fn_name)) |llvm_fn| {
return llvm_fn;
}
@@ -8587,7 +8587,7 @@ pub const FuncGen = struct {
const ret_llvm_ty = try self.dg.lowerType(Type.bool);
const anyerror_llvm_ty = try self.dg.lowerType(Type.anyerror);
- const param_types = [_]*const llvm.Type{anyerror_llvm_ty};
+ const param_types = [_]*llvm.Type{anyerror_llvm_ty};
const fn_type = llvm.functionType(ret_llvm_ty, &param_types, param_types.len, .False);
const llvm_fn = self.dg.object.llvm_module.addFunction(lt_errors_fn_name, fn_type);
@@ -8597,7 +8597,7 @@ pub const FuncGen = struct {
return llvm_fn;
}
- fn airErrorName(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airErrorName(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const un_op = self.air.instructions.items(.data)[inst].un_op;
@@ -8608,12 +8608,12 @@ pub const FuncGen = struct {
const error_name_table_ptr = try self.getErrorNameTable();
const ptr_slice_llvm_ty = slice_llvm_ty.pointerType(0);
const error_name_table = self.builder.buildLoad(ptr_slice_llvm_ty, error_name_table_ptr, "");
- const indices = [_]*const llvm.Value{operand};
+ const indices = [_]*llvm.Value{operand};
const error_name_ptr = self.builder.buildInBoundsGEP(slice_llvm_ty, error_name_table, &indices, indices.len, "");
return self.builder.buildLoad(slice_llvm_ty, error_name_ptr, "");
}
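airErrorName indexes the global error name table (see getErrorNameTable further down) by the error value and loads the []const u8 slice stored there; the table itself is only filled in at flushModule time, once every error in the compilation is known. This implements @errorName:

    fn describe(err: anyerror) []const u8 {
        return @errorName(err); // GEP into the error name table + slice load
    }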
- fn airSplat(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airSplat(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
@@ -8623,7 +8623,7 @@ pub const FuncGen = struct {
return self.builder.buildVectorSplat(len, scalar, "");
}
- fn airSelect(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airSelect(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
@@ -8635,7 +8635,7 @@ pub const FuncGen = struct {
return self.builder.buildSelect(pred, a, b, "");
}
- fn airShuffle(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airShuffle(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
@@ -8651,7 +8651,7 @@ pub const FuncGen = struct {
// when changing code, so Zig uses negative numbers to index the
// second vector. These start at -1 and go down, and are easiest to use
// with the ~ operator. Here we convert between the two formats.
- const values = try self.gpa.alloc(*const llvm.Value, mask_len);
+ const values = try self.gpa.alloc(*llvm.Value, mask_len);
defer self.gpa.free(values);
const llvm_i32 = self.context.intType(32);
@@ -8672,7 +8672,7 @@ pub const FuncGen = struct {
return self.builder.buildShuffleVector(a, b, llvm_mask_value, "");
}
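The mask conversion is mechanical: Zig encodes lane k of the second vector as ~k (so -1, -2, ...), while LLVM's shufflevector numbers those lanes a_len + k. A runnable sketch of the index translation (toLlvmMaskIndex is a hypothetical name; the real loop also handles undef lanes):

    const std = @import("std");

    fn toLlvmMaskIndex(a_len: i32, zig_index: i32) i32 {
        return if (zig_index >= 0) zig_index else a_len + ~zig_index;
    }

    test "mask index translation" {
        try std.testing.expectEqual(@as(i32, 2), toLlvmMaskIndex(4, 2)); // a[2]
        try std.testing.expectEqual(@as(i32, 4), toLlvmMaskIndex(4, -1)); // b[0]
    }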
- fn airReduce(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*const llvm.Value {
+ fn airReduce(self: *FuncGen, inst: Air.Inst.Index, want_fast_math: bool) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
self.builder.setFastMath(want_fast_math);
@@ -8717,7 +8717,7 @@ pub const FuncGen = struct {
}
}
- fn airAggregateInit(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airAggregateInit(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
@@ -8746,7 +8746,7 @@ pub const FuncGen = struct {
const int_llvm_ty = self.dg.context.intType(@intCast(c_uint, big_bits));
const fields = struct_obj.fields.values();
comptime assert(Type.packed_struct_layout_version == 2);
- var running_int: *const llvm.Value = int_llvm_ty.constNull();
+ var running_int: *llvm.Value = int_llvm_ty.constNull();
var running_bits: u16 = 0;
for (elements) |elem, i| {
const field = fields[i];
@@ -8780,7 +8780,7 @@ pub const FuncGen = struct {
// even if we fully populate the fields.
alloca_inst.setAlignment(result_ty.abiAlignment(target));
- var indices: [2]*const llvm.Value = .{ llvm_u32.constNull(), undefined };
+ var indices: [2]*llvm.Value = .{ llvm_u32.constNull(), undefined };
for (elements) |elem, i| {
if (result_ty.structFieldValueComptime(i) != null) continue;
@@ -8829,7 +8829,7 @@ pub const FuncGen = struct {
const elem_ptr_ty = Type.initPayload(&elem_ptr_payload.base);
for (elements) |elem, i| {
- const indices: [2]*const llvm.Value = .{
+ const indices: [2]*llvm.Value = .{
llvm_usize.constNull(),
llvm_usize.constInt(@intCast(c_uint, i), .False),
};
@@ -8838,7 +8838,7 @@ pub const FuncGen = struct {
self.store(elem_ptr, elem_ptr_ty, llvm_elem, .NotAtomic);
}
if (array_info.sentinel) |sent_val| {
- const indices: [2]*const llvm.Value = .{
+ const indices: [2]*llvm.Value = .{
llvm_usize.constNull(),
llvm_usize.constInt(@intCast(c_uint, array_info.len), .False),
};
@@ -8857,7 +8857,7 @@ pub const FuncGen = struct {
}
}
- fn airUnionInit(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airUnionInit(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
@@ -8910,17 +8910,17 @@ pub const FuncGen = struct {
break :p field_llvm_ty;
}
const padding_len = @intCast(c_uint, layout.payload_size - field_size);
- const fields: [2]*const llvm.Type = .{
+ const fields: [2]*llvm.Type = .{
field_llvm_ty, self.context.intType(8).arrayType(padding_len),
};
break :p self.context.structType(&fields, fields.len, .False);
};
if (layout.tag_size == 0) {
- const fields: [1]*const llvm.Type = .{payload};
+ const fields: [1]*llvm.Type = .{payload};
break :t self.context.structType(&fields, fields.len, .False);
}
const tag_llvm_ty = try self.dg.lowerType(union_obj.tag_ty);
- var fields: [3]*const llvm.Type = undefined;
+ var fields: [3]*llvm.Type = undefined;
var fields_len: c_uint = 2;
if (layout.tag_align >= layout.payload_align) {
fields = .{ tag_llvm_ty, payload, undefined };
@@ -8949,7 +8949,7 @@ pub const FuncGen = struct {
};
const field_ptr_ty = Type.initPayload(&field_ptr_payload.base);
if (layout.tag_size == 0) {
- const indices: [3]*const llvm.Value = .{
+ const indices: [3]*llvm.Value = .{
index_type.constNull(),
index_type.constNull(),
index_type.constNull(),
@@ -8961,7 +8961,7 @@ pub const FuncGen = struct {
}
{
- const indices: [3]*const llvm.Value = .{
+ const indices: [3]*llvm.Value = .{
index_type.constNull(),
index_type.constInt(@boolToInt(layout.tag_align >= layout.payload_align), .False),
index_type.constNull(),
@@ -8971,7 +8971,7 @@ pub const FuncGen = struct {
self.store(field_ptr, field_ptr_ty, llvm_payload, .NotAtomic);
}
{
- const indices: [2]*const llvm.Value = .{
+ const indices: [2]*llvm.Value = .{
index_type.constNull(),
index_type.constInt(@boolToInt(layout.tag_align < layout.payload_align), .False),
};
@@ -8985,7 +8985,7 @@ pub const FuncGen = struct {
return result_ptr;
}
- fn airPrefetch(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
+ fn airPrefetch(self: *FuncGen, inst: Air.Inst.Index) !?*llvm.Value {
const prefetch = self.air.instructions.items(.data)[inst].prefetch;
comptime assert(@enumToInt(std.builtin.PrefetchOptions.Rw.read) == 0);
@@ -9026,7 +9026,7 @@ pub const FuncGen = struct {
const fn_val = self.dg.object.llvm_module.getNamedFunction(llvm_fn_name) orelse blk: {
// declare void @llvm.prefetch(i8*, i32, i32, i32)
const llvm_void = self.context.voidType();
- const param_types = [_]*const llvm.Type{
+ const param_types = [_]*llvm.Type{
llvm_ptr_u8, llvm_u32, llvm_u32, llvm_u32,
};
const fn_type = llvm.functionType(llvm_void, &param_types, param_types.len, .False);
@@ -9036,7 +9036,7 @@ pub const FuncGen = struct {
const ptr = try self.resolveInst(prefetch.ptr);
const ptr_u8 = self.builder.buildBitCast(ptr, llvm_ptr_u8, "");
- const params = [_]*const llvm.Value{
+ const params = [_]*llvm.Value{
ptr_u8,
llvm_u32.constInt(@enumToInt(prefetch.rw), .False),
llvm_u32.constInt(prefetch.locality, .False),
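The four intrinsic operands map one-to-one onto std.builtin.PrefetchOptions: the pointer, read/write (0/1), locality (0 = no reuse ... 3 = keep resident), and instruction vs. data cache; the comptime asserts above pin the Zig enum layout to the intrinsic's encoding. Surface form, as a sketch (warm is a hypothetical name):

    fn warm(p: *const u8) void {
        @prefetch(p, .{ .rw = .read, .locality = 3, .cache = .data });
    }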
@@ -9048,17 +9048,17 @@ pub const FuncGen = struct {
fn softF80TruncOrExt(
self: *FuncGen,
- operand: *const llvm.Value,
+ operand: *llvm.Value,
src_bits: u16,
dest_bits: u16,
- ) !?*const llvm.Value {
+ ) !?*llvm.Value {
const target = self.dg.module.getTarget();
- var param_llvm_ty: *const llvm.Type = self.context.intType(80);
- var ret_llvm_ty: *const llvm.Type = param_llvm_ty;
+ var param_llvm_ty: *llvm.Type = self.context.intType(80);
+ var ret_llvm_ty: *llvm.Type = param_llvm_ty;
var fn_name: [*:0]const u8 = undefined;
var arg = operand;
- var final_cast: ?*const llvm.Type = null;
+ var final_cast: ?*llvm.Type = null;
assert(src_bits == 80 or dest_bits == 80);
@@ -9116,18 +9116,18 @@ pub const FuncGen = struct {
}
const llvm_fn = self.dg.object.llvm_module.getNamedFunction(fn_name) orelse f: {
- const param_types = [_]*const llvm.Type{param_llvm_ty};
+ const param_types = [_]*llvm.Type{param_llvm_ty};
const fn_type = llvm.functionType(ret_llvm_ty, &param_types, param_types.len, .False);
break :f self.dg.object.llvm_module.addFunction(fn_name, fn_type);
};
- var args: [1]*const llvm.Value = .{arg};
+ var args: [1]*llvm.Value = .{arg};
const result = self.builder.buildCall(llvm_fn.globalGetValueType(), llvm_fn, &args, args.len, .C, .Auto, "");
const final_cast_llvm_ty = final_cast orelse return result;
return self.builder.buildBitCast(result, final_cast_llvm_ty, "");
}
- fn getErrorNameTable(self: *FuncGen) !*const llvm.Value {
+ fn getErrorNameTable(self: *FuncGen) !*llvm.Value {
if (self.dg.object.error_name_table) |table| {
return table;
}
@@ -9151,10 +9151,10 @@ pub const FuncGen = struct {
/// Assumes the optional is not pointer-like and payload has bits.
fn optIsNonNull(
self: *FuncGen,
- opt_llvm_ty: *const llvm.Type,
- opt_handle: *const llvm.Value,
+ opt_llvm_ty: *llvm.Type,
+ opt_handle: *llvm.Value,
is_by_ref: bool,
- ) *const llvm.Value {
+ ) *llvm.Value {
const non_null_llvm_ty = self.context.intType(8);
const field = b: {
if (is_by_ref) {
@@ -9171,10 +9171,10 @@ pub const FuncGen = struct {
/// Assumes the optional is not pointer-like and payload has bits.
fn optPayloadHandle(
fg: *FuncGen,
- opt_llvm_ty: *const llvm.Type,
- opt_handle: *const llvm.Value,
+ opt_llvm_ty: *llvm.Type,
+ opt_handle: *llvm.Value,
opt_ty: Type,
- ) !*const llvm.Value {
+ ) !*llvm.Value {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = opt_ty.optionalChild(&buf);
@@ -9200,9 +9200,9 @@ pub const FuncGen = struct {
fn buildOptional(
self: *FuncGen,
optional_ty: Type,
- payload: *const llvm.Value,
- non_null_bit: *const llvm.Value,
- ) !?*const llvm.Value {
+ payload: *llvm.Value,
+ non_null_bit: *llvm.Value,
+ ) !?*llvm.Value {
const optional_llvm_ty = try self.dg.lowerType(optional_ty);
const non_null_field = self.builder.buildZExt(non_null_bit, self.dg.context.intType(8), "");
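For optionals that are not pointer-like, the payload lives at field 0 and the non-null flag at field 1, widened from i1 to i8 so it can be loaded and stored through memory. Roughly the layout of ?u32 under this scheme (OptU32 is an illustrative stand-in, not a type from this commit):

    const OptU32 = extern struct {
        payload: u32,
        non_null: u8, // the i1 produced by codegen is zext'd, as in the line above
    };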
@@ -9233,10 +9233,10 @@ pub const FuncGen = struct {
fn fieldPtr(
self: *FuncGen,
inst: Air.Inst.Index,
- struct_ptr: *const llvm.Value,
+ struct_ptr: *llvm.Value,
struct_ptr_ty: Type,
field_index: u32,
- ) !?*const llvm.Value {
+ ) !?*llvm.Value {
if (self.liveness.isUnused(inst)) return null;
const target = self.dg.object.target;
@@ -9268,7 +9268,7 @@ pub const FuncGen = struct {
const ptr_as_bytes = self.builder.buildBitCast(struct_ptr, byte_llvm_ty.pointerType(0), "");
const llvm_usize = try self.dg.lowerType(Type.usize);
const llvm_index = llvm_usize.constInt(byte_offset, .False);
- const indices: [1]*const llvm.Value = .{llvm_index};
+ const indices: [1]*llvm.Value = .{llvm_index};
const new_ptr = self.builder.buildInBoundsGEP(byte_llvm_ty, ptr_as_bytes, &indices, indices.len, "");
return self.builder.buildBitCast(new_ptr, result_llvm_ty, "");
},
@@ -9285,7 +9285,7 @@ pub const FuncGen = struct {
// the struct.
const llvm_usize = try self.dg.lowerType(Type.usize);
const llvm_index = llvm_usize.constInt(1, .False);
- const indices: [1]*const llvm.Value = .{llvm_index};
+ const indices: [1]*llvm.Value = .{llvm_index};
return self.builder.buildInBoundsGEP(struct_llvm_ty, struct_ptr, &indices, indices.len, "");
}
},
@@ -9298,9 +9298,9 @@ pub const FuncGen = struct {
fn unionFieldPtr(
self: *FuncGen,
inst: Air.Inst.Index,
- union_ptr: *const llvm.Value,
+ union_ptr: *llvm.Value,
union_ty: Type,
- ) !?*const llvm.Value {
+ ) !?*llvm.Value {
const target = self.dg.module.getTarget();
const layout = union_ty.unionGetLayout(target);
const result_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst));
@@ -9313,7 +9313,7 @@ pub const FuncGen = struct {
return self.builder.buildBitCast(union_field_ptr, result_llvm_ty, "");
}
- fn getIntrinsic(self: *FuncGen, name: []const u8, types: []const *const llvm.Type) *const llvm.Value {
+ fn getIntrinsic(self: *FuncGen, name: []const u8, types: []const *llvm.Type) *llvm.Value {
const id = llvm.lookupIntrinsicID(name.ptr, name.len);
assert(id != 0);
return self.llvmModule().getIntrinsicDeclaration(id, types.ptr, types.len);
@@ -9322,7 +9322,7 @@ pub const FuncGen = struct {
/// This function always performs a copy. For isByRef=true types, it creates a new
/// alloca and copies the value into it, then returns the alloca instruction.
/// For isByRef=false types, it creates a load instruction and returns it.
- fn load(self: *FuncGen, ptr: *const llvm.Value, ptr_ty: Type) !?*const llvm.Value {
+ fn load(self: *FuncGen, ptr: *llvm.Value, ptr_ty: Type) !?*llvm.Value {
const info = ptr_ty.ptrInfo().data;
if (!info.pointee_type.hasRuntimeBitsIgnoreComptime()) return null;
@@ -9396,9 +9396,9 @@ pub const FuncGen = struct {
fn store(
self: *FuncGen,
- ptr: *const llvm.Value,
+ ptr: *llvm.Value,
ptr_ty: Type,
- elem: *const llvm.Value,
+ elem: *llvm.Value,
ordering: llvm.AtomicOrdering,
) void {
const info = ptr_ty.ptrInfo().data;
@@ -9463,7 +9463,7 @@ pub const FuncGen = struct {
);
}
- fn valgrindMarkUndef(fg: *FuncGen, ptr: *const llvm.Value, len: *const llvm.Value) void {
+ fn valgrindMarkUndef(fg: *FuncGen, ptr: *llvm.Value, len: *llvm.Value) void {
const VG_USERREQ__MAKE_MEM_UNDEFINED = 1296236545;
const target = fg.dg.module.getTarget();
const usize_llvm_ty = fg.context.intType(target.cpu.arch.ptrBitWidth());
@@ -9475,14 +9475,14 @@ pub const FuncGen = struct {
fn valgrindClientRequest(
fg: *FuncGen,
- default_value: *const llvm.Value,
- request: *const llvm.Value,
- a1: *const llvm.Value,
- a2: *const llvm.Value,
- a3: *const llvm.Value,
- a4: *const llvm.Value,
- a5: *const llvm.Value,
- ) *const llvm.Value {
+ default_value: *llvm.Value,
+ request: *llvm.Value,
+ a1: *llvm.Value,
+ a2: *llvm.Value,
+ a3: *llvm.Value,
+ a4: *llvm.Value,
+ a5: *llvm.Value,
+ ) *llvm.Value {
const target = fg.dg.module.getTarget();
if (!target_util.hasValgrindSupport(target)) return default_value;
@@ -9498,10 +9498,10 @@ pub const FuncGen = struct {
fg.valgrind_client_request_array = array_ptr;
break :a array_ptr;
};
- const array_elements = [_]*const llvm.Value{ request, a1, a2, a3, a4, a5 };
+ const array_elements = [_]*llvm.Value{ request, a1, a2, a3, a4, a5 };
const zero = usize_llvm_ty.constInt(0, .False);
for (array_elements) |elem, i| {
- const indexes = [_]*const llvm.Value{
+ const indexes = [_]*llvm.Value{
zero, usize_llvm_ty.constInt(@intCast(c_uint, i), .False),
};
const elem_ptr = fg.builder.buildInBoundsGEP(array_llvm_ty, array_ptr, &indexes, indexes.len, "");
@@ -9518,8 +9518,8 @@ pub const FuncGen = struct {
const asm_constraints = "={rdx},{rax},0,~{cc},~{memory}";
const array_ptr_as_usize = fg.builder.buildPtrToInt(array_ptr, usize_llvm_ty, "");
- const args = [_]*const llvm.Value{ array_ptr_as_usize, default_value };
- const param_types = [_]*const llvm.Type{ usize_llvm_ty, usize_llvm_ty };
+ const args = [_]*llvm.Value{ array_ptr_as_usize, default_value };
+ const param_types = [_]*llvm.Type{ usize_llvm_ty, usize_llvm_ty };
const fn_llvm_ty = llvm.functionType(usize_llvm_ty, &param_types, args.len, .False);
const asm_fn = llvm.getInlineAsm(
fn_llvm_ty,
@@ -9898,7 +9898,7 @@ fn firstParamSRet(fn_info: Type.Payload.Function.Data, target: std.Target) bool
/// In order to support the C calling convention, some return types need to be lowered
/// completely differently in the function prototype to honor the C ABI, and then
/// be effectively bitcasted to the actual return type.
-fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*const llvm.Type {
+fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime()) {
// If the return type is an error set or an error union, then we make this
// anyerror return type instead, so that it can be coerced into a function
@@ -9945,7 +9945,7 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*const llvm.
if (classes[0] == .memory) {
return dg.context.voidType();
}
- var llvm_types_buffer: [8]*const llvm.Type = undefined;
+ var llvm_types_buffer: [8]*llvm.Type = undefined;
var llvm_types_index: u32 = 0;
for (classes) |class| {
switch (class) {
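On x86_64 SysV, the classes array drives reassembly of the return type from eightbyte pieces; for example, a struct of two u32s classifies as one INTEGER eightbyte and comes back in %rax as an i64, which is then effectively bitcast to the Zig-level type. An illustrative sketch (Pair/makePair are hypothetical; the exact lowering depends on the class list):

    const Pair = extern struct { a: u32, b: u32 };

    export fn makePair() Pair {
        // Prototype lowers to roughly "i64 @makePair()"; the caller side
        // reassembles the struct from the integer register.
        return .{ .a = 1, .b = 2 };
    }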
@@ -10488,11 +10488,11 @@ fn compilerRtIntBits(bits: u16) u16 {
}
fn buildAllocaInner(
- builder: *const llvm.Builder,
- llvm_func: *const llvm.Value,
+ builder: *llvm.Builder,
+ llvm_func: *llvm.Value,
di_scope_non_null: bool,
- llvm_ty: *const llvm.Type,
-) *const llvm.Value {
+ llvm_ty: *llvm.Type,
+) *llvm.Value {
const prev_block = builder.getInsertBlock();
const prev_debug_location = builder.getCurrentDebugLocation2();
defer {