author    Andrew Kelley <andrew@ziglang.org>   2022-06-07 20:51:14 -0700
committer Andrew Kelley <andrew@ziglang.org>   2022-06-07 21:05:40 -0700
commit    3e30ba3f20dce2d406253de3fc0eb86934a3eaa7 (patch)
tree      37cde6ce42dc2e326bd7beb599b6ad0d77051c28 /src
parent    70dc910086582b028d404d5de5049ceae0a95161 (diff)
stage2: better codegen for byte-aligned packed struct fields
* Sema: handle overaligned packed struct field pointers
* LLVM: handle byte-aligned packed struct field pointers
Diffstat (limited to 'src')
-rw-r--r--  src/Sema.zig          23
-rw-r--r--  src/codegen/llvm.zig  40
-rw-r--r--  src/type.zig          21
3 files changed, 71 insertions, 13 deletions
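
The case this commit improves can be illustrated with a small, hypothetical Zig program (not part of the patch; `S` and its fields are made up). Field `b` starts at bit offset 8 and is exactly one byte wide, so with this change a pointer to it can be lowered as an ordinary byte pointer instead of a bit-offset pointer into the whole backing integer:

const std = @import("std");

// Hypothetical packed struct; `b` happens to land on a byte boundary.
const S = packed struct {
    a: u8,
    b: u8,
    c: u16,
};

test "pointer to a byte-aligned packed struct field" {
    var s = S{ .a = 1, .b = 2, .c = 3 };
    const p = &s.b; // the field begins and ends on a byte boundary
    p.* = 42;
    try std.testing.expect(s.b == 42);
}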
diff --git a/src/Sema.zig b/src/Sema.zig
index 5159d6f5d3..0cf449991d 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -18678,8 +18678,6 @@ fn structFieldPtrByIndex(
const target = sema.mod.getTarget();
- // TODO handle when the struct pointer is overaligned, we should return a potentially
- // over-aligned field pointer too.
if (struct_obj.layout == .Packed) {
comptime assert(Type.packed_struct_layout_version == 2);
@@ -18700,6 +18698,27 @@ fn structFieldPtrByIndex(
ptr_ty_data.host_size = struct_ptr_ty_info.host_size;
ptr_ty_data.bit_offset += struct_ptr_ty_info.bit_offset;
}
+
+ const parent_align = if (struct_ptr_ty_info.@"align" != 0)
+ struct_ptr_ty_info.@"align"
+ else
+ struct_ptr_ty_info.pointee_type.abiAlignment(target);
+ ptr_ty_data.@"align" = parent_align;
+
+ // If the field happens to be byte-aligned, simplify the pointer type.
+ // The pointee type bit size must match its ABI byte size so that loads and stores
+ // do not interfere with the surrounding packed bits.
+ if (parent_align != 0 and ptr_ty_data.bit_offset % 8 == 0) {
+ const byte_offset = ptr_ty_data.bit_offset / 8;
+ const elem_size_bytes = ptr_ty_data.pointee_type.abiSize(target);
+ const elem_size_bits = ptr_ty_data.pointee_type.bitSize(target);
+ if (elem_size_bytes * 8 == elem_size_bits) {
+ const new_align = @as(u32, 1) << @intCast(u5, @ctz(u64, byte_offset | parent_align));
+ ptr_ty_data.bit_offset = 0;
+ ptr_ty_data.host_size = 0;
+ ptr_ty_data.@"align" = new_align;
+ }
+ }
} else {
ptr_ty_data.@"align" = field.abi_align;
}
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index dcdf4888ea..188c2f6f11 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -8311,23 +8311,41 @@ pub const FuncGen = struct {
field_index: u32,
) !?*const llvm.Value {
if (self.liveness.isUnused(inst)) return null;
+
+ const target = self.dg.object.target;
const struct_ty = struct_ptr_ty.childType();
switch (struct_ty.zigTypeTag()) {
.Struct => switch (struct_ty.containerLayout()) {
.Packed => {
- // From LLVM's perspective, a pointer to a packed struct and a pointer
- // to a field of a packed struct are the same. The difference is in the
- // Zig pointer type which provides information for how to mask and shift
- // out the relevant bits when accessing the pointee.
- // Here we perform a bitcast because we want to use the host_size
- // as the llvm pointer element type.
- const result_llvm_ty = try self.dg.lowerType(self.air.typeOfIndex(inst));
- // TODO this can be removed if we change host_size to be bits instead
- // of bytes.
- return self.builder.buildBitCast(struct_ptr, result_llvm_ty, "");
+ const result_ty = self.air.typeOfIndex(inst);
+ const result_ty_info = result_ty.ptrInfo().data;
+ const result_llvm_ty = try self.dg.lowerType(result_ty);
+
+ if (result_ty_info.host_size != 0) {
+ // From LLVM's perspective, a pointer to a packed struct and a pointer
+ // to a field of a packed struct are the same. The difference is in the
+ // Zig pointer type which provides information for how to mask and shift
+ // out the relevant bits when accessing the pointee.
+ // Here we perform a bitcast because we want to use the host_size
+ // as the llvm pointer element type.
+ return self.builder.buildBitCast(struct_ptr, result_llvm_ty, "");
+ }
+
+ // We have a pointer to a packed struct field that happens to be byte-aligned.
+ // Offset our operand pointer by the correct number of bytes.
+ const byte_offset = struct_ty.packedStructFieldByteOffset(field_index, target);
+ if (byte_offset == 0) {
+ return self.builder.buildBitCast(struct_ptr, result_llvm_ty, "");
+ }
+ const llvm_bytes_ptr_ty = self.context.intType(8).pointerType(0);
+ const ptr_as_bytes = self.builder.buildBitCast(struct_ptr, llvm_bytes_ptr_ty, "");
+ const llvm_usize = try self.dg.lowerType(Type.usize);
+ const llvm_index = llvm_usize.constInt(byte_offset, .False);
+ const indices: [1]*const llvm.Value = .{llvm_index};
+ const new_ptr = self.builder.buildInBoundsGEP(ptr_as_bytes, &indices, indices.len, "");
+ return self.builder.buildBitCast(new_ptr, result_llvm_ty, "");
},
else => {
- const target = self.dg.module.getTarget();
var ty_buf: Type.Payload.Pointer = undefined;
if (llvmFieldIndex(struct_ty, field_index, target, &ty_buf)) |llvm_field_index| {
return self.builder.buildStructGEP(struct_ptr, llvm_field_index, "");
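
To make the new LLVM path concrete: for the same hypothetical packed struct `S` as in the earlier example, whose third field sits at byte offset 2, the byte-offset branch above amounts to a bitcast to an i8 pointer, an inbounds GEP by the byte offset, and a bitcast to the field's pointer type, instead of masking and shifting the whole backing integer. A sketch (the type, the function name, and the IR in the comment are illustrative assumptions, not compiler output):

// Hypothetical example type; `c` starts at bit 16, i.e. byte offset 2,
// and its bit size (16) matches its ABI size in bits.
const S = packed struct {
    a: u8,
    b: u8,
    c: u16,
};

fn fieldPtr(s: *S) *u16 {
    // With this patch, taking the field pointer lowers to roughly:
    //   %1 = bitcast i32* %s to i8*
    //   %2 = getelementptr inbounds i8, i8* %1, i64 2
    //   %3 = bitcast i8* %2 to i16*
    return &s.c;
}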
diff --git a/src/type.zig b/src/type.zig
index 14c613a947..8e5eaf0ec7 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -5591,6 +5591,27 @@ pub const Type = extern union {
}
}
+ pub fn packedStructFieldByteOffset(ty: Type, field_index: usize, target: Target) u32 {
+ const struct_obj = ty.castTag(.@"struct").?.data;
+ assert(struct_obj.layout == .Packed);
+ comptime assert(Type.packed_struct_layout_version == 2);
+
+ var bit_offset: u16 = undefined;
+ var running_bits: u16 = 0;
+ for (struct_obj.fields.values()) |f, i| {
+ if (!f.ty.hasRuntimeBits()) continue;
+
+ if (i == field_index) {
+ bit_offset = running_bits;
+ }
+ running_bits += @intCast(u16, f.ty.bitSize(target));
+ }
+ const host_size = (running_bits + 7) / 8;
+ _ = host_size; // TODO big-endian
+ const byte_offset = bit_offset / 8;
+ return byte_offset;
+ }
+
pub const FieldOffset = struct {
field: usize,
offset: u64,
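
The new `packedStructFieldByteOffset` above accumulates field bit widths until it reaches the requested field and then converts bits to bytes. A simplified sketch of the same accumulation over plain bit widths (hypothetical helper; it ignores zero-bit fields and the big-endian TODO noted in the patch):

// Sketch only: operates on raw bit widths instead of a real Type.
fn byteOffsetOfField(bit_widths: []const u16, field_index: usize) u16 {
    var running_bits: u16 = 0;
    for (bit_widths) |width, i| {
        if (i == field_index) break;
        running_bits += width;
    }
    return running_bits / 8; // only meaningful when the field is byte-aligned
}

test "byte offset within a packed layout" {
    const std = @import("std");
    // fields of 8, 8, and 16 bits: the third field starts at bit 16, i.e. byte 2
    try std.testing.expect(byteOffsetOfField(&[_]u16{ 8, 8, 16 }, 2) == 2);
}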