aboutsummaryrefslogtreecommitdiff
path: root/src/Module.zig
diff options
context:
space:
mode:
authorAndrew Kelley <andrew@ziglang.org>2023-09-24 14:37:36 -0700
committerAndrew Kelley <andrew@ziglang.org>2023-09-24 14:37:36 -0700
commitc08c0fc6eddf601785abfbc5e5a9ab5c89d7cfbf (patch)
tree65cabc6acd2db112c1fa9557c2f34dfd04659113 /src/Module.zig
parenta7088fd9a3edb037f0f51bb402a3c557334634f3 (diff)
downloadzig-c08c0fc6eddf601785abfbc5e5a9ab5c89d7cfbf.tar.gz
zig-c08c0fc6eddf601785abfbc5e5a9ab5c89d7cfbf.zip
revert "compiler: packed structs cache bit offsets"
This is mostly a revert of a7088fd9a3edb037f0f51bb402a3c557334634f3. Measurement revealed the commit actually regressed performance.
Diffstat (limited to 'src/Module.zig')
-rw-r--r--  src/Module.zig  23
1 file changed, 23 insertions(+), 0 deletions(-)
diff --git a/src/Module.zig b/src/Module.zig
index 8ac9d794de..bdf8d3a768 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -6649,3 +6649,26 @@ pub fn structFieldAlignmentExtern(mod: *Module, field_ty: Type) Alignment {
return ty_abi_align;
}
+
+/// https://github.com/ziglang/zig/issues/17178 explored storing these bit offsets
+/// into the packed struct InternPool data rather than computing this on the
+/// fly, however it was found to perform worse when measured on real world
+/// projects.
+pub fn structPackedFieldBitOffset(
+ mod: *Module,
+ struct_type: InternPool.Key.StructType,
+ field_index: u32,
+) u16 {
+ const ip = &mod.intern_pool;
+ assert(struct_type.layout == .Packed);
+ assert(struct_type.haveLayout(ip));
+ var bit_sum: u64 = 0;
+ for (0..struct_type.field_types.len) |i| {
+ if (i == field_index) {
+ return @intCast(bit_sum);
+ }
+ const field_ty = struct_type.field_types.get(ip)[i].toType();
+ bit_sum += field_ty.bitSize(mod);
+ }
+ unreachable; // index out of bounds
+}