diff options
| author | Veikka Tuominen <git@vexu.eu> | 2023-01-16 19:46:41 +0200 |
|---|---|---|
| committer | Veikka Tuominen <git@vexu.eu> | 2023-01-16 19:46:41 +0200 |
| commit | 342bae02d86d9bd9f2db6ae9489021bab28595ae (patch) | |
| tree | 8b50d7cffff029c135a17295c4c324603d2358b1 /src/codegen | |
| parent | 31a2b8c3642f1240a70d78203d568051d4dbcd3f (diff) | |
| download | zig-342bae02d86d9bd9f2db6ae9489021bab28595ae.tar.gz zig-342bae02d86d9bd9f2db6ae9489021bab28595ae.zip | |
Sema: automatically optimize order of struct fields
This is a simple starting version of the optimization described in #168
where the fields are just sorted by order of descending alignment.
Diffstat (limited to 'src/codegen')
| -rw-r--r-- | src/codegen/llvm.zig | 30 |
1 file changed, 15 insertions(+), 15 deletions(-)
```diff
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 8604c7d7f6..b7e1466a2b 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -2083,15 +2083,15 @@ pub const Object = struct {
         comptime assert(struct_layout_version == 2);
         var offset: u64 = 0;
 
-        for (fields.values()) |field, i| {
-            if (field.is_comptime or !field.ty.hasRuntimeBits()) continue;
-
+        var it = ty.castTag(.@"struct").?.data.runtimeFieldIterator();
+        while (it.next()) |field_and_index| {
+            const field = field_and_index.field;
             const field_size = field.ty.abiSize(target);
             const field_align = field.alignment(target, layout);
             const field_offset = std.mem.alignForwardGeneric(u64, offset, field_align);
             offset = field_offset + field_size;
 
-            const field_name = try gpa.dupeZ(u8, fields.keys()[i]);
+            const field_name = try gpa.dupeZ(u8, fields.keys()[field_and_index.index]);
             defer gpa.free(field_name);
 
             try di_fields.append(gpa, dib.createMemberType(
@@ -2985,9 +2985,9 @@ pub const DeclGen = struct {
         var big_align: u32 = 1;
         var any_underaligned_fields = false;
 
-        for (struct_obj.fields.values()) |field| {
-            if (field.is_comptime or !field.ty.hasRuntimeBits()) continue;
-
+        var it = struct_obj.runtimeFieldIterator();
+        while (it.next()) |field_and_index| {
+            const field = field_and_index.field;
             const field_align = field.alignment(target, struct_obj.layout);
             const field_ty_align = field.ty.abiAlignment(target);
             any_underaligned_fields = any_underaligned_fields or
@@ -3714,9 +3714,9 @@ pub const DeclGen = struct {
             var big_align: u32 = 0;
             var need_unnamed = false;
 
-            for (struct_obj.fields.values()) |field, i| {
-                if (field.is_comptime or !field.ty.hasRuntimeBits()) continue;
-
+            var it = struct_obj.runtimeFieldIterator();
+            while (it.next()) |field_and_index| {
+                const field = field_and_index.field;
                 const field_align = field.alignment(target, struct_obj.layout);
                 big_align = @max(big_align, field_align);
                 const prev_offset = offset;
@@ -3732,7 +3732,7 @@ pub const DeclGen = struct {
                 const field_llvm_val = try dg.lowerValue(.{
                     .ty = field.ty,
-                    .val = field_vals[i],
+                    .val = field_vals[field_and_index.index],
                 });
 
                 need_unnamed = need_unnamed or dg.isUnnamedType(field.ty, field_llvm_val);
@@ -10354,9 +10354,9 @@ fn llvmFieldIndex(
     assert(layout != .Packed);
 
     var llvm_field_index: c_uint = 0;
-    for (ty.structFields().values()) |field, i| {
-        if (field.is_comptime or !field.ty.hasRuntimeBits()) continue;
-
+    var it = ty.castTag(.@"struct").?.data.runtimeFieldIterator();
+    while (it.next()) |field_and_index| {
+        const field = field_and_index.field;
         const field_align = field.alignment(target, layout);
         big_align = @max(big_align, field_align);
         const prev_offset = offset;
@@ -10367,7 +10367,7 @@ fn llvmFieldIndex(
             llvm_field_index += 1;
         }
 
-        if (field_index <= i) {
+        if (field_index == field_and_index.index) {
             ptr_pl_buf.* = .{
                 .data = .{
                     .pointee_type = field.ty,
```
