| author | Andrew Kelley <andrew@ziglang.org> | 2021-09-20 20:37:04 -0400 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2021-09-20 20:37:04 -0400 |
| commit | 1ad905c71e0896295d4781853cd577bbe1b4111a | |
| tree | 7d81da6b6fd3ee721b041eb33b3918707f2698df /src | |
| parent | 2a728f6e5f0c5d12e110313342e714f9f23c4044 | |
| parent | f8b914fcf328b30f98d31bb6461c953e4b7a33a7 | |
Merge pull request #9649 from Snektron/address-space
Address Spaces
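
This merge threads an `addrspace` attribute from the AST through AstGen, ZIR, Sema, and the LLVM backend. As a rough, hedged sketch of the surface syntax the changes below wire up in the self-hosted compiler (the identifiers here are illustrative and are not taken from the PR's own tests):

```zig
// Hedged sketch of the syntax this merge enables in the self-hosted compiler.
// Identifiers are illustrative, not copied from the PR.

// Pointer types may carry an address space. Per Sema.analyzeAddrspace in this
// diff, the segment address spaces .gs/.fs/.ss are accepted only in pointer
// context and only on i386/x86_64; everything else must be .generic.
fn loadFromGs(p: *addrspace(.gs) const u32) u32 {
    return p.*;
}

// Declarations accept an addrspace clause alongside align(...) and
// linksection(...). For globals and functions on x86, only .generic passes
// the new Sema check.
var counter: u32 addrspace(.generic) = 0;

// Rejected by the new AstGen error paths:
//   const F = fn () addrspace(.gs) void;  // "addrspace not allowed on function prototypes"
//   var tmp: u32 addrspace(.gs) = 0;      // inside a function body:
//                                         // "cannot set address space of local variable 'tmp'"
```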
Diffstat (limited to 'src')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/AstGen.zig | 51 |
| -rw-r--r-- | src/Module.zig | 145 |
| -rw-r--r-- | src/Sema.zig | 303 |
| -rw-r--r-- | src/Zir.zig | 47 |
| -rw-r--r-- | src/codegen.zig | 2 |
| -rw-r--r-- | src/codegen/c.zig | 2 |
| -rw-r--r-- | src/codegen/llvm.zig | 38 |
| -rw-r--r-- | src/codegen/llvm/bindings.zig | 68 |
| -rw-r--r-- | src/print_zir.zig | 14 |
| -rw-r--r-- | src/stage1/all_types.hpp | 8 |
| -rw-r--r-- | src/stage1/analyze.cpp | 10 |
| -rw-r--r-- | src/stage1/analyze.hpp | 2 |
| -rw-r--r-- | src/stage1/ir.cpp | 67 |
| -rw-r--r-- | src/target.zig | 18 |
| -rw-r--r-- | src/translate_c/ast.zig | 3 |
| -rw-r--r-- | src/type.zig | 128 |
| -rw-r--r-- | src/value.zig | 5 |
| -rw-r--r-- | src/zig_llvm.cpp | 5 |
| -rw-r--r-- | src/zig_llvm.h | 3 |
19 files changed, 741 insertions, 178 deletions
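
For orientation while reading the Sema and stage1 hunks that follow: the `std.builtin.AddressSpace` enum they switch over is defined under `lib/std/`, outside this `src/`-limited diffstat. The sketch below is an assumed reconstruction inferred from `Sema.analyzeAddrspace` and stage1's `AddressSpace` enum, not an excerpt from the actual standard library change:

```zig
// Assumed reconstruction of std.builtin.AddressSpace (it lives in lib/std/,
// outside this src/-limited diff); the variant set matches the switch in
// Sema.analyzeAddrspace and the stage1 AddressSpace enum below.
pub const AddressSpace = enum {
    generic,
    gs,
    fs,
    ss,
};
```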
diff --git a/src/AstGen.zig b/src/AstGen.zig index be3613dfb9..f1eabe4c0c 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -1116,6 +1116,11 @@ fn fnProtoExpr( const align_inst: Zir.Inst.Ref = if (fn_proto.ast.align_expr == 0) .none else inst: { break :inst try expr(gz, scope, align_rl, fn_proto.ast.align_expr); }; + + if (fn_proto.ast.addrspace_expr != 0) { + return astgen.failNode(fn_proto.ast.addrspace_expr, "addrspace not allowed on function prototypes", .{}); + } + if (fn_proto.ast.section_expr != 0) { return astgen.failNode(fn_proto.ast.section_expr, "linksection not allowed on function prototypes", .{}); } @@ -2371,6 +2376,7 @@ fn varDecl( const gpa = astgen.gpa; const tree = astgen.tree; const token_tags = tree.tokens.items(.tag); + const main_tokens = tree.nodes.items(.main_token); const name_token = var_decl.ast.mut_token + 1; const ident_name_raw = tree.tokenSlice(name_token); @@ -2385,6 +2391,14 @@ fn varDecl( return astgen.failNode(node, "variables must be initialized", .{}); } + if (var_decl.ast.addrspace_node != 0) { + return astgen.failTok(main_tokens[var_decl.ast.addrspace_node], "cannot set address space of local variable '{s}'", .{ident_name_raw}); + } + + if (var_decl.ast.section_node != 0) { + return astgen.failTok(main_tokens[var_decl.ast.section_node], "cannot set section of local variable '{s}'", .{ident_name_raw}); + } + const align_inst: Zir.Inst.Ref = if (var_decl.ast.align_node != 0) try expr(gz, scope, align_rl, var_decl.ast.align_node) else @@ -2714,6 +2728,7 @@ fn ptrType( const elem_type = try typeExpr(gz, scope, ptr_info.ast.child_type); const simple = ptr_info.ast.align_node == 0 and + ptr_info.ast.addrspace_node == 0 and ptr_info.ast.sentinel == 0 and ptr_info.ast.bit_range_start == 0; @@ -2732,6 +2747,7 @@ fn ptrType( var sentinel_ref: Zir.Inst.Ref = .none; var align_ref: Zir.Inst.Ref = .none; + var addrspace_ref: Zir.Inst.Ref = .none; var bit_start_ref: Zir.Inst.Ref = .none; var bit_end_ref: Zir.Inst.Ref = .none; var trailing_count: u32 = 0; @@ -2744,6 +2760,10 @@ fn ptrType( align_ref = try expr(gz, scope, align_rl, ptr_info.ast.align_node); trailing_count += 1; } + if (ptr_info.ast.addrspace_node != 0) { + addrspace_ref = try expr(gz, scope, .{ .ty = .address_space_type }, ptr_info.ast.addrspace_node); + trailing_count += 1; + } if (ptr_info.ast.bit_range_start != 0) { assert(ptr_info.ast.bit_range_end != 0); bit_start_ref = try expr(gz, scope, .none, ptr_info.ast.bit_range_start); @@ -2764,6 +2784,9 @@ fn ptrType( if (align_ref != .none) { gz.astgen.extra.appendAssumeCapacity(@enumToInt(align_ref)); } + if (addrspace_ref != .none) { + gz.astgen.extra.appendAssumeCapacity(@enumToInt(addrspace_ref)); + } if (bit_start_ref != .none) { gz.astgen.extra.appendAssumeCapacity(@enumToInt(bit_start_ref)); gz.astgen.extra.appendAssumeCapacity(@enumToInt(bit_end_ref)); @@ -2779,6 +2802,7 @@ fn ptrType( .is_volatile = ptr_info.volatile_token != null, .has_sentinel = sentinel_ref != .none, .has_align = align_ref != .none, + .has_addrspace = addrspace_ref != .none, .has_bit_range = bit_start_ref != .none, }, .size = ptr_info.size, @@ -2847,7 +2871,7 @@ const WipDecls = struct { is_pub: bool, is_export: bool, has_align: bool, - has_section: bool, + has_section_or_addrspace: bool, ) Allocator.Error!void { if (wip_decls.decl_index % fields_per_u32 == 0 and wip_decls.decl_index != 0) { try wip_decls.bit_bag.append(gpa, wip_decls.cur_bit_bag); @@ -2857,7 +2881,7 @@ const WipDecls = struct { (@as(u32, @boolToInt(is_pub)) << 28) | (@as(u32, @boolToInt(is_export)) << 29) 
| (@as(u32, @boolToInt(has_align)) << 30) | - (@as(u32, @boolToInt(has_section)) << 31); + (@as(u32, @boolToInt(has_section_or_addrspace)) << 31); wip_decls.decl_index += 1; } @@ -2922,7 +2946,8 @@ fn fnDecl( const maybe_inline_token = fn_proto.extern_export_inline_token orelse break :blk false; break :blk token_tags[maybe_inline_token] == .keyword_inline; }; - try wip_decls.next(gpa, is_pub, is_export, fn_proto.ast.align_expr != 0, fn_proto.ast.section_expr != 0); + const has_section_or_addrspace = fn_proto.ast.section_expr != 0 or fn_proto.ast.addrspace_expr != 0; + try wip_decls.next(gpa, is_pub, is_export, fn_proto.ast.align_expr != 0, has_section_or_addrspace); var params_scope = &fn_gz.base; const is_var_args = is_var_args: { @@ -3011,6 +3036,9 @@ fn fnDecl( const align_inst: Zir.Inst.Ref = if (fn_proto.ast.align_expr == 0) .none else inst: { break :inst try expr(&decl_gz, params_scope, align_rl, fn_proto.ast.align_expr); }; + const addrspace_inst: Zir.Inst.Ref = if (fn_proto.ast.addrspace_expr == 0) .none else inst: { + break :inst try expr(&decl_gz, params_scope, .{ .ty = .address_space_type }, fn_proto.ast.addrspace_expr); + }; const section_inst: Zir.Inst.Ref = if (fn_proto.ast.section_expr == 0) .none else inst: { break :inst try comptimeExpr(&decl_gz, params_scope, .{ .ty = .const_slice_u8_type }, fn_proto.ast.section_expr); }; @@ -3112,7 +3140,7 @@ fn fnDecl( _ = try decl_gz.addBreak(.break_inline, block_inst, func_inst); try decl_gz.setBlockBody(block_inst); - try wip_decls.payload.ensureUnusedCapacity(gpa, 9); + try wip_decls.payload.ensureUnusedCapacity(gpa, 10); { const contents_hash = std.zig.hashSrc(tree.getNodeSource(decl_node)); const casted = @bitCast([4]u32, contents_hash); @@ -3127,8 +3155,10 @@ fn fnDecl( if (align_inst != .none) { wip_decls.payload.appendAssumeCapacity(@enumToInt(align_inst)); } - if (section_inst != .none) { + + if (has_section_or_addrspace) { wip_decls.payload.appendAssumeCapacity(@enumToInt(section_inst)); + wip_decls.payload.appendAssumeCapacity(@enumToInt(addrspace_inst)); } } @@ -3175,10 +3205,14 @@ fn globalVarDecl( const align_inst: Zir.Inst.Ref = if (var_decl.ast.align_node == 0) .none else inst: { break :inst try expr(&block_scope, &block_scope.base, align_rl, var_decl.ast.align_node); }; + const addrspace_inst: Zir.Inst.Ref = if (var_decl.ast.addrspace_node == 0) .none else inst: { + break :inst try expr(&block_scope, &block_scope.base, .{ .ty = .address_space_type }, var_decl.ast.addrspace_node); + }; const section_inst: Zir.Inst.Ref = if (var_decl.ast.section_node == 0) .none else inst: { break :inst try comptimeExpr(&block_scope, &block_scope.base, .{ .ty = .const_slice_u8_type }, var_decl.ast.section_node); }; - try wip_decls.next(gpa, is_pub, is_export, align_inst != .none, section_inst != .none); + const has_section_or_addrspace = section_inst != .none or addrspace_inst != .none; + try wip_decls.next(gpa, is_pub, is_export, align_inst != .none, has_section_or_addrspace); const is_threadlocal = if (var_decl.threadlocal_token) |tok| blk: { if (!is_mutable) { @@ -3256,7 +3290,7 @@ fn globalVarDecl( _ = try block_scope.addBreak(.break_inline, block_inst, var_inst); try block_scope.setBlockBody(block_inst); - try wip_decls.payload.ensureUnusedCapacity(gpa, 9); + try wip_decls.payload.ensureUnusedCapacity(gpa, 10); { const contents_hash = std.zig.hashSrc(tree.getNodeSource(node)); const casted = @bitCast([4]u32, contents_hash); @@ -3271,8 +3305,9 @@ fn globalVarDecl( if (align_inst != .none) { 
wip_decls.payload.appendAssumeCapacity(@enumToInt(align_inst)); } - if (section_inst != .none) { + if (has_section_or_addrspace) { wip_decls.payload.appendAssumeCapacity(@enumToInt(section_inst)); + wip_decls.payload.appendAssumeCapacity(@enumToInt(addrspace_inst)); } } diff --git a/src/Module.zig b/src/Module.zig index add0562d93..500c34bcb0 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -288,6 +288,8 @@ pub const Decl = struct { align_val: Value, /// Populated when `has_tv`. linksection_val: Value, + /// Populated when `has_tv`. + @"addrspace": std.builtin.AddressSpace, /// The memory for ty, val, align_val, linksection_val. /// If this is `null` then there is no memory management needed. value_arena: ?*std.heap.ArenaAllocator.State = null, @@ -351,7 +353,7 @@ pub const Decl = struct { /// to require re-analysis. outdated, }, - /// Whether `typed_value`, `align_val`, and `linksection_val` are populated. + /// Whether `typed_value`, `align_val`, `linksection_val` and `addrspace` are populated. has_tv: bool, /// If `true` it means the `Decl` is the resource owner of the type/value associated /// with it. That means when `Decl` is destroyed, the cleanup code should additionally @@ -366,8 +368,8 @@ pub const Decl = struct { is_exported: bool, /// Whether the ZIR code provides an align instruction. has_align: bool, - /// Whether the ZIR code provides a linksection instruction. - has_linksection: bool, + /// Whether the ZIR code provides a linksection and address space instruction. + has_linksection_or_addrspace: bool, /// Flag used by garbage collection to mark and sweep. /// Decls which correspond to an AST node always have this field set to `true`. /// Anonymous Decls are initialized with this field set to `false` and then it @@ -489,14 +491,22 @@ pub const Decl = struct { if (!decl.has_align) return .none; assert(decl.zir_decl_index != 0); const zir = decl.namespace.file_scope.zir; - return @intToEnum(Zir.Inst.Ref, zir.extra[decl.zir_decl_index + 6]); + return @intToEnum(Zir.Inst.Ref, zir.extra[decl.zir_decl_index + 7]); } pub fn zirLinksectionRef(decl: Decl) Zir.Inst.Ref { - if (!decl.has_linksection) return .none; + if (!decl.has_linksection_or_addrspace) return .none; assert(decl.zir_decl_index != 0); const zir = decl.namespace.file_scope.zir; - const extra_index = decl.zir_decl_index + 6 + @boolToInt(decl.has_align); + const extra_index = decl.zir_decl_index + 7 + @boolToInt(decl.has_align); + return @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]); + } + + pub fn zirAddrspaceRef(decl: Decl) Zir.Inst.Ref { + if (!decl.has_linksection_or_addrspace) return .none; + assert(decl.zir_decl_index != 0); + const zir = decl.namespace.file_scope.zir; + const extra_index = decl.zir_decl_index + 7 + @boolToInt(decl.has_align) + 1; return @intToEnum(Zir.Inst.Ref, zir.extra[extra_index]); } @@ -3072,7 +3082,7 @@ pub fn semaFile(mod: *Module, file: *Scope.File) SemaError!void { new_decl.is_pub = true; new_decl.is_exported = false; new_decl.has_align = false; - new_decl.has_linksection = false; + new_decl.has_linksection_or_addrspace = false; new_decl.ty = struct_ty; new_decl.val = struct_val; new_decl.has_tv = true; @@ -3202,6 +3212,24 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool { if (linksection_ref == .none) break :blk Value.initTag(.null_value); break :blk (try sema.resolveInstConst(&block_scope, src, linksection_ref)).val; }; + const address_space = blk: { + const addrspace_ctx: Sema.AddressSpaceContext = switch (decl_tv.val.tag()) { + .function, .extern_fn => .function, + .variable => 
.variable, + else => .constant, + }; + + break :blk switch (decl.zirAddrspaceRef()) { + .none => switch (addrspace_ctx) { + .function => target_util.defaultAddressSpace(sema.mod.getTarget(), .function), + .variable => target_util.defaultAddressSpace(sema.mod.getTarget(), .global_mutable), + .constant => target_util.defaultAddressSpace(sema.mod.getTarget(), .global_constant), + else => unreachable, + }, + else => |addrspace_ref| try sema.analyzeAddrspace(&block_scope, src, addrspace_ref, addrspace_ctx), + }; + }; + // Note this resolves the type of the Decl, not the value; if this Decl // is a struct, for example, this resolves `type` (which needs no resolution), // not the struct itself. @@ -3258,6 +3286,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool { decl.val = try decl_tv.val.copy(&decl_arena.allocator); decl.align_val = try align_val.copy(&decl_arena.allocator); decl.linksection_val = try linksection_val.copy(&decl_arena.allocator); + decl.@"addrspace" = address_space; decl.has_tv = true; decl.owns_tv = owns_tv; decl_arena_state.* = decl_arena.state; @@ -3319,6 +3348,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool { decl.val = try decl_tv.val.copy(&decl_arena.allocator); decl.align_val = try align_val.copy(&decl_arena.allocator); decl.linksection_val = try linksection_val.copy(&decl_arena.allocator); + decl.@"addrspace" = address_space; decl.has_tv = true; decl_arena_state.* = decl_arena.state; decl.value_arena = decl_arena_state; @@ -3526,8 +3556,8 @@ pub fn scanNamespace( const decl_sub_index = extra_index; extra_index += 7; // src_hash(4) + line(1) + name(1) + value(1) - extra_index += @truncate(u1, flags >> 2); - extra_index += @truncate(u1, flags >> 3); + extra_index += @truncate(u1, flags >> 2); // Align + extra_index += @as(u2, @truncate(u1, flags >> 3)) * 2; // Link section or address space, consists of 2 Refs try scanDecl(&scan_decl_iter, decl_sub_index, flags); } @@ -3553,10 +3583,10 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!voi const zir = namespace.file_scope.zir; // zig fmt: off - const is_pub = (flags & 0b0001) != 0; - const export_bit = (flags & 0b0010) != 0; - const has_align = (flags & 0b0100) != 0; - const has_linksection = (flags & 0b1000) != 0; + const is_pub = (flags & 0b0001) != 0; + const export_bit = (flags & 0b0010) != 0; + const has_align = (flags & 0b0100) != 0; + const has_linksection_or_addrspace = (flags & 0b1000) != 0; // zig fmt: on const line = iter.parent_decl.relativeToLine(zir.extra[decl_sub_index + 4]); @@ -3639,7 +3669,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!voi new_decl.is_exported = is_exported; new_decl.is_usingnamespace = is_usingnamespace; new_decl.has_align = has_align; - new_decl.has_linksection = has_linksection; + new_decl.has_linksection_or_addrspace = has_linksection_or_addrspace; new_decl.zir_decl_index = @intCast(u32, decl_sub_index); new_decl.alive = true; // This Decl corresponds to an AST node and therefore always alive. 
return; @@ -3656,7 +3686,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!voi decl.is_exported = is_exported; decl.is_usingnamespace = is_usingnamespace; decl.has_align = has_align; - decl.has_linksection = has_linksection; + decl.has_linksection_or_addrspace = has_linksection_or_addrspace; decl.zir_decl_index = @intCast(u32, decl_sub_index); if (decl.getFunction()) |_| { switch (mod.comp.bin_file.tag) { @@ -4028,6 +4058,7 @@ pub fn allocateNewDecl(mod: *Module, namespace: *Scope.Namespace, src_node: Ast. .val = undefined, .align_val = undefined, .linksection_val = undefined, + .@"addrspace" = undefined, .analysis = .unreferenced, .deletion_flag = false, .zir_decl_index = 0, @@ -4052,7 +4083,7 @@ pub fn allocateNewDecl(mod: *Module, namespace: *Scope.Namespace, src_node: Ast. .generation = 0, .is_pub = false, .is_exported = false, - .has_linksection = false, + .has_linksection_or_addrspace = false, .has_align = false, .alive = false, .is_usingnamespace = false, @@ -4185,6 +4216,9 @@ pub fn createAnonymousDeclFromDeclNamed( new_decl.src_line = owner_decl.src_line; new_decl.ty = typed_value.ty; new_decl.val = typed_value.val; + new_decl.align_val = Value.initTag(.null_value); + new_decl.linksection_val = Value.initTag(.null_value); + new_decl.@"addrspace" = .generic; // default global addrspace new_decl.has_tv = true; new_decl.analysis = .complete; new_decl.generation = mod.generation; @@ -4330,10 +4364,59 @@ pub fn simplePtrType( elem_ty: Type, mutable: bool, size: std.builtin.TypeInfo.Pointer.Size, + @"addrspace": std.builtin.AddressSpace, ) Allocator.Error!Type { + return ptrType( + arena, + elem_ty, + null, + 0, + @"addrspace", + 0, + 0, + mutable, + false, + false, + size, + ); +} + +pub fn ptrType( + arena: *Allocator, + elem_ty: Type, + sentinel: ?Value, + @"align": u32, + @"addrspace": std.builtin.AddressSpace, + bit_offset: u16, + host_size: u16, + mutable: bool, + @"allowzero": bool, + @"volatile": bool, + size: std.builtin.TypeInfo.Pointer.Size, +) Allocator.Error!Type { + assert(host_size == 0 or bit_offset < host_size * 8); + + if (sentinel != null or @"align" != 0 or @"addrspace" != .generic or + bit_offset != 0 or host_size != 0 or @"allowzero" or @"volatile") + { + return Type.Tag.pointer.create(arena, .{ + .pointee_type = elem_ty, + .sentinel = sentinel, + .@"align" = @"align", + .@"addrspace" = @"addrspace", + .bit_offset = bit_offset, + .host_size = host_size, + .@"allowzero" = @"allowzero", + .mutable = mutable, + .@"volatile" = @"volatile", + .size = size, + }); + } + if (!mutable and size == .Slice and elem_ty.eql(Type.initTag(.u8))) { return Type.initTag(.const_slice_u8); } + // TODO stage1 type inference bug const T = Type.Tag; @@ -4352,34 +4435,6 @@ pub fn simplePtrType( return Type.initPayload(&type_payload.base); } -pub fn ptrType( - arena: *Allocator, - elem_ty: Type, - sentinel: ?Value, - @"align": u32, - bit_offset: u16, - host_size: u16, - mutable: bool, - @"allowzero": bool, - @"volatile": bool, - size: std.builtin.TypeInfo.Pointer.Size, -) Allocator.Error!Type { - assert(host_size == 0 or bit_offset < host_size * 8); - - // TODO check if type can be represented by simplePtrType - return Type.Tag.pointer.create(arena, .{ - .pointee_type = elem_ty, - .sentinel = sentinel, - .@"align" = @"align", - .bit_offset = bit_offset, - .host_size = host_size, - .@"allowzero" = @"allowzero", - .mutable = mutable, - .@"volatile" = @"volatile", - .size = size, - }); -} - pub fn optionalType(arena: *Allocator, child_type: Type) 
Allocator.Error!Type { switch (child_type.tag()) { .single_const_pointer => return Type.Tag.optional_single_const_pointer.create( @@ -4709,7 +4764,7 @@ pub fn populateTestFunctions(mod: *Module) !void { const builtin_file = (mod.importPkg(builtin_pkg) catch unreachable).file; const builtin_namespace = builtin_file.root_decl.?.namespace; const decl = builtin_namespace.decls.get("test_functions").?; - var buf: Type.Payload.ElemType = undefined; + var buf: Type.SlicePtrFieldTypeBuffer = undefined; const tmp_test_fn_ty = decl.ty.slicePtrFieldType(&buf).elemType(); const array_decl = d: { diff --git a/src/Sema.zig b/src/Sema.zig index 990aa4ddf0..ff32ce49ca 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1373,7 +1373,13 @@ fn zirRetPtr( return sema.analyzeComptimeAlloc(block, sema.fn_ret_ty); } - const ptr_type = try Module.simplePtrType(sema.arena, sema.fn_ret_ty, true, .One); + const ptr_type = try Module.simplePtrType( + sema.arena, + sema.fn_ret_ty, + true, + .One, + target_util.defaultAddressSpace(sema.mod.getTarget(), .local), + ); return block.addTy(.alloc, ptr_type); } @@ -1521,7 +1527,13 @@ fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; const var_decl_src = inst_data.src(); const var_type = try sema.resolveType(block, ty_src, inst_data.operand); - const ptr_type = try Module.simplePtrType(sema.arena, var_type, true, .One); + const ptr_type = try Module.simplePtrType( + sema.arena, + var_type, + true, + .One, + target_util.defaultAddressSpace(sema.mod.getTarget(), .local), + ); try sema.requireRuntimeBlock(block, var_decl_src); return block.addTy(.alloc, ptr_type); } @@ -1538,7 +1550,13 @@ fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr return sema.analyzeComptimeAlloc(block, var_type); } try sema.validateVarType(block, ty_src, var_type); - const ptr_type = try Module.simplePtrType(sema.arena, var_type, true, .One); + const ptr_type = try Module.simplePtrType( + sema.arena, + var_type, + true, + .One, + target_util.defaultAddressSpace(sema.mod.getTarget(), .local), + ); try sema.requireRuntimeBlock(block, var_decl_src); return block.addTy(.alloc, ptr_type); } @@ -1598,7 +1616,13 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde try sema.mod.declareDeclDependency(sema.owner_decl, decl); const final_elem_ty = try decl.ty.copy(sema.arena); - const final_ptr_ty = try Module.simplePtrType(sema.arena, final_elem_ty, true, .One); + const final_ptr_ty = try Module.simplePtrType( + sema.arena, + final_elem_ty, + true, + .One, + target_util.defaultAddressSpace(sema.mod.getTarget(), .local), + ); const final_ptr_ty_inst = try sema.addType(final_ptr_ty); sema.air_instructions.items(.data)[ptr_inst].ty_pl.ty = final_ptr_ty_inst; @@ -1620,7 +1644,13 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde try sema.validateVarType(block, ty_src, final_elem_ty); } // Change it to a normal alloc. 
- const final_ptr_ty = try Module.simplePtrType(sema.arena, final_elem_ty, true, .One); + const final_ptr_ty = try Module.simplePtrType( + sema.arena, + final_elem_ty, + true, + .One, + target_util.defaultAddressSpace(sema.mod.getTarget(), .local), + ); sema.air_instructions.set(ptr_inst, .{ .tag = .alloc, .data = .{ .ty = final_ptr_ty }, @@ -1774,7 +1804,14 @@ fn zirStoreToBlockPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Co } const ptr = sema.resolveInst(bin_inst.lhs); const value = sema.resolveInst(bin_inst.rhs); - const ptr_ty = try Module.simplePtrType(sema.arena, sema.typeOf(value), true, .One); + const ptr_ty = try Module.simplePtrType( + sema.arena, + sema.typeOf(value), + true, + .One, + // TODO figure out which address space is appropriate here + target_util.defaultAddressSpace(sema.mod.getTarget(), .local), + ); // TODO detect when this store should be done at compile-time. For example, // if expressions should force it when the condition is compile-time known. const src: LazySrcLoc = .unneeded; @@ -1821,7 +1858,14 @@ fn zirStoreToInferredPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) // for the inferred allocation. try inferred_alloc.data.stored_inst_list.append(sema.arena, operand); // Create a runtime bitcast instruction with exactly the type the pointer wants. - const ptr_ty = try Module.simplePtrType(sema.arena, operand_ty, true, .One); + const ptr_ty = try Module.simplePtrType( + sema.arena, + operand_ty, + true, + .One, + // TODO figure out which address space is appropriate here + target_util.defaultAddressSpace(sema.mod.getTarget(), .local), + ); const bitcasted_ptr = try block.addTyOp(.bitcast, ptr_ty, ptr); return sema.storePtr(block, src, bitcasted_ptr, operand); } @@ -3004,7 +3048,7 @@ fn analyzeCall( new_decl.is_pub = module_fn.owner_decl.is_pub; new_decl.is_exported = module_fn.owner_decl.is_exported; new_decl.has_align = module_fn.owner_decl.has_align; - new_decl.has_linksection = module_fn.owner_decl.has_linksection; + new_decl.has_linksection_or_addrspace = module_fn.owner_decl.has_linksection_or_addrspace; new_decl.zir_decl_index = module_fn.owner_decl.zir_decl_index; new_decl.alive = true; // This Decl is called at runtime. 
new_decl.has_tv = true; @@ -3658,7 +3702,13 @@ fn zirOptionalPayloadPtr( } const child_type = try opt_type.optionalChildAlloc(sema.arena); - const child_pointer = try Module.simplePtrType(sema.arena, child_type, !optional_ptr_ty.isConstPtr(), .One); + const child_pointer = try Module.simplePtrType( + sema.arena, + child_type, + !optional_ptr_ty.isConstPtr(), + .One, + optional_ptr_ty.ptrAddressSpace(), + ); if (try sema.resolveDefinedValue(block, src, optional_ptr)) |pointer_val| { if (try pointer_val.pointerDeref(sema.arena)) |val| { @@ -3773,7 +3823,13 @@ fn zirErrUnionPayloadPtr( return sema.mod.fail(&block.base, src, "expected error union type, found {}", .{operand_ty.elemType()}); const payload_ty = operand_ty.elemType().errorUnionPayload(); - const operand_pointer_ty = try Module.simplePtrType(sema.arena, payload_ty, !operand_ty.isConstPtr(), .One); + const operand_pointer_ty = try Module.simplePtrType( + sema.arena, + payload_ty, + !operand_ty.isConstPtr(), + .One, + operand_ty.ptrAddressSpace(), + ); if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| { if (try pointer_val.pointerDeref(sema.arena)) |val| { @@ -6879,6 +6935,7 @@ fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Comp elem_type, null, 0, + .generic, 0, 0, inst_data.is_mutable, @@ -6911,6 +6968,12 @@ fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErr break :blk try sema.resolveAlreadyCoercedInt(block, .unneeded, ref, u32); } else 0; + const address_space = if (inst_data.flags.has_addrspace) blk: { + const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]); + extra_i += 1; + break :blk try sema.analyzeAddrspace(block, .unneeded, ref, .pointer); + } else .generic; + const bit_start = if (inst_data.flags.has_bit_range) blk: { const ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_i]); extra_i += 1; @@ -6933,6 +6996,7 @@ fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErr elem_type, sentinel, abi_align, + address_space, bit_start, bit_end, inst_data.flags.is_mutable, @@ -8339,7 +8403,13 @@ fn panicWithMsg( const panic_fn = try sema.getBuiltin(block, src, "panic"); const unresolved_stack_trace_ty = try sema.getBuiltinType(block, src, "StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(block, src, unresolved_stack_trace_ty); - const ptr_stack_trace_ty = try Module.simplePtrType(arena, stack_trace_ty, true, .One); + const ptr_stack_trace_ty = try Module.simplePtrType( + arena, + stack_trace_ty, + true, + .One, + target_util.defaultAddressSpace(sema.mod.getTarget(), .global_constant), // TODO might need a place that is more dynamic + ); const null_stack_trace = try sema.addConstant( try Module.optionalType(arena, ptr_stack_trace_ty), Value.initTag(.null_value), @@ -8423,7 +8493,7 @@ fn fieldVal( .Pointer => switch (object_ty.ptrSize()) { .Slice => { if (mem.eql(u8, field_name, "ptr")) { - const buf = try arena.create(Type.Payload.ElemType); + const buf = try arena.create(Type.SlicePtrFieldTypeBuffer); const result_ty = object_ty.slicePtrFieldType(buf); if (try sema.resolveMaybeUndefVal(block, object_src, object)) |val| { if (val.isUndef()) return sema.addConstUndef(result_ty); @@ -8457,21 +8527,32 @@ fn fieldVal( } }, .One => { - const elem_ty = object_ty.elemType(); - if (elem_ty.zigTypeTag() == .Array) { - if (mem.eql(u8, field_name, "len")) { - return sema.addConstant( - Type.initTag(.comptime_int), - try Value.Tag.int_u64.create(arena, elem_ty.arrayLen()), - ); - } else { - return mod.fail( - 
&block.base, - field_name_src, - "no member named '{s}' in '{}'", - .{ field_name, object_ty }, - ); - } + const ptr_child = object_ty.elemType(); + switch (ptr_child.zigTypeTag()) { + .Array => { + if (mem.eql(u8, field_name, "len")) { + return sema.addConstant( + Type.initTag(.comptime_int), + try Value.Tag.int_u64.create(arena, ptr_child.arrayLen()), + ); + } else { + return mod.fail( + &block.base, + field_name_src, + "no member named '{s}' in '{}'", + .{ field_name, object_ty }, + ); + } + }, + .Struct => { + const struct_ptr_deref = try sema.analyzeLoad(block, src, object, object_src); + return sema.unionFieldVal(block, src, struct_ptr_deref, field_name, field_name_src, ptr_child); + }, + .Union => { + const union_ptr_deref = try sema.analyzeLoad(block, src, object, object_src); + return sema.unionFieldVal(block, src, union_ptr_deref, field_name, field_name_src, ptr_child); + }, + else => {}, } }, .Many, .C => {}, @@ -8595,9 +8676,8 @@ fn fieldPtr( ); } }, - .Pointer => { - const ptr_child = object_ty.elemType(); - if (ptr_child.isSlice()) { + .Pointer => switch (object_ty.ptrSize()) { + .Slice => { // Here for the ptr and len fields what we need to do is the situation // when a temporary has its address taken, e.g. `&a[c..d].len`. // This value may be known at compile-time or runtime. In the former @@ -8627,26 +8707,39 @@ fn fieldPtr( .{ field_name, object_ty }, ); } - } else switch (ptr_child.zigTypeTag()) { - .Array => { - if (mem.eql(u8, field_name, "len")) { - var anon_decl = try block.startAnonDecl(); - defer anon_decl.deinit(); - return sema.analyzeDeclRef(try anon_decl.finish( - Type.initTag(.comptime_int), - try Value.Tag.int_u64.create(anon_decl.arena(), ptr_child.arrayLen()), - )); - } else { - return mod.fail( - &block.base, - field_name_src, - "no member named '{s}' in '{}'", - .{ field_name, object_ty }, - ); - } - }, - else => {}, - } + }, + .One => { + const ptr_child = object_ty.elemType(); + switch (ptr_child.zigTypeTag()) { + .Array => { + if (mem.eql(u8, field_name, "len")) { + var anon_decl = try block.startAnonDecl(); + defer anon_decl.deinit(); + return sema.analyzeDeclRef(try anon_decl.finish( + Type.initTag(.comptime_int), + try Value.Tag.int_u64.create(anon_decl.arena(), ptr_child.arrayLen()), + )); + } else { + return mod.fail( + &block.base, + field_name_src, + "no member named '{s}' in '{}'", + .{ field_name, object_ty }, + ); + } + }, + .Struct => { + const struct_ptr_deref = try sema.analyzeLoad(block, src, object_ptr, object_ptr_src); + return sema.structFieldPtr(block, src, struct_ptr_deref, field_name, field_name_src, ptr_child); + }, + .Union => { + const union_ptr_deref = try sema.analyzeLoad(block, src, object_ptr, object_ptr_src); + return sema.unionFieldPtr(block, src, union_ptr_deref, field_name, field_name_src, ptr_child); + }, + else => {}, + } + }, + .Many, .C => {}, }, .Type => { _ = try sema.resolveConstValue(block, object_ptr_src, object_ptr); @@ -8788,13 +8881,20 @@ fn structFieldPtr( const arena = sema.arena; assert(unresolved_struct_ty.zigTypeTag() == .Struct); + const struct_ptr_ty = sema.typeOf(struct_ptr); const struct_ty = try sema.resolveTypeFields(block, src, unresolved_struct_ty); const struct_obj = struct_ty.castTag(.@"struct").?.data; const field_index = struct_obj.fields.getIndex(field_name) orelse return sema.failWithBadFieldAccess(block, struct_obj, field_name_src, field_name); const field = struct_obj.fields.values()[field_index]; - const ptr_field_ty = try Module.simplePtrType(arena, field.ty, true, .One); + const 
ptr_field_ty = try Module.simplePtrType( + arena, + field.ty, + struct_ptr_ty.ptrIsMutable(), + .One, + struct_ptr_ty.ptrAddressSpace(), + ); if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| { return sema.addConstant( @@ -8885,6 +8985,7 @@ fn unionFieldPtr( const arena = sema.arena; assert(unresolved_union_ty.zigTypeTag() == .Union); + const union_ptr_ty = sema.typeOf(union_ptr); const union_ty = try sema.resolveTypeFields(block, src, unresolved_union_ty); const union_obj = union_ty.cast(Type.Payload.Union).?.data; @@ -8892,7 +8993,13 @@ fn unionFieldPtr( return sema.failWithBadUnionFieldAccess(block, union_obj, field_name_src, field_name); const field = union_obj.fields.values()[field_index]; - const ptr_field_ty = try Module.simplePtrType(arena, field.ty, true, .One); + const ptr_field_ty = try Module.simplePtrType( + arena, + field.ty, + union_ptr_ty.ptrIsMutable(), + .One, + union_ptr_ty.ptrAddressSpace(), + ); if (try sema.resolveDefinedValue(block, src, union_ptr)) |union_ptr_val| { // TODO detect inactive union field and emit compile error @@ -9068,10 +9175,13 @@ fn elemPtrArray( ) CompileError!Air.Inst.Ref { const array_ptr_ty = sema.typeOf(array_ptr); const pointee_type = array_ptr_ty.elemType().elemType(); - const result_ty = if (array_ptr_ty.ptrIsMutable()) - try Type.Tag.single_mut_pointer.create(sema.arena, pointee_type) - else - try Type.Tag.single_const_pointer.create(sema.arena, pointee_type); + const result_ty = try Module.simplePtrType( + sema.arena, + pointee_type, + array_ptr_ty.ptrIsMutable(), + .One, + array_ptr_ty.ptrAddressSpace(), + ); if (try sema.resolveDefinedValue(block, src, array_ptr)) |array_ptr_val| { if (try sema.resolveDefinedValue(block, elem_index_src, elem_index)) |index_val| { @@ -9162,6 +9272,7 @@ fn coerce( const dest_is_mut = !dest_type.isConstPtr(); if (inst_ty.isConstPtr() and dest_is_mut) break :src_array_ptr; if (inst_ty.isVolatilePtr() and !dest_type.isVolatilePtr()) break :src_array_ptr; + if (inst_ty.ptrAddressSpace() != dest_type.ptrAddressSpace()) break :src_array_ptr; const dst_elem_type = dest_type.elemType(); switch (coerceInMemoryAllowed(dst_elem_type, array_elem_type, dest_is_mut)) { @@ -9297,6 +9408,10 @@ fn coerceInMemoryAllowed(dest_type: Type, src_type: Type, dest_is_mut: bool) InM return child; } + if (dest_info.@"addrspace" != src_info.@"addrspace") { + return .no_match; + } + const ok_sent = dest_info.sentinel == null or src_info.size == .C or (src_info.sentinel != null and dest_info.sentinel.?.eql(src_info.sentinel.?, dest_info.pointee_type)); @@ -9590,11 +9705,11 @@ fn analyzeDeclRef(sema: *Sema, decl: *Decl) CompileError!Air.Inst.Ref { const decl_tv = try decl.typedValue(); if (decl_tv.val.castTag(.variable)) |payload| { const variable = payload.data; - const ty = try Module.simplePtrType(sema.arena, decl_tv.ty, variable.is_mutable, .One); + const ty = try Module.simplePtrType(sema.arena, decl_tv.ty, variable.is_mutable, .One, decl.@"addrspace"); return sema.addConstant(ty, try Value.Tag.decl_ref.create(sema.arena, decl)); } return sema.addConstant( - try Module.simplePtrType(sema.arena, decl_tv.ty, false, .One), + try Module.simplePtrType(sema.arena, decl_tv.ty, false, .One, decl.@"addrspace"), try Value.Tag.decl_ref.create(sema.arena, decl), ); } @@ -9617,8 +9732,9 @@ fn analyzeRef( } try sema.requireRuntimeBlock(block, src); - const ptr_type = try Module.simplePtrType(sema.arena, operand_ty, false, .One); - const mut_ptr_type = try Module.simplePtrType(sema.arena, operand_ty, true, .One); + const 
address_space = target_util.defaultAddressSpace(sema.mod.getTarget(), .local); + const ptr_type = try Module.simplePtrType(sema.arena, operand_ty, false, .One, address_space); + const mut_ptr_type = try Module.simplePtrType(sema.arena, operand_ty, true, .One, address_space); const alloc = try block.addTy(.alloc, mut_ptr_type); try sema.storePtr(block, src, alloc, operand); @@ -9779,6 +9895,7 @@ fn analyzeSlice( return_elem_type, if (end_opt == .none) slice_sentinel else null, 0, // TODO alignment + if (ptr_child.zigTypeTag() == .Pointer) ptr_child.ptrAddressSpace() else .generic, 0, 0, !ptr_child.isConstPtr(), @@ -10286,6 +10403,7 @@ fn resolveTypeFields(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, ty: Type .atomic_order => return sema.resolveBuiltinTypeFields(block, src, "AtomicOrder"), .atomic_rmw_op => return sema.resolveBuiltinTypeFields(block, src, "AtomicRmwOp"), .calling_convention => return sema.resolveBuiltinTypeFields(block, src, "CallingConvention"), + .address_space => return sema.resolveBuiltinTypeFields(block, src, "AddressSpace"), .float_mode => return sema.resolveBuiltinTypeFields(block, src, "FloatMode"), .reduce_op => return sema.resolveBuiltinTypeFields(block, src, "ReduceOp"), .call_options => return sema.resolveBuiltinTypeFields(block, src, "CallOptions"), @@ -10680,6 +10798,7 @@ fn typeHasOnePossibleValue( .atomic_order, .atomic_rmw_op, .calling_convention, + .address_space, .float_mode, .reduce_op, .call_options, @@ -10865,6 +10984,7 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { .atomic_order => return .atomic_order_type, .atomic_rmw_op => return .atomic_rmw_op_type, .calling_convention => return .calling_convention_type, + .address_space => return .address_space_type, .float_mode => return .float_mode_type, .reduce_op => return .reduce_op_type, .call_options => return .call_options_type, @@ -10960,7 +11080,13 @@ fn analyzeComptimeAlloc( block: *Scope.Block, var_type: Type, ) CompileError!Air.Inst.Ref { - const ptr_type = try Module.simplePtrType(sema.arena, var_type, true, .One); + const ptr_type = try Module.simplePtrType( + sema.arena, + var_type, + true, + .One, + target_util.defaultAddressSpace(sema.mod.getTarget(), .global_constant), + ); var anon_decl = try block.startAnonDecl(); defer anon_decl.deinit(); @@ -10976,3 +11102,58 @@ fn analyzeComptimeAlloc( .decl = decl, })); } + +/// The places where a user can specify an address space attribute +pub const AddressSpaceContext = enum { + /// A function is specificed to be placed in a certain address space. + function, + + /// A (global) variable is specified to be placed in a certain address space. + /// In contrast to .constant, these values (and thus the address space they will be + /// placed in) are required to be mutable. + variable, + + /// A (global) constant value is specified to be placed in a certain address space. + /// In contrast to .variable, values placed in this address space are not required to be mutable. + constant, + + /// A pointer is ascripted to point into a certian address space. 
+ pointer, +}; + +pub fn analyzeAddrspace( + sema: *Sema, + block: *Scope.Block, + src: LazySrcLoc, + zir_ref: Zir.Inst.Ref, + ctx: AddressSpaceContext, +) !std.builtin.AddressSpace { + const addrspace_tv = try sema.resolveInstConst(block, src, zir_ref); + const address_space = addrspace_tv.val.toEnum(std.builtin.AddressSpace); + const target = sema.mod.getTarget(); + const arch = target.cpu.arch; + + const supported = switch (address_space) { + .generic => true, + .gs, .fs, .ss => (arch == .i386 or arch == .x86_64) and ctx == .pointer, + }; + + if (!supported) { + // TODO error messages could be made more elaborate here + const entity = switch (ctx) { + .function => "functions", + .variable => "mutable values", + .constant => "constant values", + .pointer => "pointers", + }; + + return sema.mod.fail( + &block.base, + src, + "{s} with address space '{s}' are not supported on {s}", + .{ entity, @tagName(address_space), arch.genericName() }, + ); + } + + return address_space; +} diff --git a/src/Zir.zig b/src/Zir.zig index f4c3e58eb0..7f752bcced 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -443,10 +443,10 @@ pub const Inst = struct { /// this instruction; a following 'ret' instruction will do the diversion. /// Uses the `str_tok` union field. ret_err_value_code, - /// Create a pointer type that does not have a sentinel, alignment, or bit range specified. + /// Create a pointer type that does not have a sentinel, alignment, address space, or bit range specified. /// Uses the `ptr_type_simple` union field. ptr_type_simple, - /// Create a pointer type which can have a sentinel, alignment, and/or bit range. + /// Create a pointer type which can have a sentinel, alignment, address space, and/or bit range. /// Uses the `ptr_type` union field. ptr_type, /// Slice operation `lhs[rhs..]`. No sentinel and no end offset. @@ -1672,6 +1672,7 @@ pub const Inst = struct { atomic_order_type, atomic_rmw_op_type, calling_convention_type, + address_space_type, float_mode_type, reduce_op_type, call_options_type, @@ -1928,6 +1929,10 @@ pub const Inst = struct { .ty = Type.initTag(.type), .val = Value.initTag(.calling_convention_type), }, + .address_space_type = .{ + .ty = Type.initTag(.type), + .val = Value.initTag(.address_space_type), + }, .float_mode_type = .{ .ty = Type.initTag(.type), .val = Value.initTag(.float_mode_type), @@ -2129,8 +2134,9 @@ pub const Inst = struct { is_volatile: bool, has_sentinel: bool, has_align: bool, + has_addrspace: bool, has_bit_range: bool, - _: u2 = undefined, + _: u1 = undefined, }, size: std.builtin.TypeInfo.Pointer.Size, /// Index into extra. See `PtrType`. @@ -2360,12 +2366,13 @@ pub const Inst = struct { else_body_len: u32, }; - /// Stored in extra. Depending on the flags in Data, there will be up to 4 + /// Stored in extra. Depending on the flags in Data, there will be up to 5 /// trailing Ref fields: /// 0. sentinel: Ref // if `has_sentinel` flag is set /// 1. align: Ref // if `has_align` flag is set - /// 2. bit_start: Ref // if `has_bit_range` flag is set - /// 3. bit_end: Ref // if `has_bit_range` flag is set + /// 2. address_space: Ref // if `has_addrspace` flag is set + /// 3. bit_start: Ref // if `has_bit_range` flag is set + /// 4. 
bit_end: Ref // if `has_bit_range` flag is set pub const PtrType = struct { elem_type: Ref, }; @@ -2483,7 +2490,7 @@ pub const Inst = struct { /// 0b000X: whether corresponding decl is pub /// 0b00X0: whether corresponding decl is exported /// 0b0X00: whether corresponding decl has an align expression - /// 0bX000: whether corresponding decl has a linksection expression + /// 0bX000: whether corresponding decl has a linksection or an address space expression /// 5. decl: { // for every decls_len /// src_hash: [4]u32, // hash of source bytes /// line: u32, // line number of decl, relative to parent @@ -2495,7 +2502,10 @@ pub const Inst = struct { /// this is a test decl, and the name starts at `name+1`. /// value: Index, /// align: Ref, // if corresponding bit is set - /// link_section: Ref, // if corresponding bit is set + /// link_section_or_address_space: { // if corresponding bit is set. + /// link_section: Ref, + /// address_space: Ref, + /// } /// } /// 6. inst: Index // for every body_len /// 7. flags: u32 // for every 8 fields @@ -2547,7 +2557,7 @@ pub const Inst = struct { /// 0b000X: whether corresponding decl is pub /// 0b00X0: whether corresponding decl is exported /// 0b0X00: whether corresponding decl has an align expression - /// 0bX000: whether corresponding decl has a linksection expression + /// 0bX000: whether corresponding decl has a linksection or an address space expression /// 6. decl: { // for every decls_len /// src_hash: [4]u32, // hash of source bytes /// line: u32, // line number of decl, relative to parent @@ -2559,7 +2569,10 @@ pub const Inst = struct { /// this is a test decl, and the name starts at `name+1`. /// value: Index, /// align: Ref, // if corresponding bit is set - /// link_section: Ref, // if corresponding bit is set + /// link_section_or_address_space: { // if corresponding bit is set. + /// link_section: Ref, + /// address_space: Ref, + /// } /// } /// 7. inst: Index // for every body_len /// 8. has_bits: u32 // for every 32 fields @@ -2592,7 +2605,7 @@ pub const Inst = struct { /// 0b000X: whether corresponding decl is pub /// 0b00X0: whether corresponding decl is exported /// 0b0X00: whether corresponding decl has an align expression - /// 0bX000: whether corresponding decl has a linksection expression + /// 0bX000: whether corresponding decl has a linksection or an address space expression /// 6. decl: { // for every decls_len /// src_hash: [4]u32, // hash of source bytes /// line: u32, // line number of decl, relative to parent @@ -2604,7 +2617,10 @@ pub const Inst = struct { /// this is a test decl, and the name starts at `name+1`. /// value: Index, /// align: Ref, // if corresponding bit is set - /// link_section: Ref, // if corresponding bit is set + /// link_section_or_address_space: { // if corresponding bit is set. + /// link_section: Ref, + /// address_space: Ref, + /// } /// } /// 7. inst: Index // for every body_len /// 8. has_bits: u32 // for every 8 fields @@ -2641,7 +2657,7 @@ pub const Inst = struct { /// 0b000X: whether corresponding decl is pub /// 0b00X0: whether corresponding decl is exported /// 0b0X00: whether corresponding decl has an align expression - /// 0bX000: whether corresponding decl has a linksection expression + /// 0bX000: whether corresponding decl has a linksection or an address space expression /// 1. 
decl: { // for every decls_len /// src_hash: [4]u32, // hash of source bytes /// line: u32, // line number of decl, relative to parent @@ -2653,7 +2669,10 @@ pub const Inst = struct { /// this is a test decl, and the name starts at `name+1`. /// value: Index, /// align: Ref, // if corresponding bit is set - /// link_section: Ref, // if corresponding bit is set + /// link_section_or_address_space: { // if corresponding bit is set. + /// link_section: Ref, + /// address_space: Ref, + /// } /// } pub const OpaqueDecl = struct { decls_len: u32, diff --git a/src/codegen.zig b/src/codegen.zig index 2a33795d0f..bf2166d797 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -4895,7 +4895,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { switch (typed_value.ty.zigTypeTag()) { .Pointer => switch (typed_value.ty.ptrSize()) { .Slice => { - var buf: Type.Payload.ElemType = undefined; + var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_type = typed_value.ty.slicePtrFieldType(&buf); const ptr_mcv = try self.genTypedValue(.{ .ty = ptr_type, .val = typed_value.val }); const slice_len = typed_value.val.sliceLen(); diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 6da791cb46..fb8211f6b8 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -251,7 +251,7 @@ pub const DeclGen = struct { try writer.writeByte('('); try dg.renderType(writer, t); try writer.writeAll("){"); - var buf: Type.Payload.ElemType = undefined; + var buf: Type.SlicePtrFieldTypeBuffer = undefined; try dg.renderValue(writer, t.slicePtrFieldType(&buf), val); try writer.writeAll(", "); try writer.print("{d}", .{val.sliceLen()}); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 2f703e2d68..29efa27685 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -558,7 +558,8 @@ pub const DeclGen = struct { llvm_params_len, .False, ); - const llvm_fn = self.llvmModule().addFunction(decl.name, fn_type); + const llvm_addrspace = self.llvmAddressSpace(decl.@"addrspace"); + const llvm_fn = self.llvmModule().addFunctionInAddressSpace(decl.name, fn_type, llvm_addrspace); const is_extern = decl.val.tag() == .extern_fn; if (!is_extern) { @@ -580,7 +581,24 @@ pub const DeclGen = struct { if (llvm_module.getNamedGlobal(decl.name)) |val| return val; // TODO: remove this redundant `llvmType`, it is also called in `genTypedValue`. 
const llvm_type = try self.llvmType(decl.ty); - return llvm_module.addGlobal(llvm_type, decl.name); + const llvm_addrspace = self.llvmAddressSpace(decl.@"addrspace"); + return llvm_module.addGlobalInAddressSpace(llvm_type, decl.name, llvm_addrspace); + } + + fn llvmAddressSpace(self: DeclGen, address_space: std.builtin.AddressSpace) c_uint { + const target = self.module.getTarget(); + return switch (target.cpu.arch) { + .i386, .x86_64 => switch (address_space) { + .generic => llvm.address_space.default, + .gs => llvm.address_space.x86.gs, + .fs => llvm.address_space.x86.fs, + .ss => llvm.address_space.x86.ss, + }, + else => switch (address_space) { + .generic => llvm.address_space.default, + else => unreachable, + }, + }; } fn llvmType(self: *DeclGen, t: Type) error{ OutOfMemory, CodegenFail }!*const llvm.Type { @@ -609,7 +627,7 @@ pub const DeclGen = struct { .Bool => return self.context.intType(1), .Pointer => { if (t.isSlice()) { - var buf: Type.Payload.ElemType = undefined; + var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_type = t.slicePtrFieldType(&buf); const fields: [2]*const llvm.Type = .{ @@ -619,7 +637,8 @@ pub const DeclGen = struct { return self.context.structType(&fields, fields.len, .False); } else { const elem_type = try self.llvmType(t.elemType()); - return elem_type.pointerType(0); + const llvm_addrspace = self.llvmAddressSpace(t.ptrAddressSpace()); + return elem_type.pointerType(llvm_addrspace); } }, .Array => { @@ -685,7 +704,9 @@ pub const DeclGen = struct { @intCast(c_uint, llvm_params.len), llvm.Bool.fromBool(is_var_args), ); - return llvm_fn_ty.pointerType(0); + // TODO make .Fn not both a pointer type and a prototype + const llvm_addrspace = self.llvmAddressSpace(.generic); + return llvm_fn_ty.pointerType(llvm_addrspace); }, .ComptimeInt => unreachable, .ComptimeFloat => unreachable, @@ -753,7 +774,7 @@ pub const DeclGen = struct { .Pointer => switch (tv.val.tag()) { .decl_ref => { if (tv.ty.isSlice()) { - var buf: Type.Payload.ElemType = undefined; + var buf: Type.SlicePtrFieldTypeBuffer = undefined; const ptr_ty = tv.ty.slicePtrFieldType(&buf); var slice_len: Value.Payload.U64 = .{ .base = .{ .tag = .int_u64 }, @@ -783,12 +804,13 @@ pub const DeclGen = struct { decl.alive = true; const val = try self.resolveGlobalDecl(decl); const llvm_var_type = try self.llvmType(tv.ty); - const llvm_type = llvm_var_type.pointerType(0); + const llvm_addrspace = self.llvmAddressSpace(decl.@"addrspace"); + const llvm_type = llvm_var_type.pointerType(llvm_addrspace); return val.constBitCast(llvm_type); }, .slice => { const slice = tv.val.castTag(.slice).?.data; - var buf: Type.Payload.ElemType = undefined; + var buf: Type.SlicePtrFieldTypeBuffer = undefined; const fields: [2]*const llvm.Value = .{ try self.genTypedValue(.{ .ty = tv.ty.slicePtrFieldType(&buf), diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig index 16445fa2d1..039232426b 100644 --- a/src/codegen/llvm/bindings.zig +++ b/src/codegen/llvm/bindings.zig @@ -197,6 +197,9 @@ pub const Module = opaque { pub const addFunction = LLVMAddFunction; extern fn LLVMAddFunction(*const Module, Name: [*:0]const u8, FunctionTy: *const Type) *const Value; + pub const addFunctionInAddressSpace = ZigLLVMAddFunctionInAddressSpace; + extern fn ZigLLVMAddFunctionInAddressSpace(*const Module, Name: [*:0]const u8, FunctionTy: *const Type, AddressSpace: c_uint) *const Value; + pub const getNamedFunction = LLVMGetNamedFunction; extern fn LLVMGetNamedFunction(*const Module, Name: [*:0]const u8) ?*const Value; 
@@ -209,6 +212,9 @@ pub const Module = opaque { pub const addGlobal = LLVMAddGlobal; extern fn LLVMAddGlobal(M: *const Module, Ty: *const Type, Name: [*:0]const u8) *const Value; + pub const addGlobalInAddressSpace = LLVMAddGlobalInAddressSpace; + extern fn LLVMAddGlobalInAddressSpace(M: *const Module, Ty: *const Type, Name: [*:0]const u8, AddressSpace: c_uint) *const Value; + pub const getNamedGlobal = LLVMGetNamedGlobal; extern fn LLVMGetNamedGlobal(M: *const Module, Name: [*:0]const u8) ?*const Value; @@ -1005,3 +1011,65 @@ pub const TypeKind = enum(c_int) { BFloat, X86_AMX, }; + +pub const address_space = struct { + pub const default: c_uint = 0; + + // See llvm/lib/Target/X86/X86.h + pub const x86_64 = x86; + pub const x86 = struct { + pub const gs: c_uint = 256; + pub const fs: c_uint = 257; + pub const ss: c_uint = 258; + + pub const ptr32_sptr: c_uint = 270; + pub const ptr32_uptr: c_uint = 271; + pub const ptr64: c_uint = 272; + }; + + // See llvm/lib/Target/AVR/AVR.h + pub const avr = struct { + pub const data_memory: c_uint = 0; + pub const program_memory: c_uint = 1; + }; + + // See llvm/lib/Target/NVPTX/NVPTX.h + pub const nvptx = struct { + pub const generic: c_uint = 0; + pub const global: c_uint = 1; + pub const constant: c_uint = 2; + pub const shared: c_uint = 3; + pub const param: c_uint = 4; + pub const local: c_uint = 5; + }; + + // See llvm/lib/Target/AMDGPU/AMDGPU.h + pub const amdgpu = struct { + pub const flat: c_uint = 0; + pub const global: c_uint = 1; + pub const region: c_uint = 2; + pub const local: c_uint = 3; + pub const constant: c_uint = 4; + pub const private: c_uint = 5; + pub const constant_32bit: c_uint = 6; + pub const buffer_fat_pointer: c_uint = 7; + pub const param_d: c_uint = 6; + pub const param_i: c_uint = 7; + pub const constant_buffer_0: c_uint = 8; + pub const constant_buffer_1: c_uint = 9; + pub const constant_buffer_2: c_uint = 10; + pub const constant_buffer_3: c_uint = 11; + pub const constant_buffer_4: c_uint = 12; + pub const constant_buffer_5: c_uint = 13; + pub const constant_buffer_6: c_uint = 14; + pub const constant_buffer_7: c_uint = 15; + pub const constant_buffer_8: c_uint = 16; + pub const constant_buffer_9: c_uint = 17; + pub const constant_buffer_10: c_uint = 18; + pub const constant_buffer_11: c_uint = 19; + pub const constant_buffer_12: c_uint = 20; + pub const constant_buffer_13: c_uint = 21; + pub const constant_buffer_14: c_uint = 22; + pub const constant_buffer_15: c_uint = 23; + }; +}; diff --git a/src/print_zir.zig b/src/print_zir.zig index 911cf05baf..94fa0307bd 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -1147,7 +1147,7 @@ const Writer = struct { cur_bit_bag >>= 1; const has_align = @truncate(u1, cur_bit_bag) != 0; cur_bit_bag >>= 1; - const has_section = @truncate(u1, cur_bit_bag) != 0; + const has_section_or_addrspace = @truncate(u1, cur_bit_bag) != 0; cur_bit_bag >>= 1; const sub_index = extra_index; @@ -1165,7 +1165,12 @@ const Writer = struct { extra_index += 1; break :inst inst; }; - const section_inst: Zir.Inst.Ref = if (!has_section) .none else inst: { + const section_inst: Zir.Inst.Ref = if (!has_section_or_addrspace) .none else inst: { + const inst = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]); + extra_index += 1; + break :inst inst; + }; + const addrspace_inst: Zir.Inst.Ref = if (!has_section_or_addrspace) .none else inst: { const inst = @intToEnum(Zir.Inst.Ref, self.code.extra[extra_index]); extra_index += 1; break :inst inst; @@ -1196,6 +1201,11 @@ const Writer = struct { try 
self.writeInstRef(stream, align_inst); try stream.writeAll(")"); } + if (addrspace_inst != .none) { + try stream.writeAll(" addrspace("); + try self.writeInstRef(stream, addrspace_inst); + try stream.writeAll(")"); + } if (section_inst != .none) { try stream.writeAll(" linksection("); try self.writeInstRef(stream, section_inst); diff --git a/src/stage1/all_types.hpp b/src/stage1/all_types.hpp index 4004199eb6..13c37fc839 100644 --- a/src/stage1/all_types.hpp +++ b/src/stage1/all_types.hpp @@ -86,6 +86,14 @@ enum CallingConvention { CallingConventionSysV }; +// Stage 1 supports only the generic address space +enum AddressSpace { + AddressSpaceGeneric, + AddressSpaceGS, + AddressSpaceFS, + AddressSpaceSS, +}; + // This one corresponds to the builtin.zig enum. enum BuiltinPtrSize { BuiltinPtrSizeOne, diff --git a/src/stage1/analyze.cpp b/src/stage1/analyze.cpp index 2eb609ef1a..320d8ff9b2 100644 --- a/src/stage1/analyze.cpp +++ b/src/stage1/analyze.cpp @@ -1019,6 +1019,16 @@ bool calling_convention_allows_zig_types(CallingConvention cc) { zig_unreachable(); } +const char *address_space_name(AddressSpace as) { + switch (as) { + case AddressSpaceGeneric: return "generic"; + case AddressSpaceGS: return "gs"; + case AddressSpaceFS: return "fs"; + case AddressSpaceSS: return "ss"; + } + zig_unreachable(); +} + ZigType *get_stack_trace_type(CodeGen *g) { if (g->stack_trace_type == nullptr) { g->stack_trace_type = get_builtin_type(g, "StackTrace"); diff --git a/src/stage1/analyze.hpp b/src/stage1/analyze.hpp index 8290ef572c..6d584ff361 100644 --- a/src/stage1/analyze.hpp +++ b/src/stage1/analyze.hpp @@ -242,6 +242,8 @@ Error get_primitive_type(CodeGen *g, Buf *name, ZigType **result); bool calling_convention_allows_zig_types(CallingConvention cc); const char *calling_convention_name(CallingConvention cc); +const char *address_space_name(AddressSpace as); + Error ATTRIBUTE_MUST_USE file_fetch(CodeGen *g, Buf *resolved_path, Buf *contents); void walk_function_params(CodeGen *g, ZigType *fn_type, FnWalk *fn_walk); diff --git a/src/stage1/ir.cpp b/src/stage1/ir.cpp index 0604c05c46..87dfee1bf2 100644 --- a/src/stage1/ir.cpp +++ b/src/stage1/ir.cpp @@ -16124,7 +16124,7 @@ static Stage1AirInst *ir_analyze_instruction_optional_unwrap_ptr(IrAnalyze *ira, static Stage1AirInst *ir_analyze_instruction_ctz(IrAnalyze *ira, Stage1ZirInstCtz *instruction) { Error err; - + ZigType *int_type = ir_resolve_int_type(ira, instruction->type->child); if (type_is_invalid(int_type)) return ira->codegen->invalid_inst_gen; @@ -16166,7 +16166,7 @@ static Stage1AirInst *ir_analyze_instruction_ctz(IrAnalyze *ira, Stage1ZirInstCt return ira->codegen->invalid_inst_gen; if (val->special == ConstValSpecialUndef) return ir_const_undef(ira, instruction->base.scope, instruction->base.source_node, ira->codegen->builtin_types.entry_num_lit_int); - + if (is_vector) { ZigType *smallest_vec_type = get_vector_type(ira->codegen, vector_len, smallest_type); Stage1AirInst *result = ir_const(ira, instruction->base.scope, instruction->base.source_node, smallest_vec_type); @@ -16200,7 +16200,7 @@ static Stage1AirInst *ir_analyze_instruction_ctz(IrAnalyze *ira, Stage1ZirInstCt static Stage1AirInst *ir_analyze_instruction_clz(IrAnalyze *ira, Stage1ZirInstClz *instruction) { Error err; - + ZigType *int_type = ir_resolve_int_type(ira, instruction->type->child); if (type_is_invalid(int_type)) return ira->codegen->invalid_inst_gen; @@ -16242,7 +16242,7 @@ static Stage1AirInst *ir_analyze_instruction_clz(IrAnalyze *ira, Stage1ZirInstCl return 
ira->codegen->invalid_inst_gen; if (val->special == ConstValSpecialUndef) return ir_const_undef(ira, instruction->base.scope, instruction->base.source_node, ira->codegen->builtin_types.entry_num_lit_int); - + if (is_vector) { ZigType *smallest_vec_type = get_vector_type(ira->codegen, vector_len, smallest_type); Stage1AirInst *result = ir_const(ira, instruction->base.scope, instruction->base.source_node, smallest_vec_type); @@ -16276,7 +16276,7 @@ static Stage1AirInst *ir_analyze_instruction_clz(IrAnalyze *ira, Stage1ZirInstCl static Stage1AirInst *ir_analyze_instruction_pop_count(IrAnalyze *ira, Stage1ZirInstPopCount *instruction) { Error err; - + ZigType *int_type = ir_resolve_int_type(ira, instruction->type->child); if (type_is_invalid(int_type)) return ira->codegen->invalid_inst_gen; @@ -16318,7 +16318,7 @@ static Stage1AirInst *ir_analyze_instruction_pop_count(IrAnalyze *ira, Stage1Zir return ira->codegen->invalid_inst_gen; if (val->special == ConstValSpecialUndef) return ir_const_undef(ira, instruction->base.scope, instruction->base.source_node, ira->codegen->builtin_types.entry_num_lit_int); - + if (is_vector) { ZigType *smallest_vec_type = get_vector_type(ira->codegen, vector_len, smallest_type); Stage1AirInst *result = ir_const(ira, instruction->base.scope, instruction->base.source_node, smallest_vec_type); @@ -17904,7 +17904,7 @@ static ZigValue *create_ptr_like_type_info(IrAnalyze *ira, Scope *scope, AstNode result->special = ConstValSpecialStatic; result->type = type_info_pointer_type; - ZigValue **fields = alloc_const_vals_ptrs(ira->codegen, 7); + ZigValue **fields = alloc_const_vals_ptrs(ira->codegen, 8); result->data.x_struct.fields = fields; // size: Size @@ -17939,24 +17939,29 @@ static ZigValue *create_ptr_like_type_info(IrAnalyze *ira, Scope *scope, AstNode lazy_align_of->base.id = LazyValueIdAlignOf; lazy_align_of->target_type = ir_const_type(ira, scope, source_node, attrs_type->data.pointer.child_type); } - // child: type - ensure_field_index(result->type, "child", 4); + // address_space: AddressSpace, + ensure_field_index(result->type, "address_space", 4); fields[4]->special = ConstValSpecialStatic; - fields[4]->type = ira->codegen->builtin_types.entry_type; - fields[4]->data.x_type = attrs_type->data.pointer.child_type; - // is_allowzero: bool - ensure_field_index(result->type, "is_allowzero", 5); + fields[4]->type = get_builtin_type(ira->codegen, "AddressSpace"); + bigint_init_unsigned(&fields[4]->data.x_enum_tag, AddressSpaceGeneric); + // child: type + ensure_field_index(result->type, "child", 5); fields[5]->special = ConstValSpecialStatic; - fields[5]->type = ira->codegen->builtin_types.entry_bool; - fields[5]->data.x_bool = attrs_type->data.pointer.allow_zero; - // sentinel: anytype - ensure_field_index(result->type, "sentinel", 6); + fields[5]->type = ira->codegen->builtin_types.entry_type; + fields[5]->data.x_type = attrs_type->data.pointer.child_type; + // is_allowzero: bool + ensure_field_index(result->type, "is_allowzero", 6); fields[6]->special = ConstValSpecialStatic; + fields[6]->type = ira->codegen->builtin_types.entry_bool; + fields[6]->data.x_bool = attrs_type->data.pointer.allow_zero; + // sentinel: anytype + ensure_field_index(result->type, "sentinel", 7); + fields[7]->special = ConstValSpecialStatic; if (attrs_type->data.pointer.sentinel != nullptr) { - fields[6]->type = get_optional_type(ira->codegen, attrs_type->data.pointer.child_type); - set_optional_payload(fields[6], attrs_type->data.pointer.sentinel); + fields[7]->type = 
get_optional_type(ira->codegen, attrs_type->data.pointer.child_type); + set_optional_payload(fields[7], attrs_type->data.pointer.sentinel); } else { - fields[6]->type = ira->codegen->builtin_types.entry_null; + fields[7]->type = ira->codegen->builtin_types.entry_null; } return result; @@ -18465,7 +18470,7 @@ static Error ir_make_type_info_value(IrAnalyze *ira, Scope *scope, AstNode *sour result->special = ConstValSpecialStatic; result->type = ir_type_info_get_type(ira, "Fn", nullptr); - ZigValue **fields = alloc_const_vals_ptrs(ira->codegen, 6); + ZigValue **fields = alloc_const_vals_ptrs(ira->codegen, 7); result->data.x_struct.fields = fields; // calling_convention: TypeInfo.CallingConvention @@ -18826,11 +18831,11 @@ static ZigType *type_info_to_type(IrAnalyze *ira, Scope *scope, AstNode *source_ assert(size_value->type == ir_type_info_get_type(ira, "Size", type_info_pointer_type)); BuiltinPtrSize size_enum_index = (BuiltinPtrSize)bigint_as_u32(&size_value->data.x_enum_tag); PtrLen ptr_len = size_enum_index_to_ptr_len(size_enum_index); - ZigType *elem_type = get_const_field_meta_type(ira, source_node, payload, "child", 4); + ZigType *elem_type = get_const_field_meta_type(ira, source_node, payload, "child", 5); if (type_is_invalid(elem_type)) return ira->codegen->invalid_inst_gen->value->type; ZigValue *sentinel; - if ((err = get_const_field_sentinel(ira, scope, source_node, payload, "sentinel", 6, + if ((err = get_const_field_sentinel(ira, scope, source_node, payload, "sentinel", 7, elem_type, &sentinel))) { return ira->codegen->invalid_inst_gen->value->type; @@ -18845,6 +18850,19 @@ static ZigType *type_info_to_type(IrAnalyze *ira, Scope *scope, AstNode *source_ if (alignment == nullptr) return ira->codegen->invalid_inst_gen->value->type; + ZigValue *as_value = get_const_field(ira, source_node, payload, "address_space", 4); + if (as_value == nullptr) + return ira->codegen->invalid_inst_gen->value->type; + assert(as_value->special == ConstValSpecialStatic); + assert(as_value->type == get_builtin_type(ira->codegen, "AddressSpace")); + AddressSpace as = (AddressSpace)bigint_as_u32(&as_value->data.x_enum_tag); + if (as != AddressSpaceGeneric) { + ir_add_error_node(ira, source_node, buf_sprintf( + "address space '%s' not available in stage 1 compiler, must be .generic", + address_space_name(as))); + return ira->codegen->invalid_inst_gen->value->type; + } + bool is_const; if ((err = get_const_field_bool(ira, source_node, payload, "is_const", 1, &is_const))) return ira->codegen->invalid_inst_gen->value->type; @@ -18857,13 +18875,12 @@ static ZigType *type_info_to_type(IrAnalyze *ira, Scope *scope, AstNode *source_ } bool is_allowzero; - if ((err = get_const_field_bool(ira, source_node, payload, "is_allowzero", 5, + if ((err = get_const_field_bool(ira, source_node, payload, "is_allowzero", 6, &is_allowzero))) { return ira->codegen->invalid_inst_gen->value->type; } - ZigType *ptr_type = get_pointer_to_type_extra2(ira->codegen, elem_type, is_const, diff --git a/src/target.zig b/src/target.zig index c9d7e1742b..09e65ff909 100644 --- a/src/target.zig +++ b/src/target.zig @@ -544,3 +544,21 @@ pub fn largestAtomicBits(target: std.Target) u32 { .x86_64 => 128, }; } + +pub fn defaultAddressSpace( + target: std.Target, + context: enum { + /// Query the default address space for global constant values. + global_constant, + /// Query the default address space for global mutable values. + global_mutable, + /// Query the default address space for function-local values. 
+ local, + /// Query the default address space for functions themselves. + function, + }, +) std.builtin.AddressSpace { + _ = target; + _ = context; + return .generic; +} diff --git a/src/translate_c/ast.zig b/src/translate_c/ast.zig index 93acd464f4..d0fe6d1b31 100644 --- a/src/translate_c/ast.zig +++ b/src/translate_c/ast.zig @@ -2614,6 +2614,7 @@ fn renderVar(c: *Context, node: Node) !NodeIndex { .type_node = type_node, .align_node = align_node, .section_node = section_node, + .addrspace_node = 0, }), .rhs = init_node, }, @@ -2705,6 +2706,7 @@ fn renderFunc(c: *Context, node: Node) !NodeIndex { .lhs = try c.addExtra(std.zig.Ast.Node.FnProtoOne{ .param = params.items[0], .align_expr = align_expr, + .addrspace_expr = 0, // TODO .section_expr = section_expr, .callconv_expr = callconv_expr, }), @@ -2720,6 +2722,7 @@ fn renderFunc(c: *Context, node: Node) !NodeIndex { .params_start = span.start, .params_end = span.end, .align_expr = align_expr, + .addrspace_expr = 0, // TODO .section_expr = section_expr, .callconv_expr = callconv_expr, }), diff --git a/src/type.zig b/src/type.zig index c2dc150347..db193639a7 100644 --- a/src/type.zig +++ b/src/type.zig @@ -127,6 +127,7 @@ pub const Type = extern union { .atomic_order, .atomic_rmw_op, .calling_convention, + .address_space, .float_mode, .reduce_op, => return .Enum, @@ -288,6 +289,7 @@ pub const Type = extern union { .pointee_type = Type.initTag(.comptime_int), .sentinel = null, .@"align" = 0, + .@"addrspace" = .generic, .bit_offset = 0, .host_size = 0, .@"allowzero" = false, @@ -299,6 +301,7 @@ pub const Type = extern union { .pointee_type = Type.initTag(.u8), .sentinel = null, .@"align" = 0, + .@"addrspace" = .generic, .bit_offset = 0, .host_size = 0, .@"allowzero" = false, @@ -310,6 +313,7 @@ pub const Type = extern union { .pointee_type = self.castPointer().?.data, .sentinel = null, .@"align" = 0, + .@"addrspace" = .generic, .bit_offset = 0, .host_size = 0, .@"allowzero" = false, @@ -321,6 +325,7 @@ pub const Type = extern union { .pointee_type = self.castPointer().?.data, .sentinel = null, .@"align" = 0, + .@"addrspace" = .generic, .bit_offset = 0, .host_size = 0, .@"allowzero" = false, @@ -332,6 +337,7 @@ pub const Type = extern union { .pointee_type = self.castPointer().?.data, .sentinel = null, .@"align" = 0, + .@"addrspace" = .generic, .bit_offset = 0, .host_size = 0, .@"allowzero" = false, @@ -343,6 +349,7 @@ pub const Type = extern union { .pointee_type = Type.initTag(.u8), .sentinel = null, .@"align" = 0, + .@"addrspace" = .generic, .bit_offset = 0, .host_size = 0, .@"allowzero" = false, @@ -354,6 +361,7 @@ pub const Type = extern union { .pointee_type = self.castPointer().?.data, .sentinel = null, .@"align" = 0, + .@"addrspace" = .generic, .bit_offset = 0, .host_size = 0, .@"allowzero" = false, @@ -365,6 +373,7 @@ pub const Type = extern union { .pointee_type = Type.initTag(.u8), .sentinel = null, .@"align" = 0, + .@"addrspace" = .generic, .bit_offset = 0, .host_size = 0, .@"allowzero" = false, @@ -376,6 +385,7 @@ pub const Type = extern union { .pointee_type = self.castPointer().?.data, .sentinel = null, .@"align" = 0, + .@"addrspace" = .generic, .bit_offset = 0, .host_size = 0, .@"allowzero" = false, @@ -387,6 +397,7 @@ pub const Type = extern union { .pointee_type = self.castPointer().?.data, .sentinel = null, .@"align" = 0, + .@"addrspace" = .generic, .bit_offset = 0, .host_size = 0, .@"allowzero" = false, @@ -398,6 +409,7 @@ pub const Type = extern union { .pointee_type = self.castPointer().?.data, .sentinel = null, .@"align" 
= 0, + .@"addrspace" = .generic, .bit_offset = 0, .host_size = 0, .@"allowzero" = false, @@ -409,6 +421,7 @@ pub const Type = extern union { .pointee_type = self.castPointer().?.data, .sentinel = null, .@"align" = 0, + .@"addrspace" = .generic, .bit_offset = 0, .host_size = 0, .@"allowzero" = false, @@ -461,6 +474,8 @@ pub const Type = extern union { return false; if (info_a.host_size != info_b.host_size) return false; + if (info_a.@"addrspace" != info_b.@"addrspace") + return false; const sentinel_a = info_a.sentinel; const sentinel_b = info_b.sentinel; @@ -746,6 +761,7 @@ pub const Type = extern union { .atomic_order, .atomic_rmw_op, .calling_convention, + .address_space, .float_mode, .reduce_op, .call_options, @@ -835,6 +851,7 @@ pub const Type = extern union { .pointee_type = try payload.pointee_type.copy(allocator), .sentinel = sent, .@"align" = payload.@"align", + .@"addrspace" = payload.@"addrspace", .bit_offset = payload.bit_offset, .host_size = payload.host_size, .@"allowzero" = payload.@"allowzero", @@ -958,6 +975,7 @@ pub const Type = extern union { .atomic_order => return writer.writeAll("std.builtin.AtomicOrder"), .atomic_rmw_op => return writer.writeAll("std.builtin.AtomicRmwOp"), .calling_convention => return writer.writeAll("std.builtin.CallingConvention"), + .address_space => return writer.writeAll("std.builtin.AddressSpace"), .float_mode => return writer.writeAll("std.builtin.FloatMode"), .reduce_op => return writer.writeAll("std.builtin.ReduceOp"), .call_options => return writer.writeAll("std.builtin.CallOptions"), @@ -1111,6 +1129,9 @@ pub const Type = extern union { } try writer.writeAll(") "); } + if (payload.@"addrspace" != .generic) { + try writer.print("addrspace(.{s}) ", .{@tagName(payload.@"addrspace")}); + } if (!payload.mutable) try writer.writeAll("const "); if (payload.@"volatile") try writer.writeAll("volatile "); if (payload.@"allowzero") try writer.writeAll("allowzero "); @@ -1186,6 +1207,7 @@ pub const Type = extern union { .atomic_order, .atomic_rmw_op, .calling_convention, + .address_space, .float_mode, .reduce_op, .call_options, @@ -1301,6 +1323,7 @@ pub const Type = extern union { .atomic_order => return Value.initTag(.atomic_order_type), .atomic_rmw_op => return Value.initTag(.atomic_rmw_op_type), .calling_convention => return Value.initTag(.calling_convention_type), + .address_space => return Value.initTag(.address_space_type), .float_mode => return Value.initTag(.float_mode_type), .reduce_op => return Value.initTag(.reduce_op_type), .call_options => return Value.initTag(.call_options_type), @@ -1362,6 +1385,7 @@ pub const Type = extern union { .atomic_order, .atomic_rmw_op, .calling_convention, + .address_space, .float_mode, .reduce_op, .call_options, @@ -1496,6 +1520,30 @@ pub const Type = extern union { } } + pub fn ptrAddressSpace(self: Type) std.builtin.AddressSpace { + return switch (self.tag()) { + .single_const_pointer_to_comptime_int, + .const_slice_u8, + .single_const_pointer, + .single_mut_pointer, + .many_const_pointer, + .many_mut_pointer, + .c_const_pointer, + .c_mut_pointer, + .const_slice, + .mut_slice, + .inferred_alloc_const, + .inferred_alloc_mut, + .manyptr_u8, + .manyptr_const_u8, + => .generic, + + .pointer => self.castTag(.pointer).?.data.@"addrspace", + + else => unreachable, + }; + } + /// Asserts that hasCodeGenBits() is true. 
pub fn abiAlignment(self: Type, target: Target) u32 { return switch (self.tag()) { @@ -1508,6 +1556,7 @@ pub const Type = extern union { .atomic_order, .atomic_rmw_op, .calling_convention, + .address_space, .float_mode, .reduce_op, .call_options, @@ -1734,6 +1783,7 @@ pub const Type = extern union { .atomic_order, .atomic_rmw_op, .calling_convention, + .address_space, .float_mode, .reduce_op, .call_options, @@ -2019,6 +2069,7 @@ pub const Type = extern union { .atomic_order, .atomic_rmw_op, .calling_convention, + .address_space, .float_mode, .reduce_op, .call_options, @@ -2105,42 +2156,82 @@ pub const Type = extern union { }; } - pub fn slicePtrFieldType(self: Type, buffer: *Payload.ElemType) Type { + pub const SlicePtrFieldTypeBuffer = union { + elem_type: Payload.ElemType, + pointer: Payload.Pointer, + }; + + pub fn slicePtrFieldType(self: Type, buffer: *SlicePtrFieldTypeBuffer) Type { switch (self.tag()) { .const_slice_u8 => return Type.initTag(.manyptr_const_u8), .const_slice => { const elem_type = self.castTag(.const_slice).?.data; buffer.* = .{ - .base = .{ .tag = .many_const_pointer }, - .data = elem_type, + .elem_type = .{ + .base = .{ .tag = .many_const_pointer }, + .data = elem_type, + }, }; - return Type.initPayload(&buffer.base); + return Type.initPayload(&buffer.elem_type.base); }, .mut_slice => { const elem_type = self.castTag(.mut_slice).?.data; buffer.* = .{ - .base = .{ .tag = .many_mut_pointer }, - .data = elem_type, + .elem_type = .{ + .base = .{ .tag = .many_mut_pointer }, + .data = elem_type, + }, }; - return Type.initPayload(&buffer.base); + return Type.initPayload(&buffer.elem_type.base); }, .pointer => { const payload = self.castTag(.pointer).?.data; assert(payload.size == .Slice); - if (payload.mutable) { + + if (payload.sentinel != null or + payload.@"align" != 0 or + payload.@"addrspace" != .generic or + payload.bit_offset != 0 or + payload.host_size != 0 or + payload.@"allowzero" or + payload.@"volatile") + { buffer.* = .{ - .base = .{ .tag = .many_mut_pointer }, - .data = payload.pointee_type, + .pointer = .{ + .data = .{ + .pointee_type = payload.pointee_type, + .sentinel = payload.sentinel, + .@"align" = payload.@"align", + .@"addrspace" = payload.@"addrspace", + .bit_offset = payload.bit_offset, + .host_size = payload.host_size, + .@"allowzero" = payload.@"allowzero", + .mutable = payload.mutable, + .@"volatile" = payload.@"volatile", + .size = .Many, + }, + }, }; + return Type.initPayload(&buffer.pointer.base); + } else if (payload.mutable) { + buffer.* = .{ + .elem_type = .{ + .base = .{ .tag = .many_mut_pointer }, + .data = payload.pointee_type, + }, + }; + return Type.initPayload(&buffer.elem_type.base); } else { buffer.* = .{ - .base = .{ .tag = .many_const_pointer }, - .data = payload.pointee_type, + .elem_type = .{ + .base = .{ .tag = .many_const_pointer }, + .data = payload.pointee_type, + }, }; + return Type.initPayload(&buffer.elem_type.base); } - return Type.initPayload(&buffer.base); }, else => unreachable, @@ -2793,6 +2884,7 @@ pub const Type = extern union { .atomic_order, .atomic_rmw_op, .calling_convention, + .address_space, .float_mode, .reduce_op, .call_options, @@ -3000,6 +3092,7 @@ pub const Type = extern union { .atomic_order, .atomic_rmw_op, .calling_convention, + .address_space, .float_mode, .reduce_op, .call_options, @@ -3024,6 +3117,7 @@ pub const Type = extern union { .atomic_order, .atomic_rmw_op, .calling_convention, + .address_space, .float_mode, .reduce_op, .call_options, @@ -3047,6 +3141,7 @@ pub const Type = extern union { 
.atomic_order, .atomic_rmw_op, .calling_convention, + .address_space, .float_mode, .reduce_op, .call_options, @@ -3100,6 +3195,7 @@ pub const Type = extern union { .atomic_order, .atomic_rmw_op, .calling_convention, + .address_space, .float_mode, .reduce_op, .call_options, @@ -3155,6 +3251,7 @@ pub const Type = extern union { .atomic_order, .atomic_rmw_op, .calling_convention, + .address_space, .float_mode, .reduce_op, .call_options, @@ -3192,6 +3289,7 @@ pub const Type = extern union { .atomic_order, .atomic_rmw_op, .calling_convention, + .address_space, .float_mode, .reduce_op, .call_options, @@ -3242,6 +3340,7 @@ pub const Type = extern union { .atomic_order, .atomic_rmw_op, .calling_convention, + .address_space, .float_mode, .reduce_op, .call_options, @@ -3302,6 +3401,7 @@ pub const Type = extern union { atomic_order, atomic_rmw_op, calling_convention, + address_space, float_mode, reduce_op, call_options, @@ -3425,6 +3525,7 @@ pub const Type = extern union { .atomic_order, .atomic_rmw_op, .calling_convention, + .address_space, .float_mode, .reduce_op, .call_options, @@ -3580,6 +3681,7 @@ pub const Type = extern union { sentinel: ?Value, /// If zero use pointee_type.AbiAlign() @"align": u32, + @"addrspace": std.builtin.AddressSpace, bit_offset: u16, host_size: u16, @"allowzero": bool, diff --git a/src/value.zig b/src/value.zig index 934aab7bc8..1075d2bb26 100644 --- a/src/value.zig +++ b/src/value.zig @@ -63,6 +63,7 @@ pub const Value = extern union { atomic_order_type, atomic_rmw_op_type, calling_convention_type, + address_space_type, float_mode_type, reduce_op_type, call_options_type, @@ -226,6 +227,7 @@ pub const Value = extern union { .atomic_order_type, .atomic_rmw_op_type, .calling_convention_type, + .address_space_type, .float_mode_type, .reduce_op_type, .call_options_type, @@ -412,6 +414,7 @@ pub const Value = extern union { .atomic_order_type, .atomic_rmw_op_type, .calling_convention_type, + .address_space_type, .float_mode_type, .reduce_op_type, .call_options_type, @@ -625,6 +628,7 @@ pub const Value = extern union { .atomic_order_type => return out_stream.writeAll("std.builtin.AtomicOrder"), .atomic_rmw_op_type => return out_stream.writeAll("std.builtin.AtomicRmwOp"), .calling_convention_type => return out_stream.writeAll("std.builtin.CallingConvention"), + .address_space_type => return out_stream.writeAll("std.builtin.AddressSpace"), .float_mode_type => return out_stream.writeAll("std.builtin.FloatMode"), .reduce_op_type => return out_stream.writeAll("std.builtin.ReduceOp"), .call_options_type => return out_stream.writeAll("std.builtin.CallOptions"), @@ -792,6 +796,7 @@ pub const Value = extern union { .atomic_order_type => Type.initTag(.atomic_order), .atomic_rmw_op_type => Type.initTag(.atomic_rmw_op), .calling_convention_type => Type.initTag(.calling_convention), + .address_space_type => Type.initTag(.address_space), .float_mode_type => Type.initTag(.float_mode), .reduce_op_type => Type.initTag(.reduce_op), .call_options_type => Type.initTag(.call_options), diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp index e1ab74f423..6e136161a6 100644 --- a/src/zig_llvm.cpp +++ b/src/zig_llvm.cpp @@ -416,6 +416,11 @@ ZIG_EXTERN_C LLVMTypeRef ZigLLVMTokenTypeInContext(LLVMContextRef context_ref) { return wrap(Type::getTokenTy(*unwrap(context_ref))); } +LLVMValueRef ZigLLVMAddFunctionInAddressSpace(LLVMModuleRef M, const char *Name, LLVMTypeRef FunctionTy, unsigned AddressSpace) { + Function* func = Function::Create(unwrap<FunctionType>(FunctionTy), GlobalValue::ExternalLinkage, 
AddressSpace, Name, unwrap(M)); + return wrap(func); +} + LLVMValueRef ZigLLVMBuildCall(LLVMBuilderRef B, LLVMValueRef Fn, LLVMValueRef *Args, unsigned NumArgs, ZigLLVM_CallingConv CC, ZigLLVM_CallAttr attr, const char *Name) { diff --git a/src/zig_llvm.h b/src/zig_llvm.h index be279d86e1..49a4c0e8fd 100644 --- a/src/zig_llvm.h +++ b/src/zig_llvm.h @@ -65,6 +65,9 @@ ZIG_EXTERN_C LLVMTargetMachineRef ZigLLVMCreateTargetMachine(LLVMTargetRef T, co ZIG_EXTERN_C LLVMTypeRef ZigLLVMTokenTypeInContext(LLVMContextRef context_ref); +ZIG_EXTERN_C LLVMValueRef ZigLLVMAddFunctionInAddressSpace(LLVMModuleRef M, const char *Name, + LLVMTypeRef FunctionTy, unsigned AddressSpace); + enum ZigLLVM_CallingConv { ZigLLVM_C = 0, ZigLLVM_Fast = 8, |
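
For orientation, the user-visible surface that the type.zig and print_zir.zig changes above render (an ` addrspace(...)` attribute on pointers, defaulting to `.generic`) looks roughly like the sketch below. This is a hedged example: the keyword placement follows the printing order added in type.zig, only the self-hosted pipeline in this tree threads the qualifier end to end, and the non-generic tags (`.gs`/`.fs`/`.ss`) are target-specific and rejected by stage1.

```zig
const std = @import("std");
const assert = std.debug.assert;

// Writing no qualifier keeps the default address space, .generic.
const Plain = *u8;
// Spelling it out explicitly; non-generic spaces are target-specific and
// not accepted by the stage1 compiler.
const Explicit = *addrspace(.generic) u8;

comptime {
    // std.builtin.TypeInfo.Pointer gained an `address_space` field
    // (index 4, just before `child`), so reflection observes it too.
    assert(@typeInfo(Plain).Pointer.address_space == .generic);
    assert(@typeInfo(Explicit).Pointer.address_space == .generic);
}
```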
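
Because `address_space` now sits at index 4 of `TypeInfo.Pointer` (pushing `child`, `is_allowzero` and `sentinel` down one slot), reifying a pointer with `@Type` has to supply the new field, and the `type_info_to_type` check above makes stage1 reject anything other than `.generic`. A sketch under those assumptions; the surrounding field types (`alignment`, the sentinel) follow the std.builtin definitions of this era and are not part of the diff.

```zig
// Reify `*u8` through TypeInfo; the stage1 check added in
// type_info_to_type errors out unless `address_space` is `.generic`.
const U8Ptr = @Type(.{
    .Pointer = .{
        .size = .One,
        .is_const = false,
        .is_volatile = false,
        .alignment = @alignOf(u8),
        .address_space = .generic, // new mandatory field at index 4
        .child = u8,
        .is_allowzero = false,
        .sentinel = null,
    },
});
```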
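
`Type.slicePtrFieldType` used to hand back a lightweight `ElemType` payload unconditionally; now that the many-pointer backing a slice can carry alignment, address space, sentinel or packed-pointer attributes, callers pass the wider `SlicePtrFieldTypeBuffer` union instead. A hypothetical call site follows (the wrapper function and the `@import` path are illustrative; the returned `Type` may point into the buffer, so the buffer must outlive it).

```zig
const std = @import("std");
const Type = @import("type.zig").Type; // src/type.zig in this tree

// Print the many-pointer type that backs a slice type (illustrative only;
// slicePtrFieldType asserts internally that `slice_ty` really is a slice).
fn dumpSlicePtrType(slice_ty: Type) void {
    var buf: Type.SlicePtrFieldTypeBuffer = undefined;
    const ptr_ty = slice_ty.slicePtrFieldType(&buf);
    // Use the result while `buf` is still alive; it may alias the buffer.
    std.debug.print("backing pointer type: {}\n", .{ptr_ty});
}
```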
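
The new `defaultAddressSpace` helper in src/target.zig is deliberately a stub for now: it discards both the target and the context and always answers `.generic`. A hypothetical call site, e.g. a backend choosing where to place a global (the wrapper and the import path are illustrative):

```zig
const std = @import("std");
const target_util = @import("target.zig"); // src/target.zig in this tree

// Pick the address space for a new global; today this is always .generic.
fn globalAddressSpace(target: std.Target, mutable: bool) std.builtin.AddressSpace {
    if (mutable) return target_util.defaultAddressSpace(target, .global_mutable);
    return target_util.defaultAddressSpace(target, .global_constant);
}
```

Keeping the query in one place means per-target defaults can later be filled in without touching call sites.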
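
On the LLVM side, bindings.zig gains `addGlobalInAddressSpace` plus per-target numbering tables under `address_space`, and zig_llvm.cpp/zig_llvm.h add `ZigLLVMAddFunctionInAddressSpace` so functions themselves can be created in a non-default space. A hedged sketch of how codegen might use the Zig-side binding (the wrapper, the symbol name and the import path are illustrative):

```zig
const llvm = @import("codegen/llvm/bindings.zig"); // relative to src/

// Create a global in the x86 GS segment address space, which LLVM numbers 256
// (see the address_space.x86 table added above).
fn addGsGlobal(module: *const llvm.Module, ty: *const llvm.Type) *const llvm.Value {
    return module.addGlobalInAddressSpace(ty, "example_gs_global", llvm.address_space.x86.gs);
}
```

Globals that stay in the default space presumably keep using plain `addGlobal`; the new entry point only needs to be taken when the resolved address space is non-zero.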
