From 319548c772330cd4e80319fe0c167a2c938025a8 Mon Sep 17 00:00:00 2001 From: Matthew Lugg Date: Thu, 19 Feb 2026 13:05:25 +0000 Subject: [PATCH] cbe: rework CType and other major refactors The goal of these changes is to allow the C backend to support the new lazier type resolution system implemented by the frontend. This required a full rewrite of the `CType` abstraction, and major changes to the C backend "linker". The `DebugConstPool` abstraction introduced in a previous commit turns out to be useful for the C backend to codegen types. Because this use case is not debug information but rather general linking (albeit when targeting an unusual object format), I have renamed the abstraction to `ConstPool`. With it, the C linker is told when a type's layout becomes known, and can at that point generate the corresponding C definitions, rather than deferring this work until `flush`. The work done in `flush` is now more-or-less *solely* focused on collecting all of the buffers into a big array for a vectored write. This does unfortunately involve a non-trivial graph traversal to emit type definitions in an appropriate order, but it's still quite fast in practice, and it operates on fairly compact dependency data. We don't generate the actual type *definitions* in `flush`; that happens during compilation using `ConstPool` as discussed above. (We do generate the typedefs for underaligned types in `flush`, but that's a trivial amount of work in most cases.) `CType` is now an ephemeral type: it is created only when we render a type (the logic for which has been pushed into just 2 or 3 functions in `codegen/c.zig`---most of the backend now operates on unmolested Zig `Type`s instead). C types are no longer stored in a "pool", although the type "dependencies" of generated C code (that is, the structs, unions, and typedefs which the generated code references) are tracked (in some simple hash sets) and given to the linker so it can codegen the types. 
--- lib/zig.h | 2 +- src/Compilation.zig | 3 - src/InternPool.zig | 2 +- src/Type.zig | 4 +- src/Value.zig | 8 +- src/Zcu.zig | 4 +- src/codegen/c.zig | 5297 +++++++---------- src/codegen/c/Type.zig | 3471 ----------- src/codegen/c/type.zig | 1013 ++++ src/codegen/c/type/render_defs.zig | 651 ++ src/codegen/llvm.zig | 32 +- src/link.zig | 4 +- src/link/C.zig | 1909 ++++-- .../{DebugConstPool.zig => ConstPool.zig} | 70 +- src/link/Dwarf.zig | 37 +- src/link/Elf.zig | 11 - 16 files changed, 5312 insertions(+), 7206 deletions(-) delete mode 100644 src/codegen/c/Type.zig create mode 100644 src/codegen/c/type.zig create mode 100644 src/codegen/c/type/render_defs.zig rename src/link/{DebugConstPool.zig => ConstPool.zig} (83%) diff --git a/lib/zig.h b/lib/zig.h index 81a815ab55..f8744966c6 100644 --- a/lib/zig.h +++ b/lib/zig.h @@ -259,7 +259,7 @@ #endif #if zig_has_attribute(packed) || defined(zig_tinyc) -#define zig_packed(definition) __attribute__((packed)) definition +#define zig_packed(definition) definition __attribute__((packed)) #elif defined(zig_msvc) #define zig_packed(definition) __pragma(pack(1)) definition __pragma(pack()) #else diff --git a/src/Compilation.zig b/src/Compilation.zig index 9b0cde4089..c51bed14fb 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -3382,9 +3382,6 @@ fn flush(comp: *Compilation, arena: Allocator, tid: Zcu.PerThread.Id) (Io.Cancel error.OutOfMemory, error.Canceled => |e| return e, }; } - if (comp.zcu) |zcu| { - try link.File.C.flushEmitH(zcu); - } } /// This function is called by the frontend before flush(). It communicates that diff --git a/src/InternPool.zig b/src/InternPool.zig index 388d0a04f7..ffabc7371b 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -3403,7 +3403,7 @@ pub const LoadedStructType = struct { /// Iterates over non-comptime fields in the order they are laid out in memory at runtime. /// May or may not include zero-bit fields. /// Asserts the struct is not packed. 
- pub fn iterateRuntimeOrder(s: *const LoadedStructType, ip: *InternPool) RuntimeOrderIterator { + pub fn iterateRuntimeOrder(s: *const LoadedStructType, ip: *const InternPool) RuntimeOrderIterator { switch (s.layout) { .auto => { const ro = std.mem.sliceTo(s.field_runtime_order.get(ip), .omitted); diff --git a/src/Type.zig b/src/Type.zig index 0fe87e983b..1eab734882 100644 --- a/src/Type.zig +++ b/src/Type.zig @@ -789,7 +789,7 @@ pub fn hasWellDefinedLayout(ty: Type, zcu: *const Zcu) bool { /// Determines whether a function type has runtime bits, i.e. whether a /// function with this type can exist at runtime. /// Asserts that `ty` is a function type. -pub fn fnHasRuntimeBits(fn_ty: Type, zcu: *Zcu) bool { +pub fn fnHasRuntimeBits(fn_ty: Type, zcu: *const Zcu) bool { assertHasLayout(fn_ty, zcu); const fn_info = zcu.typeToFunc(fn_ty).?; if (fn_info.comptime_bits != 0) return false; @@ -830,7 +830,7 @@ pub fn fnHasRuntimeBits(fn_ty: Type, zcu: *Zcu) bool { } /// Like `hasRuntimeBits`, but also returns `true` for runtime functions. -pub fn isRuntimeFnOrHasRuntimeBits(ty: Type, zcu: *Zcu) bool { +pub fn isRuntimeFnOrHasRuntimeBits(ty: Type, zcu: *const Zcu) bool { switch (ty.zigTypeTag(zcu)) { .@"fn" => return ty.fnHasRuntimeBits(zcu), else => return ty.hasRuntimeBits(zcu), diff --git a/src/Value.zig b/src/Value.zig index cf8d0acc67..6a474c2821 100644 --- a/src/Value.zig +++ b/src/Value.zig @@ -151,7 +151,7 @@ pub fn intFromEnum(val: Value, zcu: *const Zcu) Value { } /// Asserts that `val` is an integer. -pub fn toBigInt(val: Value, space: *BigIntSpace, zcu: *Zcu) BigIntConst { +pub fn toBigInt(val: Value, space: *BigIntSpace, zcu: *const Zcu) BigIntConst { if (val.getUnsignedInt(zcu)) |x| { return BigIntMutable.init(&space.limbs, x).toConst(); } @@ -669,7 +669,7 @@ pub fn floatCast(val: Value, dest_ty: Type, pt: Zcu.PerThread) !Value { } /// Asserts the value is comparable. Supports comparisons between heterogeneous types. 
-pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, zcu: *Zcu) bool { +pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, zcu: *const Zcu) bool { if (lhs.pointerNav(zcu)) |lhs_nav| { if (rhs.pointerNav(zcu)) |rhs_nav| { switch (op) { @@ -695,7 +695,7 @@ pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, zcu: return order(lhs, rhs, zcu).compare(op); } -pub fn order(lhs: Value, rhs: Value, zcu: *Zcu) std.math.Order { +pub fn order(lhs: Value, rhs: Value, zcu: *const Zcu) std.math.Order { if (lhs.isFloat(zcu) or rhs.isFloat(zcu)) { const lhs_f128 = lhs.toFloat(f128, zcu); const rhs_f128 = rhs.toFloat(f128, zcu); @@ -805,7 +805,7 @@ pub fn canMutateComptimeVarState(val: Value, zcu: *Zcu) bool { /// Gets the `Nav` referenced by this pointer. If the pointer does not point /// to a `Nav`, or if it points to some part of one (like a field or element), /// returns null. -pub fn pointerNav(val: Value, zcu: *Zcu) ?InternPool.Nav.Index { +pub fn pointerNav(val: Value, zcu: *const Zcu) ?InternPool.Nav.Index { return switch (zcu.intern_pool.indexToKey(val.toIntern())) { // TODO: these 3 cases are weird; these aren't pointer values! .variable => |v| v.owner_nav, diff --git a/src/Zcu.zig b/src/Zcu.zig index 481ea0d62a..faa535da5d 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -4113,13 +4113,13 @@ pub const ResolvedReference = struct { /// If an `AnalUnit` is not in the returned map, it is unreferenced. /// The returned hashmap is owned by the `Zcu`, so should not be freed by the caller. /// This hashmap is cached, so repeated calls to this function are cheap. 
-pub fn resolveReferences(zcu: *Zcu) !*const std.AutoArrayHashMapUnmanaged(AnalUnit, ?ResolvedReference) { +pub fn resolveReferences(zcu: *Zcu) Allocator.Error!*const std.AutoArrayHashMapUnmanaged(AnalUnit, ?ResolvedReference) { if (zcu.resolved_references == null) { zcu.resolved_references = try zcu.resolveReferencesInner(); } return &zcu.resolved_references.?; } -fn resolveReferencesInner(zcu: *Zcu) !std.AutoArrayHashMapUnmanaged(AnalUnit, ?ResolvedReference) { +fn resolveReferencesInner(zcu: *Zcu) Allocator.Error!std.AutoArrayHashMapUnmanaged(AnalUnit, ?ResolvedReference) { const gpa = zcu.gpa; const comp = zcu.comp; const ip = &zcu.intern_pool; diff --git a/src/codegen/c.zig b/src/codegen/c.zig index c8904405e2..b31b0a40da 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -50,32 +50,39 @@ pub fn legalizeFeatures(_: *const std.Target) ?*const Air.Legalize.Features { /// * The types used, so declarations can be emitted in `flush` /// * The lazy functions used, so definitions can be emitted in `flush` pub const Mir = struct { + // These remaining fields are essentially just an owned version of `link.C.AvBlock`. + fwd_decl: []u8, + code_header: []u8, + code: []u8, /// This map contains all the UAVs we saw generating this function. /// `link.C` will merge them into its `uavs`/`aligned_uavs` fields. /// Key is the value of the UAV; value is the UAV's alignment, or /// `.none` for natural alignment. The specified alignment is never /// less than the natural alignment. - uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, Alignment), - // These remaining fields are essentially just an owned version of `link.C.AvBlock`. - code_header: []u8, - code: []u8, - fwd_decl: []u8, - ctype_pool: CType.Pool, - lazy_fns: LazyFnMap, + need_uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, Alignment), + ctype_deps: CType.Dependencies, + /// Key is an enum type for which we need a generated `@tagName` function. 
+ need_tag_name_funcs: std.AutoArrayHashMapUnmanaged(InternPool.Index, void), + /// Key is a function Nav for which we need a generated `zig_never_tail` wrapper. + need_never_tail_funcs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, void), + /// Key is a function Nav for which we need a generated `zig_never_inline` wrapper. + need_never_inline_funcs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, void), pub fn deinit(mir: *Mir, gpa: Allocator) void { - mir.uavs.deinit(gpa); + gpa.free(mir.fwd_decl); gpa.free(mir.code_header); gpa.free(mir.code); - gpa.free(mir.fwd_decl); - mir.ctype_pool.deinit(gpa); - mir.lazy_fns.deinit(gpa); + mir.need_uavs.deinit(gpa); + mir.ctype_deps.deinit(gpa); + mir.need_tag_name_funcs.deinit(gpa); + mir.need_never_tail_funcs.deinit(gpa); + mir.need_never_inline_funcs.deinit(gpa); } }; -pub const Error = Writer.Error || std.mem.Allocator.Error || error{AnalysisFail}; +pub const Error = Writer.Error || Allocator.Error || error{AnalysisFail}; -pub const CType = @import("c/Type.zig"); +pub const CType = @import("c/type.zig").CType; pub const CValue = union(enum) { none: void, @@ -87,8 +94,6 @@ pub const CValue = union(enum) { constant: Value, /// Index into the parameters arg: usize, - /// The array field of a parameter - arg_array: usize, /// Index into a tuple's fields field: usize, /// By-value @@ -100,8 +105,6 @@ pub const CValue = union(enum) { identifier: []const u8, /// Rendered as "payload." 
followed by as identifier (using fmtIdent) payload_identifier: []const u8, - /// Rendered with fmtCTypePoolString - ctype_pool_string: CType.Pool.String, fn eql(lhs: CValue, rhs: CValue) bool { return switch (lhs) { @@ -122,10 +125,6 @@ pub const CValue = union(enum) { .arg => |rhs_arg_index| lhs_arg_index == rhs_arg_index, else => false, }, - .arg_array => |lhs_arg_index| switch (rhs) { - .arg_array => |rhs_arg_index| lhs_arg_index == rhs_arg_index, - else => false, - }, .field => |lhs_field_index| switch (rhs) { .field => |rhs_field_index| lhs_field_index == rhs_field_index, else => false, @@ -150,10 +149,6 @@ pub const CValue = union(enum) { .payload_identifier => |rhs_id| std.mem.eql(u8, lhs_id, rhs_id), else => false, }, - .ctype_pool_string => |lhs_str| switch (rhs) { - .ctype_pool_string => |rhs_str| lhs_str.index == rhs_str.index, - else => false, - }, }; } }; @@ -163,53 +158,24 @@ const BlockData = struct { result: CValue, }; -pub const CValueMap = std.AutoHashMap(Air.Inst.Ref, CValue); - -pub const LazyFnKey = union(enum) { - tag_name: InternPool.Index, - never_tail: InternPool.Nav.Index, - never_inline: InternPool.Nav.Index, -}; -pub const LazyFnValue = struct { - fn_name: CType.Pool.String, -}; -pub const LazyFnMap = std.AutoArrayHashMapUnmanaged(LazyFnKey, LazyFnValue); - -const Local = struct { - ctype: CType, - flags: packed struct(u32) { - alignas: CType.AlignAs, - _: u20 = undefined, - }, - - fn getType(local: Local) LocalType { - return .{ .ctype = local.ctype, .alignas = local.flags.alignas }; - } +const LocalType = struct { + type: Type, + alignment: Alignment, }; const LocalIndex = u16; -const LocalType = struct { ctype: CType, alignas: CType.AlignAs }; const LocalsList = std.AutoArrayHashMapUnmanaged(LocalIndex, void); const LocalsMap = std.AutoArrayHashMapUnmanaged(LocalType, LocalsList); const ValueRenderLocation = enum { - FunctionArgument, - Initializer, - StaticInitializer, - Other, + initializer, + static_initializer, + other, fn 
isInitializer(loc: ValueRenderLocation) bool { return switch (loc) { - .Initializer, .StaticInitializer => true, - else => false, - }; - } - - fn toCTypeKind(loc: ValueRenderLocation) CType.Kind { - return switch (loc) { - .FunctionArgument => .parameter, - .Initializer, .Other => .complete, - .StaticInitializer => .global, + .initializer, .static_initializer => true, + .other => false, }; } }; @@ -334,16 +300,31 @@ const reserved_idents = std.StaticStringMap(void).initComptime(.{ }); fn isReservedIdent(ident: []const u8) bool { - if (ident.len >= 2 and ident[0] == '_') { // C language + // C language + if (ident.len >= 2 and ident[0] == '_') { switch (ident[1]) { 'A'...'Z', '_' => return true, - else => return false, + else => {}, } - } else if (mem.startsWith(u8, ident, "DUMMYSTRUCTNAME") or + } + + // windows.h + if (mem.startsWith(u8, ident, "DUMMYSTRUCTNAME") or mem.startsWith(u8, ident, "DUMMYUNIONNAME")) - { // windows.h + { return true; - } else return reserved_idents.has(ident); + } + + // CType + if (mem.startsWith(u8, ident, "enum__") or + mem.startsWith(u8, ident, "bitpack__") or + mem.startsWith(u8, ident, "aligned__") or + mem.startsWith(u8, ident, "fn__")) + { + return true; + } + + return reserved_idents.has(ident); } fn formatIdentSolo(ident: []const u8, w: *Writer) Writer.Error!void { @@ -361,7 +342,7 @@ fn formatIdentOptions(ident: []const u8, w: *Writer, solo: bool) Writer.Error!vo for (ident, 0..) |c, i| { switch (c) { 'a'...'z', 'A'...'Z', '_' => try w.writeByte(c), - '.' 
=> try w.writeByte('_'), + '.', ' ' => try w.writeByte('_'), '0'...'9' => if (i == 0) { try w.print("_{x:2}", .{c}); } else { @@ -380,29 +361,6 @@ pub fn fmtIdentUnsolo(ident: []const u8) std.fmt.Alt([]const u8, formatIdentUnso return .{ .data = ident }; } -const CTypePoolStringFormatData = struct { - ctype_pool_string: CType.Pool.String, - ctype_pool: *const CType.Pool, - solo: bool, -}; -fn formatCTypePoolString(data: CTypePoolStringFormatData, w: *Writer) Writer.Error!void { - if (data.ctype_pool_string.toSlice(data.ctype_pool)) |slice| - try formatIdentOptions(slice, w, data.solo) - else - try w.print("{f}", .{data.ctype_pool_string.fmt(data.ctype_pool)}); -} -pub fn fmtCTypePoolString( - ctype_pool_string: CType.Pool.String, - ctype_pool: *const CType.Pool, - solo: bool, -) std.fmt.Alt(CTypePoolStringFormatData, formatCTypePoolString) { - return .{ .data = .{ - .ctype_pool_string = ctype_pool_string, - .ctype_pool = ctype_pool, - .solo = solo, - } }; -} - // Returns true if `formatIdent` would make any edits to ident. // This must be kept in sync with `formatIdent`. pub fn isMangledIdent(ident: []const u8, solo: bool) bool { @@ -417,21 +375,26 @@ pub fn isMangledIdent(ident: []const u8, solo: bool) bool { return false; } -/// This data is available when outputting .c code for a `InternPool.Index` -/// that corresponds to `func`. -/// It is not available when generating .h file. +/// This data is available when rendering C source code for an interned function. pub const Function = struct { air: Air, liveness: Air.Liveness, - value_map: CValueMap, + value_map: std.AutoHashMap(Air.Inst.Ref, CValue), blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .empty, next_arg_index: u32 = 0, next_block_index: u32 = 0, - object: Object, - lazy_fns: LazyFnMap, + dg: DeclGen, + code: Writer.Allocating, + indent_counter: usize, + /// Key is an enum type for which we need a generated `@tagName` function. 
+ need_tag_name_funcs: std.AutoArrayHashMapUnmanaged(InternPool.Index, void), + /// Key is a function Nav for which we need a generated `zig_never_tail` wrapper. + need_never_tail_funcs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, void), + /// Key is a function Nav for which we need a generated `zig_never_inline` wrapper. + need_never_inline_funcs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, void), func_index: InternPool.Index, /// All the locals, to be emitted at the top of the function. - locals: std.ArrayList(Local) = .empty, + locals: std.ArrayList(LocalType) = .empty, /// Which locals are available for reuse, based on Type. free_locals_map: LocalsMap = .{}, /// Locals which will not be freed by Liveness. This is used after a @@ -445,37 +408,41 @@ pub const Function = struct { /// for the switch cond. Dispatches should set this local to the new cond. loop_switch_conds: std.AutoHashMapUnmanaged(Air.Inst.Index, LocalIndex) = .empty, + const indent_width = 1; + const indent_char = ' '; + + fn newline(f: *Function) !void { + const w = &f.code.writer; + try w.writeByte('\n'); + try w.splatByteAll(indent_char, f.indent_counter); + } + fn indent(f: *Function) void { + f.indent_counter += indent_width; + } + fn outdent(f: *Function) !void { + f.indent_counter -= indent_width; + const written = f.code.written(); + switch (written[written.len - 1]) { + indent_char => f.code.shrinkRetainingCapacity(written.len - indent_width), + '\n' => try f.code.writer.splatByteAll(indent_char, f.indent_counter), + else => { + std.debug.print("\"{f}\"\n", .{std.zig.fmtString(written[written.len -| 100..])}); + unreachable; + }, + } + } + fn resolveInst(f: *Function, ref: Air.Inst.Ref) !CValue { const gop = try f.value_map.getOrPut(ref); - if (gop.found_existing) return gop.value_ptr.*; - - const pt = f.object.dg.pt; - const zcu = pt.zcu; - const val = (try f.air.value(ref, pt)).?; - const ty = f.typeOf(ref); - - const result: CValue = if (lowersToArray(ty, zcu)) result: 
{ - const ch = &f.object.code_header.writer; - const decl_c_value = try f.allocLocalValue(.{ - .ctype = try f.ctypeFromType(ty, .complete), - .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(zcu)), - }); - const gpa = f.object.dg.gpa; - try f.allocs.put(gpa, decl_c_value.new_local, false); - try ch.writeAll("static "); - try f.object.dg.renderTypeAndName(ch, ty, decl_c_value, Const, .none, .complete); - try ch.writeAll(" = "); - try f.object.dg.renderValue(ch, val, .StaticInitializer); - try ch.writeAll(";\n "); - break :result .{ .local = decl_c_value.new_local }; - } else .{ .constant = val }; - - gop.value_ptr.* = result; - return result; + if (!gop.found_existing) { + const val = try f.air.value(ref, f.dg.pt); + gop.value_ptr.* = .{ .constant = val.? }; + } + return gop.value_ptr.*; } fn wantSafety(f: *Function) bool { - return switch (f.object.dg.pt.zcu.optimizeMode()) { + return switch (f.dg.pt.zcu.optimizeMode()) { .Debug, .ReleaseSafe => true, .ReleaseFast, .ReleaseSmall => false, }; @@ -485,18 +452,16 @@ pub const Function = struct { /// those which go into `allocs`. This function does not add the resulting local into `allocs`; /// that responsibility lies with the caller. 
fn allocLocalValue(f: *Function, local_type: LocalType) !CValue { - try f.locals.ensureUnusedCapacity(f.object.dg.gpa, 1); - defer f.locals.appendAssumeCapacity(.{ - .ctype = local_type.ctype, - .flags = .{ .alignas = local_type.alignas }, - }); - return .{ .new_local = @intCast(f.locals.items.len) }; + try f.locals.ensureUnusedCapacity(f.dg.gpa, 1); + const index = f.locals.items.len; + f.locals.appendAssumeCapacity(local_type); + return .{ .new_local = @intCast(index) }; } fn allocLocal(f: *Function, inst: ?Air.Inst.Index, ty: Type) !CValue { return f.allocAlignedLocal(inst, .{ - .ctype = try f.ctypeFromType(ty, .complete), - .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(f.object.dg.pt.zcu)), + .type = ty, + .alignment = .none, }); } @@ -524,11 +489,10 @@ pub const Function = struct { .none => unreachable, .new_local, .local => |i| try w.print("t{d}", .{i}), .local_ref => |i| try w.print("&t{d}", .{i}), - .constant => |val| try f.object.dg.renderValue(w, val, location), + .constant => |val| try f.dg.renderValue(w, val, location), .arg => |i| try w.print("a{d}", .{i}), - .arg_array => |i| try f.writeCValueMember(w, .{ .arg = i }, .{ .identifier = "array" }), - .undef => |ty| try f.object.dg.renderUndefValue(w, ty, location), - else => try f.object.dg.writeCValue(w, c_value), + .undef => |ty| try f.dg.renderUndefValue(w, ty, location), + else => try f.dg.writeCValue(w, c_value), } } @@ -537,17 +501,12 @@ pub const Function = struct { .none => unreachable, .new_local, .local, .constant => { try w.writeAll("(*"); - try f.writeCValue(w, c_value, .Other); + try f.writeCValue(w, c_value, .other); try w.writeByte(')'); }, .local_ref => |i| try w.print("t{d}", .{i}), .arg => |i| try w.print("(*a{d})", .{i}), - .arg_array => |i| { - try w.writeAll("(*"); - try f.writeCValueMember(w, .{ .arg = i }, .{ .identifier = "array" }); - try w.writeByte(')'); - }, - else => try f.object.dg.writeCValueDeref(w, c_value), + else => try f.dg.writeCValueDeref(w, c_value), } } 
@@ -558,119 +517,77 @@ pub const Function = struct { member: CValue, ) Error!void { switch (c_value) { - .new_local, .local, .local_ref, .constant, .arg, .arg_array => { - try f.writeCValue(w, c_value, .Other); + .new_local, .local, .local_ref, .constant, .arg => { + try f.writeCValue(w, c_value, .other); try w.writeByte('.'); - try f.writeCValue(w, member, .Other); + try f.writeCValue(w, member, .other); }, - else => return f.object.dg.writeCValueMember(w, c_value, member), + else => return f.dg.writeCValueMember(w, c_value, member), } } fn writeCValueDerefMember(f: *Function, w: *Writer, c_value: CValue, member: CValue) !void { switch (c_value) { - .new_local, .local, .arg, .arg_array => { - try f.writeCValue(w, c_value, .Other); + .new_local, .local, .arg => { + try f.writeCValue(w, c_value, .other); try w.writeAll("->"); }, .constant => { try w.writeByte('('); - try f.writeCValue(w, c_value, .Other); + try f.writeCValue(w, c_value, .other); try w.writeAll(")->"); }, .local_ref => { try f.writeCValueDeref(w, c_value); try w.writeByte('.'); }, - else => return f.object.dg.writeCValueDerefMember(w, c_value, member), + else => return f.dg.writeCValueDerefMember(w, c_value, member), } - try f.writeCValue(w, member, .Other); + try f.writeCValue(w, member, .other); } fn fail(f: *Function, comptime format: []const u8, args: anytype) Error { - return f.object.dg.fail(format, args); + return f.dg.fail(format, args); } - fn ctypeFromType(f: *Function, ty: Type, kind: CType.Kind) !CType { - return f.object.dg.ctypeFromType(ty, kind); - } - - fn byteSize(f: *Function, ctype: CType) u64 { - return f.object.dg.byteSize(ctype); - } - - fn renderType(f: *Function, w: *Writer, ctype: Type) !void { - return f.object.dg.renderType(w, ctype); - } - - fn renderCType(f: *Function, w: *Writer, ctype: CType) !void { - return f.object.dg.renderCType(w, ctype); + fn renderType(f: *Function, w: *Writer, ty: Type) !void { + return f.dg.renderType(w, ty); } fn renderIntCast(f: *Function, w: 
*Writer, dest_ty: Type, src: CValue, v: Vectorize, src_ty: Type, location: ValueRenderLocation) !void { - return f.object.dg.renderIntCast(w, dest_ty, .{ .c_value = .{ .f = f, .value = src, .v = v } }, src_ty, location); + return f.dg.renderIntCast(w, dest_ty, .{ .c_value = .{ .f = f, .value = src, .v = v } }, src_ty, location); } fn fmtIntLiteralDec(f: *Function, val: Value) !std.fmt.Alt(FormatIntLiteralContext, formatIntLiteral) { - return f.object.dg.fmtIntLiteralDec(val, .Other); + return f.dg.fmtIntLiteralDec(val, .other); } fn fmtIntLiteralHex(f: *Function, val: Value) !std.fmt.Alt(FormatIntLiteralContext, formatIntLiteral) { - return f.object.dg.fmtIntLiteralHex(val, .Other); - } - - fn getLazyFnName(f: *Function, key: LazyFnKey) ![]const u8 { - const gpa = f.object.dg.gpa; - const pt = f.object.dg.pt; - const zcu = pt.zcu; - const ip = &zcu.intern_pool; - const ctype_pool = &f.object.dg.ctype_pool; - - const gop = try f.lazy_fns.getOrPut(gpa, key); - if (!gop.found_existing) { - errdefer _ = f.lazy_fns.pop(); - - gop.value_ptr.* = .{ - .fn_name = switch (key) { - .tag_name, - => |enum_ty| try ctype_pool.fmt(gpa, "zig_{s}_{f}__{d}", .{ - @tagName(key), - fmtIdentUnsolo(ip.loadEnumType(enum_ty).name.toSlice(ip)), - @intFromEnum(enum_ty), - }), - .never_tail, - .never_inline, - => |owner_nav| try ctype_pool.fmt(gpa, "zig_{s}_{f}__{d}", .{ - @tagName(key), - fmtIdentUnsolo(ip.getNav(owner_nav).name.toSlice(ip)), - @intFromEnum(owner_nav), - }), - }, - }; - } - return gop.value_ptr.fn_name.toSlice(ctype_pool).?; + return f.dg.fmtIntLiteralHex(val, .other); } pub fn deinit(f: *Function) void { - const gpa = f.object.dg.gpa; + const gpa = f.dg.gpa; f.allocs.deinit(gpa); f.locals.deinit(gpa); deinitFreeLocalsMap(gpa, &f.free_locals_map); f.blocks.deinit(gpa); f.value_map.deinit(); - f.lazy_fns.deinit(gpa); + f.need_tag_name_funcs.deinit(gpa); + f.need_never_tail_funcs.deinit(gpa); + f.need_never_inline_funcs.deinit(gpa); f.loop_switch_conds.deinit(gpa); } fn 
typeOf(f: *Function, inst: Air.Inst.Ref) Type { - return f.air.typeOf(inst, &f.object.dg.pt.zcu.intern_pool); + return f.air.typeOf(inst, &f.dg.pt.zcu.intern_pool); } fn typeOfIndex(f: *Function, inst: Air.Inst.Index) Type { - return f.air.typeOfIndex(inst, &f.object.dg.pt.zcu.intern_pool); + return f.air.typeOfIndex(inst, &f.dg.pt.zcu.intern_pool); } - fn copyCValue(f: *Function, ctype: CType, dst: CValue, src: CValue) !void { + fn copyCValue(f: *Function, dst: CValue, src: CValue) !void { switch (dst) { .new_local, .local => |dst_local_index| switch (src) { .new_local, .local => |src_local_index| if (dst_local_index == src_local_index) return, @@ -678,12 +595,12 @@ pub const Function = struct { }, else => {}, } - const w = &f.object.code.writer; - const a = try Assignment.start(f, w, ctype); - try f.writeCValue(w, dst, .Other); - try a.assign(f, w); - try f.writeCValue(w, src, .Other); - try a.end(f, w); + const w = &f.code.writer; + try f.writeCValue(w, dst, .other); + try w.writeAll(" = "); + try f.writeCValue(w, src, .other); + try w.writeByte(';'); + try f.newline(); } fn moveCValue(f: *Function, inst: Air.Inst.Index, ty: Type, src: CValue) !CValue { @@ -694,7 +611,7 @@ pub const Function = struct { else => { try freeCValue(f, inst, src); const dst = try f.allocLocal(inst, ty); - try f.copyCValue(try f.ctypeFromType(ty, .complete), dst, src); + try f.copyCValue(dst, src); return dst; }, } @@ -708,51 +625,17 @@ pub const Function = struct { } }; -/// This data is available when outputting .c code for a `Zcu`. -/// It is not available when generating .h file. 
-pub const Object = struct { - dg: DeclGen, - code_header: Writer.Allocating, - code: Writer.Allocating, - indent_counter: usize, - - const indent_width = 1; - const indent_char = ' '; - - fn newline(o: *Object) !void { - const w = &o.code.writer; - try w.writeByte('\n'); - try w.splatByteAll(indent_char, o.indent_counter); - } - fn indent(o: *Object) void { - o.indent_counter += indent_width; - } - fn outdent(o: *Object) !void { - o.indent_counter -= indent_width; - const written = o.code.written(); - switch (written[written.len - 1]) { - indent_char => o.code.shrinkRetainingCapacity(written.len - indent_width), - '\n' => try o.code.writer.splatByteAll(indent_char, o.indent_counter), - else => { - std.debug.print("\"{f}\"\n", .{std.zig.fmtString(written[written.len -| 100..])}); - unreachable; - }, - } - } -}; - -/// This data is available both when outputting .c code and when outputting an .h file. +/// This data is available when rendering *any* C source code (function or otherwise). pub const DeclGen = struct { gpa: Allocator, + arena: Allocator, pt: Zcu.PerThread, mod: *Module, - pass: Pass, + owner_nav: InternPool.Nav.Index.Optional, is_naked_fn: bool, expected_block: ?u32, - fwd_decl: Writer.Allocating, error_msg: ?*Zcu.ErrorMsg, - ctype_pool: CType.Pool, - scratch: std.ArrayList(u32), + ctype_deps: CType.Dependencies, /// This map contains all the UAVs we saw generating this function. /// `link.C` will merge them into its `uavs`/`aligned_uavs` fields. /// Key is the value of the UAV; value is the UAV's alignment, or @@ -760,16 +643,10 @@ pub const DeclGen = struct { /// less than the natural alignment. 
uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, Alignment), - pub const Pass = union(enum) { - nav: InternPool.Nav.Index, - uav: InternPool.Index, - flush, - }; - fn fail(dg: *DeclGen, comptime format: []const u8, args: anytype) Error { @branchHint(.cold); const zcu = dg.pt.zcu; - const src_loc = zcu.navSrcLoc(dg.pass.nav); + const src_loc = zcu.navSrcLoc(dg.owner_nav.unwrap().?); dg.error_msg = try Zcu.ErrorMsg.create(dg.gpa, src_loc, format, args); return error.AnalysisFail; } @@ -783,14 +660,13 @@ pub const DeclGen = struct { const pt = dg.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; - const ctype_pool = &dg.ctype_pool; const uav_val = Value.fromInterned(uav.val); const uav_ty = uav_val.typeOf(zcu); // Render an undefined pointer if we have a pointer to a zero-bit or comptime type. const ptr_ty: Type = .fromInterned(uav.orig_ty); if (ptr_ty.isPtrAtRuntime(zcu) and !uav_ty.isRuntimeFnOrHasRuntimeBits(zcu)) { - return dg.writeCValue(w, .{ .undef = ptr_ty }); + return dg.renderUndefValue(w, ptr_ty, location); } // Chase function values in order to be able to reference the original function. @@ -805,14 +681,12 @@ pub const DeclGen = struct { // them). The analysis until now should ensure that the C function // pointers are compatible. If they are not, then there is a bug // somewhere and we should let the C compiler tell us about it. 
- const ptr_ctype = try dg.ctypeFromType(ptr_ty, .complete); - const elem_ctype = ptr_ctype.info(ctype_pool).pointer.elem_ctype; - const uav_ctype = try dg.ctypeFromType(uav_ty, .complete); - const need_cast = !elem_ctype.eql(uav_ctype) and - (elem_ctype.info(ctype_pool) != .function or uav_ctype.info(ctype_pool) != .function); + const elem_ty = ptr_ty.childType(zcu); + const need_cast = elem_ty.toIntern() != uav_ty.toIntern() and + elem_ty.zigTypeTag(zcu) != .@"fn" or uav_ty.zigTypeTag(zcu) != .@"fn"; if (need_cast) { try w.writeAll("(("); - try dg.renderCType(w, ptr_ctype); + try dg.renderType(w, ptr_ty); try w.writeByte(')'); } try w.writeByte('&'); @@ -842,11 +716,9 @@ pub const DeclGen = struct { nav_index: InternPool.Nav.Index, location: ValueRenderLocation, ) Error!void { - _ = location; const pt = dg.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; - const ctype_pool = &dg.ctype_pool; // Chase function values in order to be able to reference the original function. const owner_nav = switch (ip.getNav(nav_index).status) { @@ -863,25 +735,23 @@ pub const DeclGen = struct { const nav_ty: Type = .fromInterned(ip.getNav(owner_nav).typeOf(ip)); const ptr_ty = try pt.navPtrType(owner_nav); if (!nav_ty.isRuntimeFnOrHasRuntimeBits(zcu)) { - return dg.writeCValue(w, .{ .undef = ptr_ty }); + return dg.renderUndefValue(w, ptr_ty, location); } // We shouldn't cast C function pointers as this is UB (when you call // them). The analysis until now should ensure that the C function // pointers are compatible. If they are not, then there is a bug // somewhere and we should let the C compiler tell us about it. 
- const ctype = try dg.ctypeFromType(ptr_ty, .complete); - const elem_ctype = ctype.info(ctype_pool).pointer.elem_ctype; - const nav_ctype = try dg.ctypeFromType(nav_ty, .complete); - const need_cast = !elem_ctype.eql(nav_ctype) and - (elem_ctype.info(ctype_pool) != .function or nav_ctype.info(ctype_pool) != .function); + const elem_ty = ptr_ty.childType(zcu); + const need_cast = elem_ty.toIntern() != nav_ty.toIntern() and + elem_ty.zigTypeTag(zcu) != .@"fn" or nav_ty.zigTypeTag(zcu) != .@"fn"; if (need_cast) { try w.writeAll("(("); - try dg.renderCType(w, ctype); + try dg.renderType(w, ptr_ty); try w.writeByte(')'); } try w.writeByte('&'); - try dg.renderNavName(w, owner_nav); + try renderNavName(w, owner_nav, ip); if (need_cast) try w.writeByte(')'); } @@ -896,11 +766,10 @@ pub const DeclGen = struct { switch (derivation) { .comptime_alloc_ptr, .comptime_field_ptr => unreachable, .int => |int| { - const ptr_ctype = try dg.ctypeFromType(int.ptr_ty, .complete); const addr_val = try pt.intValue(.usize, int.addr); try w.writeByte('('); - try dg.renderCType(w, ptr_ctype); - try w.print("){f}", .{try dg.fmtIntLiteralHex(addr_val, .Other)}); + try dg.renderType(w, int.ptr_ty); + try w.print("){f}", .{try dg.fmtIntLiteralHex(addr_val, .other)}); }, .nav_ptr => |nav| try dg.renderNav(w, nav, location), @@ -915,14 +784,10 @@ pub const DeclGen = struct { .field_ptr => |field| { const parent_ptr_ty = try field.parent.ptrType(pt); - // Ensure complete type definition is available before accessing fields. 
- _ = try dg.ctypeFromType(parent_ptr_ty.childType(zcu), .complete); - switch (fieldLocation(parent_ptr_ty, field.result_ptr_ty, field.field_idx, zcu)) { .begin => { - const ptr_ctype = try dg.ctypeFromType(field.result_ptr_ty, .complete); try w.writeByte('('); - try dg.renderCType(w, ptr_ctype); + try dg.renderType(w, field.result_ptr_ty); try w.writeByte(')'); try dg.renderPointer(w, field.parent.*, location); }, @@ -933,51 +798,40 @@ pub const DeclGen = struct { try dg.writeCValue(w, name); }, .byte_offset => |byte_offset| { - const ptr_ctype = try dg.ctypeFromType(field.result_ptr_ty, .complete); try w.writeByte('('); - try dg.renderCType(w, ptr_ctype); + try dg.renderType(w, field.result_ptr_ty); try w.writeByte(')'); const offset_val = try pt.intValue(.usize, byte_offset); try w.writeAll("((char *)"); try dg.renderPointer(w, field.parent.*, location); - try w.print(" + {f})", .{try dg.fmtIntLiteralDec(offset_val, .Other)}); + try w.print(" + {f})", .{try dg.fmtIntLiteralDec(offset_val, .other)}); }, } }, .elem_ptr => |elem| if (!(try elem.parent.ptrType(pt)).childType(zcu).hasRuntimeBits(zcu)) { // Element type is zero-bit, so lowers to `void`. The index is irrelevant; just cast the pointer. - const ptr_ctype = try dg.ctypeFromType(elem.result_ptr_ty, .complete); try w.writeByte('('); - try dg.renderCType(w, ptr_ctype); + try dg.renderType(w, elem.result_ptr_ty); try w.writeByte(')'); try dg.renderPointer(w, elem.parent.*, location); } else { const index_val = try pt.intValue(.usize, elem.elem_idx); - // We want to do pointer arithmetic on a pointer to the element type. - // We might have a pointer-to-array. In this case, we must cast first. - const result_ctype = try dg.ctypeFromType(elem.result_ptr_ty, .complete); - const parent_ctype = try dg.ctypeFromType(try elem.parent.ptrType(pt), .complete); - if (result_ctype.eql(parent_ctype)) { - // The pointer already has an appropriate type - just do the arithmetic. 
+ try w.writeByte('('); + // We want to do pointer arithmetic on a pointer to the element type, but the parent + // might be a pointer-to-array, in which case we must cast it. + if (elem.result_ptr_ty.toIntern() != (try elem.parent.ptrType(pt)).toIntern()) { try w.writeByte('('); - try dg.renderPointer(w, elem.parent.*, location); - try w.print(" + {f})", .{try dg.fmtIntLiteralDec(index_val, .Other)}); - } else { - // We probably have an array pointer `T (*)[n]`. Cast to an element pointer, - // and *then* apply the index. - try w.writeAll("(("); - try dg.renderCType(w, result_ctype); + try dg.renderType(w, elem.result_ptr_ty); try w.writeByte(')'); - try dg.renderPointer(w, elem.parent.*, location); - try w.print(" + {f})", .{try dg.fmtIntLiteralDec(index_val, .Other)}); } + try dg.renderPointer(w, elem.parent.*, location); + try w.print(" + {f})", .{try dg.fmtIntLiteralDec(index_val, .other)}); }, .offset_and_cast => |oac| { - const ptr_ctype = try dg.ctypeFromType(oac.new_ptr_ty, .complete); try w.writeByte('('); - try dg.renderCType(w, ptr_ctype); + try dg.renderType(w, oac.new_ptr_ty); try w.writeByte(')'); if (oac.byte_offset == 0) { try dg.renderPointer(w, oac.parent.*, location); @@ -985,14 +839,40 @@ pub const DeclGen = struct { const offset_val = try pt.intValue(.usize, oac.byte_offset); try w.writeAll("((char *)"); try dg.renderPointer(w, oac.parent.*, location); - try w.print(" + {f})", .{try dg.fmtIntLiteralDec(offset_val, .Other)}); + try w.print(" + {f})", .{try dg.fmtIntLiteralDec(offset_val, .other)}); } }, } } - fn renderErrorName(dg: *DeclGen, w: *Writer, err_name: InternPool.NullTerminatedString) !void { - try w.print("zig_error_{f}", .{fmtIdentUnsolo(err_name.toSlice(&dg.pt.zcu.intern_pool))}); + fn renderValueAsLvalue( + dg: *DeclGen, + w: *Writer, + val: Value, + ) Error!void { + const zcu = dg.pt.zcu; + + // If the type of `val` lowers to a C struct or union type, then `renderValue` will render + // it as a compound literal, and compound 
literals are already lvalues. + const ty = val.typeOf(zcu); + const is_aggregate: bool = switch (ty.zigTypeTag(zcu)) { + .@"struct", .@"union" => switch (ty.containerLayout(zcu)) { + .auto, .@"extern" => true, + .@"packed" => false, + }, + .array, + .vector, + .error_union, + .optional, + => true, + else => false, + }; + if (is_aggregate) return renderValue(dg, w, val, .other); + + // Otherwise, use a UAV. + const gop = try dg.uavs.getOrPut(dg.gpa, val.toIntern()); + if (!gop.found_existing) gop.value_ptr.* = .none; + try renderUavName(w, val); } fn renderValue( @@ -1005,16 +885,13 @@ pub const DeclGen = struct { const zcu = pt.zcu; const ip = &zcu.intern_pool; const target = &dg.mod.resolved_target.result; - const ctype_pool = &dg.ctype_pool; const initializer_type: ValueRenderLocation = switch (location) { - .StaticInitializer => .StaticInitializer, - else => .Initializer, + .static_initializer => .static_initializer, + else => .initializer, }; const ty = val.typeOf(zcu); - if (val.isUndef(zcu)) return dg.renderUndefValue(w, ty, location); - const ctype = try dg.ctypeFromType(ty, location.toCTypeKind()); switch (ip.indexToKey(val.toIntern())) { // types, not values .int_type, @@ -1037,7 +914,7 @@ pub const DeclGen = struct { .memoized_call, => unreachable, - .undef => unreachable, // handled above + .undef => try dg.renderUndefValue(w, ty, location), .simple_value => |simple_value| switch (simple_value) { // non-runtime values .void => unreachable, @@ -1053,46 +930,28 @@ pub const DeclGen = struct { .enum_literal, => unreachable, // non-runtime values .int => try w.print("{f}", .{try dg.fmtIntLiteralDec(val, location)}), - .err => |err| try dg.renderErrorName(w, err.name), - .error_union => |error_union| switch (ctype.info(ctype_pool)) { - .basic => switch (error_union.val) { - .err_name => |err_name| try dg.renderErrorName(w, err_name), + .err => |err| try renderErrorName(w, err.name.toSlice(ip)), + .error_union => |error_union| { + if 
(!location.isInitializer()) { + try w.writeByte('('); + try dg.renderType(w, ty); + try w.writeByte(')'); + } + try w.writeAll("{ .error = "); + switch (error_union.val) { + .err_name => |err_name| try renderErrorName(w, err_name.toSlice(ip)), .payload => try w.writeByte('0'), - }, - .pointer, .aligned, .array, .vector, .fwd_decl, .function => unreachable, - .aggregate => |aggregate| { - if (!location.isInitializer()) { - try w.writeByte('('); - try dg.renderCType(w, ctype); - try w.writeByte(')'); + } + if (ty.errorUnionPayload(zcu).hasRuntimeBits(zcu)) { + try w.writeAll(", .payload = "); + switch (error_union.val) { + .err_name => try dg.renderUndefValue(w, ty.errorUnionPayload(zcu), initializer_type), + .payload => |payload| try dg.renderValue(w, .fromInterned(payload), initializer_type), } - try w.writeByte('{'); - for (0..aggregate.fields.len) |field_index| { - if (field_index > 0) try w.writeByte(','); - switch (aggregate.fields.at(field_index, ctype_pool).name.index) { - .@"error" => switch (error_union.val) { - .err_name => |err_name| try dg.renderErrorName(w, err_name), - .payload => try w.writeByte('0'), - }, - .payload => switch (error_union.val) { - .err_name => try dg.renderUndefValue( - w, - ty.errorUnionPayload(zcu), - initializer_type, - ), - .payload => |payload| try dg.renderValue( - w, - Value.fromInterned(payload), - initializer_type, - ), - }, - else => unreachable, - } - } - try w.writeByte('}'); - }, + } + try w.writeAll(" }"); }, - .enum_tag => |enum_tag| try dg.renderValue(w, Value.fromInterned(enum_tag.int), location), + .enum_tag => |enum_tag| try dg.renderValue(w, .fromInterned(enum_tag.int), location), .float => { const bits = ty.floatBits(target); const f128_val = val.toFloat(f128, zcu); @@ -1143,7 +1002,7 @@ pub const DeclGen = struct { else unreachable; - if (location == .StaticInitializer) { + if (location == .static_initializer) { if (!std.math.isNan(f128_val) and std.math.isSignalNan(f128_val)) return dg.fail("TODO: C backend: 
implement nans rendering in static initializers", .{}); @@ -1154,9 +1013,11 @@ pub const DeclGen = struct { // return dg.fail("Only quiet nans are supported in global variable initializers", .{}); } - try w.writeAll("zig_"); - try w.writeAll(if (location == .StaticInitializer) "init" else "make"); - try w.writeAll("_special_"); + if (location == .static_initializer) { + try w.writeAll("zig_init_special_"); + } else { + try w.writeAll("zig_make_special_"); + } try dg.renderTypeForBuiltinFnName(w, ty); try w.writeByte('('); if (std.math.signbit(f128_val)) try w.writeByte('-'); @@ -1183,105 +1044,85 @@ pub const DeclGen = struct { if (!empty) try w.writeByte(')'); }, .slice => |slice| { - const aggregate = ctype.info(ctype_pool).aggregate; if (!location.isInitializer()) { try w.writeByte('('); - try dg.renderCType(w, ctype); + try dg.renderType(w, ty); try w.writeByte(')'); } try w.writeByte('{'); - for (0..aggregate.fields.len) |field_index| { - if (field_index > 0) try w.writeByte(','); - try dg.renderValue(w, Value.fromInterned( - switch (aggregate.fields.at(field_index, ctype_pool).name.index) { - .ptr => slice.ptr, - .len => slice.len, - else => unreachable, - }, - ), initializer_type); - } + try dg.renderValue(w, .fromInterned(slice.ptr), initializer_type); + try w.writeByte(','); + try dg.renderValue(w, .fromInterned(slice.len), initializer_type); try w.writeByte('}'); }, .ptr => { - var arena = std.heap.ArenaAllocator.init(zcu.gpa); - defer arena.deinit(); - const derivation = try val.pointerDerivation(arena.allocator(), pt, null); + const derivation = try val.pointerDerivation(dg.arena, pt, null); + try w.writeByte('('); try dg.renderPointer(w, derivation, location); + try w.writeByte(')'); }, - .opt => |opt| switch (ctype.info(ctype_pool)) { - .basic => if (ctype.isBool()) try w.writeAll(switch (opt.val) { - .none => "true", - else => "false", - }) else switch (opt.val) { - .none => try w.writeByte('0'), - else => |payload| switch (ip.indexToKey(payload)) { 
- .undef => |err_ty| try dg.renderUndefValue( - w, - .fromInterned(err_ty), - location, - ), - .err => |err| try dg.renderErrorName(w, err.name), - else => unreachable, - }, - }, - .pointer => switch (opt.val) { - .none => try w.writeAll("NULL"), - else => |payload| try dg.renderValue(w, Value.fromInterned(payload), location), - }, - .aligned, .array, .vector, .fwd_decl, .function => unreachable, - .aggregate => |aggregate| { - switch (opt.val) { - .none => {}, - else => |payload| switch (aggregate.fields.at(0, ctype_pool).name.index) { - .is_null, .payload => {}, - .ptr, .len => return dg.renderValue( - w, - Value.fromInterned(payload), - location, - ), - else => unreachable, - }, - } + .opt => |opt| switch (CType.classifyOptional(ty, zcu)) { + .npv_payload => unreachable, // opv optional + .opv_payload => { if (!location.isInitializer()) { try w.writeByte('('); - try dg.renderCType(w, ctype); + try dg.renderType(w, ty); try w.writeByte(')'); } - try w.writeByte('{'); - for (0..aggregate.fields.len) |field_index| { - if (field_index > 0) try w.writeByte(','); - switch (aggregate.fields.at(field_index, ctype_pool).name.index) { - .is_null => try w.writeAll(switch (opt.val) { - .none => "true", - else => "false", - }), - .payload => switch (opt.val) { - .none => try dg.renderUndefValue( - w, - ty.optionalChild(zcu), - initializer_type, - ), - else => |payload| try dg.renderValue( - w, - Value.fromInterned(payload), - initializer_type, - ), - }, - .ptr => try w.writeAll("NULL"), - .len => try dg.renderUndefValue(w, .usize, initializer_type), - else => unreachable, + try w.writeAll(switch (opt.val) { + .none => "{.is_null = true}", + else => "{.is_null = false}", + }); + }, + .error_set => switch (opt.val) { + .none => try w.writeByte('0'), + else => |payload_val| try dg.renderValue(w, .fromInterned(payload_val), location), + }, + .ptr_like => switch (opt.val) { + .none => try w.writeAll("NULL"), + else => |payload_val| try dg.renderValue(w, 
.fromInterned(payload_val), location), + }, + .slice_like => switch (opt.val) { + .none => { + if (!location.isInitializer()) { + try w.writeByte('('); + try dg.renderType(w, ty); + try w.writeByte(')'); } + try w.writeAll("{NULL,"); + try dg.renderUndefValue(w, .usize, initializer_type); + try w.writeByte('}'); + }, + else => |payload_val| try dg.renderValue(w, .fromInterned(payload_val), location), + }, + .@"struct" => { + if (!location.isInitializer()) { + try w.writeByte('('); + try dg.renderType(w, ty); + try w.writeByte(')'); + } + switch (opt.val) { + .none => { + try w.writeAll("{ .is_null = true, .payload = "); + try dg.renderUndefValue(w, ty.optionalChild(zcu), initializer_type); + try w.writeAll(" }"); + }, + else => |payload_val| { + try w.writeAll("{ .is_null = false, .payload = "); + try dg.renderValue(w, .fromInterned(payload_val), initializer_type); + try w.writeAll(" }"); + }, } - try w.writeByte('}'); }, }, .aggregate => switch (ip.indexToKey(ty.toIntern())) { .array_type, .vector_type => { - if (location == .FunctionArgument) { + if (!location.isInitializer()) { try w.writeByte('('); - try dg.renderCType(w, ctype); + try dg.renderType(w, ty); try w.writeByte(')'); } + try w.writeByte('{'); const ai = ty.arrayInfo(zcu); if (ai.elem_type.eql(.u8, zcu)) { var literal: StringLiteral = .init(w, @intCast(ty.arrayLenIncludingSentinel(zcu))); @@ -1314,11 +1155,12 @@ pub const DeclGen = struct { } try w.writeByte('}'); } + try w.writeByte('}'); }, .tuple_type => |tuple| { if (!location.isInitializer()) { try w.writeByte('('); - try dg.renderCType(w, ctype); + try dg.renderType(w, ty); try w.writeByte(')'); } @@ -1354,7 +1196,7 @@ pub const DeclGen = struct { if (!location.isInitializer()) { try w.writeByte('('); - try dg.renderCType(w, ctype); + try dg.renderType(w, ty); try w.writeByte(')'); } @@ -1385,69 +1227,60 @@ pub const DeclGen = struct { .un => |un| { const loaded_union = ip.loadUnionType(ty.toIntern()); if (un.tag == .none) { - const backing_ty 
= try ty.externUnionBackingType(pt); assert(loaded_union.layout == .@"extern"); - if (location == .StaticInitializer) { + if (location == .static_initializer) { return dg.fail("TODO: C backend: implement extern union backing type rendering in static initializers", .{}); } const ptr_ty = try pt.singleConstPtrType(ty); - try w.writeAll("*(("); + try w.writeAll("*("); try dg.renderType(w, ptr_ty); - try w.writeAll(")("); - try dg.renderType(w, backing_ty); - try w.writeAll("){"); - try dg.renderValue(w, Value.fromInterned(un.val), location); - try w.writeAll("})"); + try w.writeAll(")&"); + // We need an lvalue for '&'. + try dg.renderValueAsLvalue(w, .fromInterned(un.val)); } else { if (!location.isInitializer()) { try w.writeByte('('); - try dg.renderCType(w, ctype); + try dg.renderType(w, ty); try w.writeByte(')'); } - - const field_index = zcu.unionTagFieldIndex(loaded_union, Value.fromInterned(un.tag)).?; - const field_ty: Type = .fromInterned(loaded_union.field_types.get(ip)[field_index]); - const field_name = ip.loadEnumType(loaded_union.enum_tag_type).field_names.get(ip)[field_index]; - - const has_tag = loaded_union.has_runtime_tag; - if (has_tag) try w.writeByte('{'); - const aggregate = ctype.info(ctype_pool).aggregate; - for (0..if (has_tag) aggregate.fields.len else 1) |outer_field_index| { - if (outer_field_index > 0) try w.writeByte(','); - switch (if (has_tag) - aggregate.fields.at(outer_field_index, ctype_pool).name.index - else - .payload) { - .tag => try dg.renderValue( - w, - Value.fromInterned(un.tag), - initializer_type, - ), - .payload => { - try w.writeByte('{'); - if (field_ty.hasRuntimeBits(zcu)) { - try w.print(" .{f} = ", .{fmtIdentSolo(field_name.toSlice(ip))}); - try dg.renderValue( - w, - Value.fromInterned(un.val), - initializer_type, - ); - try w.writeByte(' '); - } else for (0..loaded_union.field_types.len) |inner_field_index| { - const inner_field_ty: Type = .fromInterned( - loaded_union.field_types.get(ip)[inner_field_index], - ); - 
if (!inner_field_ty.hasRuntimeBits(zcu)) continue; - try dg.renderUndefValue(w, inner_field_ty, initializer_type); - break; - } - try w.writeByte('}'); - }, - else => unreachable, - } + if (ty.unionHasAllZeroBitFieldTypes(zcu)) { + assert(loaded_union.has_runtime_tag); // otherwise it does not have runtime bits + try w.writeAll("{ .tag = "); + try dg.renderValue(w, .fromInterned(un.tag), initializer_type); + try w.writeAll(" }"); + return; } - if (has_tag) try w.writeByte('}'); + + if (loaded_union.layout == .auto) try w.writeByte('{'); + + if (loaded_union.has_runtime_tag) { + try w.writeAll(" .tag = "); + try dg.renderValue(w, .fromInterned(un.tag), initializer_type); + try w.writeAll(", .payload = "); + } + + const enum_tag_ty: Type = .fromInterned(loaded_union.enum_tag_type); + const active_field_index = enum_tag_ty.enumTagFieldIndex(.fromInterned(un.tag), zcu).?; + const active_field_ty: Type = .fromInterned(loaded_union.field_types.get(ip)[active_field_index]); + if (active_field_ty.hasRuntimeBits(zcu)) { + const active_field_name = enum_tag_ty.enumFieldName(active_field_index, zcu); + try w.print("{{ .{f} = ", .{fmtIdentSolo(active_field_name.toSlice(ip))}); + try dg.renderValue(w, .fromInterned(un.val), initializer_type); + try w.writeAll(" }"); + } else { + const first_field_ty: Type = for (loaded_union.field_types.get(ip)) |field_ty_ip| { + const field_ty: Type = .fromInterned(field_ty_ip); + if (!field_ty.hasRuntimeBits(pt.zcu)) continue; + break field_ty; + } else unreachable; + try w.writeByte('{'); + try dg.renderUndefValue(w, first_field_ty, initializer_type); + try w.writeByte('}'); + } + + if (loaded_union.has_runtime_tag) try w.writeByte(' '); + if (loaded_union.layout == .auto) try w.writeByte('}'); } }, } @@ -1463,11 +1296,10 @@ pub const DeclGen = struct { const zcu = pt.zcu; const ip = &zcu.intern_pool; const target = &dg.mod.resolved_target.result; - const ctype_pool = &dg.ctype_pool; const initializer_type: ValueRenderLocation = switch 
(location) { - .StaticInitializer => .StaticInitializer, - else => .Initializer, + .static_initializer => .static_initializer, + else => .initializer, }; const safety_on = switch (zcu.optimizeMode()) { @@ -1475,7 +1307,6 @@ pub const DeclGen = struct { .ReleaseFast, .ReleaseSmall => false, }; - const ctype = try dg.ctypeFromType(ty, location.toCTypeKind()); switch (ty.toIntern()) { .c_longdouble_type, .f16_type, @@ -1500,76 +1331,109 @@ pub const DeclGen = struct { else => unreachable, } try w.writeAll(", "); - try dg.renderUndefValue(w, repr_ty, .FunctionArgument); + try dg.renderUndefValue(w, repr_ty, .other); return w.writeByte(')'); }, .bool_type => try w.writeAll(if (safety_on) "0xaa" else "false"), else => switch (ip.indexToKey(ty.toIntern())) { - .simple_type, + .simple_type, // anyerror, c_char (etc), usize, isize .int_type, .enum_type, .error_set_type, .inferred_error_set_type, - => return w.print("{f}", .{ - try dg.fmtIntLiteralHex(try pt.undefValue(ty), location), - }), + => switch (CType.classifyInt(ty, zcu)) { + .void => unreachable, // opv + .small => |s| { + const int = ty.intInfo(zcu); + var buf: [std.math.big.int.calcTwosCompLimbCount(128)]std.math.big.Limb = undefined; + var bigint: std.math.big.int.Mutable = .init(&buf, undefPattern(u128)); + bigint.truncate(bigint.toConst(), int.signedness, int.bits); + const fmt_undef: FormatInt128 = .{ + .target = zcu.getTarget(), + .int_cty = s, + .val = bigint.toConst(), + .is_global = location == .static_initializer, + .base = 16, + .case = .lower, + }; + try w.print("{f}", .{fmt_undef}); + }, + .big => |big| { + var buf: [std.math.big.int.calcTwosCompLimbCount(128)]std.math.big.Limb = undefined; + var limb_bigint: std.math.big.int.Mutable = .init(&buf, undefPattern(u128)); + limb_bigint.truncate(limb_bigint.toConst(), .unsigned, big.limb_size.bits()); + const fmt_undef_limb: FormatInt128 = .{ + .target = zcu.getTarget(), + .int_cty = big.limb_size.unsigned(), + .val = limb_bigint.toConst(), + .is_global = 
location == .static_initializer, + .base = 16, + .case = .lower, + }; + + if (!location.isInitializer()) { + try w.writeByte('('); + try dg.renderType(w, ty); + try w.writeByte(')'); + } + try w.writeAll("{{"); + try w.print("{f}", .{fmt_undef_limb}); + for (1..big.limbs_len) |_| { + try w.print(",{f}", .{fmt_undef_limb}); + } + try w.writeAll("}}"); + }, + }, .ptr_type => |ptr_type| switch (ptr_type.flags.size) { .one, .many, .c => { try w.writeAll("(("); - try dg.renderCType(w, ctype); - return w.print("){f})", .{ - try dg.fmtIntLiteralHex(.undef_usize, .Other), - }); + try dg.renderType(w, ty); + try w.writeByte(')'); + try dg.renderUndefValue(w, .usize, location); + try w.writeByte(')'); }, .slice => { if (!location.isInitializer()) { try w.writeByte('('); - try dg.renderCType(w, ctype); + try dg.renderType(w, ty); try w.writeByte(')'); } - try w.writeAll("{("); - const ptr_ty = ty.slicePtrFieldType(zcu); - try dg.renderType(w, ptr_ty); - return w.print("){f}, {0f}}}", .{ - try dg.fmtIntLiteralHex(.undef_usize, .Other), - }); + try w.writeByte('{'); + try dg.renderUndefValue(w, ty.slicePtrFieldType(zcu), initializer_type); + try w.writeByte(','); + try dg.renderUndefValue(w, .usize, initializer_type); + try w.writeByte('}'); }, }, - .opt_type => |child_type| switch (ctype.info(ctype_pool)) { - .basic, .pointer => try dg.renderUndefValue( - w, - .fromInterned(if (ctype.isBool()) .bool_type else child_type), - location, - ), - .aligned, .array, .vector, .fwd_decl, .function => unreachable, - .aggregate => |aggregate| { - switch (aggregate.fields.at(0, ctype_pool).name.index) { - .is_null, .payload => {}, - .ptr, .len => return dg.renderUndefValue( - w, - .fromInterned(child_type), - location, - ), - else => unreachable, - } + .opt_type => |child_type| switch (CType.classifyOptional(ty, zcu)) { + .npv_payload => unreachable, // opv optional + + .error_set, + .ptr_like, + .slice_like, + => try dg.renderUndefValue(w, .fromInterned(child_type), location), + + 
.opv_payload => { if (!location.isInitializer()) { try w.writeByte('('); - try dg.renderCType(w, ctype); + try dg.renderType(w, ty); try w.writeByte(')'); } - try w.writeByte('{'); - for (0..aggregate.fields.len) |field_index| { - if (field_index > 0) try w.writeByte(','); - try dg.renderUndefValue(w, .fromInterned( - switch (aggregate.fields.at(field_index, ctype_pool).name.index) { - .is_null => .bool_type, - .payload => child_type, - else => unreachable, - }, - ), initializer_type); + try w.writeAll(if (safety_on) "{.is_null=0xaa}" else "{.is_null=false}"); + }, + + .@"struct" => { + if (!location.isInitializer()) { + try w.writeByte('('); + try dg.renderType(w, ty); + try w.writeByte(')'); } - try w.writeByte('}'); + try w.writeAll("{ .is_null = "); + try dg.renderUndefValue(w, .bool, initializer_type); + try w.writeAll(", .payload = "); + try dg.renderUndefValue(w, .fromInterned(child_type), initializer_type); + try w.writeAll(" }"); }, }, .struct_type => { @@ -1578,10 +1442,9 @@ pub const DeclGen = struct { .auto, .@"extern" => { if (!location.isInitializer()) { try w.writeByte('('); - try dg.renderCType(w, ctype); + try dg.renderType(w, ty); try w.writeByte(')'); } - try w.writeByte('{'); var field_it = loaded_struct.iterateRuntimeOrder(ip); var need_comma = false; @@ -1601,7 +1464,7 @@ pub const DeclGen = struct { .tuple_type => |tuple_info| { if (!location.isInitializer()) { try w.writeByte('('); - try dg.renderCType(w, ctype); + try dg.renderType(w, ty); try w.writeByte(')'); } @@ -1624,80 +1487,61 @@ pub const DeclGen = struct { .auto, .@"extern" => { if (!location.isInitializer()) { try w.writeByte('('); - try dg.renderCType(w, ctype); + try dg.renderType(w, ty); try w.writeByte(')'); } - const has_tag = loaded_union.has_runtime_tag; - if (has_tag) try w.writeByte('{'); - const aggregate = ctype.info(ctype_pool).aggregate; - for (0..if (has_tag) aggregate.fields.len else 1) |outer_field_index| { - if (outer_field_index > 0) try w.writeByte(','); - 
switch (if (has_tag) - aggregate.fields.at(outer_field_index, ctype_pool).name.index - else - .payload) { - .tag => try dg.renderUndefValue( - w, - .fromInterned(loaded_union.enum_tag_type), - initializer_type, - ), - .payload => { - try w.writeByte('{'); - for (0..loaded_union.field_types.len) |inner_field_index| { - const inner_field_ty: Type = .fromInterned( - loaded_union.field_types.get(ip)[inner_field_index], - ); - if (!inner_field_ty.hasRuntimeBits(pt.zcu)) continue; - try dg.renderUndefValue( - w, - inner_field_ty, - initializer_type, - ); - break; - } - try w.writeByte('}'); - }, - else => unreachable, - } + const first_field_ty: Type = for (loaded_union.field_types.get(ip)) |field_ty_ip| { + const field_ty: Type = .fromInterned(field_ty_ip); + if (!field_ty.hasRuntimeBits(pt.zcu)) continue; + break field_ty; + } else { + assert(loaded_union.has_runtime_tag); // otherwise it does not have runtime bits + try w.writeAll("{ .tag = "); + try dg.renderUndefValue(w, .fromInterned(loaded_union.enum_tag_type), initializer_type); + try w.writeAll(" }"); + return; + }; + + if (loaded_union.layout == .auto) try w.writeByte('{'); + + if (loaded_union.has_runtime_tag) { + try w.writeAll(" .tag = "); + try dg.renderUndefValue(w, .fromInterned(loaded_union.enum_tag_type), initializer_type); + try w.writeAll(", .payload = "); } - if (has_tag) try w.writeByte('}'); + + try w.writeByte('{'); + try dg.renderUndefValue(w, first_field_ty, initializer_type); + try w.writeByte('}'); + + if (loaded_union.has_runtime_tag) try w.writeByte(' '); + if (loaded_union.layout == .auto) try w.writeByte('}'); }, .@"packed" => return dg.renderUndefValue(w, ty.bitpackBackingInt(zcu), location), } }, - .error_union_type => |error_union_type| switch (ctype.info(ctype_pool)) { - .basic => try dg.renderUndefValue( - w, - .fromInterned(error_union_type.error_set_type), - location, - ), - .pointer, .aligned, .array, .vector, .fwd_decl, .function => unreachable, - .aggregate => |aggregate| { - if 
(!location.isInitializer()) { - try w.writeByte('('); - try dg.renderCType(w, ctype); - try w.writeByte(')'); - } - try w.writeByte('{'); - for (0..aggregate.fields.len) |field_index| { - if (field_index > 0) try w.writeByte(','); - try dg.renderUndefValue( - w, - .fromInterned( - switch (aggregate.fields.at(field_index, ctype_pool).name.index) { - .@"error" => error_union_type.error_set_type, - .payload => error_union_type.payload_type, - else => unreachable, - }, - ), - initializer_type, - ); - } - try w.writeByte('}'); - }, + .error_union_type => |error_union| { + if (!location.isInitializer()) { + try w.writeByte('('); + try dg.renderType(w, ty); + try w.writeByte(')'); + } + try w.writeAll("{ .error = "); + try dg.renderUndefValue(w, .fromInterned(error_union.error_set_type), initializer_type); + if (Type.fromInterned(error_union.payload_type).hasRuntimeBits(zcu)) { + try w.writeAll(", .payload = "); + try dg.renderUndefValue(w, .fromInterned(error_union.payload_type), initializer_type); + } + try w.writeAll(" }"); }, .array_type, .vector_type => { + if (!location.isInitializer()) { + try w.writeByte('('); + try dg.renderType(w, ty); + try w.writeByte(')'); + } + try w.writeByte('{'); const ai = ty.arrayInfo(zcu); if (ai.elem_type.eql(.u8, zcu)) { var literal: StringLiteral = .init(w, @intCast(ty.arrayLenIncludingSentinel(zcu))); @@ -1708,14 +1552,8 @@ pub const DeclGen = struct { const s_u8: u8 = @intCast(s.toUnsignedInt(zcu)); if (s_u8 != 0) try literal.writeChar(s_u8); } - return literal.end(); + try literal.end(); } else { - if (!location.isInitializer()) { - try w.writeByte('('); - try dg.renderCType(w, ctype); - try w.writeByte(')'); - } - try w.writeByte('{'); var index: u64 = 0; while (index < ai.len) : (index += 1) { @@ -1726,8 +1564,9 @@ pub const DeclGen = struct { if (index > 0) try w.writeAll(", "); try dg.renderValue(w, s, location); } - return w.writeByte('}'); + try w.writeByte('}'); } + try w.writeByte('}'); }, .anyframe_type, .opaque_type, @@ 
-1762,10 +1601,11 @@ pub const DeclGen = struct { w: *Writer, fn_val: Value, fn_align: InternPool.Alignment, - kind: CType.Kind, + kind: enum { forward_decl, definition }, name: union(enum) { nav: InternPool.Nav.Index, - fmt_ctype_pool_string: std.fmt.Alt(CTypePoolStringFormatData, formatCTypePoolString), + nav_never_tail: InternPool.Nav.Index, + nav_never_inline: InternPool.Nav.Index, @"export": struct { main_name: InternPool.NullTerminatedString, extern_name: InternPool.NullTerminatedString, @@ -1776,14 +1616,12 @@ pub const DeclGen = struct { const ip = &zcu.intern_pool; const fn_ty = fn_val.typeOf(zcu); - const fn_ctype = try dg.ctypeFromType(fn_ty, kind); const fn_info = zcu.typeToFunc(fn_ty).?; if (fn_info.cc == .naked) { switch (kind) { - .forward => try w.writeAll("zig_naked_decl "), - .complete => try w.writeAll("zig_naked "), - else => unreachable, + .forward_decl => try w.writeAll("zig_naked_decl "), + .definition => try w.writeAll("zig_naked "), } } @@ -1793,45 +1631,63 @@ pub const DeclGen = struct { if (func_analysis.branch_hint == .cold) try w.writeAll("zig_cold "); - if (kind == .complete and func_analysis.disable_intrinsics or dg.mod.no_builtin) + if (kind == .definition and func_analysis.disable_intrinsics or dg.mod.no_builtin) try w.writeAll("zig_no_builtin "); } if (fn_info.return_type == .noreturn_type) try w.writeAll("zig_noreturn "); - var trailing = try renderTypePrefix(dg.pass, &dg.ctype_pool, zcu, w, fn_ctype, .suffix, .{}); + // While incomplete types are usually an acceptable substitute for "void", this is not true + // in function return types, where "void" is the only incomplete type permitted. 
+ const actual_return_type: Type = .fromInterned(fn_info.return_type); + const effective_return_type: Type = switch (actual_return_type.classify(zcu)) { + .no_possible_value => .noreturn, + .one_possible_value, .fully_comptime => .void, // no runtime bits + .partially_comptime, .runtime => actual_return_type, // yes runtime bits + }; + const ret_cty: CType = try .lower(effective_return_type, &dg.ctype_deps, dg.arena, zcu); + try w.print("{f}", .{ret_cty.fmtDeclaratorPrefix(zcu)}); if (toCallingConvention(fn_info.cc, zcu)) |call_conv| { - try w.print("{f}zig_callconv({s})", .{ trailing, call_conv }); - trailing = .maybe_space; + try w.print("zig_callconv({s}) ", .{call_conv}); } - - try w.print("{f}", .{trailing}); switch (name) { - .nav => |nav| try dg.renderNavName(w, nav), - .fmt_ctype_pool_string => |fmt| try w.print("{f}", .{fmt}), + .nav => |nav| try renderNavName(w, nav, ip), + .nav_never_tail => |nav| try w.print("zig_never_tail_{f}__{d}", .{ + fmtIdentUnsolo(ip.getNav(nav).name.toSlice(ip)), @intFromEnum(nav), + }), + .nav_never_inline => |nav| try w.print("zig_never_inline_{f}__{d}", .{ + fmtIdentUnsolo(ip.getNav(nav).name.toSlice(ip)), @intFromEnum(nav), + }), .@"export" => |@"export"| try w.print("{f}", .{fmtIdentSolo(@"export".extern_name.toSlice(ip))}), } - - try renderTypeSuffix( - dg.pass, - &dg.ctype_pool, - zcu, - w, - fn_ctype, - .suffix, - CQualifiers.init(.{ .@"const" = switch (kind) { - .forward => false, - .complete => true, - else => unreachable, - } }), - ); + { + try w.writeByte('('); + var c_param_index: u32 = 0; + for (fn_info.param_types.get(ip)) |param_ty_ip| { + const param_ty: Type = .fromInterned(param_ty_ip); + if (!param_ty.hasRuntimeBits(zcu)) continue; + if (c_param_index != 0) try w.writeAll(", "); + try dg.renderTypeAndName(w, param_ty, .{ .arg = c_param_index }, .{ + .@"const" = kind == .definition, + }, .none); + c_param_index += 1; + } + if (fn_info.is_var_args) { + if (c_param_index != 0) try w.writeAll(", "); + try 
w.writeAll("..."); + } else if (c_param_index == 0) { + try w.writeAll("void"); + } + try w.writeByte(')'); + } + try w.print("{f}", .{ret_cty.fmtDeclaratorSuffixIgnoreNonstring(zcu)}); switch (kind) { - .forward => { + .forward_decl => { if (fn_align.toByteUnits()) |a| try w.print(" zig_align_fn({})", .{a}); switch (name) { - .nav, .fmt_ctype_pool_string => {}, + .nav, .nav_never_tail, .nav_never_inline => {}, .@"export" => |@"export"| { const extern_name = @"export".extern_name.toSlice(ip); const is_mangled = isMangledIdent(extern_name, true); @@ -1855,38 +1711,16 @@ pub const DeclGen = struct { }, } }, - .complete => {}, - else => unreachable, + .definition => {}, } } - fn ctypeFromType(dg: *DeclGen, ty: Type, kind: CType.Kind) !CType { - defer std.debug.assert(dg.scratch.items.len == 0); - return dg.ctype_pool.fromType(dg.gpa, &dg.scratch, ty, dg.pt, dg.mod, kind); - } - - fn byteSize(dg: *DeclGen, ctype: CType) u64 { - return ctype.byteSize(&dg.ctype_pool, dg.mod); - } - - /// Renders a type as a single identifier, generating intermediate typedefs - /// if necessary. - /// - /// This is guaranteed to be valid in both typedefs and declarations/definitions. - /// - /// There are three type formats in total that we support rendering: - /// | Function | Example 1 (*u8) | Example 2 ([10]*u8) | - /// |---------------------|-----------------|---------------------| - /// | `renderTypeAndName` | "uint8_t *name" | "uint8_t *name[10]" | - /// | `renderType` | "uint8_t *" | "uint8_t *[10]" | - /// - fn renderType(dg: *DeclGen, w: *Writer, t: Type) Error!void { - try dg.renderCType(w, try dg.ctypeFromType(t, .complete)); - } - - fn renderCType(dg: *DeclGen, w: *Writer, ctype: CType) Error!void { - _ = try renderTypePrefix(dg.pass, &dg.ctype_pool, dg.pt.zcu, w, ctype, .suffix, .{}); - try renderTypeSuffix(dg.pass, &dg.ctype_pool, dg.pt.zcu, w, ctype, .suffix, .{}); + /// Renders the C lowering of the given Zig type to `w`. 
This renders the type name---to render + /// a declarator with this type, see instead `renderTypeAndName`. + fn renderType(dg: *DeclGen, w: *Writer, ty: Type) (Writer.Error || Allocator.Error)!void { + const zcu = dg.pt.zcu; + const cty: CType = try .lower(ty, &dg.ctype_deps, dg.arena, zcu); + try w.print("{f}", .{cty.fmtTypeName(zcu)}); } const IntCastContext = union(enum) { @@ -1990,7 +1824,7 @@ pub const DeclGen = struct { try w.writeAll("zig_lo_"); try dg.renderTypeForBuiltinFnName(w, src_eff_ty); try w.writeByte('('); - try context.writeValue(dg, w, .FunctionArgument); + try context.writeValue(dg, w, .other); try w.writeByte(')'); } else if (dest_bits > 64 and src_bits <= 64) { try w.writeAll("zig_make_"); @@ -2001,7 +1835,7 @@ pub const DeclGen = struct { try dg.renderType(w, src_eff_ty); try w.writeByte(')'); } - try context.writeValue(dg, w, .FunctionArgument); + try context.writeValue(dg, w, .other); try w.writeByte(')'); } else { assert(!src_is_ptr); @@ -2010,23 +1844,16 @@ pub const DeclGen = struct { try w.writeAll("(zig_hi_"); try dg.renderTypeForBuiltinFnName(w, src_eff_ty); try w.writeByte('('); - try context.writeValue(dg, w, .FunctionArgument); + try context.writeValue(dg, w, .other); try w.writeAll("), zig_lo_"); try dg.renderTypeForBuiltinFnName(w, src_eff_ty); try w.writeByte('('); - try context.writeValue(dg, w, .FunctionArgument); + try context.writeValue(dg, w, .other); try w.writeAll("))"); } } - /// Renders a type and name in field declaration/definition format. - /// - /// There are three type formats in total that we support rendering: - /// | Function | Example 1 (*u8) | Example 2 ([10]*u8) | - /// |---------------------|-----------------|---------------------| - /// | `renderTypeAndName` | "uint8_t *name" | "uint8_t *name[10]" | - /// | `renderType` | "uint8_t *" | "uint8_t *[10]" | - /// + /// Renders to `w` a C declarator whose type is the C lowering of the given Zig type. 
fn renderTypeAndName( dg: *DeclGen, w: *Writer, @@ -2034,73 +1861,47 @@ pub const DeclGen = struct { name: CValue, qualifiers: CQualifiers, alignment: Alignment, - kind: CType.Kind, - ) !void { - try dg.renderCTypeAndName( - w, - try dg.ctypeFromType(ty, kind), - name, - qualifiers, - CType.AlignAs.fromAlignment(.{ - .@"align" = alignment, - .abi = ty.abiAlignment(dg.pt.zcu), - }), - ); - } - - fn renderCTypeAndName( - dg: *DeclGen, - w: *Writer, - ctype: CType, - name: CValue, - qualifiers: CQualifiers, - alignas: CType.AlignAs, ) !void { const zcu = dg.pt.zcu; - switch (alignas.abiOrder()) { - .lt => try w.print("zig_under_align({}) ", .{alignas.toByteUnits()}), + const ip = &zcu.intern_pool; + const cty: CType = try .lower(ty, &dg.ctype_deps, dg.arena, zcu); + try w.print("{f}", .{cty.fmtDeclaratorPrefix(zcu)}); + if (alignment != .none) switch (alignment.order(ty.abiAlignment(zcu))) { + .lt => try w.print("zig_under_align({d}) ", .{alignment.toByteUnits().?}), .eq => {}, - .gt => try w.print("zig_align({}) ", .{alignas.toByteUnits()}), - } - - try w.print("{f}", .{ - try renderTypePrefix(dg.pass, &dg.ctype_pool, zcu, w, ctype, .suffix, qualifiers), - }); - try dg.writeName(w, name); - try renderTypeSuffix(dg.pass, &dg.ctype_pool, zcu, w, ctype, .suffix, .{}); - if (ctype.isNonString(&dg.ctype_pool)) try w.writeAll(" zig_nonstring"); - } - - fn writeName(dg: *DeclGen, w: *Writer, c_value: CValue) !void { - switch (c_value) { + .gt => try w.print("zig_align({d}) ", .{alignment.toByteUnits().?}), + }; + if (qualifiers.@"const") try w.writeAll("const "); + if (qualifiers.@"volatile") try w.writeAll("volatile "); + if (qualifiers.restrict) try w.writeAll("restrict "); + switch (name) { .new_local, .local => |i| try w.print("t{d}", .{i}), + .arg => |i| try w.print("a{d}", .{i}), .constant => |uav| try renderUavName(w, uav), - .nav => |nav| try dg.renderNavName(w, nav), + .nav => |nav| try renderNavName(w, nav, ip), .identifier => |ident| try w.print("{f}", 
.{fmtIdentSolo(ident)}), else => unreachable, } + try w.print("{f}", .{cty.fmtDeclaratorSuffix(zcu)}); } fn writeCValue(dg: *DeclGen, w: *Writer, c_value: CValue) Error!void { switch (c_value) { .none, .new_local, .local, .local_ref => unreachable, .constant => |uav| try renderUavName(w, uav), - .arg, .arg_array => unreachable, + .arg => unreachable, .field => |i| try w.print("f{d}", .{i}), - .nav => |nav| try dg.renderNavName(w, nav), + .nav => |nav| try renderNavName(w, nav, &dg.pt.zcu.intern_pool), .nav_ref => |nav| { try w.writeByte('&'); - try dg.renderNavName(w, nav); + try renderNavName(w, nav, &dg.pt.zcu.intern_pool); }, - .undef => |ty| try dg.renderUndefValue(w, ty, .Other), + .undef => |ty| try dg.renderUndefValue(w, ty, .other), .identifier => |ident| try w.print("{f}", .{fmtIdentSolo(ident)}), .payload_identifier => |ident| try w.print("{f}.{f}", .{ fmtIdentSolo("payload"), fmtIdentSolo(ident), }), - .ctype_pool_string => |string| try w.print("{f}", .{ - fmtCTypePoolString(string, &dg.ctype_pool, true), - }), } } @@ -2112,16 +1913,14 @@ pub const DeclGen = struct { .local_ref, .constant, .arg, - .arg_array, - .ctype_pool_string, => unreachable, .field => |i| try w.print("f{d}", .{i}), .nav => |nav| { try w.writeAll("(*"); - try dg.renderNavName(w, nav); + try renderNavName(w, nav, &dg.pt.zcu.intern_pool); try w.writeByte(')'); }, - .nav_ref => |nav| try dg.renderNavName(w, nav), + .nav_ref => |nav| try renderNavName(w, nav, &dg.pt.zcu.intern_pool), .undef => unreachable, .identifier => |ident| try w.print("(*{f})", .{fmtIdentSolo(ident)}), .payload_identifier => |ident| try w.print("(*{f}.{f})", .{ @@ -2157,8 +1956,6 @@ pub const DeclGen = struct { .field, .undef, .arg, - .arg_array, - .ctype_pool_string, => unreachable, .nav, .identifier, .payload_identifier => { try dg.writeCValue(w, c_value); @@ -2172,101 +1969,36 @@ pub const DeclGen = struct { try dg.writeCValue(w, member); } - fn renderFwdDecl( - dg: *DeclGen, - nav_index: InternPool.Nav.Index, - 
flags: packed struct { - is_const: bool, - is_threadlocal: bool, - linkage: std.builtin.GlobalLinkage, - visibility: std.builtin.SymbolVisibility, - }, - ) !void { - const zcu = dg.pt.zcu; - const ip = &zcu.intern_pool; - const nav = ip.getNav(nav_index); - const fwd = &dg.fwd_decl.writer; - try fwd.writeAll(switch (flags.linkage) { - .internal => "static ", - .strong, .weak, .link_once => "zig_extern ", - }); - switch (flags.linkage) { - .internal, .strong => {}, - .weak => try fwd.writeAll("zig_weak_linkage "), - .link_once => return dg.fail("TODO: CBE: implement linkonce linkage?", .{}), - } - switch (flags.linkage) { - .internal => {}, - .strong, .weak, .link_once => try fwd.print("zig_visibility({s}) ", .{@tagName(flags.visibility)}), - } - if (flags.is_threadlocal and !dg.mod.single_threaded) try fwd.writeAll("zig_threadlocal "); - try dg.renderTypeAndName( - fwd, - .fromInterned(nav.typeOf(ip)), - .{ .nav = nav_index }, - CQualifiers.init(.{ .@"const" = flags.is_const }), - nav.getAlignment(), - .complete, - ); - try fwd.writeAll(";\n"); - } - - fn renderNavName(dg: *DeclGen, w: *Writer, nav_index: InternPool.Nav.Index) !void { - const zcu = dg.pt.zcu; - const ip = &zcu.intern_pool; - const nav = ip.getNav(nav_index); - if (nav.getExtern(ip)) |@"extern"| { - try w.print("{f}", .{ - fmtIdentSolo(ip.getNav(@"extern".owner_nav).name.toSlice(ip)), - }); - } else { - // MSVC has a limit of 4095 character token length limit, and fmtIdent can (worst case), - // expand to 3x the length of its input, but let's cut it off at a much shorter limit. 
- const fqn_slice = ip.getNav(nav_index).fqn.toSlice(ip); - try w.print("{f}__{d}", .{ - fmtIdentUnsolo(fqn_slice[0..@min(fqn_slice.len, 100)]), - @intFromEnum(nav_index), - }); - } - } - - fn renderUavName(w: *Writer, uav: Value) !void { - try w.print("__anon_{d}", .{@intFromEnum(uav.toIntern())}); - } - fn renderTypeForBuiltinFnName(dg: *DeclGen, w: *Writer, ty: Type) !void { - try dg.renderCTypeForBuiltinFnName(w, try dg.ctypeFromType(ty, .complete)); - } - - fn renderCTypeForBuiltinFnName(dg: *DeclGen, w: *Writer, ctype: CType) !void { - switch (ctype.info(&dg.ctype_pool)) { - else => |ctype_info| try w.print("{c}{d}", .{ - if (ctype.isBool()) - signAbbrev(.unsigned) - else if (ctype.isInteger()) - signAbbrev(ctype.signedness(dg.mod)) - else if (ctype.isFloat()) - @as(u8, 'f') - else if (ctype_info == .pointer) - @as(u8, 'p') - else - return dg.fail("TODO: CBE: implement renderTypeForBuiltinFnName for {s} type", .{@tagName(ctype_info)}), - if (ctype.isFloat()) ctype.floatActiveBits(dg.mod) else dg.byteSize(ctype) * 8, + const zcu = dg.pt.zcu; + switch (ty.zigTypeTag(zcu)) { + .bool => return w.writeAll("u8"), + .float => return w.print("f{d}", .{ty.floatBits(zcu.getTarget())}), + else => {}, + } + if (ty.isPtrAtRuntime(zcu)) { + return w.print("p{d}", .{zcu.getTarget().ptrBitWidth()}); + } + switch (CType.classifyInt(ty, zcu)) { + .void => unreachable, // opv + .small => try w.print("{c}{d}", .{ + signAbbrev(ty.intInfo(zcu).signedness), + ty.abiSize(zcu) * 8, }), - .array => try w.writeAll("big"), + .big => try w.writeAll("big"), } } fn renderBuiltinInfo(dg: *DeclGen, w: *Writer, ty: Type, info: BuiltinInfo) !void { - const ctype = try dg.ctypeFromType(ty, .complete); - const is_big = ctype.info(&dg.ctype_pool) == .array; + const pt = dg.pt; + const zcu = pt.zcu; + + const is_big = lowersToBigInt(ty, zcu); switch (info) { .none => if (!is_big) return, .bits => {}, } - const pt = dg.pt; - const zcu = pt.zcu; const int_info: std.builtin.Type.Int = if 
(ty.isAbiInt(zcu)) ty.intInfo(zcu) else .{ .signedness = .unsigned, .bits = @intCast(ty.bitSize(zcu)), @@ -2275,7 +2007,7 @@ pub const DeclGen = struct { if (is_big) try w.print(", {}", .{int_info.signedness == .signed}); try w.print(", {f}", .{try dg.fmtIntLiteralDec( try pt.intValue(if (is_big) .u16 else .u8, int_info.bits), - .FunctionArgument, + .other, )}); } @@ -2286,15 +2018,13 @@ pub const DeclGen = struct { base: u8, case: std.fmt.Case, ) !std.fmt.Alt(FormatIntLiteralContext, formatIntLiteral) { - const zcu = dg.pt.zcu; - const kind = loc.toCTypeKind(); - const ty = val.typeOf(zcu); + // If there's a bigint type involved, mark a dependency on it. + const cty: CType = try .lower(val.typeOf(dg.pt.zcu), &dg.ctype_deps, dg.arena, dg.pt.zcu); return .{ .data = .{ .dg = dg, - .int_info = ty.intInfo(zcu), - .kind = kind, - .ctype = try dg.ctypeFromType(ty, kind), + .loc = loc, .val = val, + .cty = cty, .base = base, .case = case, } }; @@ -2317,339 +2047,11 @@ pub const DeclGen = struct { } }; -const CTypeFix = enum { prefix, suffix }; -const CQualifiers = std.enums.EnumSet(enum { @"const", @"volatile", restrict }); -const Const = CQualifiers.init(.{ .@"const" = true }); -const RenderCTypeTrailing = enum { - no_space, - maybe_space, - - pub fn format(self: @This(), w: *Writer) Writer.Error!void { - switch (self) { - .no_space => {}, - .maybe_space => try w.writeByte(' '), - } - } +const CQualifiers = packed struct { + @"const": bool = false, + @"volatile": bool = false, + restrict: bool = false, }; -fn renderAlignedTypeName(w: *Writer, ctype: CType) !void { - try w.print("anon__aligned_{d}", .{@intFromEnum(ctype.index)}); -} -fn renderFwdDeclTypeName( - zcu: *Zcu, - w: *Writer, - ctype: CType, - fwd_decl: CType.Info.FwdDecl, - attributes: []const u8, -) !void { - const ip = &zcu.intern_pool; - try w.print("{s} {s}", .{ @tagName(fwd_decl.tag), attributes }); - switch (fwd_decl.name) { - .anon => try w.print("anon__lazy_{d}", .{@intFromEnum(ctype.index)}), - .index 
=> |index| try w.print("{f}__{d}", .{ - fmtIdentUnsolo(Type.fromInterned(index).containerTypeName(ip).toSlice(&zcu.intern_pool)), - @intFromEnum(index), - }), - } -} -fn renderTypePrefix( - pass: DeclGen.Pass, - ctype_pool: *const CType.Pool, - zcu: *Zcu, - w: *Writer, - ctype: CType, - parent_fix: CTypeFix, - qualifiers: CQualifiers, -) Writer.Error!RenderCTypeTrailing { - var trailing = RenderCTypeTrailing.maybe_space; - switch (ctype.info(ctype_pool)) { - .basic => |basic_info| try w.writeAll(@tagName(basic_info)), - - .pointer => |pointer_info| { - try w.print("{f}*", .{try renderTypePrefix( - pass, - ctype_pool, - zcu, - w, - pointer_info.elem_ctype, - .prefix, - CQualifiers.init(.{ - .@"const" = pointer_info.@"const", - .@"volatile" = pointer_info.@"volatile", - }), - )}); - trailing = .no_space; - }, - - .aligned => switch (pass) { - .nav => |nav| try w.print("nav__{d}_{d}", .{ - @intFromEnum(nav), @intFromEnum(ctype.index), - }), - .uav => |uav| try w.print("uav__{d}_{d}", .{ - @intFromEnum(uav), @intFromEnum(ctype.index), - }), - .flush => try renderAlignedTypeName(w, ctype), - }, - - .array, .vector => |sequence_info| { - const child_trailing = try renderTypePrefix( - pass, - ctype_pool, - zcu, - w, - sequence_info.elem_ctype, - .suffix, - qualifiers, - ); - switch (parent_fix) { - .prefix => { - try w.print("{f}(", .{child_trailing}); - return .no_space; - }, - .suffix => return child_trailing, - } - }, - - .fwd_decl => |fwd_decl_info| switch (fwd_decl_info.name) { - .anon => switch (pass) { - .nav => |nav| try w.print("nav__{d}_{d}", .{ - @intFromEnum(nav), @intFromEnum(ctype.index), - }), - .uav => |uav| try w.print("uav__{d}_{d}", .{ - @intFromEnum(uav), @intFromEnum(ctype.index), - }), - .flush => try renderFwdDeclTypeName(zcu, w, ctype, fwd_decl_info, ""), - }, - .index => try renderFwdDeclTypeName(zcu, w, ctype, fwd_decl_info, ""), - }, - - .aggregate => |aggregate_info| switch (aggregate_info.name) { - .anon => { - try w.print("{s} {s}", .{ - 
@tagName(aggregate_info.tag), - if (aggregate_info.@"packed") "zig_packed(" else "", - }); - try renderFields(zcu, w, ctype_pool, aggregate_info, 1); - if (aggregate_info.@"packed") try w.writeByte(')'); - }, - .fwd_decl => |fwd_decl| return renderTypePrefix( - pass, - ctype_pool, - zcu, - w, - fwd_decl, - parent_fix, - qualifiers, - ), - }, - - .function => |function_info| { - const child_trailing = try renderTypePrefix( - pass, - ctype_pool, - zcu, - w, - function_info.return_ctype, - .suffix, - .{}, - ); - switch (parent_fix) { - .prefix => { - try w.print("{f}(", .{child_trailing}); - return .no_space; - }, - .suffix => return child_trailing, - } - }, - } - var qualifier_it = qualifiers.iterator(); - while (qualifier_it.next()) |qualifier| { - try w.print("{f}{s}", .{ trailing, @tagName(qualifier) }); - trailing = .maybe_space; - } - return trailing; -} -fn renderTypeSuffix( - pass: DeclGen.Pass, - ctype_pool: *const CType.Pool, - zcu: *Zcu, - w: *Writer, - ctype: CType, - parent_fix: CTypeFix, - qualifiers: CQualifiers, -) Writer.Error!void { - switch (ctype.info(ctype_pool)) { - .basic, .aligned, .fwd_decl, .aggregate => {}, - .pointer => |pointer_info| try renderTypeSuffix( - pass, - ctype_pool, - zcu, - w, - pointer_info.elem_ctype, - .prefix, - .{}, - ), - .array, .vector => |sequence_info| { - switch (parent_fix) { - .prefix => try w.writeByte(')'), - .suffix => {}, - } - - try w.print("[{}]", .{sequence_info.len}); - try renderTypeSuffix(pass, ctype_pool, zcu, w, sequence_info.elem_ctype, .suffix, .{}); - }, - .function => |function_info| { - switch (parent_fix) { - .prefix => try w.writeByte(')'), - .suffix => {}, - } - - try w.writeByte('('); - var need_comma = false; - for (0..function_info.param_ctypes.len) |param_index| { - const param_type = function_info.param_ctypes.at(param_index, ctype_pool); - if (need_comma) try w.writeAll(", "); - need_comma = true; - const trailing = - try renderTypePrefix(pass, ctype_pool, zcu, w, param_type, .suffix, 
qualifiers); - if (qualifiers.contains(.@"const")) try w.print("{f}a{d}", .{ trailing, param_index }); - try renderTypeSuffix(pass, ctype_pool, zcu, w, param_type, .suffix, .{}); - } - if (function_info.varargs) { - if (need_comma) try w.writeAll(", "); - need_comma = true; - try w.writeAll("..."); - } - if (!need_comma) try w.writeAll("void"); - try w.writeByte(')'); - - try renderTypeSuffix(pass, ctype_pool, zcu, w, function_info.return_ctype, .suffix, .{}); - }, - } -} -fn renderFields( - zcu: *Zcu, - w: *Writer, - ctype_pool: *const CType.Pool, - aggregate_info: CType.Info.Aggregate, - indent: usize, -) !void { - try w.writeAll("{\n"); - for (0..aggregate_info.fields.len) |field_index| { - const field_info = aggregate_info.fields.at(field_index, ctype_pool); - try w.splatByteAll(' ', indent + 1); - switch (field_info.alignas.abiOrder()) { - .lt => { - std.debug.assert(aggregate_info.@"packed"); - if (field_info.alignas.@"align" != .@"1") try w.print("zig_under_align({}) ", .{ - field_info.alignas.toByteUnits(), - }); - }, - .eq => if (aggregate_info.@"packed" and field_info.alignas.@"align" != .@"1") - try w.print("zig_align({}) ", .{field_info.alignas.toByteUnits()}), - .gt => { - std.debug.assert(field_info.alignas.@"align" != .@"1"); - try w.print("zig_align({}) ", .{field_info.alignas.toByteUnits()}); - }, - } - const trailing = try renderTypePrefix( - .flush, - ctype_pool, - zcu, - w, - field_info.ctype, - .suffix, - .{}, - ); - try w.print("{f}{f}", .{ trailing, fmtCTypePoolString(field_info.name, ctype_pool, true) }); - try renderTypeSuffix(.flush, ctype_pool, zcu, w, field_info.ctype, .suffix, .{}); - if (field_info.ctype.isNonString(ctype_pool)) try w.writeAll(" zig_nonstring"); - try w.writeAll(";\n"); - } - try w.splatByteAll(' ', indent); - try w.writeByte('}'); -} - -pub fn genTypeDecl( - zcu: *Zcu, - w: *Writer, - global_ctype_pool: *const CType.Pool, - global_ctype: CType, - pass: DeclGen.Pass, - decl_ctype_pool: *const CType.Pool, - decl_ctype: 
CType, - found_existing: bool, -) !void { - switch (global_ctype.info(global_ctype_pool)) { - .basic, .pointer, .array, .vector, .function => {}, - .aligned => |aligned_info| { - if (!found_existing) { - std.debug.assert(aligned_info.alignas.abiOrder().compare(.lt)); - try w.print("typedef zig_under_align({d}) ", .{aligned_info.alignas.toByteUnits()}); - try w.print("{f}", .{try renderTypePrefix( - .flush, - global_ctype_pool, - zcu, - w, - aligned_info.ctype, - .suffix, - .{}, - )}); - try renderAlignedTypeName(w, global_ctype); - try renderTypeSuffix(.flush, global_ctype_pool, zcu, w, aligned_info.ctype, .suffix, .{}); - try w.writeAll(";\n"); - } - switch (pass) { - .nav, .uav => { - try w.writeAll("typedef "); - _ = try renderTypePrefix(.flush, global_ctype_pool, zcu, w, global_ctype, .suffix, .{}); - try w.writeByte(' '); - _ = try renderTypePrefix(pass, decl_ctype_pool, zcu, w, decl_ctype, .suffix, .{}); - try w.writeAll(";\n"); - }, - .flush => {}, - } - }, - .fwd_decl => |fwd_decl_info| switch (fwd_decl_info.name) { - .anon => switch (pass) { - .nav, .uav => { - try w.writeAll("typedef "); - _ = try renderTypePrefix(.flush, global_ctype_pool, zcu, w, global_ctype, .suffix, .{}); - try w.writeByte(' '); - _ = try renderTypePrefix(pass, decl_ctype_pool, zcu, w, decl_ctype, .suffix, .{}); - try w.writeAll(";\n"); - }, - .flush => {}, - }, - .index => |index| if (!found_existing) { - const ip = &zcu.intern_pool; - const ty: Type = .fromInterned(index); - _ = try renderTypePrefix(.flush, global_ctype_pool, zcu, w, global_ctype, .suffix, .{}); - try w.writeByte(';'); - const file_scope = ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFile(ip); - if (!zcu.fileByIndex(file_scope).mod.?.strip) try w.print(" /* {f} */", .{ - ty.containerTypeName(ip).fmt(ip), - }); - try w.writeByte('\n'); - }, - }, - .aggregate => |aggregate_info| switch (aggregate_info.name) { - .anon => {}, - .fwd_decl => |fwd_decl| if (!found_existing) { - try renderFwdDeclTypeName( - zcu, - w, - 
fwd_decl, - fwd_decl.info(global_ctype_pool).fwd_decl, - if (aggregate_info.@"packed") "zig_packed(" else "", - ); - try w.writeByte(' '); - try renderFields(zcu, w, global_ctype_pool, aggregate_info, 0); - if (aggregate_info.@"packed") try w.writeByte(')'); - try w.writeAll(";\n"); - }, - }, - } -} pub fn genGlobalAsm(zcu: *Zcu, w: *Writer) !void { for (zcu.global_assembly.values()) |asm_source| { @@ -2657,200 +2059,128 @@ pub fn genGlobalAsm(zcu: *Zcu, w: *Writer) !void { } } -pub fn genErrDecls(o: *Object) Error!void { - const pt = o.dg.pt; - const zcu = pt.zcu; +pub fn genErrDecls( + zcu: *const Zcu, + w: *Writer, + slice_const_u8_sentinel_0_type_name: []const u8, +) Writer.Error!void { const ip = &zcu.intern_pool; - const w = &o.code.writer; - var max_name_len: usize = 0; - // do not generate an invalid empty enum when the global error set is empty const names = ip.global_error_set.getNamesFromMainThread(); + // Don't generate an invalid empty enum if the global error set is empty! if (names.len > 0) { - try w.writeAll("enum {"); - o.indent(); - try o.newline(); + try w.writeAll("enum {\n"); for (names, 1..) 
|name_nts, value| { - const name = name_nts.toSlice(ip); - max_name_len = @max(name.len, max_name_len); - const err_val = try pt.intern(.{ .err = .{ - .ty = .anyerror_type, - .name = name_nts, - } }); - try o.dg.renderValue(w, Value.fromInterned(err_val), .Other); - try w.print(" = {d}u,", .{value}); - try o.newline(); + try w.writeByte(' '); + try renderErrorName(w, name_nts.toSlice(ip)); + try w.print(" = {d}u,\n", .{value}); } - try o.outdent(); - try w.writeAll("};"); - try o.newline(); - } - const array_identifier = "zig_errorName"; - const name_prefix = array_identifier ++ "_"; - const name_buf = try o.dg.gpa.alloc(u8, name_prefix.len + max_name_len); - defer o.dg.gpa.free(name_buf); - - @memcpy(name_buf[0..name_prefix.len], name_prefix); - for (names) |name| { - const name_slice = name.toSlice(ip); - @memcpy(name_buf[name_prefix.len..][0..name_slice.len], name_slice); - const identifier = name_buf[0 .. name_prefix.len + name_slice.len]; - - const name_ty = try pt.arrayType(.{ - .len = name_slice.len, - .child = .u8_type, - .sentinel = .zero_u8, - }); - const name_val = try pt.intern(.{ .aggregate = .{ - .ty = name_ty.toIntern(), - .storage = .{ .bytes = name.toString() }, - } }); - - try w.writeAll("static "); - try o.dg.renderTypeAndName( - w, - name_ty, - .{ .identifier = identifier }, - Const, - .none, - .complete, - ); - try w.writeAll(" = "); - try o.dg.renderValue(w, Value.fromInterned(name_val), .StaticInitializer); - try w.writeByte(';'); - try o.newline(); + try w.writeAll("};\n"); } - const name_array_ty = try pt.arrayType(.{ - .len = 1 + names.len, - .child = .slice_const_u8_sentinel_0_type, - }); - - try w.writeAll("static "); - try o.dg.renderTypeAndName( - w, - name_array_ty, - .{ .identifier = array_identifier }, - Const, - .none, - .complete, - ); - try w.writeAll(" = {"); - for (names, 1..) 
|name_nts, val| { + for (names) |name_nts| { const name = name_nts.toSlice(ip); - if (val > 1) try w.writeAll(", "); - try w.print("{{" ++ name_prefix ++ "{f}, {f}}}", .{ - fmtIdentUnsolo(name), - try o.dg.fmtIntLiteralDec(try pt.intValue(.usize, name.len), .StaticInitializer), - }); + try w.print( + "static uint8_t const zig_errorName_{f}[] = {f};\n", + .{ fmtIdentUnsolo(name), fmtStringLiteral(name, 0) }, + ); } - try w.writeAll("};"); - try o.newline(); + + try w.print( + "static {s} const zig_errorName[{d}] = {{", + .{ slice_const_u8_sentinel_0_type_name, names.len }, + ); + if (names.len > 0) try w.writeByte('\n'); + for (names) |name_nts| { + const name = name_nts.toSlice(ip); + try w.print( + " {{zig_errorName_{f},{d}}},\n", + .{ fmtIdentUnsolo(name), name.len }, + ); + } + try w.writeAll("};\n"); } -pub fn genLazyFn(o: *Object, lazy_ctype_pool: *const CType.Pool, lazy_fn: LazyFnMap.Entry) Error!void { - const pt = o.dg.pt; - const zcu = pt.zcu; +pub fn genTagNameFn( + zcu: *const Zcu, + w: *Writer, + slice_const_u8_sentinel_0_type_name: []const u8, + enum_ty: Type, + enum_type_name: []const u8, +) Writer.Error!void { const ip = &zcu.intern_pool; - const ctype_pool = &o.dg.ctype_pool; - const w = &o.code.writer; - const key = lazy_fn.key_ptr.*; - const val = lazy_fn.value_ptr; - switch (key) { - .tag_name => |enum_ty_ip| { - const enum_ty: Type = .fromInterned(enum_ty_ip); - const name_slice_ty: Type = .slice_const_u8_sentinel_0; - - try w.writeAll("static "); - try o.dg.renderType(w, name_slice_ty); - try w.print(" {f}(", .{val.fn_name.fmt(lazy_ctype_pool)}); - try o.dg.renderTypeAndName(w, enum_ty, .{ .identifier = "tag" }, Const, .none, .complete); - try w.writeAll(") {"); - o.indent(); - try o.newline(); - try w.writeAll("switch (tag) {"); - o.indent(); - try o.newline(); - const tag_names = enum_ty.enumFields(zcu); - for (0..tag_names.len) |tag_index| { - const tag_name = tag_names.get(ip)[tag_index]; - const tag_name_len = tag_name.length(ip); - const 
tag_val = try pt.enumValueFieldIndex(enum_ty, @intCast(tag_index)); - - const name_ty = try pt.arrayType(.{ - .len = tag_name_len, - .child = .u8_type, - .sentinel = .zero_u8, - }); - const name_val = try pt.intern(.{ .aggregate = .{ - .ty = name_ty.toIntern(), - .storage = .{ .bytes = tag_name.toString() }, - } }); - - try w.print("case {f}: {{", .{ - try o.dg.fmtIntLiteralDec(tag_val.intFromEnum(zcu), .Other), - }); - o.indent(); - try o.newline(); - try w.writeAll("static "); - try o.dg.renderTypeAndName(w, name_ty, .{ .identifier = "name" }, Const, .none, .complete); - try w.writeAll(" = "); - try o.dg.renderValue(w, Value.fromInterned(name_val), .StaticInitializer); - try w.writeByte(';'); - try o.newline(); - try w.writeAll("return ("); - try o.dg.renderType(w, name_slice_ty); - try w.print("){{{f}, {f}}};", .{ - fmtIdentUnsolo("name"), - try o.dg.fmtIntLiteralDec(try pt.intValue(.usize, tag_name_len), .Other), - }); - try o.newline(); - try o.outdent(); - try w.writeByte('}'); - try o.newline(); - } - try o.outdent(); - try w.writeByte('}'); - try o.newline(); - try airUnreach(o); - try o.outdent(); - try w.writeByte('}'); - try o.newline(); - }, - .never_tail, .never_inline => |fn_nav_index| { - const fn_val = zcu.navValue(fn_nav_index); - const fn_ctype = try o.dg.ctypeFromType(fn_val.typeOf(zcu), .complete); - const fn_info = fn_ctype.info(ctype_pool).function; - const fn_name = fmtCTypePoolString(val.fn_name, lazy_ctype_pool, true); - - const fwd = &o.dg.fwd_decl.writer; - try fwd.print("static zig_{s} ", .{@tagName(key)}); - try o.dg.renderFunctionSignature(fwd, fn_val, ip.getNav(fn_nav_index).getAlignment(), .forward, .{ - .fmt_ctype_pool_string = fn_name, - }); - try fwd.writeAll(";\n"); - - try w.print("zig_{s} ", .{@tagName(key)}); - try o.dg.renderFunctionSignature(w, fn_val, .none, .complete, .{ - .fmt_ctype_pool_string = fn_name, - }); - try w.writeAll(" {"); - o.indent(); - try o.newline(); - try w.writeAll("return "); - try 
o.dg.renderNavName(w, fn_nav_index); - try w.writeByte('('); - for (0..fn_info.param_ctypes.len) |arg| { - if (arg > 0) try w.writeAll(", "); - try w.print("a{d}", .{arg}); - } - try w.writeAll(");"); - try o.newline(); - try o.outdent(); - try w.writeByte('}'); - try o.newline(); - }, + const loaded_enum = ip.loadEnumType(enum_ty.toIntern()); + assert(loaded_enum.field_names.len > 0); + if (Type.fromInterned(loaded_enum.int_tag_type).bitSize(zcu) > 64) { + @panic("TODO CBE: tagName for enum over 128 bits"); } + + try w.print("static {s} zig_tagName_{f}__{d}({s} tag) {{\n", .{ + slice_const_u8_sentinel_0_type_name, + fmtIdentUnsolo(loaded_enum.name.toSlice(ip)), + @intFromEnum(enum_ty.toIntern()), + enum_type_name, + }); + for (loaded_enum.field_names.get(ip), 0..) |field_name, field_index| { + try w.print(" static uint8_t const name{d}[] = {f};\n", .{ + field_index, fmtStringLiteral(field_name.toSlice(ip), 0), + }); + } + + try w.writeAll(" switch (tag) {\n"); + const field_values = loaded_enum.field_values.get(ip); + for (loaded_enum.field_names.get(ip), 0..) 
|field_name, field_index| { + const field_int: u64 = int: { + if (field_values.len == 0) break :int field_index; + const field_val: Value = .fromInterned(field_values[field_index]); + break :int field_val.toUnsignedInt(zcu); + }; + try w.print(" case {d}: return ({s}){{name{d},{d}}};\n", .{ + field_int, + slice_const_u8_sentinel_0_type_name, + field_index, + field_name.toSlice(ip).len, + }); + } + try w.writeAll( + \\ } + \\ zig_unreachable(); + \\} + \\ + ); +} + +pub fn genLazyCallModifierFn( + dg: *DeclGen, + fn_nav: InternPool.Nav.Index, + kind: enum { never_tail, never_inline }, + w: *Writer, +) Error!void { + const zcu = dg.pt.zcu; + const ip = &zcu.intern_pool; + + const fn_val = zcu.navValue(fn_nav); + + try w.print("static zig_{t} ", .{kind}); + try dg.renderFunctionSignature(w, fn_val, .none, .definition, switch (kind) { + .never_tail => .{ .nav_never_tail = fn_nav }, + .never_inline => .{ .nav_never_inline = fn_nav }, + }); + try w.writeAll(" {\n return "); + try renderNavName(w, fn_nav, ip); + try w.writeByte('('); + { + const func_type = ip.indexToKey(fn_val.typeOf(zcu).toIntern()).func_type; + var c_param_index: u32 = 0; + for (func_type.param_types.get(ip)) |param_ty_ip| { + const param_ty: Type = .fromInterned(param_ty_ip); + if (!param_ty.hasRuntimeBits(zcu)) continue; + if (c_param_index != 0) try w.writeAll(", "); + try w.print("a{d}", .{c_param_index}); + c_param_index += 1; + } + } + try w.writeAll(");\n}\n"); } pub fn generate( @@ -2869,110 +2199,109 @@ pub fn generate( const func = zcu.funcInfo(func_index); + var arena: std.heap.ArenaAllocator = .init(gpa); + defer arena.deinit(); + var function: Function = .{ .value_map = .init(gpa), .air = air.*, .liveness = liveness.*.?, .func_index = func_index, - .object = .{ - .dg = .{ - .gpa = gpa, - .pt = pt, - .mod = zcu.navFileScope(func.owner_nav).mod.?, - .error_msg = null, - .pass = .{ .nav = func.owner_nav }, - .is_naked_fn = Type.fromInterned(func.ty).fnCallingConvention(zcu) == .naked, - 
.expected_block = null, - .fwd_decl = .init(gpa), - .ctype_pool = .empty, - .scratch = .empty, - .uavs = .empty, - }, - .code_header = .init(gpa), - .code = .init(gpa), - .indent_counter = 0, + .dg = .{ + .gpa = gpa, + .arena = arena.allocator(), + .pt = pt, + .mod = zcu.navFileScope(func.owner_nav).mod.?, + .error_msg = null, + .owner_nav = func.owner_nav.toOptional(), + .is_naked_fn = Type.fromInterned(func.ty).fnCallingConvention(zcu) == .naked, + .expected_block = null, + .ctype_deps = .empty, + .uavs = .empty, }, - .lazy_fns = .empty, + .code = .init(gpa), + .indent_counter = 0, + .need_tag_name_funcs = .empty, + .need_never_tail_funcs = .empty, + .need_never_inline_funcs = .empty, }; defer { - function.object.code_header.deinit(); - function.object.code.deinit(); - function.object.dg.fwd_decl.deinit(); - function.object.dg.ctype_pool.deinit(gpa); - function.object.dg.scratch.deinit(gpa); - function.object.dg.uavs.deinit(gpa); + function.code.deinit(); + function.dg.ctype_deps.deinit(gpa); + function.dg.uavs.deinit(gpa); function.deinit(); } - try function.object.dg.ctype_pool.init(gpa); - genFunc(&function) catch |err| switch (err) { - error.AnalysisFail => return zcu.codegenFailMsg(func.owner_nav, function.object.dg.error_msg.?), - error.OutOfMemory => return error.OutOfMemory, + var fwd_decl: Writer.Allocating = .init(gpa); + defer fwd_decl.deinit(); + + var code_header: Writer.Allocating = .init(gpa); + defer code_header.deinit(); + + genFunc(&function, &fwd_decl.writer, &code_header.writer) catch |err| switch (err) { + error.AnalysisFail => return zcu.codegenFailMsg(func.owner_nav, function.dg.error_msg.?), error.WriteFailed => return error.OutOfMemory, + error.OutOfMemory => |e| return e, }; var mir: Mir = .{ - .uavs = .empty, - .code = &.{}, - .code_header = &.{}, .fwd_decl = &.{}, - .ctype_pool = .empty, - .lazy_fns = .empty, + .code_header = &.{}, + .code = &.{}, + .ctype_deps = function.dg.ctype_deps.move(), + .need_uavs = function.dg.uavs.move(), + 
.need_tag_name_funcs = function.need_tag_name_funcs.move(), + .need_never_tail_funcs = function.need_never_tail_funcs.move(), + .need_never_inline_funcs = function.need_never_inline_funcs.move(), }; errdefer mir.deinit(gpa); - mir.uavs = function.object.dg.uavs.move(); - mir.code_header = try function.object.code_header.toOwnedSlice(); - mir.code = try function.object.code.toOwnedSlice(); - mir.fwd_decl = try function.object.dg.fwd_decl.toOwnedSlice(); - mir.ctype_pool = function.object.dg.ctype_pool.move(); - mir.lazy_fns = function.lazy_fns.move(); + mir.fwd_decl = try fwd_decl.toOwnedSlice(); + mir.code_header = try code_header.toOwnedSlice(); + mir.code = try function.code.toOwnedSlice(); return mir; } -pub fn genFunc(f: *Function) Error!void { +pub fn genFunc(f: *Function, fwd_decl_writer: *Writer, header_writer: *Writer) Error!void { const tracy = trace(@src()); defer tracy.end(); - const o = &f.object; - const zcu = o.dg.pt.zcu; + const zcu = f.dg.pt.zcu; const ip = &zcu.intern_pool; - const gpa = o.dg.gpa; - const nav_index = o.dg.pass.nav; + const gpa = f.dg.gpa; + const nav_index = f.dg.owner_nav.unwrap().?; const nav_val = zcu.navValue(nav_index); const nav = ip.getNav(nav_index); - const fwd = &o.dg.fwd_decl.writer; - try fwd.writeAll("static "); - try o.dg.renderFunctionSignature( - fwd, + try fwd_decl_writer.writeAll("static "); + try f.dg.renderFunctionSignature( + fwd_decl_writer, nav_val, nav.status.fully_resolved.alignment, - .forward, + .forward_decl, .{ .nav = nav_index }, ); - try fwd.writeAll(";\n"); + try fwd_decl_writer.writeAll(";\n"); - const ch = &o.code_header.writer; if (nav.status.fully_resolved.@"linksection".toSlice(ip)) |s| - try ch.print("zig_linksection_fn({f}) ", .{fmtStringLiteral(s, null)}); - try o.dg.renderFunctionSignature( - ch, + try header_writer.print("zig_linksection_fn({f}) ", .{fmtStringLiteral(s, null)}); + try f.dg.renderFunctionSignature( + header_writer, nav_val, .none, - .complete, + .definition, .{ .nav = 
nav_index }, ); - try ch.writeAll(" {\n "); + try header_writer.writeAll(" {\n "); f.free_locals_map.clearRetainingCapacity(); const main_body = f.air.getMainBody(); - o.indent(); + f.indent(); try genBodyResolveState(f, undefined, &.{}, main_body, true); - try o.outdent(); - try o.code.writer.writeByte('}'); - try o.newline(); - if (o.dg.expected_block) |_| + try f.outdent(); + try f.code.writer.writeByte('}'); + try f.newline(); + if (f.dg.expected_block) |_| return f.fail("runtime code not allowed in naked function", .{}); // Take advantage of the free_locals map to bucket locals per type. All @@ -2986,155 +2315,204 @@ pub fn genFunc(f: *Function) Error!void { if (!should_emit) continue; const local = f.locals.items[local_index]; log.debug("inserting local {d} into free_locals", .{local_index}); - const gop = try free_locals.getOrPut(gpa, local.getType()); + const gop = try free_locals.getOrPut(gpa, local); if (!gop.found_existing) gop.value_ptr.* = .{}; try gop.value_ptr.putNoClobber(gpa, local_index, {}); } const SortContext = struct { + zcu: *const Zcu, keys: []const LocalType, pub fn lessThan(ctx: @This(), lhs_index: usize, rhs_index: usize) bool { - const lhs_ty = ctx.keys[lhs_index]; - const rhs_ty = ctx.keys[rhs_index]; - return lhs_ty.alignas.order(rhs_ty.alignas).compare(.gt); + const lhs = ctx.keys[lhs_index]; + const rhs = ctx.keys[rhs_index]; + const lhs_align = switch (lhs.alignment) { + .none => lhs.type.abiAlignment(ctx.zcu), + else => |a| a, + }; + const rhs_align = switch (rhs.alignment) { + .none => rhs.type.abiAlignment(ctx.zcu), + else => |a| a, + }; + return Alignment.compareStrict(lhs_align, .gt, rhs_align); } }; - free_locals.sort(SortContext{ .keys = free_locals.keys() }); + free_locals.sort(SortContext{ + .zcu = zcu, + .keys = free_locals.keys(), + }); for (free_locals.values()) |list| { for (list.keys()) |local_index| { const local = f.locals.items[local_index]; - try o.dg.renderCTypeAndName(ch, local.ctype, .{ .local = local_index }, 
.{}, local.flags.alignas); - try ch.writeAll(";\n "); + try f.dg.renderTypeAndName(header_writer, local.type, .{ .local = local_index }, .{}, local.alignment); + try header_writer.writeAll(";\n "); } } } -pub fn genDecl(o: *Object) Error!void { +pub fn genDecl(dg: *DeclGen, w: *Writer) Error!void { const tracy = trace(@src()); defer tracy.end(); - const pt = o.dg.pt; + const pt = dg.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; - const nav = ip.getNav(o.dg.pass.nav); + const nav = ip.getNav(dg.owner_nav.unwrap().?); const nav_ty: Type = .fromInterned(nav.typeOf(ip)); - if (!nav_ty.hasRuntimeBits(zcu)) return; - switch (ip.indexToKey(nav.status.fully_resolved.val)) { - .@"extern" => |@"extern"| { - if (!ip.isFunctionType(nav_ty.toIntern())) return o.dg.renderFwdDecl(o.dg.pass.nav, .{ - .is_const = @"extern".is_const, - .is_threadlocal = @"extern".is_threadlocal, - .linkage = @"extern".linkage, - .visibility = @"extern".visibility, - }); + const is_const: bool, const is_threadlocal: bool, const init_val: Value = switch (ip.indexToKey(nav.status.fully_resolved.val)) { + else => .{ true, false, .fromInterned(nav.status.fully_resolved.val) }, + .variable => |v| .{ false, v.is_threadlocal, .fromInterned(v.init) }, + .@"extern" => return, + }; - const fwd = &o.dg.fwd_decl.writer; - try fwd.writeAll("zig_extern "); - try o.dg.renderFunctionSignature( - fwd, - Value.fromInterned(nav.status.fully_resolved.val), - nav.status.fully_resolved.alignment, - .forward, - .{ .@"export" = .{ - .main_name = nav.name, - .extern_name = nav.name, - } }, - ); - try fwd.writeAll(";\n"); - }, - .variable => |variable| { - try o.dg.renderFwdDecl(o.dg.pass.nav, .{ - .is_const = false, - .is_threadlocal = variable.is_threadlocal, - .linkage = .internal, - .visibility = .default, - }); - const w = &o.code.writer; - if (variable.is_threadlocal and !o.dg.mod.single_threaded) try w.writeAll("zig_threadlocal "); - if (nav.status.fully_resolved.@"linksection".toSlice(&zcu.intern_pool)) |s| - 
try w.print("zig_linksection({f}) ", .{fmtStringLiteral(s, null)}); - try o.dg.renderTypeAndName( - w, - nav_ty, - .{ .nav = o.dg.pass.nav }, - .{}, - nav.status.fully_resolved.alignment, - .complete, - ); - try w.writeAll(" = "); - try o.dg.renderValue(w, Value.fromInterned(variable.init), .StaticInitializer); - try w.writeByte(';'); - try o.newline(); - }, - else => try genDeclValue( - o, - Value.fromInterned(nav.status.fully_resolved.val), - .{ .nav = o.dg.pass.nav }, - nav.status.fully_resolved.alignment, - nav.status.fully_resolved.@"linksection", - ), - } -} - -pub fn genDeclValue( - o: *Object, - val: Value, - decl_c_value: CValue, - alignment: Alignment, - @"linksection": InternPool.OptionalNullTerminatedString, -) Error!void { - const zcu = o.dg.pt.zcu; - const ty = val.typeOf(zcu); - - const fwd = &o.dg.fwd_decl.writer; - try fwd.writeAll("static "); - try o.dg.renderTypeAndName(fwd, ty, decl_c_value, Const, alignment, .complete); - try fwd.writeAll(";\n"); - - const w = &o.code.writer; - if (@"linksection".toSlice(&zcu.intern_pool)) |s| + if (nav.status.fully_resolved.@"linksection".toSlice(ip)) |s| { try w.print("zig_linksection({f}) ", .{fmtStringLiteral(s, null)}); - try o.dg.renderTypeAndName(w, ty, decl_c_value, Const, alignment, .complete); + } + + // We don't bother underaligning---it's unnecessary and hurts compatibility. + const a = nav.status.fully_resolved.alignment; + if (a != .none and a.compareStrict(.gt, nav_ty.abiAlignment(zcu))) { + try w.print("zig_align({d}) ", .{a.toByteUnits().?}); + } + + try genDeclValue(dg, w, .{ + .name = .{ .nav = dg.owner_nav.unwrap().? 
}, + .@"const" = is_const, + .@"threadlocal" = is_threadlocal, + .init_val = init_val, + }); +} +pub fn genDeclFwd(dg: *DeclGen, w: *Writer) Error!void { + const tracy = trace(@src()); + defer tracy.end(); + + const pt = dg.pt; + const zcu = pt.zcu; + const ip = &zcu.intern_pool; + const nav = ip.getNav(dg.owner_nav.unwrap().?); + const nav_ty: Type = .fromInterned(nav.typeOf(ip)); + + const is_const: bool, const is_threadlocal: bool, const init_val: Value = switch (ip.indexToKey(nav.status.fully_resolved.val)) { + else => .{ true, false, .fromInterned(nav.status.fully_resolved.val) }, + .variable => |v| .{ false, v.is_threadlocal, .fromInterned(v.init) }, + + .@"extern" => |@"extern"| switch (nav_ty.zigTypeTag(zcu)) { + .@"fn" => { + try w.writeAll("zig_extern "); + try dg.renderFunctionSignature( + w, + Value.fromInterned(nav.status.fully_resolved.val), + nav.status.fully_resolved.alignment, + .forward_decl, + .{ .@"export" = .{ + .main_name = nav.name, + .extern_name = nav.name, + } }, + ); + try w.writeAll(";\n"); + return; + }, + else => { + switch (@"extern".linkage) { + .internal => try w.writeAll("static "), + .strong => try w.print("zig_extern zig_visibility({t}) ", .{@"extern".visibility}), + .weak => try w.print("zig_extern zig_weak_linkage zig_visibility({t}) ", .{@"extern".visibility}), + .link_once => return dg.fail("TODO: CBE: implement linkonce linkage?", .{}), + } + if (@"extern".is_threadlocal and !dg.mod.single_threaded) { + try w.writeAll("zig_threadlocal "); + } + try dg.renderTypeAndName( + w, + .fromInterned(nav.typeOf(ip)), + .{ .nav = dg.owner_nav.unwrap().? }, + .{ .@"const" = @"extern".is_const }, + nav.getAlignment(), + ); + try w.writeAll(";\n"); + return; + }, + }, + }; + + // We don't bother underaligning---it's unnecessary and hurts compatibility. 
+ const a = nav.status.fully_resolved.alignment; + if (a != .none and a.compareStrict(.gt, nav_ty.abiAlignment(zcu))) { + try w.print("zig_align({d}) ", .{a.toByteUnits().?}); + } + + try genDeclValueFwd(dg, w, .{ + .name = .{ .nav = dg.owner_nav.unwrap().? }, + .@"const" = is_const, + .@"threadlocal" = is_threadlocal, + .init_val = init_val, + }); +} +pub fn genDeclValue(dg: *DeclGen, w: *Writer, options: struct { + name: CValue, + @"const": bool, + @"threadlocal": bool, + init_val: Value, +}) Error!void { + const zcu = dg.pt.zcu; + const ty = options.init_val.typeOf(zcu); + if (options.@"threadlocal" and !dg.mod.single_threaded) { + try w.writeAll("zig_threadlocal "); + } + try dg.renderTypeAndName(w, ty, options.name, .{ .@"const" = options.@"const" }, .none); try w.writeAll(" = "); - try o.dg.renderValue(w, val, .StaticInitializer); - try w.writeByte(';'); - try o.newline(); + try dg.renderValue(w, options.init_val, .static_initializer); + try w.writeAll(";\n"); +} +pub fn genDeclValueFwd(dg: *DeclGen, w: *Writer, options: struct { + name: CValue, + @"const": bool, + @"threadlocal": bool, + init_val: Value, +}) Error!void { + const zcu = dg.pt.zcu; + const ty = options.init_val.typeOf(zcu); + try w.writeAll("static "); + if (options.@"threadlocal" and !dg.mod.single_threaded) { + try w.writeAll("zig_threadlocal "); + } + try dg.renderTypeAndName(w, ty, options.name, .{ .@"const" = options.@"const" }, .none); + try w.writeAll(";\n"); } -pub fn genExports(dg: *DeclGen, exported: Zcu.Exported, export_indices: []const Zcu.Export.Index) !void { +pub fn genExports(dg: *DeclGen, w: *Writer, exported: Zcu.Exported, export_indices: []const Zcu.Export.Index) !void { const zcu = dg.pt.zcu; const ip = &zcu.intern_pool; - const fwd = &dg.fwd_decl.writer; const main_name = export_indices[0].ptr(zcu).opts.name; - try fwd.writeAll("#define "); + try w.writeAll("#define "); switch (exported) { - .nav => |nav| try dg.renderNavName(fwd, nav), - .uav => |uav| try 
DeclGen.renderUavName(fwd, Value.fromInterned(uav)), + .nav => |nav| try renderNavName(w, nav, ip), + .uav => |uav| try renderUavName(w, Value.fromInterned(uav)), } - try fwd.writeByte(' '); - try fwd.print("{f}", .{fmtIdentSolo(main_name.toSlice(ip))}); - try fwd.writeByte('\n'); + try w.writeByte(' '); + try w.print("{f}", .{fmtIdentSolo(main_name.toSlice(ip))}); + try w.writeByte('\n'); const exported_val = exported.getValue(zcu); if (ip.isFunctionType(exported_val.typeOf(zcu).toIntern())) return for (export_indices) |export_index| { const @"export" = export_index.ptr(zcu); - try fwd.writeAll("zig_extern "); - if (@"export".opts.linkage == .weak) try fwd.writeAll("zig_weak_linkage_fn "); + try w.writeAll("zig_extern "); + if (@"export".opts.linkage == .weak) try w.writeAll("zig_weak_linkage_fn "); try dg.renderFunctionSignature( - fwd, + w, exported.getValue(zcu), exported.getAlign(zcu), - .forward, + .forward_decl, .{ .@"export" = .{ .main_name = main_name, .extern_name = @"export".opts.name, } }, ); - try fwd.writeAll(";\n"); + try w.writeAll(";\n"); }; const is_const = switch (ip.indexToKey(exported_val.toIntern())) { .func => unreachable, @@ -3144,39 +2522,38 @@ pub fn genExports(dg: *DeclGen, exported: Zcu.Exported, export_indices: []const }; for (export_indices) |export_index| { const @"export" = export_index.ptr(zcu); - try fwd.writeAll("zig_extern "); - if (@"export".opts.linkage == .weak) try fwd.writeAll("zig_weak_linkage "); - if (@"export".opts.section.toSlice(ip)) |s| try fwd.print("zig_linksection({f}) ", .{ + try w.writeAll("zig_extern "); + if (@"export".opts.linkage == .weak) try w.writeAll("zig_weak_linkage "); + if (@"export".opts.section.toSlice(ip)) |s| try w.print("zig_linksection({f}) ", .{ fmtStringLiteral(s, null), }); const extern_name = @"export".opts.name.toSlice(ip); const is_mangled = isMangledIdent(extern_name, true); const is_export = @"export".opts.name != main_name; try dg.renderTypeAndName( - fwd, + w, 
exported.getValue(zcu).typeOf(zcu), .{ .identifier = extern_name }, - CQualifiers.init(.{ .@"const" = is_const }), + .{ .@"const" = is_const }, exported.getAlign(zcu), - .complete, ); if (is_mangled and is_export) { - try fwd.print(" zig_mangled_export({f}, {f}, {f})", .{ + try w.print(" zig_mangled_export({f}, {f}, {f})", .{ fmtIdentSolo(extern_name), fmtStringLiteral(extern_name, null), fmtStringLiteral(main_name.toSlice(ip), null), }); } else if (is_mangled) { - try fwd.print(" zig_mangled({f}, {f})", .{ + try w.print(" zig_mangled({f}, {f})", .{ fmtIdentSolo(extern_name), fmtStringLiteral(extern_name, null), }); } else if (is_export) { - try fwd.print(" zig_export({f}, {f})", .{ + try w.print(" zig_export({f}, {f})", .{ fmtStringLiteral(main_name.toSlice(ip), null), fmtStringLiteral(extern_name, null), }); } - try fwd.writeAll(";\n"); + try w.writeAll(";\n"); } } @@ -3185,15 +2562,15 @@ pub fn genExports(dg: *DeclGen, exported: Zcu.Exported, export_indices: []const /// have been added to `free_locals_map`. For a version of this function that restores this state, /// see `genBodyResolveState`. fn genBody(f: *Function, body: []const Air.Inst.Index) Error!void { - const w = &f.object.code.writer; + const w = &f.code.writer; if (body.len == 0) { try w.writeAll("{}"); } else { try w.writeByte('{'); - f.object.indent(); - try f.object.newline(); + f.indent(); + try f.newline(); try genBodyInner(f, body); - try f.object.outdent(); + try f.outdent(); try w.writeByte('}'); } } @@ -3207,13 +2584,13 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) Error!void { fn genBodyResolveState(f: *Function, inst: Air.Inst.Index, leading_deaths: []const Air.Inst.Index, body: []const Air.Inst.Index, inner: bool) Error!void { if (body.len == 0) { // Don't go to the expense of cloning everything! 
- if (!inner) try f.object.code.writer.writeAll("{}"); + if (!inner) try f.code.writer.writeAll("{}"); return; } // TODO: we can probably avoid the copies in some other common cases too. - const gpa = f.object.dg.gpa; + const gpa = f.dg.gpa; // Save the original value_map and free_locals_map so that we can restore them after the body. var old_value_map = try f.value_map.clone(); @@ -3254,13 +2631,13 @@ fn genBodyResolveState(f: *Function, inst: Air.Inst.Index, leading_deaths: []con } fn genBodyInner(f: *Function, body: []const Air.Inst.Index) Error!void { - const zcu = f.object.dg.pt.zcu; + const zcu = f.dg.pt.zcu; const ip = &zcu.intern_pool; const air_tags = f.air.instructions.items(.tag); const air_datas = f.air.instructions.items(.data); for (body) |inst| { - if (f.object.dg.expected_block) |_| + if (f.dg.expected_block) |_| return f.fail("runtime code not allowed in naked function", .{}); if (f.liveness.isUnused(inst) and !f.air.mustLower(inst, ip)) continue; @@ -3529,8 +2906,8 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) Error!void { .ret => return airRet(f, inst, false), .ret_safe => return airRet(f, inst, false), // TODO .ret_load => return airRet(f, inst, true), - .trap => return airTrap(f, &f.object.code.writer), - .unreach => return airUnreach(&f.object), + .trap => return airTrap(f), + .unreach => return airUnreach(f), // Instructions which may be `noreturn`. 
.block => res: { @@ -3573,21 +2950,21 @@ fn airSliceField(f: *Function, inst: Air.Inst.Index, is_ptr: bool, field_name: [ const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, inst_ty); - const a = try Assignment.start(f, w, try f.ctypeFromType(inst_ty, .complete)); - try f.writeCValue(w, local, .Other); - try a.assign(f, w); + try f.writeCValue(w, local, .other); + try w.writeAll(" = "); if (is_ptr) { try w.writeByte('&'); try f.writeCValueDerefMember(w, operand, .{ .identifier = field_name }); } else try f.writeCValueMember(w, operand, .{ .identifier = field_name }); - try a.end(f, w); + try w.writeByte(';'); + try f.newline(); return local; } fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.pt.zcu; + const zcu = f.dg.pt.zcu; const inst_ty = f.typeOfIndex(inst); const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; assert(inst_ty.hasRuntimeBits(zcu)); @@ -3596,21 +2973,24 @@ fn airPtrElemVal(f: *Function, inst: Air.Inst.Index) !CValue { const index = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, inst_ty); - const a = try Assignment.start(f, w, try f.ctypeFromType(inst_ty, .complete)); - try f.writeCValue(w, local, .Other); - try a.assign(f, w); - try f.writeCValue(w, ptr, .Other); + try f.writeCValue(w, local, .other); + try w.writeAll(" = "); + switch (f.typeOf(bin_op.lhs).ptrSize(zcu)) { + .one => try f.writeCValueDerefMember(w, ptr, .{ .identifier = "array" }), + .many, .c => try f.writeCValue(w, ptr, .other), + .slice => unreachable, + } try w.writeByte('['); - try f.writeCValue(w, index, .Other); - try w.writeByte(']'); - try a.end(f, w); + try f.writeCValue(w, index, .other); + try w.writeAll("];"); + try f.newline(); return local; } 
fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; @@ -3623,28 +3003,26 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { const index = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, inst_ty); - const a = try Assignment.start(f, w, try f.ctypeFromType(inst_ty, .complete)); - try f.writeCValue(w, local, .Other); - try a.assign(f, w); - try w.writeByte('('); - try f.renderType(w, inst_ty); - try w.writeByte(')'); + try f.writeCValue(w, local, .other); + try w.writeAll(" = "); try w.writeByte('&'); if (ptr_ty.ptrSize(zcu) == .one) { - // It's a pointer to an array, so we need to de-reference. - try f.writeCValueDeref(w, ptr); - } else try f.writeCValue(w, ptr, .Other); + // `*[n]T` was turned into a pointer to `struct { T array[n]; }` + try f.writeCValueDerefMember(w, ptr, .{ .identifier = "array" }); + } else { + try f.writeCValue(w, ptr, .other); + } try w.writeByte('['); - try f.writeCValue(w, index, .Other); - try w.writeByte(']'); - try a.end(f, w); + try f.writeCValue(w, index, .other); + try w.writeAll("];"); + try f.newline(); return local; } fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.pt.zcu; + const zcu = f.dg.pt.zcu; const inst_ty = f.typeOfIndex(inst); const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; assert(inst_ty.hasRuntimeBits(zcu)); @@ -3653,21 +3031,20 @@ fn airSliceElemVal(f: *Function, inst: Air.Inst.Index) !CValue { const index = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, inst_ty); - 
const a = try Assignment.start(f, w, try f.ctypeFromType(inst_ty, .complete)); - try f.writeCValue(w, local, .Other); - try a.assign(f, w); + try f.writeCValue(w, local, .other); + try w.writeAll(" = "); try f.writeCValueMember(w, slice, .{ .identifier = "ptr" }); try w.writeByte('['); - try f.writeCValue(w, index, .Other); - try w.writeByte(']'); - try a.end(f, w); + try f.writeCValue(w, index, .other); + try w.writeAll("];"); + try f.newline(); return local; } fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; @@ -3681,22 +3058,21 @@ fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { const index = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, inst_ty); - const a = try Assignment.start(f, w, try f.ctypeFromType(inst_ty, .complete)); - try f.writeCValue(w, local, .Other); - try a.assign(f, w); + try f.writeCValue(w, local, .other); + try w.writeAll(" = "); try w.writeByte('&'); try f.writeCValueMember(w, slice, .{ .identifier = "ptr" }); try w.writeByte('['); - try f.writeCValue(w, index, .Other); - try w.writeByte(']'); - try a.end(f, w); + try f.writeCValue(w, index, .other); + try w.writeAll("];"); + try f.newline(); return local; } fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue { - const zcu = f.object.dg.pt.zcu; + const zcu = f.dg.pt.zcu; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const inst_ty = f.typeOfIndex(inst); assert(inst_ty.hasRuntimeBits(zcu)); @@ -3705,32 +3081,28 @@ fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue { const index = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const w = 
&f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, inst_ty); - const a = try Assignment.start(f, w, try f.ctypeFromType(inst_ty, .complete)); - try f.writeCValue(w, local, .Other); - try a.assign(f, w); - try f.writeCValue(w, array, .Other); + try f.writeCValue(w, local, .other); + try w.writeAll(" = "); + try f.writeCValueMember(w, array, .{ .identifier = "array" }); try w.writeByte('['); - try f.writeCValue(w, index, .Other); - try w.writeByte(']'); - try a.end(f, w); + try f.writeCValue(w, index, .other); + try w.writeAll("];"); + try f.newline(); return local; } fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const inst_ty = f.typeOfIndex(inst); const elem_ty = inst_ty.childType(zcu); if (!elem_ty.hasRuntimeBits(zcu)) return .{ .undef = inst_ty }; const local = try f.allocLocalValue(.{ - .ctype = try f.ctypeFromType(elem_ty, .complete), - .alignas = CType.AlignAs.fromAlignment(.{ - .@"align" = inst_ty.ptrInfo(zcu).flags.alignment, - .abi = elem_ty.abiAlignment(zcu), - }), + .type = elem_ty, + .alignment = inst_ty.ptrInfo(zcu).flags.alignment, }); log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local }); try f.allocs.put(zcu.gpa, local.new_local, true); @@ -3741,11 +3113,11 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { // For packed aggregates, we zero-initialize to try and work around a design flaw // related to how `packed`, `undefined`, and RLS interact. See comment in `airStore` // for details. 
- const w = &f.object.code.writer; + const w = &f.code.writer; try w.print("memset(&t{d}, 0x00, sizeof(", .{local.new_local}); try f.renderType(w, elem_ty); try w.writeAll("));"); - try f.object.newline(); + try f.newline(); }, .auto, .@"extern" => {}, }, @@ -3756,18 +3128,15 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { } fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const inst_ty = f.typeOfIndex(inst); const elem_ty = inst_ty.childType(zcu); if (!elem_ty.hasRuntimeBits(zcu)) return .{ .undef = inst_ty }; const local = try f.allocLocalValue(.{ - .ctype = try f.ctypeFromType(elem_ty, .complete), - .alignas = CType.AlignAs.fromAlignment(.{ - .@"align" = inst_ty.ptrInfo(zcu).flags.alignment, - .abi = elem_ty.abiAlignment(zcu), - }), + .type = elem_ty, + .alignment = inst_ty.ptrInfo(zcu).flags.alignment, }); log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local }); try f.allocs.put(zcu.gpa, local.new_local, true); @@ -3778,11 +3147,11 @@ fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue { // For packed aggregates, we zero-initialize to try and work around a design flaw // related to how `packed`, `undefined`, and RLS interact. See comment in `airStore` // for details. 
- const w = &f.object.code.writer; + const w = &f.code.writer; try w.print("memset(&t{d}, 0x00, sizeof(", .{local.new_local}); try f.renderType(w, elem_ty); try w.writeAll("));"); - try f.object.newline(); + try f.newline(); }, .auto, .@"extern" => {}, }, @@ -3793,24 +3162,18 @@ fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue { } fn airArg(f: *Function, inst: Air.Inst.Index) !CValue { - const inst_ty = f.typeOfIndex(inst); - const inst_ctype = try f.ctypeFromType(inst_ty, .parameter); - const i = f.next_arg_index; f.next_arg_index += 1; - const result: CValue = if (inst_ctype.eql(try f.ctypeFromType(inst_ty, .complete))) - .{ .arg = i } - else - .{ .arg_array = i }; + const result: CValue = .{ .arg = i }; if (f.liveness.isUnused(inst)) { - const w = &f.object.code.writer; + const w = &f.code.writer; try w.writeByte('('); try f.renderType(w, .void); try w.writeByte(')'); - try f.writeCValue(w, result, .Other); + try f.writeCValue(w, result, .other); try w.writeByte(';'); - try f.object.newline(); + try f.newline(); return .none; } @@ -3818,7 +3181,7 @@ fn airArg(f: *Function, inst: Air.Inst.Index) !CValue { } fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -3841,94 +3204,69 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { ptr_info.flags.alignment.order(src_ty.abiAlignment(zcu)).compare(.gte) else true; - const is_array = lowersToArray(src_ty, zcu); - const need_memcpy = !is_aligned or is_array; - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, src_ty); const v = try Vectorize.start(f, inst, w, ptr_ty); - if (need_memcpy) { - try w.writeAll("memcpy("); - if (!is_array) try w.writeByte('&'); - try f.writeCValue(w, local, .Other); + if (!is_aligned) { + try w.writeAll("memcpy(&"); + try f.writeCValue(w, local, .other); try v.elem(f, w); try 
w.writeAll(", (const char *)"); - try f.writeCValue(w, operand, .Other); + try f.writeCValue(w, operand, .other); try v.elem(f, w); try w.writeAll(", sizeof("); try f.renderType(w, src_ty); try w.writeAll("))"); } else { - try f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try v.elem(f, w); try w.writeAll(" = "); try f.writeCValueDeref(w, operand); try v.elem(f, w); } try w.writeByte(';'); - try f.object.newline(); + try f.newline(); try v.end(f, inst, w); return local; } fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !void { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const un_op = f.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const w = &f.object.code.writer; + const w = &f.code.writer; const op_inst = un_op.toIndex(); const op_ty = f.typeOf(un_op); const ret_ty = if (is_ptr) op_ty.childType(zcu) else op_ty; - const ret_ctype = try f.ctypeFromType(ret_ty, .parameter); if (op_inst != null and f.air.instructions.items(.tag)[@intFromEnum(op_inst.?)] == .call_always_tail) { try reap(f, inst, &.{un_op}); _ = try airCall(f, op_inst.?, .always_tail); - } else if (ret_ctype.index != .void) { + } else if (ret_ty.hasRuntimeBits(zcu)) { const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); - var deref = is_ptr; - const is_array = lowersToArray(ret_ty, zcu); - const ret_val = if (is_array) ret_val: { - const array_local = try f.allocAlignedLocal(inst, .{ - .ctype = ret_ctype, - .alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(zcu)), - }); - try w.writeAll("memcpy("); - try f.writeCValueMember(w, array_local, .{ .identifier = "array" }); - try w.writeAll(", "); - if (deref) - try f.writeCValueDeref(w, operand) - else - try f.writeCValue(w, operand, .FunctionArgument); - deref = false; - try w.writeAll(", sizeof("); - try f.renderType(w, ret_ty); - try w.writeAll("));"); - try f.object.newline(); - break :ret_val array_local; - } else operand; try w.writeAll("return 
"); - if (deref) - try f.writeCValueDeref(w, ret_val) - else - try f.writeCValue(w, ret_val, .Other); - try w.writeAll(";\n"); - if (is_array) { - try freeLocal(f, inst, ret_val.new_local, null); + if (is_ptr) { + try f.writeCValueDeref(w, operand); + } else switch (operand) { + // Instead of 'return &local', emit 'return undefined'. + .local_ref => try f.dg.renderUndefValue(w, ret_ty, .other), + else => try f.writeCValue(w, operand, .other), } + try w.writeAll(";\n"); } else { try reap(f, inst, &.{un_op}); // Not even allowed to return void in a naked function. - if (!f.object.dg.is_naked_fn) try w.writeAll("return;\n"); + if (!f.dg.is_naked_fn) try w.writeAll("return;\n"); } } fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -3940,23 +3278,23 @@ fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue { const operand_ty = f.typeOf(ty_op.operand); const scalar_ty = operand_ty.scalarType(zcu); - if (f.object.dg.intCastIsNoop(inst_scalar_ty, scalar_ty)) return f.moveCValue(inst, inst_ty, operand); + if (f.dg.intCastIsNoop(inst_scalar_ty, scalar_ty)) return f.moveCValue(inst, inst_ty, operand); - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, inst_ty); const v = try Vectorize.start(f, inst, w, operand_ty); - const a = try Assignment.start(f, w, try f.ctypeFromType(scalar_ty, .complete)); - try f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try v.elem(f, w); - try a.assign(f, w); - try f.renderIntCast(w, inst_scalar_ty, operand, v, scalar_ty, .Other); - try a.end(f, w); + try w.writeAll(" = "); + try f.renderIntCast(w, inst_scalar_ty, operand, v, scalar_ty, .other); + try w.writeByte(';'); + try f.newline(); try v.end(f, inst, w); return local; } fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + 
const pt = f.dg.pt; const zcu = pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -3978,13 +3316,12 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { const need_mask = dest_bits < 8 or !std.math.isPowerOfTwo(dest_bits); if (!need_cast and !need_lo and !need_mask) return f.moveCValue(inst, inst_ty, operand); - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, inst_ty); const v = try Vectorize.start(f, inst, w, operand_ty); - const a = try Assignment.start(f, w, try f.ctypeFromType(inst_scalar_ty, .complete)); - try f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try v.elem(f, w); - try a.assign(f, w); + try w.writeAll(" = "); if (need_cast) { try w.writeByte('('); try f.renderType(w, inst_scalar_ty); @@ -3992,18 +3329,18 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { } if (need_lo) { try w.writeAll("zig_lo_"); - try f.object.dg.renderTypeForBuiltinFnName(w, scalar_ty); + try f.dg.renderTypeForBuiltinFnName(w, scalar_ty); try w.writeByte('('); } if (!need_mask) { - try f.writeCValue(w, operand, .Other); + try f.writeCValue(w, operand, .other); try v.elem(f, w); } else switch (dest_int_info.signedness) { .unsigned => { try w.writeAll("zig_and_"); - try f.object.dg.renderTypeForBuiltinFnName(w, scalar_ty); + try f.dg.renderTypeForBuiltinFnName(w, scalar_ty); try w.writeByte('('); - try f.writeCValue(w, operand, .FunctionArgument); + try f.writeCValue(w, operand, .other); try v.elem(f, w); try w.print(", {f})", .{ try f.fmtIntLiteralHex(try inst_scalar_ty.maxIntScalar(pt, scalar_ty)), @@ -4015,7 +3352,7 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { const shift_val = try pt.intValue(.u8, c_bits - dest_bits); try w.writeAll("zig_shr_"); - try f.object.dg.renderTypeForBuiltinFnName(w, scalar_ty); + try f.dg.renderTypeForBuiltinFnName(w, scalar_ty); if (c_bits == 128) { try w.print("(zig_bitCast_i{d}(", .{c_bits}); } else { @@ 
-4027,7 +3364,7 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { } else { try w.print("(uint{d}_t)", .{c_bits}); } - try f.writeCValue(w, operand, .FunctionArgument); + try f.writeCValue(w, operand, .other); try v.elem(f, w); if (c_bits == 128) try w.writeByte(')'); try w.print(", {f})", .{try f.fmtIntLiteralDec(shift_val)}); @@ -4036,13 +3373,14 @@ fn airTrunc(f: *Function, inst: Air.Inst.Index) !CValue { }, } if (need_lo) try w.writeByte(')'); - try a.end(f, w); + try w.writeByte(';'); + try f.newline(); try v.end(f, inst, w); return local; } fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; // *a = b; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; @@ -4060,7 +3398,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { const val_is_undef = if (try f.air.value(bin_op.rhs, pt)) |v| v.isUndef(zcu) else false; - const w = &f.object.code.writer; + const w = &f.code.writer; if (val_is_undef) { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); if (safety and ptr_info.packed_offset.host_size == 0) { @@ -4080,11 +3418,11 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { }, }; try w.writeAll("memset("); - try f.writeCValue(w, ptr_val, .FunctionArgument); + try f.writeCValue(w, ptr_val, .other); try w.print(", {s}, sizeof(", .{byte_str}); try f.renderType(w, .fromInterned(ptr_info.child)); try w.writeAll("));"); - try f.object.newline(); + try f.newline(); } return .none; } @@ -4093,46 +3431,29 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { ptr_info.flags.alignment.order(src_ty.abiAlignment(zcu)).compare(.gte) else true; - const is_array = lowersToArray(.fromInterned(ptr_info.child), zcu); - const need_memcpy = !is_aligned or is_array; const src_val = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const src_scalar_ctype = try 
f.ctypeFromType(src_ty.scalarType(zcu), .complete); - if (need_memcpy) { + if (!is_aligned) { // For this memcpy to safely work we need the rhs to have the same // underlying type as the lhs (i.e. they must both be arrays of the same underlying type). assert(src_ty.eql(.fromInterned(ptr_info.child), zcu)); - // If the source is a constant, writeCValue will emit a brace initialization - // so work around this by initializing into new local. - // TODO this should be done by manually initializing elements of the dest array - const array_src = if (src_val == .constant) blk: { - const new_local = try f.allocLocal(inst, src_ty); - try f.writeCValue(w, new_local, .Other); - try w.writeAll(" = "); - try f.writeCValue(w, src_val, .Other); - try w.writeByte(';'); - try f.object.newline(); - - break :blk new_local; - } else src_val; - const v = try Vectorize.start(f, inst, w, ptr_ty); try w.writeAll("memcpy((char *)"); - try f.writeCValue(w, ptr_val, .FunctionArgument); + try f.writeCValue(w, ptr_val, .other); try v.elem(f, w); - try w.writeAll(", "); - if (!is_array) try w.writeByte('&'); - try f.writeCValue(w, array_src, .FunctionArgument); + try w.writeAll(", &"); + switch (src_val) { + .constant => |val| try f.dg.renderValueAsLvalue(w, val), + else => try f.writeCValue(w, src_val, .other), + } try v.elem(f, w); try w.writeAll(", sizeof("); try f.renderType(w, src_ty); - try w.writeAll("))"); - try f.freeCValue(inst, array_src); - try w.writeByte(';'); - try f.object.newline(); + try w.writeAll("));"); + try f.newline(); try v.end(f, inst, w); } else { switch (ptr_val) { @@ -4144,20 +3465,20 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { else => {}, } const v = try Vectorize.start(f, inst, w, ptr_ty); - const a = try Assignment.start(f, w, src_scalar_ctype); try f.writeCValueDeref(w, ptr_val); try v.elem(f, w); - try a.assign(f, w); - try f.writeCValue(w, src_val, .Other); + try w.writeAll(" = "); + try f.writeCValue(w, src_val, .other); try 
v.elem(f, w); - try a.end(f, w); + try w.writeByte(';'); + try f.newline(); try v.end(f, inst, w); } return .none; } fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info: BuiltinInfo) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; @@ -4170,7 +3491,7 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info: const operand_ty = f.typeOf(bin_op.lhs); const scalar_ty = operand_ty.scalarType(zcu); - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, inst_ty); const v = try Vectorize.start(f, inst, w, operand_ty); try f.writeCValueMember(w, local, .{ .field = 1 }); @@ -4178,26 +3499,26 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info: try w.writeAll(" = zig_"); try w.writeAll(operation); try w.writeAll("o_"); - try f.object.dg.renderTypeForBuiltinFnName(w, scalar_ty); + try f.dg.renderTypeForBuiltinFnName(w, scalar_ty); try w.writeAll("(&"); try f.writeCValueMember(w, local, .{ .field = 0 }); try v.elem(f, w); try w.writeAll(", "); - try f.writeCValue(w, lhs, .FunctionArgument); + try f.writeCValue(w, lhs, .other); try v.elem(f, w); try w.writeAll(", "); - try f.writeCValue(w, rhs, .FunctionArgument); + try f.writeCValue(w, rhs, .other); if (f.typeOf(bin_op.rhs).isVector(zcu)) try v.elem(f, w); - try f.object.dg.renderBuiltinInfo(w, scalar_ty, info); + try f.dg.renderBuiltinInfo(w, scalar_ty, info); try w.writeAll(");"); - try f.object.newline(); + try f.newline(); try v.end(f, inst, w); return local; } fn airNot(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand_ty = f.typeOf(ty_op.operand); @@ -4209,17 +3530,17 @@ fn airNot(f: 
*Function, inst: Air.Inst.Index) !CValue { const inst_ty = f.typeOfIndex(inst); - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, inst_ty); const v = try Vectorize.start(f, inst, w, operand_ty); - try f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try v.elem(f, w); try w.writeAll(" = "); try w.writeByte('!'); - try f.writeCValue(w, op, .Other); + try f.writeCValue(w, op, .other); try v.elem(f, w); try w.writeByte(';'); - try f.object.newline(); + try f.newline(); try v.end(f, inst, w); return local; @@ -4232,7 +3553,7 @@ fn airBinOp( operation: []const u8, info: BuiltinInfo, ) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const operand_ty = f.typeOf(bin_op.lhs); @@ -4246,21 +3567,21 @@ fn airBinOp( const inst_ty = f.typeOfIndex(inst); - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, inst_ty); const v = try Vectorize.start(f, inst, w, operand_ty); - try f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try v.elem(f, w); try w.writeAll(" = "); - try f.writeCValue(w, lhs, .Other); + try f.writeCValue(w, lhs, .other); try v.elem(f, w); try w.writeByte(' '); try w.writeAll(operator); try w.writeByte(' '); - try f.writeCValue(w, rhs, .Other); + try f.writeCValue(w, rhs, .other); try v.elem(f, w); try w.writeByte(';'); - try f.object.newline(); + try f.newline(); try v.end(f, inst, w); return local; @@ -4272,7 +3593,7 @@ fn airCmpOp( data: anytype, operator: std.math.CompareOperator, ) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const lhs_ty = f.typeOf(data.lhs); const scalar_ty = lhs_ty.scalarType(zcu); @@ -4297,26 +3618,26 @@ fn airCmpOp( const rhs_ty = f.typeOf(data.rhs); const need_cast = lhs_ty.isSinglePointer(zcu) or rhs_ty.isSinglePointer(zcu); - const w = 
&f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, inst_ty); const v = try Vectorize.start(f, inst, w, lhs_ty); - const a = try Assignment.start(f, w, try f.ctypeFromType(scalar_ty, .complete)); - try f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try v.elem(f, w); - try a.assign(f, w); + try w.writeAll(" = "); if (lhs != .undef and lhs.eql(rhs)) try w.writeAll(switch (operator) { .lt, .neq, .gt => "false", .lte, .eq, .gte => "true", }) else { if (need_cast) try w.writeAll("(void*)"); - try f.writeCValue(w, lhs, .Other); + try f.writeCValue(w, lhs, .other); try v.elem(f, w); try w.writeAll(compareOperatorC(operator)); if (need_cast) try w.writeAll("(void*)"); - try f.writeCValue(w, rhs, .Other); + try f.writeCValue(w, rhs, .other); try v.elem(f, w); } - try a.end(f, w); + try w.writeByte(';'); + try f.newline(); try v.end(f, inst, w); return local; @@ -4327,9 +3648,8 @@ fn airEquality( inst: Air.Inst.Index, operator: std.math.CompareOperator, ) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; - const ctype_pool = &f.object.dg.ctype_pool; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const operand_ty = f.typeOf(bin_op.lhs); @@ -4350,54 +3670,64 @@ fn airEquality( const rhs = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const w = &f.object.code.writer; - const local = try f.allocLocal(inst, .bool); - const a = try Assignment.start(f, w, .bool); - try f.writeCValue(w, local, .Other); - try a.assign(f, w); - - const operand_ctype = try f.ctypeFromType(operand_ty, .complete); - if (lhs != .undef and lhs.eql(rhs)) try w.writeAll(switch (operator) { - .lt, .lte, .gte, .gt => unreachable, - .neq => "false", - .eq => "true", - }) else switch (operand_ctype.info(ctype_pool)) { - .basic, .pointer => { - try f.writeCValue(w, lhs, .Other); - try w.writeAll(compareOperatorC(operator)); - try f.writeCValue(w, rhs, .Other); 
- }, - .aligned, .array, .vector, .fwd_decl, .function => unreachable, - .aggregate => |aggregate| if (aggregate.fields.len == 2 and - (aggregate.fields.at(0, ctype_pool).name.index == .is_null or - aggregate.fields.at(1, ctype_pool).name.index == .is_null)) - { - try f.writeCValueMember(w, lhs, .{ .identifier = "is_null" }); - try w.writeAll(" || "); - try f.writeCValueMember(w, rhs, .{ .identifier = "is_null" }); - try w.writeAll(" ? "); - try f.writeCValueMember(w, lhs, .{ .identifier = "is_null" }); - try w.writeAll(compareOperatorC(operator)); - try f.writeCValueMember(w, rhs, .{ .identifier = "is_null" }); - try w.writeAll(" : "); - try f.writeCValueMember(w, lhs, .{ .identifier = "payload" }); - try w.writeAll(compareOperatorC(operator)); - try f.writeCValueMember(w, rhs, .{ .identifier = "payload" }); - } else for (0..aggregate.fields.len) |field_index| { - if (field_index > 0) try w.writeAll(switch (operator) { - .lt, .lte, .gte, .gt => unreachable, - .eq => " && ", - .neq => " || ", - }); - const field_name: CValue = .{ - .ctype_pool_string = aggregate.fields.at(field_index, ctype_pool).name, - }; - try f.writeCValueMember(w, lhs, field_name); - try w.writeAll(compareOperatorC(operator)); - try f.writeCValueMember(w, rhs, field_name); - }, + if (lhs.eql(rhs)) { + // Avoid emitting a tautological comparison. 
+ return .{ .constant = .makeBool(switch (operator) { + .eq, .lte, .gte => true, + .neq, .lt, .gt => false, + }) }; } - try a.end(f, w); + + const w = &f.code.writer; + const local = try f.allocLocal(inst, .bool); + try f.writeCValue(w, local, .other); + try w.writeAll(" = "); + + switch (operand_ty.zigTypeTag(zcu)) { + .optional => switch (CType.classifyOptional(operand_ty, zcu)) { + .npv_payload => unreachable, // opv optional + + .error_set, .ptr_like => {}, + + .slice_like => unreachable, // equality is not defined on slices + + .opv_payload => { + try f.writeCValueMember(w, lhs, .{ .identifier = "is_null" }); + try w.writeAll(compareOperatorC(operator)); + try f.writeCValueMember(w, rhs, .{ .identifier = "is_null" }); + try w.writeByte(';'); + try f.newline(); + return local; + }, + + .@"struct" => { + // `lhs.is_null || rhs.is_null ? lhs.is_null == rhs.is_null : lhs.payload == rhs.payload` + try f.writeCValueMember(w, lhs, .{ .identifier = "is_null" }); + try w.writeAll(" || "); + try f.writeCValueMember(w, rhs, .{ .identifier = "is_null" }); + try w.writeAll(" ? 
"); + try f.writeCValueMember(w, lhs, .{ .identifier = "is_null" }); + try w.writeAll(compareOperatorC(operator)); + try f.writeCValueMember(w, rhs, .{ .identifier = "is_null" }); + try w.writeAll(" : "); + try f.writeCValueMember(w, lhs, .{ .identifier = "payload" }); + try w.writeAll(compareOperatorC(operator)); + try f.writeCValueMember(w, rhs, .{ .identifier = "payload" }); + try w.writeByte(';'); + try f.newline(); + return local; + }, + }, + .bool, .int, .pointer, .@"enum", .error_set => {}, + .@"struct", .@"union" => assert(operand_ty.containerLayout(zcu) == .@"packed"), + else => unreachable, + } + + try f.writeCValue(w, lhs, .other); + try w.writeAll(compareOperatorC(operator)); + try f.writeCValue(w, rhs, .other); + try w.writeByte(';'); + try f.newline(); return local; } @@ -4408,18 +3738,18 @@ fn airCmpLtErrorsLen(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, .bool); - try f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try w.writeAll(" = "); - try f.writeCValue(w, operand, .Other); + try f.writeCValue(w, operand, .other); try w.print(" < sizeof({f}) / sizeof(*{0f});", .{fmtIdentSolo("zig_errorName")}); - try f.object.newline(); + try f.newline(); return local; } fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; @@ -4432,38 +3762,34 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue { const inst_scalar_ty = inst_ty.scalarType(zcu); const elem_ty = inst_scalar_ty.indexableElem(zcu); assert(elem_ty.hasRuntimeBits(zcu)); - const inst_scalar_ctype = try f.ctypeFromType(inst_scalar_ty, .complete); const local = 
try f.allocLocal(inst, inst_ty); - const w = &f.object.code.writer; + const w = &f.code.writer; const v = try Vectorize.start(f, inst, w, inst_ty); - const a = try Assignment.start(f, w, inst_scalar_ctype); - try f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try v.elem(f, w); - try a.assign(f, w); + try w.writeAll(" = "); // We must convert to and from integer types to prevent UB if the operation // results in a NULL pointer, or if LHS is NULL. The operation is only UB // if the result is NULL and then dereferenced. try w.writeByte('('); - try f.renderCType(w, inst_scalar_ctype); + try f.renderType(w, inst_scalar_ty); try w.writeAll(")(((uintptr_t)"); - try f.writeCValue(w, lhs, .Other); + try f.writeCValue(w, lhs, .other); try v.elem(f, w); - try w.writeAll(") "); - try w.writeByte(operator); - try w.writeAll(" ("); - try f.writeCValue(w, rhs, .Other); + try w.print(") {c} (", .{operator}); + try f.writeCValue(w, rhs, .other); try v.elem(f, w); try w.writeAll("*sizeof("); try f.renderType(w, elem_ty); - try w.writeAll(")))"); - try a.end(f, w); + try w.writeAll(")));"); + try f.newline(); try v.end(f, inst, w); return local; } fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []const u8) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; @@ -4477,36 +3803,34 @@ fn airMinMax(f: *Function, inst: Air.Inst.Index, operator: u8, operation: []cons const rhs = try f.resolveInst(bin_op.rhs); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, inst_ty); const v = try Vectorize.start(f, inst, w, inst_ty); - try f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try v.elem(f, w); // (lhs <> rhs) ? 
lhs : rhs try w.writeAll(" = ("); - try f.writeCValue(w, lhs, .Other); + try f.writeCValue(w, lhs, .other); try v.elem(f, w); try w.writeByte(' '); try w.writeByte(operator); try w.writeByte(' '); - try f.writeCValue(w, rhs, .Other); + try f.writeCValue(w, rhs, .other); try v.elem(f, w); try w.writeAll(") ? "); - try f.writeCValue(w, lhs, .Other); + try f.writeCValue(w, lhs, .other); try v.elem(f, w); try w.writeAll(" : "); - try f.writeCValue(w, rhs, .Other); + try f.writeCValue(w, rhs, .other); try v.elem(f, w); try w.writeByte(';'); - try f.object.newline(); + try f.newline(); try v.end(f, inst, w); return local; } fn airSlice(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; - const zcu = pt.zcu; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const bin_op = f.air.extraData(Air.Bin, ty_pl.payload).data; @@ -4515,24 +3839,22 @@ fn airSlice(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); const inst_ty = f.typeOfIndex(inst); - const ptr_ty = inst_ty.slicePtrFieldType(zcu); - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, inst_ty); - { - const a = try Assignment.start(f, w, try f.ctypeFromType(ptr_ty, .complete)); - try f.writeCValueMember(w, local, .{ .identifier = "ptr" }); - try a.assign(f, w); - try f.writeCValue(w, ptr, .Other); - try a.end(f, w); - } - { - const a = try Assignment.start(f, w, .usize); - try f.writeCValueMember(w, local, .{ .identifier = "len" }); - try a.assign(f, w); - try f.writeCValue(w, len, .Other); - try a.end(f, w); - } + + try f.writeCValueMember(w, local, .{ .identifier = "ptr" }); + try w.writeAll(" = "); + try f.writeCValue(w, ptr, .other); + try w.writeByte(';'); + try f.newline(); + + try f.writeCValueMember(w, local, .{ .identifier = "len" }); + try w.writeAll(" = "); + try f.writeCValue(w, len, .other); + try w.writeByte(';'); + try f.newline(); + return local; } @@ -4541,14 
+3863,14 @@ fn airCall( inst: Air.Inst.Index, modifier: std.builtin.CallModifier, ) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; // Not even allowed to call panic in a naked function. - if (f.object.dg.is_naked_fn) return .none; + if (f.dg.is_naked_fn) return .none; - const gpa = f.object.dg.gpa; - const w = &f.object.code.writer; + const gpa = f.dg.gpa; + const w = &f.code.writer; const call = f.air.unwrapCall(inst); const args = call.args; @@ -4557,27 +3879,11 @@ fn airCall( defer gpa.free(resolved_args); for (resolved_args, args) |*resolved_arg, arg| { const arg_ty = f.typeOf(arg); - const arg_ctype = try f.ctypeFromType(arg_ty, .parameter); - if (arg_ctype.index == .void) { + if (!arg_ty.hasRuntimeBits(zcu)) { resolved_arg.* = .none; continue; } resolved_arg.* = try f.resolveInst(arg); - if (!arg_ctype.eql(try f.ctypeFromType(arg_ty, .complete))) { - const array_local = try f.allocAlignedLocal(inst, .{ - .ctype = arg_ctype, - .alignas = CType.AlignAs.fromAbiAlignment(arg_ty.abiAlignment(zcu)), - }); - try w.writeAll("memcpy("); - try f.writeCValueMember(w, array_local, .{ .identifier = "array" }); - try w.writeAll(", "); - try f.writeCValue(w, resolved_arg.*, .FunctionArgument); - try w.writeAll(", sizeof("); - try f.renderCType(w, arg_ctype); - try w.writeAll("));"); - try f.object.newline(); - resolved_arg.* = array_local; - } } const callee = try f.resolveInst(call.callee); @@ -4596,28 +3902,22 @@ fn airCall( }; const fn_info = zcu.typeToFunc(if (callee_is_ptr) callee_ty.childType(zcu) else callee_ty).?; const ret_ty: Type = .fromInterned(fn_info.return_type); - const ret_ctype: CType = if (ret_ty.isNoReturn(zcu)) - .void - else - try f.ctypeFromType(ret_ty, .parameter); const result_local = result: { if (modifier == .always_tail) { try w.writeAll("zig_always_tail return "); break :result .none; - } else if (ret_ctype.index == .void) { + } else if (!ret_ty.hasRuntimeBits(zcu)) { break :result 
.none; } else if (f.liveness.isUnused(inst)) { - try w.writeByte('('); - try f.renderCType(w, .void); - try w.writeByte(')'); + try w.writeAll("(void)"); break :result .none; } else { const local = try f.allocAlignedLocal(inst, .{ - .ctype = ret_ctype, - .alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(zcu)), + .type = ret_ty, + .alignment = .none, }); - try f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try w.writeAll(" = "); break :result local; } @@ -4644,8 +3944,19 @@ fn airCall( if (!callee_is_ptr) try w.writeByte('&'); } switch (modifier) { - .auto, .always_tail => try f.object.dg.renderNavName(w, fn_nav), - inline .never_tail, .never_inline => |m| try w.writeAll(try f.getLazyFnName(@unionInit(LazyFnKey, @tagName(m), fn_nav))), + .auto, .always_tail => try renderNavName(w, fn_nav, ip), + .never_tail => { + try f.need_never_tail_funcs.put(gpa, fn_nav, {}); + try w.print("zig_never_tail_{f}__{d}", .{ + fmtIdentUnsolo(ip.getNav(fn_nav).name.toSlice(ip)), @intFromEnum(fn_nav), + }); + }, + .never_inline => { + try f.need_never_inline_funcs.put(gpa, fn_nav, {}); + try w.print("zig_never_inline_{f}__{d}", .{ + fmtIdentUnsolo(ip.getNav(fn_nav).name.toSlice(ip)), @intFromEnum(fn_nav), + }); + }, else => unreachable, } if (need_cast) try w.writeByte(')'); @@ -4658,7 +3969,7 @@ fn airCall( else => unreachable, } // Fall back to function pointer call. 
- try f.writeCValue(w, callee, .Other); + try f.writeCValue(w, callee, .other); } try w.writeByte('('); @@ -4667,38 +3978,20 @@ fn airCall( if (resolved_arg == .none) continue; if (need_comma) try w.writeAll(", "); need_comma = true; - try f.writeCValue(w, resolved_arg, .FunctionArgument); - try f.freeCValue(inst, resolved_arg); + try f.writeCValue(w, resolved_arg, .other); } try w.writeAll(");"); switch (modifier) { .always_tail => try w.writeByte('\n'), - else => try f.object.newline(), + else => try f.newline(), } - const result = result: { - if (result_local == .none or !lowersToArray(ret_ty, zcu)) - break :result result_local; - - const array_local = try f.allocLocal(inst, ret_ty); - try w.writeAll("memcpy("); - try f.writeCValue(w, array_local, .FunctionArgument); - try w.writeAll(", "); - try f.writeCValueMember(w, result_local, .{ .identifier = "array" }); - try w.writeAll(", sizeof("); - try f.renderType(w, ret_ty); - try w.writeAll("));"); - try f.object.newline(); - try freeLocal(f, inst, result_local.new_local, null); - break :result array_local; - }; - - return result; + return result_local; } fn airDbgStmt(f: *Function, inst: Air.Inst.Index) !CValue { const dbg_stmt = f.air.instructions.items(.data)[@intFromEnum(inst)].dbg_stmt; - const w = &f.object.code.writer; + const w = &f.code.writer; // TODO re-evaluate whether to emit these or not. If we naively emit // these directives, the output file will report bogus line numbers because // every newline after the #line directive adds one to the line. @@ -4707,32 +4000,32 @@ fn airDbgStmt(f: *Function, inst: Air.Inst.Index) !CValue { // newlines until the next dbg_stmt occurs. // Perhaps an additional compilation option is in order? 
//try w.print("#line {d}", .{dbg_stmt.line + 1}); - //try f.object.newline(); + //try f.newline(); try w.print("/* file:{d}:{d} */", .{ dbg_stmt.line + 1, dbg_stmt.column + 1 }); - try f.object.newline(); + try f.newline(); return .none; } fn airDbgEmptyStmt(f: *Function, _: Air.Inst.Index) !CValue { - try f.object.code.writer.writeAll("(void)0;"); - try f.object.newline(); + try f.code.writer.writeAll("(void)0;"); + try f.newline(); return .none; } fn airDbgInlineBlock(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; const block = f.air.unwrapDbgBlock(inst); const owner_nav = ip.getNav(zcu.funcInfo(block.func).owner_nav); - const w = &f.object.code.writer; + const w = &f.code.writer; try w.print("/* inline:{f} */", .{owner_nav.fqn.fmt(&zcu.intern_pool)}); - try f.object.newline(); + try f.newline(); return lowerBlock(f, inst, block.body); } fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const tag = f.air.instructions.items(.tag)[@intFromEnum(inst)]; const pl_op = f.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; @@ -4741,9 +4034,9 @@ fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue { if (!operand_is_undef) _ = try f.resolveInst(pl_op.operand); try reap(f, inst, &.{pl_op.operand}); - const w = &f.object.code.writer; + const w = &f.code.writer; try w.print("/* {s}:{s} */", .{ @tagName(tag), name.toSlice(f.air) }); - try f.object.newline(); + try f.newline(); return .none; } @@ -4753,13 +4046,13 @@ fn airBlock(f: *Function, inst: Air.Inst.Index) !CValue { } fn lowerBlock(f: *Function, inst: Air.Inst.Index, body: []const Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const liveness_block = f.liveness.getBlock(inst); const block_id = f.next_block_index; f.next_block_index += 1; - const w = &f.object.code.writer; + const w = 
&f.code.writer; const inst_ty = f.typeOfIndex(inst); const result = if (inst_ty.hasRuntimeBits(zcu) and !f.liveness.isUnused(inst)) @@ -4767,7 +4060,7 @@ fn lowerBlock(f: *Function, inst: Air.Inst.Index, body: []const Air.Inst.Index) else .none; - try f.blocks.putNoClobber(f.object.dg.gpa, inst, .{ + try f.blocks.putNoClobber(f.dg.gpa, inst, .{ .block_id = block_id, .result = result, }); @@ -4782,23 +4075,23 @@ fn lowerBlock(f: *Function, inst: Air.Inst.Index, body: []const Air.Inst.Index) } // noreturn blocks have no `br` instructions reaching them, so we don't want a label - if (f.object.dg.is_naked_fn) { - if (f.object.dg.expected_block) |expected_block| { + if (f.dg.is_naked_fn) { + if (f.dg.expected_block) |expected_block| { if (block_id != expected_block) return f.fail("runtime code not allowed in naked function", .{}); - f.object.dg.expected_block = null; + f.dg.expected_block = null; } } else if (!f.typeOfIndex(inst).isNoReturn(zcu)) { // label must be followed by an expression, include an empty one. 
try w.print("\nzig_block_{d}:;", .{block_id}); - try f.object.newline(); + try f.newline(); } return result; } fn airTry(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const unwrapped_try = f.air.unwrapTry(inst); const body = unwrapped_try.else_body; const err_union_ty = f.air.typeOf(unwrapped_try.error_union, &pt.zcu.intern_pool); @@ -4806,7 +4099,7 @@ fn airTry(f: *Function, inst: Air.Inst.Index) !CValue { } fn airTryPtr(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const unwrapped_try = f.air.unwrapTryPtr(inst); const body = unwrapped_try.else_body; const err_union_ty = f.air.typeOf(unwrapped_try.error_union_ptr, &pt.zcu.intern_pool).childType(pt.zcu); @@ -4821,46 +4114,38 @@ fn lowerTry( err_union_ty: Type, is_ptr: bool, ) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const err_union = try f.resolveInst(operand); const inst_ty = f.typeOfIndex(inst); const liveness_condbr = f.liveness.getCondBr(inst); - const w = &f.object.code.writer; + const w = &f.code.writer; const payload_ty = err_union_ty.errorUnionPayload(zcu); - const payload_has_bits = payload_ty.hasRuntimeBits(zcu); - if (!err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) { - try w.writeAll("if ("); - if (!payload_has_bits) { - if (is_ptr) - try f.writeCValueDeref(w, err_union) - else - try f.writeCValue(w, err_union, .Other); - } else { - // Reap the operand so that it can be reused inside genBody. - // Remember we must avoid calling reap() twice for the same operand - // in this function. 
- try reap(f, inst, &.{operand}); - if (is_ptr) - try f.writeCValueDerefMember(w, err_union, .{ .identifier = "error" }) - else - try f.writeCValueMember(w, err_union, .{ .identifier = "error" }); - } - try w.writeAll(") "); + try w.writeAll("if ("); - try genBodyResolveState(f, inst, liveness_condbr.else_deaths, body, false); - try f.object.newline(); - if (f.object.dg.expected_block) |_| - return f.fail("runtime code not allowed in naked function", .{}); - } + // Reap the operand so that it can be reused inside genBody. + // Remember we must avoid calling reap() twice for the same operand + // in this function. + try reap(f, inst, &.{operand}); + if (is_ptr) + try f.writeCValueDerefMember(w, err_union, .{ .identifier = "error" }) + else + try f.writeCValueMember(w, err_union, .{ .identifier = "error" }); + + try w.writeAll(") "); + + try genBodyResolveState(f, inst, liveness_condbr.else_deaths, body, false); + try f.newline(); + if (f.dg.expected_block) |_| + return f.fail("runtime code not allowed in naked function", .{}); // Now we have the "then branch" (in terms of the liveness data); process any deaths. 
for (liveness_condbr.then_deaths) |death| { try die(f, inst, death.toRef()); } - if (!payload_has_bits) { + if (!payload_ty.hasRuntimeBits(zcu)) { if (!is_ptr) { return .none; } else { @@ -4873,14 +4158,14 @@ fn lowerTry( if (f.liveness.isUnused(inst)) return .none; const local = try f.allocLocal(inst, inst_ty); - const a = try Assignment.start(f, w, try f.ctypeFromType(inst_ty, .complete)); - try f.writeCValue(w, local, .Other); - try a.assign(f, w); + try f.writeCValue(w, local, .other); + try w.writeAll(" = "); if (is_ptr) { try w.writeByte('&'); try f.writeCValueDerefMember(w, err_union, .{ .identifier = "payload" }); } else try f.writeCValueMember(w, err_union, .{ .identifier = "payload" }); - try a.end(f, w); + try w.writeByte(';'); + try f.newline(); return local; } @@ -4888,25 +4173,24 @@ fn airBr(f: *Function, inst: Air.Inst.Index) !void { const branch = f.air.instructions.items(.data)[@intFromEnum(inst)].br; const block = f.blocks.get(branch.block_inst).?; const result = block.result; - const w = &f.object.code.writer; + const w = &f.code.writer; - if (f.object.dg.is_naked_fn) { + if (f.dg.is_naked_fn) { if (result != .none) return f.fail("runtime code not allowed in naked function", .{}); - f.object.dg.expected_block = block.block_id; + f.dg.expected_block = block.block_id; return; } // If result is .none then the value of the block is unused. 
if (result != .none) { - const operand_ty = f.typeOf(branch.operand); const operand = try f.resolveInst(branch.operand); try reap(f, inst, &.{branch.operand}); - const a = try Assignment.start(f, w, try f.ctypeFromType(operand_ty, .complete)); - try f.writeCValue(w, result, .Other); - try a.assign(f, w); - try f.writeCValue(w, operand, .Other); - try a.end(f, w); + try f.writeCValue(w, result, .other); + try w.writeAll(" = "); + try f.writeCValue(w, operand, .other); + try w.writeByte(';'); + try f.newline(); } try w.print("goto zig_block_{d};\n", .{block.block_id}); @@ -4914,14 +4198,14 @@ fn airBr(f: *Function, inst: Air.Inst.Index) !void { fn airRepeat(f: *Function, inst: Air.Inst.Index) !void { const repeat = f.air.instructions.items(.data)[@intFromEnum(inst)].repeat; - try f.object.code.writer.print("goto zig_loop_{d};\n", .{@intFromEnum(repeat.loop_inst)}); + try f.code.writer.print("goto zig_loop_{d};\n", .{@intFromEnum(repeat.loop_inst)}); } fn airSwitchDispatch(f: *Function, inst: Air.Inst.Index) !void { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const br = f.air.instructions.items(.data)[@intFromEnum(inst)].br; - const w = &f.object.code.writer; + const w = &f.code.writer; if (try f.air.value(br.operand, pt)) |cond_val| { // Comptime-known dispatch. Iterate the cases to find the correct @@ -4950,11 +4234,11 @@ fn airSwitchDispatch(f: *Function, inst: Air.Inst.Index) !void { // Runtime-known dispatch. Set the switch condition, and branch back. 
const cond = try f.resolveInst(br.operand); const cond_local = f.loop_switch_conds.get(br.block_inst).?; - try f.writeCValue(w, .{ .local = cond_local }, .Other); + try f.writeCValue(w, .{ .local = cond_local }, .other); try w.writeAll(" = "); - try f.writeCValue(w, cond, .Other); + try f.writeCValue(w, cond, .other); try w.writeByte(';'); - try f.object.newline(); + try f.newline(); try w.print("goto zig_switch_{d}_loop;\n", .{@intFromEnum(br.block_inst)}); } @@ -4971,11 +4255,10 @@ fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue { } fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; - const target = &f.object.dg.mod.resolved_target.result; - const ctype_pool = &f.object.dg.ctype_pool; - const w = &f.object.code.writer; + const target = &f.dg.mod.resolved_target.result; + const w = &f.code.writer; if (operand_ty.isAbiInt(zcu) and dest_ty.isAbiInt(zcu)) { const src_info = dest_ty.intInfo(zcu); @@ -4986,26 +4269,16 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !CVal if (dest_ty.isPtrAtRuntime(zcu) or operand_ty.isPtrAtRuntime(zcu)) { const local = try f.allocLocal(null, dest_ty); - try f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try w.writeAll(" = ("); try f.renderType(w, dest_ty); try w.writeByte(')'); - try f.writeCValue(w, operand, .Other); + try f.writeCValue(w, operand, .other); try w.writeByte(';'); - try f.object.newline(); + try f.newline(); return local; } - const operand_lval = if (operand == .constant) blk: { - const operand_local = try f.allocLocal(null, operand_ty); - try f.writeCValue(w, operand_local, .Other); - try w.writeAll(" = "); - try f.writeCValue(w, operand, .Other); - try w.writeByte(';'); - try f.object.newline(); - break :blk operand_local; - } else operand; - const local = try f.allocLocal(null, dest_ty); // On big-endian targets, copying ABI integers with padding 
bits is awkward, because the padding bits are at the low bytes of the value. // We need to offset the source or destination pointer appropriately and copy the right number of bytes. @@ -5013,141 +4286,134 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !CVal // e.g. [10]u8 -> u80. We need to offset the destination so that we copy to the least significant bits of the integer. const offset = dest_ty.abiSize(zcu) - operand_ty.abiSize(zcu); try w.writeAll("memcpy((char *)&"); - try f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try w.print(" + {d}, &", .{offset}); - try f.writeCValue(w, operand_lval, .Other); + switch (operand) { + .constant => |val| try f.dg.renderValueAsLvalue(w, val), + else => try f.writeCValue(w, operand, .other), + } try w.print(", {d});", .{operand_ty.abiSize(zcu)}); } else if (target.cpu.arch.endian() == .big and operand_ty.isAbiInt(zcu) and !dest_ty.isAbiInt(zcu)) { // e.g. u80 -> [10]u8. We need to offset the source so that we copy from the least significant bits of the integer. 
const offset = operand_ty.abiSize(zcu) - dest_ty.abiSize(zcu); try w.writeAll("memcpy(&"); - try f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try w.writeAll(", (const char *)&"); - try f.writeCValue(w, operand_lval, .Other); + switch (operand) { + .constant => |val| try f.dg.renderValueAsLvalue(w, val), + else => try f.writeCValue(w, operand, .other), + } try w.print(" + {d}, {d});", .{ offset, dest_ty.abiSize(zcu) }); } else { try w.writeAll("memcpy(&"); - try f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try w.writeAll(", &"); - try f.writeCValue(w, operand_lval, .Other); + switch (operand) { + .constant => |val| try f.dg.renderValueAsLvalue(w, val), + else => try f.writeCValue(w, operand, .other), + } try w.print(", {d});", .{@min(dest_ty.abiSize(zcu), operand_ty.abiSize(zcu))}); } - try f.object.newline(); + try f.newline(); // Ensure padding bits have the expected value. if (dest_ty.isAbiInt(zcu)) { - const dest_ctype = try f.ctypeFromType(dest_ty, .complete); - const dest_info = dest_ty.intInfo(zcu); - var bits: u16 = dest_info.bits; - var wrap_ctype: ?CType = null; - var need_bitcasts = false; - - try f.writeCValue(w, local, .Other); - switch (dest_ctype.info(ctype_pool)) { - else => {}, - .array => |array_info| { - try w.print("[{d}]", .{switch (target.cpu.arch.endian()) { - .little => array_info.len - 1, + switch (CType.classifyInt(dest_ty, zcu)) { + .void => unreachable, // opv + .small => { + try f.writeCValue(w, local, .other); + try w.writeAll(" = zig_wrap_"); + try f.dg.renderTypeForBuiltinFnName(w, dest_ty); + try w.writeByte('('); + try f.writeCValue(w, local, .other); + try f.dg.renderBuiltinInfo(w, dest_ty, .bits); + try w.writeAll(");"); + try f.newline(); + }, + .big => |big| { + const dest_info = dest_ty.intInfo(zcu); + const padding_index: u16 = switch (target.cpu.arch.endian()) { + .little => big.limbs_len - 1, .big => 0, - }}); - wrap_ctype = 
array_info.elem_ctype.toSignedness(dest_info.signedness); - need_bitcasts = wrap_ctype.?.index == .zig_i128; - bits -= 1; - bits %= @as(u16, @intCast(f.byteSize(array_info.elem_ctype) * 8)); - bits += 1; + }; + const wrap_bits = ((dest_info.bits - 1) % big.limb_size.bits()) + 1; + if (big.limb_size != .@"128" or dest_info.signedness == .unsigned) { + try f.writeCValueMember(w, local, .{ .identifier = "limbs" }); + try w.print("[{d}] = zig_wrap_{c}{d}(", .{ + padding_index, + signAbbrev(dest_info.signedness), + big.limb_size.bits(), + }); + try f.writeCValueMember(w, local, .{ .identifier = "limbs" }); + try w.print("[{d}], {d});", .{ padding_index, wrap_bits }); + } else { + try f.writeCValueMember(w, local, .{ .identifier = "limbs" }); + try w.print("[{d}] = zig_bitCast_u128(zig_wrap_i128(zig_bitCast_i128(", .{ + padding_index, + }); + try f.writeCValueMember(w, local, .{ .identifier = "limbs" }); + try w.print("[{d}]), {d}));", .{ padding_index, wrap_bits }); + try f.newline(); + } }, } - try w.writeAll(" = "); - if (need_bitcasts) { - try w.writeAll("zig_bitCast_"); - try f.object.dg.renderCTypeForBuiltinFnName(w, wrap_ctype.?.toUnsigned()); - try w.writeByte('('); - } - try w.writeAll("zig_wrap_"); - const info_ty = try pt.intType(dest_info.signedness, bits); - if (wrap_ctype) |ctype| - try f.object.dg.renderCTypeForBuiltinFnName(w, ctype) - else - try f.object.dg.renderTypeForBuiltinFnName(w, info_ty); - try w.writeByte('('); - if (need_bitcasts) { - try w.writeAll("zig_bitCast_"); - try f.object.dg.renderCTypeForBuiltinFnName(w, wrap_ctype.?); - try w.writeByte('('); - } - try f.writeCValue(w, local, .Other); - switch (dest_ctype.info(ctype_pool)) { - else => {}, - .array => |array_info| try w.print("[{d}]", .{ - switch (target.cpu.arch.endian()) { - .little => array_info.len - 1, - .big => 0, - }, - }), - } - if (need_bitcasts) try w.writeByte(')'); - try f.object.dg.renderBuiltinInfo(w, info_ty, .bits); - if (need_bitcasts) try w.writeByte(')'); - try 
w.writeAll(");"); - try f.object.newline(); } - try f.freeCValue(null, operand_lval); return local; } -fn airTrap(f: *Function, w: *Writer) !void { +fn airTrap(f: *Function) !void { // Not even allowed to call trap in a naked function. - if (f.object.dg.is_naked_fn) return; - try w.writeAll("zig_trap();\n"); + if (f.dg.is_naked_fn) return; + try f.code.writer.writeAll("zig_trap();\n"); } fn airBreakpoint(f: *Function) !CValue { - const w = &f.object.code.writer; + const w = &f.code.writer; try w.writeAll("zig_breakpoint();"); - try f.object.newline(); + try f.newline(); return .none; } fn airRetAddr(f: *Function, inst: Air.Inst.Index) !CValue { - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, .usize); - try f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try w.writeAll(" = ("); try f.renderType(w, .usize); try w.writeAll(")zig_return_address();"); - try f.object.newline(); + try f.newline(); return local; } fn airFrameAddress(f: *Function, inst: Air.Inst.Index) !CValue { - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, .usize); - try f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try w.writeAll(" = ("); try f.renderType(w, .usize); try w.writeAll(")zig_frame_address();"); - try f.object.newline(); + try f.newline(); return local; } -fn airUnreach(o: *Object) !void { +fn airUnreach(f: *Function) !void { // Not even allowed to call unreachable in a naked function. - if (o.dg.is_naked_fn) return; - try o.code.writer.writeAll("zig_unreachable();\n"); + if (f.dg.is_naked_fn) return; + try f.code.writer.writeAll("zig_unreachable();\n"); } fn airLoop(f: *Function, inst: Air.Inst.Index) !void { const block = f.air.unwrapBlock(inst); - const w = &f.object.code.writer; + const w = &f.code.writer; // `repeat` instructions matching this loop will branch to // this label. 
Since we need a label for arbitrary `repeat` // anyway, there's actually no need to use a "real" looping // construct at all! try w.print("zig_loop_{d}:", .{@intFromEnum(inst)}); - try f.object.newline(); + try f.newline(); try genBodyInner(f, block.body); // no need to restore state, we're noreturn } @@ -5158,15 +4424,15 @@ fn airCondBr(f: *Function, inst: Air.Inst.Index) !void { const then_body = cond_br.then_body; const else_body = cond_br.else_body; const liveness_condbr = f.liveness.getCondBr(inst); - const w = &f.object.code.writer; + const w = &f.code.writer; try w.writeAll("if ("); - try f.writeCValue(w, cond, .Other); + try f.writeCValue(w, cond, .other); try w.writeAll(") "); try genBodyResolveState(f, inst, liveness_condbr.then_deaths, then_body, false); - try f.object.newline(); - if (else_body.len > 0) if (f.object.dg.expected_block) |_| + try f.newline(); + if (else_body.len > 0) if (f.dg.expected_block) |_| return f.fail("runtime code not allowed in naked function", .{}); // We don't need to use `genBodyResolveState` for the else block, because this instruction is @@ -5184,23 +4450,23 @@ fn airCondBr(f: *Function, inst: Air.Inst.Index) !void { } fn airSwitchBr(f: *Function, inst: Air.Inst.Index, is_dispatch_loop: bool) !void { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; - const gpa = f.object.dg.gpa; + const gpa = f.dg.gpa; const switch_br = f.air.unwrapSwitch(inst); const init_condition = try f.resolveInst(switch_br.operand); try reap(f, inst, &.{switch_br.operand}); const condition_ty = f.typeOf(switch_br.operand); - const w = &f.object.code.writer; + const w = &f.code.writer; // For dispatches, we will create a local alloc to contain the condition value. // This may not result in optimal codegen for switch loops, but it minimizes the // amount of C code we generate, which is probably more desirable here (and is simpler). 
const condition = if (is_dispatch_loop) cond: { const new_local = try f.allocLocal(inst, condition_ty); - try f.copyCValue(try f.ctypeFromType(condition_ty, .complete), new_local, init_condition); + try f.copyCValue(new_local, init_condition); try w.print("zig_switch_{d}_loop:", .{@intFromEnum(inst)}); - try f.object.newline(); + try f.newline(); try f.loop_switch_conds.put(gpa, inst, new_local.new_local); break :cond new_local; } else init_condition; @@ -5222,9 +4488,9 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index, is_dispatch_loop: bool) !void try f.renderType(w, lowered_condition_ty); try w.writeByte(')'); } - try f.writeCValue(w, condition, .Other); + try f.writeCValue(w, condition, .other); try w.writeAll(") {"); - f.object.indent(); + f.indent(); const liveness = try f.liveness.getSwitchBr(gpa, inst, switch_br.cases_len + 1); defer gpa.free(liveness.deaths); @@ -5237,7 +4503,7 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index, is_dispatch_loop: bool) !void continue; } for (case.items) |item| { - try f.object.newline(); + try f.newline(); try w.writeAll("case "); const item_value = try f.air.value(item, pt); // If `item_value` is a pointer with a known integer address, print the address @@ -5254,28 +4520,28 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index, is_dispatch_loop: bool) !void try f.renderType(w, .usize); try w.writeByte(')'); } - try f.object.dg.renderValue(w, (try f.air.value(item, pt)).?, .Other); + try f.dg.renderValue(w, (try f.air.value(item, pt)).?, .other); } try w.writeByte(':'); } try w.writeAll(" {"); - f.object.indent(); - try f.object.newline(); + f.indent(); + try f.newline(); if (is_dispatch_loop) { try w.print("zig_switch_{d}_dispatch_{d}:;", .{ @intFromEnum(inst), case.idx }); - try f.object.newline(); + try f.newline(); } try genBodyResolveState(f, inst, liveness.deaths[case.idx], case.body, true); - try f.object.outdent(); + try f.outdent(); try w.writeByte('}'); - if (f.object.dg.expected_block) |_| + if 
(f.dg.expected_block) |_| return f.fail("runtime code not allowed in naked function", .{}); // The case body must be noreturn so we don't need to insert a break. } const else_body = it.elseBody(); - try f.object.newline(); + try f.newline(); try w.writeAll("default: "); if (any_range_cases) { @@ -5288,33 +4554,33 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index, is_dispatch_loop: bool) !void try w.writeAll("if ("); for (case.items, 0..) |item, item_i| { if (item_i != 0) try w.writeAll(" || "); - try f.writeCValue(w, condition, .Other); + try f.writeCValue(w, condition, .other); try w.writeAll(" == "); - try f.object.dg.renderValue(w, (try f.air.value(item, pt)).?, .Other); + try f.dg.renderValue(w, (try f.air.value(item, pt)).?, .other); } for (case.ranges, 0..) |range, range_i| { if (case.items.len != 0 or range_i != 0) try w.writeAll(" || "); // "(x >= lower && x <= upper)" try w.writeByte('('); - try f.writeCValue(w, condition, .Other); + try f.writeCValue(w, condition, .other); try w.writeAll(" >= "); - try f.object.dg.renderValue(w, (try f.air.value(range[0], pt)).?, .Other); + try f.dg.renderValue(w, (try f.air.value(range[0], pt)).?, .other); try w.writeAll(" && "); - try f.writeCValue(w, condition, .Other); + try f.writeCValue(w, condition, .other); try w.writeAll(" <= "); - try f.object.dg.renderValue(w, (try f.air.value(range[1], pt)).?, .Other); + try f.dg.renderValue(w, (try f.air.value(range[1], pt)).?, .other); try w.writeByte(')'); } try w.writeAll(") {"); - f.object.indent(); - try f.object.newline(); + f.indent(); + try f.newline(); if (is_dispatch_loop) { try w.print("zig_switch_{d}_dispatch_{d}: ", .{ @intFromEnum(inst), case.idx }); } try genBodyResolveState(f, inst, liveness.deaths[case.idx], case.body, true); - try f.object.outdent(); + try f.outdent(); try w.writeByte('}'); - if (f.object.dg.expected_block) |_| + if (f.dg.expected_block) |_| return f.fail("runtime code not allowed in naked function", .{}); } } @@ -5328,16 +4594,16 @@ fn 
airSwitchBr(f: *Function, inst: Air.Inst.Index, is_dispatch_loop: bool) !void try die(f, inst, death.toRef()); } try genBody(f, else_body); - if (f.object.dg.expected_block) |_| + if (f.dg.expected_block) |_| return f.fail("runtime code not allowed in naked function", .{}); - } else try airUnreach(&f.object); - try f.object.newline(); - try f.object.outdent(); + } else try airUnreach(f); + try f.newline(); + try f.outdent(); try w.writeAll("}\n"); } fn asmInputNeedsLocal(f: *Function, constraint: []const u8, value: CValue) bool { - const dg = f.object.dg; + const dg = f.dg; const target = &dg.mod.resolved_target.result; return switch (constraint[0]) { '{' => true, @@ -5357,28 +4623,28 @@ fn asmInputNeedsLocal(f: *Function, constraint: []const u8, value: CValue) bool } fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const unwrapped_asm = f.air.unwrapAsm(inst); const is_volatile = unwrapped_asm.is_volatile; - const gpa = f.object.dg.gpa; + const gpa = f.dg.gpa; const outputs = unwrapped_asm.outputs; const inputs = unwrapped_asm.inputs; const result = result: { - const w = &f.object.code.writer; + const w = &f.code.writer; const inst_ty = f.typeOfIndex(inst); const inst_local = if (inst_ty.hasRuntimeBits(zcu)) local: { const inst_local = try f.allocLocalValue(.{ - .ctype = try f.ctypeFromType(inst_ty, .complete), - .alignas = CType.AlignAs.fromAbiAlignment(inst_ty.abiAlignment(zcu)), + .type = inst_ty, + .alignment = .none, }); if (f.wantSafety()) { - try f.writeCValue(w, inst_local, .Other); + try f.writeCValue(w, inst_local, .other); try w.writeAll(" = "); - try f.writeCValue(w, .{ .undef = inst_ty }, .Other); + try f.writeCValue(w, .{ .undef = inst_ty }, .other); try w.writeByte(';'); - try f.object.newline(); + try f.newline(); } break :local inst_local; } else .none; @@ -5399,20 +4665,20 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { const output_ty = if (output.operand == 
.none) inst_ty else f.typeOf(output.operand).childType(zcu); try w.writeAll("register "); const output_local = try f.allocLocalValue(.{ - .ctype = try f.ctypeFromType(output_ty, .complete), - .alignas = CType.AlignAs.fromAbiAlignment(output_ty.abiAlignment(zcu)), + .type = output_ty, + .alignment = .none, }); try f.allocs.put(gpa, output_local.new_local, false); - try f.object.dg.renderTypeAndName(w, output_ty, output_local, .{}, .none, .complete); + try f.dg.renderTypeAndName(w, output_ty, output_local, .{}, .none); try w.writeAll(" __asm(\""); try w.writeAll(constraint["={".len .. constraint.len - "}".len]); try w.writeAll("\")"); if (f.wantSafety()) { try w.writeAll(" = "); - try f.writeCValue(w, .{ .undef = output_ty }, .Other); + try f.writeCValue(w, .{ .undef = output_ty }, .other); } try w.writeByte(';'); - try f.object.newline(); + try f.newline(); } } @@ -5432,29 +4698,29 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { const input_ty = f.typeOf(input.operand); if (is_reg) try w.writeAll("register "); const input_local = try f.allocLocalValue(.{ - .ctype = try f.ctypeFromType(input_ty, .complete), - .alignas = CType.AlignAs.fromAbiAlignment(input_ty.abiAlignment(zcu)), + .type = input_ty, + .alignment = .none, }); try f.allocs.put(gpa, input_local.new_local, false); // Do not render the declaration as `const` qualified if we're generating an // explicit `register` local, as GCC will ignore the constraint completely. - try f.object.dg.renderTypeAndName(w, input_ty, input_local, if (is_reg) .{} else Const, .none, .complete); + try f.dg.renderTypeAndName(w, input_ty, input_local, .{ .@"const" = is_reg }, .none); if (is_reg) { try w.writeAll(" __asm(\""); try w.writeAll(constraint["{".len .. 
constraint.len - "}".len]); try w.writeAll("\")"); } try w.writeAll(" = "); - try f.writeCValue(w, input_val, .Other); + try f.writeCValue(w, input_val, .other); try w.writeByte(';'); - try f.object.newline(); + try f.newline(); } } { const asm_source = unwrapped_asm.source; - var stack = std.heap.stackFallback(256, f.object.dg.gpa); + var stack = std.heap.stackFallback(256, f.dg.gpa); const allocator = stack.get(); const fixed_asm_source = try allocator.alloc(u8, asm_source.len); defer allocator.free(fixed_asm_source); @@ -5520,10 +4786,10 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { const is_reg = constraint[1] == '{'; try w.print("{f}(", .{fmtStringLiteral(if (is_reg) "=r" else constraint, null)}); if (is_reg) { - try f.writeCValue(w, .{ .local = locals_index }, .Other); + try f.writeCValue(w, .{ .local = locals_index }, .other); locals_index += 1; } else if (output.operand == .none) { - try f.writeCValue(w, inst_local, .FunctionArgument); + try f.writeCValue(w, inst_local, .other); } else { try f.writeCValueDeref(w, try f.resolveInst(output.operand)); } @@ -5547,7 +4813,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { const input_local_idx = locals_index; locals_index += 1; break :local .{ .local = input_local_idx }; - } else input_val, .Other); + } else input_val, .other); try w.writeByte(')'); } try w.writeByte(':'); @@ -5567,7 +4833,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { const field_name = clobbers_ty.structFieldName(field_index, zcu).toSlice(ip).?; assert(field_name.len != 0); - const target = &f.object.dg.mod.resolved_target.result; + const target = &f.dg.mod.resolved_target.result; var c_name_buf: [16]u8 = undefined; const name = if ((target.cpu.arch.isMIPS() or target.cpu.arch == .alpha) and field_name[0] == 'r') name: { @@ -5594,7 +4860,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { } w.undo(1); // erase the last comma try w.writeAll(");"); - try f.object.newline(); + try f.newline(); 
locals_index = locals_begin; it = unwrapped_asm.iterateOutputs(); @@ -5608,10 +4874,10 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { else try f.resolveInst(output.operand)); try w.writeAll(" = "); - try f.writeCValue(w, .{ .local = locals_index }, .Other); + try f.writeCValue(w, .{ .local = locals_index }, .other); locals_index += 1; try w.writeByte(';'); - try f.object.newline(); + try f.newline(); } } @@ -5633,147 +4899,145 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { fn airIsNull( f: *Function, inst: Air.Inst.Index, - operator: std.math.CompareOperator, + operator: enum { eq, neq }, is_ptr: bool, ) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; - const ctype_pool = &f.object.dg.ctype_pool; const un_op = f.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const w = &f.object.code.writer; + const w = &f.code.writer; const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); const local = try f.allocLocal(inst, .bool); - const a = try Assignment.start(f, w, .bool); - try f.writeCValue(w, local, .Other); - try a.assign(f, w); + try f.writeCValue(w, local, .other); + try w.writeAll(" = "); const operand_ty = f.typeOf(un_op); const optional_ty = if (is_ptr) operand_ty.childType(zcu) else operand_ty; - const opt_ctype = try f.ctypeFromType(optional_ty, .complete); - const rhs = switch (opt_ctype.info(ctype_pool)) { - .basic, .pointer => rhs: { - if (is_ptr) - try f.writeCValueDeref(w, operand) - else - try f.writeCValue(w, operand, .Other); - break :rhs if (opt_ctype.isBool()) - "true" - else if (opt_ctype.isInteger()) - "0" - else - "NULL"; + + const pre: []const u8, const maybe_field: ?[]const u8, const post: []const u8 = switch (operator) { + // zig fmt: off + .eq => switch (CType.classifyOptional(optional_ty, zcu)) { + .npv_payload => unreachable, // opv optional + .error_set => .{ "", null, " == 0" }, + .ptr_like => .{ "", null, " == NULL" }, + .slice_like => .{ "", "ptr", " == 
NULL" }, + .opv_payload => .{ "", "is_null", "" }, + .@"struct" => .{ "", "is_null", "" }, }, - .aligned, .array, .vector, .fwd_decl, .function => unreachable, - .aggregate => |aggregate| switch (aggregate.fields.at(0, ctype_pool).name.index) { - .is_null, .payload => rhs: { - if (is_ptr) - try f.writeCValueDerefMember(w, operand, .{ .identifier = "is_null" }) - else - try f.writeCValueMember(w, operand, .{ .identifier = "is_null" }); - break :rhs "true"; - }, - .ptr, .len => rhs: { - if (is_ptr) - try f.writeCValueDerefMember(w, operand, .{ .identifier = "ptr" }) - else - try f.writeCValueMember(w, operand, .{ .identifier = "ptr" }); - break :rhs "NULL"; - }, - else => unreachable, + .neq => switch (CType.classifyOptional(optional_ty, zcu)) { + .npv_payload => unreachable, // opv optional + .error_set => .{ "", null, " != 0" }, + .ptr_like => .{ "", null, " != NULL" }, + .slice_like => .{ "", "ptr", " != NULL" }, + .opv_payload => .{ "!", "is_null", "" }, + .@"struct" => .{ "!", "is_null", "" }, }, + // zig fmt: on }; - try w.writeAll(compareOperatorC(operator)); - try w.writeAll(rhs); - try a.end(f, w); + + try w.writeAll(pre); + if (maybe_field) |field| { + if (is_ptr) { + try f.writeCValueDerefMember(w, operand, .{ .identifier = field }); + } else { + try f.writeCValueMember(w, operand, .{ .identifier = field }); + } + } else { + if (is_ptr) { + try f.writeCValueDeref(w, operand); + } else { + try f.writeCValue(w, operand, .other); + } + } + try w.writeAll(post); + + try w.writeByte(';'); + try f.newline(); return local; } fn airOptionalPayload(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; - const ctype_pool = &f.object.dg.ctype_pool; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const inst_ty = f.typeOfIndex(inst); const operand_ty = f.typeOf(ty_op.operand); const opt_ty = if (is_ptr) operand_ty.childType(zcu) else operand_ty; - const opt_ctype = try 
f.ctypeFromType(opt_ty, .complete); - if (opt_ctype.isBool()) return if (is_ptr) .{ .undef = inst_ty } else .none; const operand = try f.resolveInst(ty_op.operand); - switch (opt_ctype.info(ctype_pool)) { - .basic, .pointer => return f.moveCValue(inst, inst_ty, operand), - .aligned, .array, .vector, .fwd_decl, .function => unreachable, - .aggregate => |aggregate| switch (aggregate.fields.at(0, ctype_pool).name.index) { - .is_null, .payload => { - const w = &f.object.code.writer; - const local = try f.allocLocal(inst, inst_ty); - const a = try Assignment.start(f, w, try f.ctypeFromType(inst_ty, .complete)); - try f.writeCValue(w, local, .Other); - try a.assign(f, w); - if (is_ptr) { - try w.writeByte('&'); - try f.writeCValueDerefMember(w, operand, .{ .identifier = "payload" }); - } else try f.writeCValueMember(w, operand, .{ .identifier = "payload" }); - try a.end(f, w); - return local; - }, - .ptr, .len => return f.moveCValue(inst, inst_ty, operand), - else => unreachable, + + switch (CType.classifyOptional(opt_ty, zcu)) { + .npv_payload => unreachable, // opv optional + + .opv_payload => return if (is_ptr) .{ .undef = inst_ty } else .none, + + .error_set, + .ptr_like, + .slice_like, + => return f.moveCValue(inst, inst_ty, operand), + + .@"struct" => { + const w = &f.code.writer; + const local = try f.allocLocal(inst, inst_ty); + try f.writeCValue(w, local, .other); + try w.writeAll(" = "); + if (is_ptr) { + try w.writeByte('&'); + try f.writeCValueDerefMember(w, operand, .{ .identifier = "payload" }); + } else try f.writeCValueMember(w, operand, .{ .identifier = "payload" }); + try w.writeByte(';'); + try f.newline(); + return local; }, } } fn airOptionalPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const w = &f.object.code.writer; + const w = &f.code.writer; const operand = try 
f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); const operand_ty = f.typeOf(ty_op.operand); + const opt_ty = operand_ty.childType(zcu); const inst_ty = f.typeOfIndex(inst); - const opt_ctype = try f.ctypeFromType(operand_ty.childType(zcu), .complete); - switch (opt_ctype.info(&f.object.dg.ctype_pool)) { - .basic => { - const a = try Assignment.start(f, w, opt_ctype); - try f.writeCValueDeref(w, operand); - try a.assign(f, w); - try f.object.dg.renderValue(w, Value.false, .Other); - try a.end(f, w); - return .none; + + switch (CType.classifyOptional(opt_ty, zcu)) { + .npv_payload => unreachable, // opv optional + + .opv_payload => { + try f.writeCValueDerefMember(w, operand, .{ .identifier = "is_null" }); + try w.writeAll(" = "); + try f.dg.renderValue(w, .false, .other); + try w.writeByte(';'); + try f.newline(); + return .{ .undef = inst_ty }; }, - .pointer => { + + .error_set, + .ptr_like, + .slice_like, + => return f.moveCValue(inst, inst_ty, operand), + + .@"struct" => { + try f.writeCValueDerefMember(w, operand, .{ .identifier = "is_null" }); + try w.writeAll(" = "); + try f.dg.renderValue(w, .false, .other); + try w.writeByte(';'); + try f.newline(); if (f.liveness.isUnused(inst)) return .none; const local = try f.allocLocal(inst, inst_ty); - const a = try Assignment.start(f, w, opt_ctype); - try f.writeCValue(w, local, .Other); - try a.assign(f, w); - try f.writeCValue(w, operand, .Other); - try a.end(f, w); - return local; - }, - .aligned, .array, .vector, .fwd_decl, .function => unreachable, - .aggregate => { - { - const a = try Assignment.start(f, w, opt_ctype); - try f.writeCValueDerefMember(w, operand, .{ .identifier = "is_null" }); - try a.assign(f, w); - try f.object.dg.renderValue(w, Value.false, .Other); - try a.end(f, w); - } - if (f.liveness.isUnused(inst)) return .none; - const local = try f.allocLocal(inst, inst_ty); - const a = try Assignment.start(f, w, opt_ctype); - try f.writeCValue(w, local, .Other); - try a.assign(f, 
w); - try w.writeByte('&'); + try f.writeCValue(w, local, .other); + try w.writeAll(" = &"); try f.writeCValueDerefMember(w, operand, .{ .identifier = "payload" }); - try a.end(f, w); + try w.writeByte(';'); + try f.newline(); return local; }, } @@ -5817,18 +5081,20 @@ fn fieldLocation( .union_type => { const loaded_union = ip.loadUnionType(container_ty.toIntern()); switch (loaded_union.layout) { - .auto, .@"extern" => { + .auto => { const field_ty: Type = .fromInterned(loaded_union.field_types.get(ip)[field_index]); - if (!field_ty.hasRuntimeBits(zcu)) - return if (loaded_union.has_runtime_tag and !container_ty.unionHasAllZeroBitFieldTypes(zcu)) - .{ .field = .{ .identifier = "payload" } } - else - .begin; + if (!field_ty.hasRuntimeBits(zcu)) { + if (container_ty.unionHasAllZeroBitFieldTypes(zcu)) return .begin; + return .{ .field = .{ .identifier = "payload" } }; + } const field_name = ip.loadEnumType(loaded_union.enum_tag_type).field_names.get(ip)[field_index]; - return .{ .field = if (loaded_union.has_runtime_tag) - .{ .payload_identifier = field_name.toSlice(ip) } - else - .{ .identifier = field_name.toSlice(ip) } }; + return .{ .field = .{ .payload_identifier = field_name.toSlice(ip) } }; + }, + .@"extern" => { + const field_ty: Type = .fromInterned(loaded_union.field_types.get(ip)[field_index]); + if (!field_ty.hasRuntimeBits(zcu)) return .begin; + const field_name = ip.loadEnumType(loaded_union.enum_tag_type).field_names.get(ip)[field_index]; + return .{ .field = .{ .identifier = field_name.toSlice(ip) } }; }, .@"packed" => return .begin, } @@ -5865,7 +5131,7 @@ fn airStructFieldPtrIndex(f: *Function, inst: Air.Inst.Index, index: u8) !CValue } fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = f.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; @@ -5877,26 +5143,26 @@ fn 
airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { const field_ptr_val = try f.resolveInst(extra.field_ptr); try reap(f, inst, &.{extra.field_ptr}); - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, container_ptr_ty); - try f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try w.writeAll(" = ("); try f.renderType(w, container_ptr_ty); try w.writeByte(')'); switch (fieldLocation(container_ptr_ty, field_ptr_ty, extra.field_index, zcu)) { - .begin => try f.writeCValue(w, field_ptr_val, .Other), + .begin => try f.writeCValue(w, field_ptr_val, .other), .field => |field| { const u8_ptr_ty = try pt.adjustPtrTypeChild(field_ptr_ty, .u8); try w.writeAll("(("); try f.renderType(w, u8_ptr_ty); try w.writeByte(')'); - try f.writeCValue(w, field_ptr_val, .Other); + try f.writeCValue(w, field_ptr_val, .other); try w.writeAll(" - offsetof("); try f.renderType(w, container_ty); try w.writeAll(", "); - try f.writeCValue(w, field, .Other); + try f.writeCValue(w, field, .other); try w.writeAll("))"); }, .byte_offset => |byte_offset| { @@ -5905,7 +5171,7 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { try w.writeAll("(("); try f.renderType(w, u8_ptr_ty); try w.writeByte(')'); - try f.writeCValue(w, field_ptr_val, .Other); + try f.writeCValue(w, field_ptr_val, .other); try w.print(" - {f})", .{ try f.fmtIntLiteralDec(try pt.intValue(.usize, byte_offset)), }); @@ -5913,7 +5179,7 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue { } try w.writeByte(';'); - try f.object.newline(); + try f.newline(); return local; } @@ -5924,23 +5190,19 @@ fn fieldPtr( container_ptr_val: CValue, field_index: u32, ) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; - const container_ty = container_ptr_ty.childType(zcu); const field_ptr_ty = f.typeOfIndex(inst); - // Ensure complete type definition is visible before accessing fields. 
- _ = try f.ctypeFromType(container_ty, .complete); - - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, field_ptr_ty); - try f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try w.writeAll(" = ("); try f.renderType(w, field_ptr_ty); try w.writeByte(')'); switch (fieldLocation(container_ptr_ty, field_ptr_ty, field_index, zcu)) { - .begin => try f.writeCValue(w, container_ptr_val, .Other), + .begin => try f.writeCValue(w, container_ptr_val, .other), .field => |field| { try w.writeByte('&'); try f.writeCValueDerefMember(w, container_ptr_val, field); @@ -5951,7 +5213,7 @@ fn fieldPtr( try w.writeAll("(("); try f.renderType(w, u8_ptr_ty); try w.writeByte(')'); - try f.writeCValue(w, container_ptr_val, .Other); + try f.writeCValue(w, container_ptr_val, .other); try w.print(" + {f})", .{ try f.fmtIntLiteralDec(try pt.intValue(.usize, byte_offset)), }); @@ -5959,12 +5221,12 @@ fn fieldPtr( } try w.writeByte(';'); - try f.object.newline(); + try f.newline(); return local; } fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; @@ -5976,10 +5238,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { const struct_byval = try f.resolveInst(extra.struct_operand); try reap(f, inst, &.{extra.struct_operand}); const struct_ty = f.typeOf(extra.struct_operand); - const w = &f.object.code.writer; - - // Ensure complete type definition is visible before accessing fields. 
- _ = try f.ctypeFromType(struct_ty, .complete); + const w = &f.code.writer; assert(struct_ty.containerLayout(zcu) != .@"packed"); // `Air.Legalize.Feature.expand_packed_struct_field_val` handles this case const field_name: CValue = switch (ip.indexToKey(struct_ty.toIntern())) { @@ -5988,29 +5247,25 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { const union_type = ip.loadUnionType(struct_ty.toIntern()); const enum_tag_ty: Type = .fromInterned(union_type.enum_tag_type); const field_name_str = enum_tag_ty.enumFieldName(extra.field_index, zcu).toSlice(ip); - if (union_type.has_runtime_tag) { - break :name .{ .payload_identifier = field_name_str }; - } else { - break :name .{ .identifier = field_name_str }; - } + break :name .{ .payload_identifier = field_name_str }; }, .tuple_type => .{ .field = extra.field_index }, else => unreachable, }; const local = try f.allocLocal(inst, inst_ty); - const a = try Assignment.start(f, w, try f.ctypeFromType(inst_ty, .complete)); - try f.writeCValue(w, local, .Other); - try a.assign(f, w); + try f.writeCValue(w, local, .other); + try w.writeAll(" = "); try f.writeCValueMember(w, struct_byval, field_name); - try a.end(f, w); + try w.writeByte(';'); + try f.newline(); return local; } /// *(E!T) -> E /// Note that the result is never a pointer. 
fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -6020,37 +5275,23 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{ty_op.operand}); const operand_is_ptr = operand_ty.zigTypeTag(zcu) == .pointer; - const error_union_ty = if (operand_is_ptr) operand_ty.childType(zcu) else operand_ty; - const error_ty = error_union_ty.errorUnionSet(zcu); - const payload_ty = error_union_ty.errorUnionPayload(zcu); const local = try f.allocLocal(inst, inst_ty); - if (!payload_ty.hasRuntimeBits(zcu) and operand == .local and operand.local == local.new_local) { - // The store will be 'x = x'; elide it. - return local; - } - - const w = &f.object.code.writer; - try f.writeCValue(w, local, .Other); + const w = &f.code.writer; + try f.writeCValue(w, local, .other); try w.writeAll(" = "); - if (!payload_ty.hasRuntimeBits(zcu)) - try f.writeCValue(w, operand, .Other) - else if (error_ty.errorSetIsEmpty(zcu)) - try w.print("{f}", .{ - try f.fmtIntLiteralDec(try pt.intValue(try pt.errorIntType(), 0)), - }) - else if (operand_is_ptr) + if (operand_is_ptr) try f.writeCValueDerefMember(w, operand, .{ .identifier = "error" }) else try f.writeCValueMember(w, operand, .{ .identifier = "error" }); try w.writeByte(';'); - try f.object.newline(); + try f.newline(); return local; } fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -6060,154 +5301,124 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu const operand_ty = f.typeOf(ty_op.operand); const error_union_ty = if (is_ptr) operand_ty.childType(zcu) else operand_ty; - const w = &f.object.code.writer; + const w = &f.code.writer; if 
(!error_union_ty.errorUnionPayload(zcu).hasRuntimeBits(zcu)) { - if (!is_ptr) return .none; - + assert(is_ptr); // opv bug in sema const local = try f.allocLocal(inst, inst_ty); - try f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try w.writeAll(" = ("); try f.renderType(w, inst_ty); try w.writeByte(')'); - try f.writeCValue(w, operand, .Other); + try f.writeCValue(w, operand, .other); try w.writeByte(';'); - try f.object.newline(); + try f.newline(); return local; } const local = try f.allocLocal(inst, inst_ty); - const a = try Assignment.start(f, w, try f.ctypeFromType(inst_ty, .complete)); - try f.writeCValue(w, local, .Other); - try a.assign(f, w); + try f.writeCValue(w, local, .other); + try w.writeAll(" = "); if (is_ptr) { try w.writeByte('&'); try f.writeCValueDerefMember(w, operand, .{ .identifier = "payload" }); } else try f.writeCValueMember(w, operand, .{ .identifier = "payload" }); - try a.end(f, w); + try w.writeByte(';'); + try f.newline(); return local; } fn airWrapOptional(f: *Function, inst: Air.Inst.Index) !CValue { - const ctype_pool = &f.object.dg.ctype_pool; + const zcu = f.dg.pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const inst_ty = f.typeOfIndex(inst); - const inst_ctype = try f.ctypeFromType(inst_ty, .complete); - if (inst_ctype.isBool()) return .{ .constant = Value.true }; const operand = try f.resolveInst(ty_op.operand); - switch (inst_ctype.info(ctype_pool)) { - .basic, .pointer => return f.moveCValue(inst, inst_ty, operand), - .aligned, .array, .vector, .fwd_decl, .function => unreachable, - .aggregate => |aggregate| switch (aggregate.fields.at(0, ctype_pool).name.index) { - .is_null, .payload => { - const operand_ctype = try f.ctypeFromType(f.typeOf(ty_op.operand), .complete); - const w = &f.object.code.writer; - const local = try f.allocLocal(inst, inst_ty); - { - const a = try Assignment.start(f, w, .bool); - try f.writeCValueMember(w, local, .{ .identifier = "is_null" 
}); - try a.assign(f, w); - try w.writeAll("false"); - try a.end(f, w); - } - { - const a = try Assignment.start(f, w, operand_ctype); - try f.writeCValueMember(w, local, .{ .identifier = "payload" }); - try a.assign(f, w); - try f.writeCValue(w, operand, .Other); - try a.end(f, w); - } - return local; - }, - .ptr, .len => return f.moveCValue(inst, inst_ty, operand), - else => unreachable, + + switch (CType.classifyOptional(inst_ty, zcu)) { + .npv_payload => unreachable, // opv optional + + .opv_payload => unreachable, // opv bug in Sema + + .error_set, + .ptr_like, + .slice_like, + => return f.moveCValue(inst, inst_ty, operand), + + .@"struct" => { + const w = &f.code.writer; + const local = try f.allocLocal(inst, inst_ty); + + try f.writeCValueMember(w, local, .{ .identifier = "is_null" }); + try w.writeAll(" = false;"); + try f.newline(); + + try f.writeCValueMember(w, local, .{ .identifier = "payload" }); + try w.writeAll(" = "); + try f.writeCValue(w, operand, .other); + try w.writeByte(';'); + try f.newline(); + + return local; }, } } fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const inst_ty = f.typeOfIndex(inst); const payload_ty = inst_ty.errorUnionPayload(zcu); - const repr_is_err = !payload_ty.hasRuntimeBits(zcu); - const err_ty = inst_ty.errorUnionSet(zcu); const err = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, inst_ty); - if (repr_is_err and err == .local and err.local == local.new_local) { - // The store will be 'x = x'; elide it. 
- return local; + if (payload_ty.hasRuntimeBits(zcu)) { + try f.writeCValueMember(w, local, .{ .identifier = "payload" }); + try w.writeAll(" = "); + try f.dg.renderUndefValue(w, payload_ty, .other); + try w.writeByte(';'); + try f.newline(); } - if (!repr_is_err) { - const a = try Assignment.start(f, w, try f.ctypeFromType(payload_ty, .complete)); - try f.writeCValueMember(w, local, .{ .identifier = "payload" }); - try a.assign(f, w); - try f.object.dg.renderUndefValue(w, payload_ty, .Other); - try a.end(f, w); - } - { - const a = try Assignment.start(f, w, try f.ctypeFromType(err_ty, .complete)); - if (repr_is_err) - try f.writeCValue(w, local, .Other) - else - try f.writeCValueMember(w, local, .{ .identifier = "error" }); - try a.assign(f, w); - try f.writeCValue(w, err, .Other); - try a.end(f, w); - } + try f.writeCValueMember(w, local, .{ .identifier = "error" }); + try w.writeAll(" = "); + try f.writeCValue(w, err, .other); + try w.writeByte(';'); + try f.newline(); + return local; } fn airErrUnionPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; - const zcu = pt.zcu; - const w = &f.object.code.writer; + const pt = f.dg.pt; + const w = &f.code.writer; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const inst_ty = f.typeOfIndex(inst); const operand = try f.resolveInst(ty_op.operand); - const operand_ty = f.typeOf(ty_op.operand); - const error_union_ty = operand_ty.childType(zcu); - const payload_ty = error_union_ty.errorUnionPayload(zcu); const err_int_ty = try pt.errorIntType(); const no_err = try pt.intValue(err_int_ty, 0); try reap(f, inst, &.{ty_op.operand}); // First, set the non-error value. 
- if (!payload_ty.hasRuntimeBits(zcu)) { - const a = try Assignment.start(f, w, try f.ctypeFromType(operand_ty, .complete)); - try f.writeCValueDeref(w, operand); - try a.assign(f, w); - try w.print("{f}", .{try f.fmtIntLiteralDec(no_err)}); - try a.end(f, w); - return .none; - } - { - const a = try Assignment.start(f, w, try f.ctypeFromType(err_int_ty, .complete)); - try f.writeCValueDerefMember(w, operand, .{ .identifier = "error" }); - try a.assign(f, w); - try w.print("{f}", .{try f.fmtIntLiteralDec(no_err)}); - try a.end(f, w); - } + try f.writeCValueDerefMember(w, operand, .{ .identifier = "error" }); + try w.print(" = {f};", .{try f.fmtIntLiteralDec(no_err)}); + try f.newline(); // Then return the payload pointer (only if it is used) if (f.liveness.isUnused(inst)) return .none; const local = try f.allocLocal(inst, inst_ty); - const a = try Assignment.start(f, w, try f.ctypeFromType(inst_ty, .complete)); - try f.writeCValue(w, local, .Other); - try a.assign(f, w); - try w.writeByte('&'); + try f.writeCValue(w, local, .other); + try w.writeAll(" = &"); try f.writeCValueDerefMember(w, operand, .{ .identifier = "payload" }); - try a.end(f, w); + try w.writeByte(';'); + try f.newline(); return local; } @@ -6227,7 +5438,7 @@ fn airSaveErrReturnTraceIndex(f: *Function, inst: Air.Inst.Index) !CValue { } fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -6235,120 +5446,88 @@ fn airWrapErrUnionPay(f: *Function, inst: Air.Inst.Index) !CValue { const payload_ty = inst_ty.errorUnionPayload(zcu); const payload = try f.resolveInst(ty_op.operand); assert(payload_ty.hasRuntimeBits(zcu)); - const err_ty = inst_ty.errorUnionSet(zcu); try reap(f, inst, &.{ty_op.operand}); - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, inst_ty); - { - const a = try 
Assignment.start(f, w, try f.ctypeFromType(payload_ty, .complete)); - try f.writeCValueMember(w, local, .{ .identifier = "payload" }); - try a.assign(f, w); - try f.writeCValue(w, payload, .Other); - try a.end(f, w); - } - { - const a = try Assignment.start(f, w, try f.ctypeFromType(err_ty, .complete)); - try f.writeCValueMember(w, local, .{ .identifier = "error" }); - try a.assign(f, w); - try f.object.dg.renderValue(w, try pt.intValue(try pt.errorIntType(), 0), .Other); - try a.end(f, w); - } + + try f.writeCValueMember(w, local, .{ .identifier = "payload" }); + try w.writeAll(" = "); + try f.writeCValue(w, payload, .other); + try w.writeByte(';'); + try f.newline(); + + try f.writeCValueMember(w, local, .{ .identifier = "error" }); + try w.writeAll(" = "); + try f.dg.renderValue(w, try pt.intValue(try pt.errorIntType(), 0), .other); + try w.writeByte(';'); + try f.newline(); + return local; } fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const u8) !CValue { - const pt = f.object.dg.pt; - const zcu = pt.zcu; + const pt = f.dg.pt; const un_op = f.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const w = &f.object.code.writer; + const w = &f.code.writer; const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); - const operand_ty = f.typeOf(un_op); const local = try f.allocLocal(inst, .bool); - const err_union_ty = if (is_ptr) operand_ty.childType(zcu) else operand_ty; - const payload_ty = err_union_ty.errorUnionPayload(zcu); - const error_ty = err_union_ty.errorUnionSet(zcu); - const a = try Assignment.start(f, w, .bool); - try f.writeCValue(w, local, .Other); - try a.assign(f, w); + try f.writeCValue(w, local, .other); + try w.writeAll(" = "); const err_int_ty = try pt.errorIntType(); - if (!error_ty.errorSetIsEmpty(zcu)) - if (payload_ty.hasRuntimeBits(zcu)) - if (is_ptr) - try f.writeCValueDerefMember(w, operand, .{ .identifier = "error" }) - else - try f.writeCValueMember(w, operand, .{ .identifier = "error" 
}) - else - try f.writeCValue(w, operand, .Other) + if (is_ptr) + try f.writeCValueDerefMember(w, operand, .{ .identifier = "error" }) else - try f.object.dg.renderValue(w, try pt.intValue(err_int_ty, 0), .Other); - try w.writeByte(' '); - try w.writeAll(operator); - try w.writeByte(' '); - try f.object.dg.renderValue(w, try pt.intValue(err_int_ty, 0), .Other); - try a.end(f, w); + try f.writeCValueMember(w, operand, .{ .identifier = "error" }); + try w.print(" {s} ", .{operator}); + try f.dg.renderValue(w, try pt.intValue(err_int_ty, 0), .other); + try w.writeByte(';'); + try f.newline(); return local; } fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; - const ctype_pool = &f.object.dg.ctype_pool; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); const inst_ty = f.typeOfIndex(inst); - const ptr_ty = inst_ty.slicePtrFieldType(zcu); - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, inst_ty); const operand_ty = f.typeOf(ty_op.operand); const array_ty = operand_ty.childType(zcu); - { - const a = try Assignment.start(f, w, try f.ctypeFromType(ptr_ty, .complete)); - try f.writeCValueMember(w, local, .{ .identifier = "ptr" }); - try a.assign(f, w); - if (operand == .undef) { - try f.writeCValue(w, .{ .undef = inst_ty.slicePtrFieldType(zcu) }, .Other); - } else { - const ptr_ctype = try f.ctypeFromType(ptr_ty, .complete); - const ptr_child_ctype = ptr_ctype.info(ctype_pool).pointer.elem_ctype; - const elem_ty = array_ty.childType(zcu); - const elem_ctype = try f.ctypeFromType(elem_ty, .complete); - if (!ptr_child_ctype.eql(elem_ctype)) { - try w.writeByte('('); - try f.renderCType(w, ptr_ctype); - try w.writeByte(')'); - } - const operand_ctype = try f.ctypeFromType(operand_ty, .complete); - const operand_child_ctype = 
operand_ctype.info(ctype_pool).pointer.elem_ctype; - if (operand_child_ctype.info(ctype_pool) == .array) { - try w.writeByte('&'); - try f.writeCValueDeref(w, operand); - try w.print("[{f}]", .{try f.fmtIntLiteralDec(.zero_usize)}); - } else try f.writeCValue(w, operand, .Other); - } - try a.end(f, w); - } - { - const a = try Assignment.start(f, w, .usize); - try f.writeCValueMember(w, local, .{ .identifier = "len" }); - try a.assign(f, w); - try w.print("{f}", .{ - try f.fmtIntLiteralDec(try pt.intValue(.usize, array_ty.arrayLen(zcu))), - }); - try a.end(f, w); - } + // We have a `*[n]T`, which was turned into a pointer to `struct { T array[n]; }`. + // Ideally we would want to use 'operand->array' to convert to a `T *` (we get a `T []` + // which decays to a pointer), but if the element type is zero-bit or the array length is + // zero, there will not be an `array` member (the array type lowers to `void`). We cannot + // check the type layout here because it may not be resolved, so in this instance, we must + // use a pointer cast.
+ try f.writeCValueMember(w, local, .{ .identifier = "ptr" }); + try w.writeAll(" = ("); + try f.dg.renderType(w, inst_ty.slicePtrFieldType(zcu)); + try w.writeByte(')'); + try f.writeCValue(w, operand, .other); + try w.writeByte(';'); + try f.newline(); + + try f.writeCValueMember(w, local, .{ .identifier = "len" }); + try w.print(" = {f}", .{ + try f.fmtIntLiteralDec(try pt.intValue(.usize, array_ty.arrayLen(zcu))), + }); + try w.writeByte(';'); + try f.newline(); return local; } fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -6358,7 +5537,7 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{ty_op.operand}); const operand_ty = f.typeOf(ty_op.operand); const scalar_ty = operand_ty.scalarType(zcu); - const target = &f.object.dg.mod.resolved_target.result; + const target = &f.dg.mod.resolved_target.result; const operation = if (inst_scalar_ty.isRuntimeFloat() and scalar_ty.isRuntimeFloat()) if (inst_scalar_ty.floatBits(target) < scalar_ty.floatBits(target)) "trunc" else "extend" else if (inst_scalar_ty.isInt(zcu) and scalar_ty.isRuntimeFloat()) @@ -6368,16 +5547,15 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue { else unreachable; - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, inst_ty); const v = try Vectorize.start(f, inst, w, operand_ty); - const a = try Assignment.start(f, w, try f.ctypeFromType(scalar_ty, .complete)); - try f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try v.elem(f, w); - try a.assign(f, w); + try w.writeAll(" = "); if (inst_scalar_ty.isInt(zcu) and scalar_ty.isRuntimeFloat()) { try w.writeAll("zig_wrap_"); - try f.object.dg.renderTypeForBuiltinFnName(w, inst_scalar_ty); + try f.dg.renderTypeForBuiltinFnName(w, inst_scalar_ty); try w.writeByte('('); } try 
w.writeAll("zig_"); @@ -6385,14 +5563,15 @@ fn airFloatCast(f: *Function, inst: Air.Inst.Index) !CValue { try w.writeAll(compilerRtAbbrev(scalar_ty, zcu, target)); try w.writeAll(compilerRtAbbrev(inst_scalar_ty, zcu, target)); try w.writeByte('('); - try f.writeCValue(w, operand, .FunctionArgument); + try f.writeCValue(w, operand, .other); try v.elem(f, w); try w.writeByte(')'); if (inst_scalar_ty.isInt(zcu) and scalar_ty.isRuntimeFloat()) { - try f.object.dg.renderBuiltinInfo(w, inst_scalar_ty, .bits); + try f.dg.renderBuiltinInfo(w, inst_scalar_ty, .bits); try w.writeByte(')'); } - try a.end(f, w); + try w.writeByte(';'); + try f.newline(); try v.end(f, inst, w); return local; @@ -6405,7 +5584,7 @@ fn airUnBuiltinCall( operation: []const u8, info: BuiltinInfo, ) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const operand = try f.resolveInst(operand_ref); @@ -6415,30 +5594,32 @@ fn airUnBuiltinCall( const operand_ty = f.typeOf(operand_ref); const scalar_ty = operand_ty.scalarType(zcu); - const inst_scalar_ctype = try f.ctypeFromType(inst_scalar_ty, .complete); - const ref_ret = inst_scalar_ctype.info(&f.object.dg.ctype_pool) == .array; + const ref_ret = lowersToBigInt(inst_scalar_ty, zcu); + const ref_arg = lowersToBigInt(scalar_ty, zcu); - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, inst_ty); const v = try Vectorize.start(f, inst, w, operand_ty); if (!ref_ret) { - try f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try v.elem(f, w); try w.writeAll(" = "); } try w.print("zig_{s}_", .{operation}); - try f.object.dg.renderTypeForBuiltinFnName(w, scalar_ty); + try f.dg.renderTypeForBuiltinFnName(w, scalar_ty); try w.writeByte('('); if (ref_ret) { - try f.writeCValue(w, local, .FunctionArgument); + try w.writeByte('&'); + try f.writeCValue(w, local, .other); try v.elem(f, w); try w.writeAll(", "); } - try f.writeCValue(w, operand, .FunctionArgument); + if 
(ref_arg) try w.writeByte('&'); + try f.writeCValue(w, operand, .other); try v.elem(f, w); - try f.object.dg.renderBuiltinInfo(w, scalar_ty, info); + try f.dg.renderBuiltinInfo(w, scalar_ty, info); try w.writeAll(");"); - try f.object.newline(); + try f.newline(); try v.end(f, inst, w); return local; @@ -6450,13 +5631,12 @@ fn airBinBuiltinCall( operation: []const u8, info: BuiltinInfo, ) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const operand_ty = f.typeOf(bin_op.lhs); - const operand_ctype = try f.ctypeFromType(operand_ty, .complete); - const is_big = operand_ctype.info(&f.object.dg.ctype_pool) == .array; + const is_big = lowersToBigInt(operand_ty, zcu); const lhs = try f.resolveInst(bin_op.lhs); const rhs = try f.resolveInst(bin_op.rhs); @@ -6466,32 +5646,35 @@ fn airBinBuiltinCall( const inst_scalar_ty = inst_ty.scalarType(zcu); const scalar_ty = operand_ty.scalarType(zcu); - const inst_scalar_ctype = try f.ctypeFromType(inst_scalar_ty, .complete); - const ref_ret = inst_scalar_ctype.info(&f.object.dg.ctype_pool) == .array; + const ref_ret = lowersToBigInt(inst_scalar_ty, zcu); + const ref_arg = lowersToBigInt(scalar_ty, zcu); - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, inst_ty); if (is_big) try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); const v = try Vectorize.start(f, inst, w, operand_ty); if (!ref_ret) { - try f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try v.elem(f, w); try w.writeAll(" = "); } try w.print("zig_{s}_", .{operation}); - try f.object.dg.renderTypeForBuiltinFnName(w, scalar_ty); + try f.dg.renderTypeForBuiltinFnName(w, scalar_ty); try w.writeByte('('); if (ref_ret) { - try f.writeCValue(w, local, .FunctionArgument); + try w.writeByte('&'); + try f.writeCValue(w, local, .other); try v.elem(f, w); try w.writeAll(", "); } - try f.writeCValue(w, lhs, 
.FunctionArgument); + if (ref_arg) try w.writeByte('&'); + try f.writeCValue(w, lhs, .other); try v.elem(f, w); try w.writeAll(", "); - try f.writeCValue(w, rhs, .FunctionArgument); + if (ref_arg) try w.writeByte('&'); + try f.writeCValue(w, rhs, .other); if (f.typeOf(bin_op.rhs).isVector(zcu)) try v.elem(f, w); - try f.object.dg.renderBuiltinInfo(w, scalar_ty, info); + try f.dg.renderBuiltinInfo(w, scalar_ty, info); try w.writeAll(");\n"); try v.end(f, inst, w); @@ -6506,7 +5689,7 @@ fn airCmpBuiltinCall( operation: enum { cmp, operator }, info: BuiltinInfo, ) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const lhs = try f.resolveInst(data.lhs); const rhs = try f.resolveInst(data.rhs); @@ -6517,14 +5700,14 @@ fn airCmpBuiltinCall( const operand_ty = f.typeOf(data.lhs); const scalar_ty = operand_ty.scalarType(zcu); - const inst_scalar_ctype = try f.ctypeFromType(inst_scalar_ty, .complete); - const ref_ret = inst_scalar_ctype.info(&f.object.dg.ctype_pool) == .array; + const ref_ret = lowersToBigInt(inst_scalar_ty, zcu); + const ref_arg = lowersToBigInt(scalar_ty, zcu); - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, inst_ty); const v = try Vectorize.start(f, inst, w, operand_ty); if (!ref_ret) { - try f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try v.elem(f, w); try w.writeAll(" = "); } @@ -6532,33 +5715,36 @@ fn airCmpBuiltinCall( else => @tagName(operation), .operator => compareOperatorAbbrev(operator), }}); - try f.object.dg.renderTypeForBuiltinFnName(w, scalar_ty); + try f.dg.renderTypeForBuiltinFnName(w, scalar_ty); try w.writeByte('('); if (ref_ret) { - try f.writeCValue(w, local, .FunctionArgument); + try w.writeByte('&'); + try f.writeCValue(w, local, .other); try v.elem(f, w); try w.writeAll(", "); } - try f.writeCValue(w, lhs, .FunctionArgument); + if (ref_arg) try w.writeByte('&'); + try f.writeCValue(w, lhs, .other); try v.elem(f, w); try 
w.writeAll(", "); - try f.writeCValue(w, rhs, .FunctionArgument); + if (ref_arg) try w.writeByte('&'); + try f.writeCValue(w, rhs, .other); try v.elem(f, w); - try f.object.dg.renderBuiltinInfo(w, scalar_ty, info); + try f.dg.renderBuiltinInfo(w, scalar_ty, info); try w.writeByte(')'); if (!ref_ret) try w.print("{s}{f}", .{ compareOperatorC(operator), try f.fmtIntLiteralDec(try pt.intValue(.i32, 0)), }); try w.writeByte(';'); - try f.object.newline(); + try f.newline(); try v.end(f, inst, w); return local; } fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = f.air.extraData(Air.Cmpxchg, ty_pl.payload).data; @@ -6568,9 +5754,8 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue const new_value = try f.resolveInst(extra.new_value); const ptr_ty = f.typeOf(extra.ptr); const ty = ptr_ty.childType(zcu); - const ctype = try f.ctypeFromType(ty, .complete); - const w = &f.object.code.writer; + const w = &f.code.writer; const new_value_mat = try Materialize.start(f, inst, ty, new_value); try reap(f, inst, &.{ extra.ptr, extra.expected_value, extra.new_value }); @@ -6581,13 +5766,11 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue const local = try f.allocLocal(inst, inst_ty); if (inst_ty.isPtrLikeOptional(zcu)) { - { - const a = try Assignment.start(f, w, ctype); - try f.writeCValue(w, local, .Other); - try a.assign(f, w); - try f.writeCValue(w, expected_value, .Other); - try a.end(f, w); - } + try f.writeCValue(w, local, .other); + try w.writeAll(" = "); + try f.writeCValue(w, expected_value, .other); + try w.writeByte(';'); + try f.newline(); try w.writeAll("if ("); try w.print("zig_cmpxchg_{s}((zig_atomic(", .{flavor}); @@ -6595,9 +5778,9 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) 
!CValue try w.writeByte(')'); if (ptr_ty.isVolatilePtr(zcu)) try w.writeAll(" volatile"); try w.writeAll(" *)"); - try f.writeCValue(w, ptr, .Other); + try f.writeCValue(w, ptr, .other); try w.writeAll(", "); - try f.writeCValue(w, local, .FunctionArgument); + try f.writeCValue(w, local, .other); try w.writeAll(", "); try new_value_mat.mat(f, w); try w.writeAll(", "); @@ -6605,56 +5788,49 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue try w.writeAll(", "); try writeMemoryOrder(w, extra.failureOrder()); try w.writeAll(", "); - try f.object.dg.renderTypeForBuiltinFnName(w, ty); + try f.dg.renderTypeForBuiltinFnName(w, ty); try w.writeAll(", "); try f.renderType(w, repr_ty); try w.writeByte(')'); try w.writeAll(") {"); - f.object.indent(); - try f.object.newline(); - { - const a = try Assignment.start(f, w, ctype); - try f.writeCValue(w, local, .Other); - try a.assign(f, w); - try w.writeAll("NULL"); - try a.end(f, w); - } - try f.object.outdent(); + f.indent(); + try f.newline(); + + try f.writeCValue(w, local, .other); + try w.writeAll(" = NULL;"); + try f.newline(); + + try f.outdent(); try w.writeByte('}'); - try f.object.newline(); + try f.newline(); } else { - { - const a = try Assignment.start(f, w, ctype); - try f.writeCValueMember(w, local, .{ .identifier = "payload" }); - try a.assign(f, w); - try f.writeCValue(w, expected_value, .Other); - try a.end(f, w); - } - { - const a = try Assignment.start(f, w, .bool); - try f.writeCValueMember(w, local, .{ .identifier = "is_null" }); - try a.assign(f, w); - try w.print("zig_cmpxchg_{s}((zig_atomic(", .{flavor}); - try f.renderType(w, ty); - try w.writeByte(')'); - if (ptr_ty.isVolatilePtr(zcu)) try w.writeAll(" volatile"); - try w.writeAll(" *)"); - try f.writeCValue(w, ptr, .Other); - try w.writeAll(", "); - try f.writeCValueMember(w, local, .{ .identifier = "payload" }); - try w.writeAll(", "); - try new_value_mat.mat(f, w); - try w.writeAll(", "); - try writeMemoryOrder(w, 
extra.successOrder()); - try w.writeAll(", "); - try writeMemoryOrder(w, extra.failureOrder()); - try w.writeAll(", "); - try f.object.dg.renderTypeForBuiltinFnName(w, ty); - try w.writeAll(", "); - try f.renderType(w, repr_ty); - try w.writeByte(')'); - try a.end(f, w); - } + try f.writeCValueMember(w, local, .{ .identifier = "payload" }); + try w.writeAll(" = "); + try f.writeCValue(w, expected_value, .other); + try w.writeByte(';'); + try f.newline(); + + try f.writeCValueMember(w, local, .{ .identifier = "is_null" }); + try w.print(" = zig_cmpxchg_{s}((zig_atomic(", .{flavor}); + try f.renderType(w, ty); + try w.writeByte(')'); + if (ptr_ty.isVolatilePtr(zcu)) try w.writeAll(" volatile"); + try w.writeAll(" *)"); + try f.writeCValue(w, ptr, .other); + try w.writeAll(", "); + try f.writeCValueMember(w, local, .{ .identifier = "payload" }); + try w.writeAll(", "); + try new_value_mat.mat(f, w); + try w.writeAll(", "); + try writeMemoryOrder(w, extra.successOrder()); + try w.writeAll(", "); + try writeMemoryOrder(w, extra.failureOrder()); + try w.writeAll(", "); + try f.dg.renderTypeForBuiltinFnName(w, ty); + try w.writeAll(", "); + try f.renderType(w, repr_ty); + try w.writeAll(");"); + try f.newline(); } try new_value_mat.end(f, inst); @@ -6667,7 +5843,7 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue } fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const pl_op = f.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const extra = f.air.extraData(Air.AtomicRmw, pl_op.payload).data; @@ -6677,7 +5853,7 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { const ptr = try f.resolveInst(pl_op.operand); const operand = try f.resolveInst(extra.operand); - const w = &f.object.code.writer; + const w = &f.code.writer; const operand_mat = try Materialize.start(f, inst, ty, operand); try reap(f, inst, &.{ pl_op.operand, extra.operand }); 
@@ -6690,7 +5866,7 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { try w.print("zig_atomicrmw_{s}", .{toAtomicRmwSuffix(extra.op())}); if (is_float) try w.writeAll("_float") else if (is_128) try w.writeAll("_int128"); try w.writeByte('('); - try f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try w.writeAll(", ("); const use_atomic = switch (extra.op()) { else => true, @@ -6702,17 +5878,17 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { if (use_atomic) try w.writeByte(')'); if (ptr_ty.isVolatilePtr(zcu)) try w.writeAll(" volatile"); try w.writeAll(" *)"); - try f.writeCValue(w, ptr, .Other); + try f.writeCValue(w, ptr, .other); try w.writeAll(", "); try operand_mat.mat(f, w); try w.writeAll(", "); try writeMemoryOrder(w, extra.ordering()); try w.writeAll(", "); - try f.object.dg.renderTypeForBuiltinFnName(w, ty); + try f.dg.renderTypeForBuiltinFnName(w, ty); try w.writeAll(", "); try f.renderType(w, repr_ty); try w.writeAll(");"); - try f.object.newline(); + try f.newline(); try operand_mat.end(f, inst); if (f.liveness.isUnused(inst)) { @@ -6724,7 +5900,7 @@ fn airAtomicRmw(f: *Function, inst: Air.Inst.Index) !CValue { } fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const atomic_load = f.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load; const ptr = try f.resolveInst(atomic_load.ptr); @@ -6738,31 +5914,31 @@ fn airAtomicLoad(f: *Function, inst: Air.Inst.Index) !CValue { ty; const inst_ty = f.typeOfIndex(inst); - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, inst_ty); try w.writeAll("zig_atomic_load("); - try f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try w.writeAll(", (zig_atomic("); try f.renderType(w, ty); try w.writeByte(')'); if (ptr_ty.isVolatilePtr(zcu)) try w.writeAll(" volatile"); try w.writeAll(" *)"); - try 
f.writeCValue(w, ptr, .Other); + try f.writeCValue(w, ptr, .other); try w.writeAll(", "); try writeMemoryOrder(w, atomic_load.order); try w.writeAll(", "); - try f.object.dg.renderTypeForBuiltinFnName(w, ty); + try f.dg.renderTypeForBuiltinFnName(w, ty); try w.writeAll(", "); try f.renderType(w, repr_ty); try w.writeAll(");"); - try f.object.newline(); + try f.newline(); return local; } fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const ptr_ty = f.typeOf(bin_op.lhs); @@ -6770,7 +5946,7 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa const ptr = try f.resolveInst(bin_op.lhs); const element = try f.resolveInst(bin_op.rhs); - const w = &f.object.code.writer; + const w = &f.code.writer; const element_mat = try Materialize.start(f, inst, ty, element); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); @@ -6784,32 +5960,22 @@ fn airAtomicStore(f: *Function, inst: Air.Inst.Index, order: [*:0]const u8) !CVa try w.writeByte(')'); if (ptr_ty.isVolatilePtr(zcu)) try w.writeAll(" volatile"); try w.writeAll(" *)"); - try f.writeCValue(w, ptr, .Other); + try f.writeCValue(w, ptr, .other); try w.writeAll(", "); try element_mat.mat(f, w); try w.print(", {s}, ", .{order}); - try f.object.dg.renderTypeForBuiltinFnName(w, ty); + try f.dg.renderTypeForBuiltinFnName(w, ty); try w.writeAll(", "); try f.renderType(w, repr_ty); try w.writeAll(");"); - try f.object.newline(); + try f.newline(); try element_mat.end(f, inst); return .none; } -fn writeSliceOrPtr(f: *Function, w: *Writer, ptr: CValue, ptr_ty: Type) !void { - const pt = f.object.dg.pt; - const zcu = pt.zcu; - if (ptr_ty.isSlice(zcu)) { - try f.writeCValueMember(w, ptr, .{ .identifier = "ptr" }); - } else { - try f.writeCValue(w, ptr, .FunctionArgument); - } -} - fn airMemset(f: *Function, inst: Air.Inst.Index, 
safety: bool) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const dest_ty = f.typeOf(bin_op.lhs); @@ -6818,7 +5984,7 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { const elem_ty = f.typeOf(bin_op.rhs); const elem_abi_size = elem_ty.abiSize(zcu); const val_is_undef = if (try f.air.value(bin_op.rhs, pt)) |val| val.isUndef(zcu) else false; - const w = &f.object.code.writer; + const w = &f.code.writer; if (val_is_undef) { if (!safety) { @@ -6832,153 +5998,128 @@ fn airMemset(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { try f.writeCValueMember(w, dest_slice, .{ .identifier = "ptr" }); try w.writeAll(", 0xaa, "); try f.writeCValueMember(w, dest_slice, .{ .identifier = "len" }); - if (elem_abi_size > 1) { - try w.print(" * {d}", .{elem_abi_size}); - } - try w.writeAll(");"); - try f.object.newline(); }, .one => { - const array_ty = dest_ty.childType(zcu); - const len = array_ty.arrayLen(zcu) * elem_abi_size; - - try f.writeCValue(w, dest_slice, .FunctionArgument); - try w.print(", 0xaa, {d});", .{len}); - try f.object.newline(); + try f.writeCValue(w, dest_slice, .other); + try w.print(", 0xaa, {d}", .{dest_ty.childType(zcu).arrayLen(zcu)}); }, .many, .c => unreachable, } + if (elem_abi_size > 0) try w.print(" * {d}", .{elem_abi_size}); + try w.writeAll(");"); + try f.newline(); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); return .none; } - if (elem_abi_size > 1 or dest_ty.isVolatilePtr(zcu)) { - // For the assignment in this loop, the array pointer needs to get - // casted to a regular pointer, otherwise an error like this occurs: - // error: array type 'uint32_t[20]' (aka 'unsigned int[20]') is not assignable - const elem_ptr_ty = try pt.ptrType(.{ - .child = elem_ty.toIntern(), - .flags = .{ - .size = .c, - }, - }); - - const index = try f.allocLocal(inst, .usize); - - try w.writeAll("for ("); - try 
f.writeCValue(w, index, .Other); - try w.writeAll(" = "); - try f.object.dg.renderValue(w, .zero_usize, .Other); - try w.writeAll("; "); - try f.writeCValue(w, index, .Other); - try w.writeAll(" != "); + if (elem_abi_size == 1 and !dest_ty.isVolatilePtr(zcu)) { + const bitcasted = try bitcast(f, .u8, value, elem_ty); + try w.writeAll("memset("); switch (dest_ty.ptrSize(zcu)) { .slice => { + try f.writeCValueMember(w, dest_slice, .{ .identifier = "ptr" }); + try w.writeAll(", "); + try f.writeCValue(w, bitcasted, .other); + try w.writeAll(", "); try f.writeCValueMember(w, dest_slice, .{ .identifier = "len" }); }, .one => { - const array_ty = dest_ty.childType(zcu); - try w.print("{d}", .{array_ty.arrayLen(zcu)}); + try f.writeCValue(w, dest_slice, .other); + try w.writeAll(", "); + try f.writeCValue(w, bitcasted, .other); + try w.print(", {d}", .{dest_ty.childType(zcu).arrayLen(zcu)}); }, .many, .c => unreachable, } - try w.writeAll("; ++"); - try f.writeCValue(w, index, .Other); - try w.writeAll(") "); - - const a = try Assignment.start(f, w, try f.ctypeFromType(elem_ty, .complete)); - try w.writeAll("(("); - try f.renderType(w, elem_ptr_ty); - try w.writeByte(')'); - try writeSliceOrPtr(f, w, dest_slice, dest_ty); - try w.writeAll(")["); - try f.writeCValue(w, index, .Other); - try w.writeByte(']'); - try a.assign(f, w); - try f.writeCValue(w, value, .Other); - try a.end(f, w); - + try w.writeAll(");"); + try f.newline(); + try f.freeCValue(inst, bitcasted); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); - try freeLocal(f, inst, index.new_local, null); - return .none; } - const bitcasted = try bitcast(f, .u8, value, elem_ty); + // Fallback path: use a `for` loop. 
- try w.writeAll("memset("); + const index = try f.allocLocal(inst, .usize); + + try w.writeAll("for ("); + try f.writeCValue(w, index, .other); + try w.writeAll(" = "); + try f.dg.renderValue(w, .zero_usize, .other); + try w.writeAll("; "); + try f.writeCValue(w, index, .other); + try w.writeAll(" != "); switch (dest_ty.ptrSize(zcu)) { - .slice => { - try f.writeCValueMember(w, dest_slice, .{ .identifier = "ptr" }); - try w.writeAll(", "); - try f.writeCValue(w, bitcasted, .FunctionArgument); - try w.writeAll(", "); - try f.writeCValueMember(w, dest_slice, .{ .identifier = "len" }); - try w.writeAll(");"); - try f.object.newline(); - }, - .one => { - const array_ty = dest_ty.childType(zcu); - const len = array_ty.arrayLen(zcu) * elem_abi_size; - - try f.writeCValue(w, dest_slice, .FunctionArgument); - try w.writeAll(", "); - try f.writeCValue(w, bitcasted, .FunctionArgument); - try w.print(", {d});", .{len}); - try f.object.newline(); - }, + .slice => try f.writeCValueMember(w, dest_slice, .{ .identifier = "len" }), + .one => try w.print("{d}", .{dest_ty.childType(zcu).arrayLen(zcu)}), .many, .c => unreachable, } - try f.freeCValue(inst, bitcasted); + try w.writeAll("; ++"); + try f.writeCValue(w, index, .other); + try w.writeAll(") "); + + switch (dest_ty.ptrSize(zcu)) { + .slice => try f.writeCValueMember(w, dest_slice, .{ .identifier = "ptr" }), + .one => try f.writeCValueDerefMember(w, dest_slice, .{ .identifier = "array" }), + .many, .c => unreachable, + } + try w.writeByte('['); + try f.writeCValue(w, index, .other); + try w.writeAll("] = "); + try f.writeCValue(w, value, .other); + try w.writeByte(';'); + try f.newline(); + try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); + try freeLocal(f, inst, index.new_local, null); + return .none; } fn airMemcpy(f: *Function, inst: Air.Inst.Index, function_paren: []const u8) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const bin_op = 
f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const dest_ptr = try f.resolveInst(bin_op.lhs); const src_ptr = try f.resolveInst(bin_op.rhs); const dest_ty = f.typeOf(bin_op.lhs); const src_ty = f.typeOf(bin_op.rhs); - const w = &f.object.code.writer; + const w = &f.code.writer; if (dest_ty.ptrSize(zcu) != .one) { try w.writeAll("if ("); - try writeArrayLen(f, dest_ptr, dest_ty); + try f.writeCValueMember(w, dest_ptr, .{ .identifier = "len" }); try w.writeAll(" != 0) "); } try w.writeAll(function_paren); - try writeSliceOrPtr(f, w, dest_ptr, dest_ty); + switch (dest_ty.ptrSize(zcu)) { + .slice => try f.writeCValueMember(w, dest_ptr, .{ .identifier = "ptr" }), + .one => try f.writeCValueDerefMember(w, dest_ptr, .{ .identifier = "array" }), + .many, .c => unreachable, + } try w.writeAll(", "); - try writeSliceOrPtr(f, w, src_ptr, src_ty); + switch (src_ty.ptrSize(zcu)) { + .slice => try f.writeCValueMember(w, src_ptr, .{ .identifier = "ptr" }), + .one => try f.writeCValueDerefMember(w, src_ptr, .{ .identifier = "array" }), + .many, .c => try f.writeCValue(w, src_ptr, .other), + } try w.writeAll(", "); - try writeArrayLen(f, dest_ptr, dest_ty); + switch (dest_ty.ptrSize(zcu)) { + .slice => try f.writeCValueMember(w, dest_ptr, .{ .identifier = "len" }), + .one => try w.print("{d}", .{dest_ty.childType(zcu).arrayLen(zcu)}), + .many, .c => unreachable, + } try w.writeAll(" * sizeof("); try f.renderType(w, dest_ty.indexableElem(zcu)); try w.writeAll("));"); - try f.object.newline(); + try f.newline(); try reap(f, inst, &.{ bin_op.lhs, bin_op.rhs }); return .none; } -fn writeArrayLen(f: *Function, dest_ptr: CValue, dest_ty: Type) !void { - const pt = f.object.dg.pt; - const zcu = pt.zcu; - const w = &f.object.code.writer; - switch (dest_ty.ptrSize(zcu)) { - .one => try w.print("{f}", .{ - try f.fmtIntLiteralDec(try pt.intValue(.usize, dest_ty.childType(zcu).arrayLen(zcu))), - }), - .many, .c => unreachable, - .slice => try f.writeCValueMember(w, dest_ptr, .{ 
.identifier = "len" }), - } -} - fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const union_ptr = try f.resolveInst(bin_op.lhs); @@ -6988,19 +6129,18 @@ fn airSetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { const union_ty = f.typeOf(bin_op.lhs).childType(zcu); const layout = union_ty.unionGetLayout(zcu); if (layout.tag_size == 0) return .none; - const tag_ty = union_ty.unionTagTypeRuntime(zcu).?; - const w = &f.object.code.writer; - const a = try Assignment.start(f, w, try f.ctypeFromType(tag_ty, .complete)); + const w = &f.code.writer; try f.writeCValueDerefMember(w, union_ptr, .{ .identifier = "tag" }); - try a.assign(f, w); - try f.writeCValue(w, new_tag, .Other); - try a.end(f, w); + try w.writeAll(" = "); + try f.writeCValue(w, new_tag, .other); + try w.writeByte(';'); + try f.newline(); return .none; } fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -7012,17 +6152,20 @@ fn airGetUnionTag(f: *Function, inst: Air.Inst.Index) !CValue { if (layout.tag_size == 0) return .none; const inst_ty = f.typeOfIndex(inst); - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, inst_ty); - const a = try Assignment.start(f, w, try f.ctypeFromType(inst_ty, .complete)); - try f.writeCValue(w, local, .Other); - try a.assign(f, w); + try f.writeCValue(w, local, .other); + try w.writeAll(" = "); try f.writeCValueMember(w, operand, .{ .identifier = "tag" }); - try a.end(f, w); + try w.writeByte(';'); + try f.newline(); return local; } fn airTagName(f: *Function, inst: Air.Inst.Index) !CValue { + const zcu = f.dg.pt.zcu; + const ip = &zcu.intern_pool; + const gpa = zcu.comp.gpa; const un_op = 
f.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const inst_ty = f.typeOfIndex(inst); @@ -7030,15 +6173,17 @@ fn airTagName(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, inst_ty); - try f.writeCValue(w, local, .Other); - try w.print(" = {s}(", .{ - try f.getLazyFnName(.{ .tag_name = enum_ty.toIntern() }), + try f.writeCValue(w, local, .other); + try f.need_tag_name_funcs.put(gpa, enum_ty.toIntern(), {}); + try w.print(" = zig_tagName_{f}__{d}(", .{ + fmtIdentUnsolo(enum_ty.containerTypeName(ip).toSlice(ip)), + @intFromEnum(enum_ty.toIntern()), }); - try f.writeCValue(w, operand, .Other); + try f.writeCValue(w, operand, .other); try w.writeAll(");"); - try f.object.newline(); + try f.newline(); return local; } @@ -7046,40 +6191,37 @@ fn airTagName(f: *Function, inst: Air.Inst.Index) !CValue { fn airErrorName(f: *Function, inst: Air.Inst.Index) !CValue { const un_op = f.air.instructions.items(.data)[@intFromEnum(inst)].un_op; - const w = &f.object.code.writer; + const w = &f.code.writer; const inst_ty = f.typeOfIndex(inst); const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); const local = try f.allocLocal(inst, inst_ty); - try f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try w.writeAll(" = zig_errorName["); - try f.writeCValue(w, operand, .Other); + try f.writeCValue(w, operand, .other); try w.writeAll(" - 1];"); - try f.object.newline(); + try f.newline(); return local; } fn airSplat(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; - const zcu = pt.zcu; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); const inst_ty = f.typeOfIndex(inst); - const inst_scalar_ty = inst_ty.scalarType(zcu); - const w = 
&f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, inst_ty); const v = try Vectorize.start(f, inst, w, inst_ty); - const a = try Assignment.start(f, w, try f.ctypeFromType(inst_scalar_ty, .complete)); - try f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try v.elem(f, w); - try a.assign(f, w); - try f.writeCValue(w, operand, .Other); - try a.end(f, w); + try w.writeAll(" = "); + try f.writeCValue(w, operand, .other); + try w.writeByte(';'); + try f.newline(); try v.end(f, inst, w); return local; @@ -7096,29 +6238,29 @@ fn airSelect(f: *Function, inst: Air.Inst.Index) !CValue { const inst_ty = f.typeOfIndex(inst); - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, inst_ty); const v = try Vectorize.start(f, inst, w, inst_ty); - try f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try v.elem(f, w); try w.writeAll(" = "); - try f.writeCValue(w, pred, .Other); + try f.writeCValue(w, pred, .other); try v.elem(f, w); try w.writeAll(" ? "); - try f.writeCValue(w, lhs, .Other); + try f.writeCValue(w, lhs, .other); try v.elem(f, w); try w.writeAll(" : "); - try f.writeCValue(w, rhs, .Other); + try f.writeCValue(w, rhs, .other); try v.elem(f, w); try w.writeByte(';'); - try f.object.newline(); + try f.newline(); try v.end(f, inst, w); return local; } fn airShuffleOne(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const unwrapped = f.air.unwrapShuffleOne(zcu, inst); @@ -7126,22 +6268,22 @@ fn airShuffleOne(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(unwrapped.operand); const inst_ty = unwrapped.result_ty; - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, inst_ty); try reap(f, inst, &.{unwrapped.operand}); // local cannot alias operand for (mask, 0..) 
|mask_elem, out_idx| { - try f.writeCValue(w, local, .Other); + try f.writeCValueMember(w, local, .{ .identifier = "array" }); try w.writeByte('['); - try f.object.dg.renderValue(w, try pt.intValue(.usize, out_idx), .Other); + try f.dg.renderValue(w, try pt.intValue(.usize, out_idx), .other); try w.writeAll("] = "); switch (mask_elem.unwrap()) { .elem => |src_idx| { - try f.writeCValue(w, operand, .Other); + try f.writeCValueMember(w, operand, .{ .identifier = "array" }); try w.writeByte('['); - try f.object.dg.renderValue(w, try pt.intValue(.usize, src_idx), .Other); + try f.dg.renderValue(w, try pt.intValue(.usize, src_idx), .other); try w.writeByte(']'); }, - .value => |val| try f.object.dg.renderValue(w, .fromInterned(val), .Other), + .value => |val| try f.dg.renderValue(w, .fromInterned(val), .other), } try w.writeAll(";\n"); } @@ -7150,7 +6292,7 @@ fn airShuffleOne(f: *Function, inst: Air.Inst.Index) !CValue { } fn airShuffleTwo(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const unwrapped = f.air.unwrapShuffleTwo(zcu, inst); @@ -7160,38 +6302,38 @@ fn airShuffleTwo(f: *Function, inst: Air.Inst.Index) !CValue { const inst_ty = unwrapped.result_ty; const elem_ty = inst_ty.childType(zcu); - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, inst_ty); try reap(f, inst, &.{ unwrapped.operand_a, unwrapped.operand_b }); // local cannot alias operands for (mask, 0..) 
|mask_elem, out_idx| { - try f.writeCValue(w, local, .Other); + try f.writeCValueMember(w, local, .{ .identifier = "array" }); try w.writeByte('['); - try f.object.dg.renderValue(w, try pt.intValue(.usize, out_idx), .Other); + try f.dg.renderValue(w, try pt.intValue(.usize, out_idx), .other); try w.writeAll("] = "); switch (mask_elem.unwrap()) { .a_elem => |src_idx| { - try f.writeCValue(w, operand_a, .Other); + try f.writeCValueMember(w, operand_a, .{ .identifier = "array" }); try w.writeByte('['); - try f.object.dg.renderValue(w, try pt.intValue(.usize, src_idx), .Other); + try f.dg.renderValue(w, try pt.intValue(.usize, src_idx), .other); try w.writeByte(']'); }, .b_elem => |src_idx| { - try f.writeCValue(w, operand_b, .Other); + try f.writeCValueMember(w, operand_b, .{ .identifier = "array" }); try w.writeByte('['); - try f.object.dg.renderValue(w, try pt.intValue(.usize, src_idx), .Other); + try f.dg.renderValue(w, try pt.intValue(.usize, src_idx), .other); try w.writeByte(']'); }, - .undef => try f.object.dg.renderUndefValue(w, elem_ty, .Other), + .undef => try f.dg.renderUndefValue(w, elem_ty, .other), } try w.writeByte(';'); - try f.object.newline(); + try f.newline(); } return local; } fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const reduce = f.air.instructions.items(.data)[@intFromEnum(inst)].reduce; @@ -7199,7 +6341,7 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(reduce.operand); try reap(f, inst, &.{reduce.operand}); const operand_ty = f.typeOf(reduce.operand); - const w = &f.object.code.writer; + const w = &f.code.writer; const use_operator = scalar_ty.bitSize(zcu) <= 64; const op: union(enum) { @@ -7246,10 +6388,10 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { // } const accum = try f.allocLocal(inst, scalar_ty); - try f.writeCValue(w, accum, .Other); + try f.writeCValue(w, accum, .other); try 
w.writeAll(" = "); - try f.object.dg.renderValue(w, switch (reduce.operation) { + try f.dg.renderValue(w, switch (reduce.operation) { .Or, .Xor => switch (scalar_ty.zigTypeTag(zcu)) { .bool => Value.false, .int => try pt.intValue(scalar_ty, 0), @@ -7285,58 +6427,58 @@ fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue { .float => try pt.floatValue(scalar_ty, std.math.nan(f128)), else => unreachable, }, - }, .Other); + }, .other); try w.writeByte(';'); - try f.object.newline(); + try f.newline(); const v = try Vectorize.start(f, inst, w, operand_ty); - try f.writeCValue(w, accum, .Other); + try f.writeCValue(w, accum, .other); switch (op) { .builtin => |func| { try w.print(" = zig_{s}_", .{func.operation}); - try f.object.dg.renderTypeForBuiltinFnName(w, scalar_ty); + try f.dg.renderTypeForBuiltinFnName(w, scalar_ty); try w.writeByte('('); - try f.writeCValue(w, accum, .FunctionArgument); + try f.writeCValue(w, accum, .other); try w.writeAll(", "); - try f.writeCValue(w, operand, .Other); + try f.writeCValue(w, operand, .other); try v.elem(f, w); - try f.object.dg.renderBuiltinInfo(w, scalar_ty, func.info); + try f.dg.renderBuiltinInfo(w, scalar_ty, func.info); try w.writeByte(')'); }, .infix => |ass| { try w.writeAll(ass); - try f.writeCValue(w, operand, .Other); + try f.writeCValue(w, operand, .other); try v.elem(f, w); }, .ternary => |cmp| { try w.writeAll(" = "); - try f.writeCValue(w, accum, .Other); + try f.writeCValue(w, accum, .other); try w.writeAll(cmp); - try f.writeCValue(w, operand, .Other); + try f.writeCValue(w, operand, .other); try v.elem(f, w); try w.writeAll(" ? 
"); - try f.writeCValue(w, accum, .Other); + try f.writeCValue(w, accum, .other); try w.writeAll(" : "); - try f.writeCValue(w, operand, .Other); + try f.writeCValue(w, operand, .other); try v.elem(f, w); }, } try w.writeByte(';'); - try f.object.newline(); + try f.newline(); try v.end(f, inst, w); return accum; } fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const inst_ty = f.typeOfIndex(inst); const len: usize = @intCast(inst_ty.arrayLen(zcu)); const elements: []const Air.Inst.Ref = @ptrCast(f.air.extra.items[ty_pl.payload..][0..len]); - const gpa = f.object.dg.gpa; + const gpa = f.dg.gpa; const resolved_elements = try gpa.alloc(CValue, elements.len); defer gpa.free(resolved_elements); for (resolved_elements, elements) |*resolved_element, element| { @@ -7349,28 +6491,23 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { } } - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, inst_ty); switch (ip.indexToKey(inst_ty.toIntern())) { inline .array_type, .vector_type => |info, tag| { - const a: Assignment = .{ - .ctype = try f.ctypeFromType(.fromInterned(info.child), .complete), - }; for (resolved_elements, 0..) 
|element, i| { - try a.restart(f, w); - try f.writeCValue(w, local, .Other); - try w.print("[{d}]", .{i}); - try a.assign(f, w); - try f.writeCValue(w, element, .Other); - try a.end(f, w); + try f.writeCValueMember(w, local, .{ .identifier = "array" }); + try w.print("[{d}] = ", .{i}); + try f.writeCValue(w, element, .other); + try w.writeByte(';'); + try f.newline(); } if (tag == .array_type and info.sentinel != .none) { - try a.restart(f, w); - try f.writeCValue(w, local, .Other); - try w.print("[{d}]", .{info.len}); - try a.assign(f, w); - try f.object.dg.renderValue(w, Value.fromInterned(info.sentinel), .Other); - try a.end(f, w); + try f.writeCValueMember(w, local, .{ .identifier = "array" }); + try w.print("[{d}] = ", .{info.len}); + try f.dg.renderValue(w, Value.fromInterned(info.sentinel), .other); + try w.writeByte(';'); + try f.newline(); } }, .struct_type => { @@ -7382,11 +6519,11 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { const field_ty: Type = .fromInterned(loaded_struct.field_types.get(ip)[field_index]); if (!field_ty.hasRuntimeBits(zcu)) continue; - const a = try Assignment.start(f, w, try f.ctypeFromType(field_ty, .complete)); try f.writeCValueMember(w, local, .{ .identifier = loaded_struct.field_names.get(ip)[field_index].toSlice(ip) }); - try a.assign(f, w); - try f.writeCValue(w, resolved_elements[field_index], .Other); - try a.end(f, w); + try w.writeAll(" = "); + try f.writeCValue(w, resolved_elements[field_index], .other); + try w.writeByte(';'); + try f.newline(); } }, .@"packed" => unreachable, // `Air.Legalize.Feature.expand_packed_struct_init` handles this case @@ -7397,11 +6534,11 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { const field_ty: Type = .fromInterned(tuple_info.types.get(ip)[field_index]); if (!field_ty.hasRuntimeBits(zcu)) continue; - const a = try Assignment.start(f, w, try f.ctypeFromType(field_ty, .complete)); try f.writeCValueMember(w, local, .{ .field = field_index }); - 
try a.assign(f, w); - try f.writeCValue(w, resolved_elements[field_index], .Other); - try a.end(f, w); + try w.writeAll(" = "); + try f.writeCValue(w, resolved_elements[field_index], .other); + try w.writeByte(';'); + try f.newline(); }, else => unreachable, } @@ -7410,46 +6547,52 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { } fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const extra = f.air.extraData(Air.UnionInit, ty_pl.payload).data; + const field_index = extra.field_index; const union_ty = f.typeOfIndex(inst); const loaded_union = ip.loadUnionType(union_ty.toIntern()); - const field_name = ip.loadEnumType(loaded_union.enum_tag_type).field_names.get(ip)[extra.field_index]; - const payload_ty = f.typeOf(extra.init); + const loaded_enum = ip.loadEnumType(loaded_union.enum_tag_type); + const payload = try f.resolveInst(extra.init); try reap(f, inst, &.{extra.init}); - const w = &f.object.code.writer; + const w = &f.code.writer; if (loaded_union.layout == .@"packed") return f.moveCValue(inst, union_ty, payload); const local = try f.allocLocal(inst, union_ty); - const field: CValue = if (union_ty.unionTagTypeRuntime(zcu)) |tag_ty| field: { - assert(union_ty.unionGetLayout(zcu).tag_size != 0); - const field_index = tag_ty.enumFieldIndex(field_name, zcu).?; - const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index); - const a = try Assignment.start(f, w, try f.ctypeFromType(tag_ty, .complete)); + if (loaded_union.has_runtime_tag) { try f.writeCValueMember(w, local, .{ .identifier = "tag" }); - try a.assign(f, w); - try w.print("{f}", .{try f.fmtIntLiteralDec(tag_val.intFromEnum(zcu))}); - try a.end(f, w); - break :field .{ .payload_identifier = field_name.toSlice(ip) }; - } else .{ .identifier = field_name.toSlice(ip) }; + if 
(loaded_enum.field_values.len == 0) { + // auto-numbered + try w.print(" = {d};", .{field_index}); + } else { + const tag_int_val: Value = .fromInterned(loaded_enum.field_values.get(ip)[field_index]); + try w.print(" = {f};", .{try f.fmtIntLiteralDec(tag_int_val)}); + } + try f.newline(); + } - const a = try Assignment.start(f, w, try f.ctypeFromType(payload_ty, .complete)); - try f.writeCValueMember(w, local, field); - try a.assign(f, w); - try f.writeCValue(w, payload, .Other); - try a.end(f, w); + const field_name_slice = loaded_enum.field_names.get(ip)[field_index].toSlice(ip); + switch (loaded_union.layout) { + .auto => try f.writeCValueMember(w, local, .{ .payload_identifier = field_name_slice }), + .@"extern" => try f.writeCValueMember(w, local, .{ .identifier = field_name_slice }), + .@"packed" => unreachable, + } + try w.writeAll(" = "); + try f.writeCValue(w, payload, .other); + try w.writeByte(';'); + try f.newline(); return local; } fn airPrefetch(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const prefetch = f.air.instructions.items(.data)[@intFromEnum(inst)].prefetch; @@ -7457,16 +6600,16 @@ fn airPrefetch(f: *Function, inst: Air.Inst.Index) !CValue { const ptr = try f.resolveInst(prefetch.ptr); try reap(f, inst, &.{prefetch.ptr}); - const w = &f.object.code.writer; + const w = &f.code.writer; switch (prefetch.cache) { .data => { try w.writeAll("zig_prefetch("); if (ptr_ty.isSlice(zcu)) try f.writeCValueMember(w, ptr, .{ .identifier = "ptr" }) else - try f.writeCValue(w, ptr, .FunctionArgument); + try f.writeCValue(w, ptr, .other); try w.print(", {d}, {d});", .{ @intFromEnum(prefetch.rw), prefetch.locality }); - try f.object.newline(); + try f.newline(); }, // The available prefetch intrinsics do not accept a cache argument; only // address, rw, and locality. 
@@ -7479,14 +6622,14 @@ fn airPrefetch(f: *Function, inst: Air.Inst.Index) !CValue { fn airWasmMemorySize(f: *Function, inst: Air.Inst.Index) !CValue { const pl_op = f.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; - const w = &f.object.code.writer; + const w = &f.code.writer; const inst_ty = f.typeOfIndex(inst); const local = try f.allocLocal(inst, inst_ty); - try f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try w.writeAll(" = "); try w.print("zig_wasm_memory_size({d});", .{pl_op.payload}); - try f.object.newline(); + try f.newline(); return local; } @@ -7494,23 +6637,23 @@ fn airWasmMemorySize(f: *Function, inst: Air.Inst.Index) !CValue { fn airWasmMemoryGrow(f: *Function, inst: Air.Inst.Index) !CValue { const pl_op = f.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; - const w = &f.object.code.writer; + const w = &f.code.writer; const inst_ty = f.typeOfIndex(inst); const operand = try f.resolveInst(pl_op.operand); try reap(f, inst, &.{pl_op.operand}); const local = try f.allocLocal(inst, inst_ty); - try f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try w.writeAll(" = "); try w.print("zig_wasm_memory_grow({d}, ", .{pl_op.payload}); - try f.writeCValue(w, operand, .FunctionArgument); + try f.writeCValue(w, operand, .other); try w.writeAll(");"); - try f.object.newline(); + try f.newline(); return local; } fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const pl_op = f.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const bin_op = f.air.extraData(Air.Bin, pl_op.payload).data; @@ -7523,24 +6666,24 @@ fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue { const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(zcu); - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, inst_ty); const v = try Vectorize.start(f, inst, w, inst_ty); - try 
f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try v.elem(f, w); try w.writeAll(" = zig_fma_"); - try f.object.dg.renderTypeForBuiltinFnName(w, inst_scalar_ty); + try f.dg.renderTypeForBuiltinFnName(w, inst_scalar_ty); try w.writeByte('('); - try f.writeCValue(w, mulend1, .FunctionArgument); + try f.writeCValue(w, mulend1, .other); try v.elem(f, w); try w.writeAll(", "); - try f.writeCValue(w, mulend2, .FunctionArgument); + try f.writeCValue(w, mulend2, .other); try v.elem(f, w); try w.writeAll(", "); - try f.writeCValue(w, addend, .FunctionArgument); + try f.writeCValue(w, addend, .other); try v.elem(f, w); try w.writeAll(");"); - try f.object.newline(); + try f.newline(); try v.end(f, inst, w); return local; @@ -7548,34 +6691,33 @@ fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue { fn airRuntimeNavPtr(f: *Function, inst: Air.Inst.Index) !CValue { const ty_nav = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_nav; - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, .fromInterned(ty_nav.ty)); - try f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try w.writeAll(" = "); - try f.object.dg.renderNav(w, ty_nav.nav, .Other); + try f.dg.renderNav(w, ty_nav.nav, .other); try w.writeByte(';'); - try f.object.newline(); + try f.newline(); return local; } fn airCVaStart(f: *Function, inst: Air.Inst.Index) !CValue { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; const inst_ty = f.typeOfIndex(inst); - const function_ty = zcu.navValue(f.object.dg.pass.nav).typeOf(zcu); - const function_info = (try f.ctypeFromType(function_ty, .complete)).info(&f.object.dg.ctype_pool).function; - assert(function_info.varargs); - const w = &f.object.code.writer; + assert(Value.fromInterned(f.func_index).typeOf(zcu).fnIsVarArgs(zcu)); + + const w = &f.code.writer; const local = try f.allocLocal(inst, inst_ty); try w.writeAll("va_start(*(va_list *)&"); - try 
f.writeCValue(w, local, .Other); - if (function_info.param_ctypes.len > 0) { + try f.writeCValue(w, local, .other); + if (f.next_arg_index > 0) { try w.writeAll(", "); - try f.writeCValue(w, .{ .arg = function_info.param_ctypes.len - 1 }, .FunctionArgument); + try f.writeCValue(w, .{ .arg = f.next_arg_index - 1 }, .other); } try w.writeAll(");"); - try f.object.newline(); + try f.newline(); return local; } @@ -7586,15 +6728,15 @@ fn airCVaArg(f: *Function, inst: Air.Inst.Index) !CValue { const va_list = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, inst_ty); - try f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try w.writeAll(" = va_arg(*(va_list *)"); - try f.writeCValue(w, va_list, .Other); + try f.writeCValue(w, va_list, .other); try w.writeAll(", "); try f.renderType(w, ty_op.ty.toType()); try w.writeAll(");"); - try f.object.newline(); + try f.newline(); return local; } @@ -7604,11 +6746,11 @@ fn airCVaEnd(f: *Function, inst: Air.Inst.Index) !CValue { const va_list = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); - const w = &f.object.code.writer; + const w = &f.code.writer; try w.writeAll("va_end(*(va_list *)"); - try f.writeCValue(w, va_list, .Other); + try f.writeCValue(w, va_list, .other); try w.writeAll(");"); - try f.object.newline(); + try f.newline(); return .none; } @@ -7619,14 +6761,14 @@ fn airCVaCopy(f: *Function, inst: Air.Inst.Index) !CValue { const va_list = try f.resolveInst(ty_op.operand); try reap(f, inst, &.{ty_op.operand}); - const w = &f.object.code.writer; + const w = &f.code.writer; const local = try f.allocLocal(inst, inst_ty); try w.writeAll("va_copy(*(va_list *)&"); - try f.writeCValue(w, local, .Other); + try f.writeCValue(w, local, .other); try w.writeAll(", *(va_list *)"); - try f.writeCValue(w, va_list, .Other); + try f.writeCValue(w, va_list, .other); try w.writeAll(");"); 
- try f.object.newline(); + try f.newline(); return local; } @@ -7943,103 +7085,193 @@ fn undefPattern(comptime IntType: type) IntType { const FormatIntLiteralContext = struct { dg: *DeclGen, - int_info: InternPool.Key.IntType, - kind: CType.Kind, - ctype: CType, + loc: ValueRenderLocation, val: Value, + cty: CType, base: u8, case: std.fmt.Case, }; fn formatIntLiteral(data: FormatIntLiteralContext, w: *Writer) Writer.Error!void { - const pt = data.dg.pt; - const zcu = pt.zcu; - const target = &data.dg.mod.resolved_target.result; - const ctype_pool = &data.dg.ctype_pool; + const dg = data.dg; + const zcu = dg.pt.zcu; + const target = &dg.mod.resolved_target.result; - const ExpectedContents = struct { - const base = 10; - const bits = 128; - const limbs_count = BigInt.calcTwosCompLimbCount(bits); + const val = data.val; + const ty = val.typeOf(zcu); - undef_limbs: [limbs_count]BigIntLimb, - wrap_limbs: [limbs_count]BigIntLimb, - to_string_buf: [bits]u8, - to_string_limbs: [BigInt.calcToStringLimbsBufferLen(limbs_count, base)]BigIntLimb, - }; - var stack align(@alignOf(ExpectedContents)) = - std.heap.stackFallback(@sizeOf(ExpectedContents), data.dg.gpa); - const allocator = stack.get(); + assert(!val.isUndef(zcu)); - var undef_limbs: []BigIntLimb = &.{}; - defer allocator.free(undef_limbs); + var space: Value.BigIntSpace = undefined; + const val_bigint = val.toBigInt(&space, zcu); - var int_buf: Value.BigIntSpace = undefined; - const int = if (data.val.isUndef(zcu)) blk: { - undef_limbs = allocator.alloc(BigIntLimb, BigInt.calcTwosCompLimbCount(data.int_info.bits)) catch return error.WriteFailed; - @memset(undef_limbs, undefPattern(BigIntLimb)); + switch (CType.classifyInt(ty, zcu)) { + .void => unreachable, // opv + .small => |int_cty| return FormatInt128.format(.{ + .target = zcu.getTarget(), + .int_cty = int_cty, + .val = val_bigint, + .is_global = data.loc == .static_initializer, + .base = data.base, + .case = data.case, + }, w), + .big => |big| { + if 
(!data.loc.isInitializer()) { + // Use `CType.fmtTypeName` directly to avoid the possibility of `error.OutOfMemory`. + try w.print("({f})", .{data.cty.fmtTypeName(zcu)}); + } - var undef_int = BigInt.Mutable{ - .limbs = undef_limbs, - .len = undef_limbs.len, - .positive = true, - }; - undef_int.truncate(undef_int.toConst(), data.int_info.signedness, data.int_info.bits); - break :blk undef_int.toConst(); - } else data.val.toBigInt(&int_buf, zcu); - assert(int.fitsInTwosComp(data.int_info.signedness, data.int_info.bits)); + try w.writeAll("{{"); - const c_bits: usize = @intCast(data.ctype.byteSize(ctype_pool, data.dg.mod) * 8); - var one_limbs: [BigInt.calcLimbLen(1)]BigIntLimb = undefined; - const one = BigInt.Mutable.init(&one_limbs, 1).toConst(); + var limb_buf: [std.math.big.int.calcTwosCompLimbCount(65535)]std.math.big.Limb = undefined; + for (0..big.limbs_len) |limb_index| { + if (limb_index != 0) try w.writeAll(", "); + const limb_bit_offset: u64 = switch (target.cpu.arch.endian()) { + .little => limb_index * big.limb_size.bits(), + .big => (big.limbs_len - limb_index - 1) * big.limb_size.bits(), + }; + var limb_bigint: std.math.big.int.Mutable = .{ + .limbs = &limb_buf, + .len = undefined, + .positive = undefined, + }; + limb_bigint.shiftRight(val_bigint, limb_bit_offset); + limb_bigint.truncate(limb_bigint.toConst(), .unsigned, big.limb_size.bits()); + try FormatInt128.format(.{ + .target = zcu.getTarget(), + .int_cty = big.limb_size.unsigned(), + .val = limb_bigint.toConst(), + .is_global = data.loc == .static_initializer, + .base = data.base, + .case = data.case, + }, w); + } - var wrap = BigInt.Mutable{ - .limbs = allocator.alloc(BigIntLimb, BigInt.calcTwosCompLimbCount(c_bits)) catch return error.WriteFailed, - .len = undefined, - .positive = undefined, - }; - defer allocator.free(wrap.limbs); - - const c_limb_info: struct { - ctype: CType, - count: usize, - endian: std.builtin.Endian, - homogeneous: bool, - } = switch (data.ctype.info(ctype_pool)) { - 
.basic => |basic_info| switch (basic_info) { - else => .{ - .ctype = .void, - .count = 1, - .endian = .little, - .homogeneous = true, - }, - .zig_u128, .zig_i128 => .{ - .ctype = .u64, - .count = 2, - .endian = .big, - .homogeneous = false, - }, + try w.writeAll("}}"); }, - .array => |array_info| .{ - .ctype = array_info.elem_ctype, - .count = @intCast(array_info.len), - .endian = target.cpu.arch.endian(), - .homogeneous = true, - }, - else => unreachable, + } +} +const FormatInt128 = struct { + target: *const std.Target, + int_cty: CType.Int, + val: std.math.big.int.Const, + is_global: bool, + base: u8, + case: std.fmt.Case, + pub fn format(data: FormatInt128, w: *Writer) Writer.Error!void { + const target = data.target; + + const val = data.val; + const is_global = data.is_global; + const base = data.base; + const case = data.case; + + switch (data.int_cty) { + .uint8_t, + .uint16_t, + .uint32_t, + .uint64_t, + .@"unsigned short", + .@"unsigned int", + .@"unsigned long", + .@"unsigned long long", + .uintptr_t, + => |t| try w.print("{f}", .{ + fmtUnsignedIntLiteralSmall(target, t, val.toInt(u64) catch unreachable, is_global, base, case), + }), + + .int8_t, + .int16_t, + .int32_t, + .int64_t, + .char, + .@"signed short", + .@"signed int", + .@"signed long", + .@"signed long long", + .intptr_t, + => |t| try w.print("{f}", .{ + fmtSignedIntLiteralSmall(target, t, val.toInt(i64) catch unreachable, is_global, base, case), + }), + + .zig_u128 => { + const raw = val.toInt(u128) catch unreachable; + const lo: u64 = @truncate(raw); + const hi: u64 = @intCast(raw >> 64); + const macro_name: []const u8 = if (is_global) "zig_init_u128" else "zig_make_u128"; + try w.print("{s}({f}, {f})", .{ + macro_name, + fmtUnsignedIntLiteralSmall(target, .uint64_t, hi, is_global, base, case), + fmtUnsignedIntLiteralSmall(target, .uint64_t, lo, is_global, base, case), + }); + }, + + .zig_i128 => { + const raw = val.toInt(i128) catch unreachable; + const lo: u64 = @truncate(@as(u128, 
@bitCast(raw))); + const hi: i64 = @intCast(raw >> 64); + const macro_name: []const u8 = if (is_global) "zig_init_i128" else "zig_make_i128"; + try w.print("{s}({f}, {f})", .{ + macro_name, + fmtSignedIntLiteralSmall(target, .int64_t, hi, is_global, base, case), + fmtUnsignedIntLiteralSmall(target, .uint64_t, lo, is_global, base, case), + }); + }, + } + } +}; +fn fmtUnsignedIntLiteralSmall( + target: *const std.Target, + int_cty: CType.Int, + val: u64, + is_global: bool, + base: u8, + case: std.fmt.Case, +) FormatUnsignedIntLiteralSmall { + return .{ + .target = target, + .int_cty = int_cty, + .val = val, + .is_global = is_global, + .base = base, + .case = case, }; - if (c_limb_info.count == 1) { - if (wrap.addWrap(int, one, data.int_info.signedness, c_bits) or - data.int_info.signedness == .signed and wrap.subWrap(int, one, data.int_info.signedness, c_bits)) - return w.print("{s}_{s}", .{ - data.ctype.getStandardDefineAbbrev() orelse return w.print("zig_{s}Int_{c}{d}", .{ - if (int.positive) "max" else "min", signAbbrev(data.int_info.signedness), c_bits, - }), - if (int.positive) "MAX" else "MIN", - }); - - if (!int.positive) try w.writeByte('-'); - try data.ctype.renderLiteralPrefix(w, data.kind, ctype_pool); +} +fn fmtSignedIntLiteralSmall( + target: *const std.Target, + int_cty: CType.Int, + val: i64, + is_global: bool, + base: u8, + case: std.fmt.Case, +) FormatSignedIntLiteralSmall { + return .{ + .target = target, + .int_cty = int_cty, + .val = val, + .is_global = is_global, + .base = base, + .case = case, + }; +} +const FormatSignedIntLiteralSmall = struct { + target: *const std.Target, + int_cty: CType.Int, + val: i64, + is_global: bool, + base: u8, + case: std.fmt.Case, + pub fn format(data: FormatSignedIntLiteralSmall, w: *Writer) Writer.Error!void { + const bits = data.int_cty.bits(data.target); + const max_int: i64 = @bitCast((@as(u64, 1) << @intCast(bits - 1)) - 1); + const min_int: i64 = @bitCast(@as(u64, 1) << @intCast(bits - 1)); + if (data.val == 
max_int) { + return w.print("{s}_MAX", .{minMaxMacroPrefix(data.int_cty)}); + } else if (data.val == min_int) { + return w.print("{s}_MIN", .{minMaxMacroPrefix(data.int_cty)}); + } + if (data.val < 0) try w.writeByte('-'); + try w.writeAll(intLiteralPrefix(data.int_cty, data.is_global)); switch (data.base) { 2 => try w.writeAll("0b"), 8 => try w.writeByte('0'), @@ -8047,68 +7279,131 @@ fn formatIntLiteral(data: FormatIntLiteralContext, w: *Writer) Writer.Error!void 16 => try w.writeAll("0x"), else => unreachable, } - const string = int.abs().toStringAlloc(allocator, data.base, data.case) catch - return error.WriteFailed; - defer allocator.free(string); - try w.writeAll(string); - } else { - try data.ctype.renderLiteralPrefix(w, data.kind, ctype_pool); - wrap.truncate(int, .unsigned, c_bits); - @memset(wrap.limbs[wrap.len..], 0); - wrap.len = wrap.limbs.len; - const limbs_per_c_limb = @divExact(wrap.len, c_limb_info.count); - - var c_limb_int_info: std.builtin.Type.Int = .{ - .signedness = undefined, - .bits = @intCast(@divExact(c_bits, c_limb_info.count)), - }; - var c_limb_ctype: CType = undefined; - - var limb_offset: usize = 0; - const most_significant_limb_i = wrap.len - limbs_per_c_limb; - while (limb_offset < wrap.len) : (limb_offset += limbs_per_c_limb) { - const limb_i = switch (c_limb_info.endian) { - .little => limb_offset, - .big => most_significant_limb_i - limb_offset, - }; - var c_limb_mut = BigInt.Mutable{ - .limbs = wrap.limbs[limb_i..][0..limbs_per_c_limb], - .len = undefined, - .positive = true, - }; - c_limb_mut.normalize(limbs_per_c_limb); - - if (limb_i == most_significant_limb_i and - !c_limb_info.homogeneous and data.int_info.signedness == .signed) - { - // most significant limb is actually signed - c_limb_int_info.signedness = .signed; - c_limb_ctype = c_limb_info.ctype.toSigned(); - - c_limb_mut.truncate( - c_limb_mut.toConst(), - .signed, - data.int_info.bits - limb_i * @bitSizeOf(BigIntLimb), - ); - } else { - c_limb_int_info.signedness = 
.unsigned; - c_limb_ctype = c_limb_info.ctype; - } - - if (limb_offset > 0) try w.writeAll(", "); - try formatIntLiteral(.{ - .dg = data.dg, - .int_info = c_limb_int_info, - .kind = data.kind, - .ctype = c_limb_ctype, - .val = pt.intValue_big(.comptime_int, c_limb_mut.toConst()) catch - return error.WriteFailed, - .base = data.base, - .case = data.case, - }, w); - } + // This `@abs` is safe thanks to the `min_int` case above. + try w.printInt(@abs(data.val), data.base, data.case, .{}); + try w.writeAll(intLiteralSuffix(data.int_cty)); } - try data.ctype.renderLiteralSuffix(w, ctype_pool); +}; +const FormatUnsignedIntLiteralSmall = struct { + target: *const std.Target, + int_cty: CType.Int, + val: u64, + is_global: bool, + base: u8, + case: std.fmt.Case, + pub fn format(data: FormatUnsignedIntLiteralSmall, w: *Writer) Writer.Error!void { + const bits = data.int_cty.bits(data.target); + const max_int: u64 = @as(u64, std.math.maxInt(u64)) >> @intCast(64 - bits); + if (data.val == max_int) { + return w.print("{s}_MAX", .{minMaxMacroPrefix(data.int_cty)}); + } + try w.writeAll(intLiteralPrefix(data.int_cty, data.is_global)); + switch (data.base) { + 2 => try w.writeAll("0b"), + 8 => try w.writeByte('0'), + 10 => {}, + 16 => try w.writeAll("0x"), + else => unreachable, + } + try w.printInt(data.val, data.base, data.case, .{}); + try w.writeAll(intLiteralSuffix(data.int_cty)); + } +}; +fn minMaxMacroPrefix(int_cty: CType.Int) []const u8 { + return switch (int_cty) { + // zig fmt: off + .char => "CHAR", + + .@"unsigned short" => "USHRT", + .@"unsigned int" => "UINT", + .@"unsigned long" => "ULONG", + .@"unsigned long long" => "ULLONG", + + .@"signed short" => "SHRT", + .@"signed int" => "INT", + .@"signed long" => "LONG", + .@"signed long long" => "LLONG", + + .uint8_t => "UINT8", + .uint16_t => "UINT16", + .uint32_t => "UINT32", + .uint64_t => "UINT64", + .zig_u128 => unreachable, + + .int8_t => "INT8", + .int16_t => "INT16", + .int32_t => "INT32", + .int64_t => "INT64", 
+ .zig_i128 => unreachable, + + .uintptr_t => "UINTPTR", + .intptr_t => "INTPTR", + // zig fmt: on + }; +} +fn intLiteralPrefix(cty: CType.Int, is_global: bool) []const u8 { + return switch (cty) { + // zig fmt: off + .char => if (is_global) "" else "(char)", + + .@"unsigned short" => if (is_global) "" else "(unsigned short)", + .@"unsigned int" => "", + .@"unsigned long" => "", + .@"unsigned long long" => "", + + .@"signed short" => if (is_global) "" else "(signed short)", + .@"signed int" => "", + .@"signed long" => "", + .@"signed long long" => "", + + .uint8_t => "UINT8_C(", + .uint16_t => "UINT16_C(", + .uint32_t => "UINT32_C(", + .uint64_t => "UINT64_C(", + .zig_u128 => unreachable, + + .int8_t => "INT8_C(", + .int16_t => "INT16_C(", + .int32_t => "INT32_C(", + .int64_t => "INT64_C(", + .zig_i128 => unreachable, + + .uintptr_t => if (is_global) "" else "(uintptr_t)", + .intptr_t => if (is_global) "" else "(intptr_t)", + // zig fmt: on + }; +} +fn intLiteralSuffix(cty: CType.Int) []const u8 { + return switch (cty) { + // zig fmt: off + .char => "", + + .@"unsigned short" => "u", + .@"unsigned int" => "u", + .@"unsigned long" => "ul", + .@"unsigned long long" => "ull", + + .@"signed short" => "", + .@"signed int" => "", + .@"signed long" => "l", + .@"signed long long" => "ll", + + .uint8_t => ")", + .uint16_t => ")", + .uint32_t => ")", + .uint64_t => ")", + .zig_u128 => unreachable, + + .int8_t => ")", + .int16_t => ")", + .int32_t => ")", + .int64_t => ")", + .zig_i128 => unreachable, + + .uintptr_t => "ul", + .intptr_t => "", + // zig fmt: on + }; } const Materialize = struct { @@ -8123,7 +7418,7 @@ const Materialize = struct { } pub fn mat(self: Materialize, f: *Function, w: *Writer) !void { - try f.writeCValue(w, self.local, .Other); + try f.writeCValue(w, self.local, .other); } pub fn end(self: Materialize, f: *Function, inst: Air.Inst.Index) !void { @@ -8131,95 +7426,52 @@ const Materialize = struct { } }; -const Assignment = struct { - ctype: CType, - - 
pub fn start(f: *Function, w: *Writer, ctype: CType) !Assignment { - const self: Assignment = .{ .ctype = ctype }; - try self.restart(f, w); - return self; - } - - pub fn restart(self: Assignment, f: *Function, w: *Writer) !void { - switch (self.strategy(f)) { - .assign => {}, - .memcpy => try w.writeAll("memcpy("), - } - } - - pub fn assign(self: Assignment, f: *Function, w: *Writer) !void { - switch (self.strategy(f)) { - .assign => try w.writeAll(" = "), - .memcpy => try w.writeAll(", "), - } - } - - pub fn end(self: Assignment, f: *Function, w: *Writer) !void { - switch (self.strategy(f)) { - .assign => {}, - .memcpy => { - try w.writeAll(", sizeof("); - try f.renderCType(w, self.ctype); - try w.writeAll("))"); - }, - } - try w.writeByte(';'); - try f.object.newline(); - } - - fn strategy(self: Assignment, f: *Function) enum { assign, memcpy } { - return switch (self.ctype.info(&f.object.dg.ctype_pool)) { - else => .assign, - .array, .vector => .memcpy, - }; - } -}; - const Vectorize = struct { index: CValue = .none, pub fn start(f: *Function, inst: Air.Inst.Index, w: *Writer, ty: Type) !Vectorize { - const pt = f.object.dg.pt; + const pt = f.dg.pt; const zcu = pt.zcu; - return if (ty.zigTypeTag(zcu) == .vector) index: { - const local = try f.allocLocal(inst, .usize); - - try w.writeAll("for ("); - try f.writeCValue(w, local, .Other); - try w.print(" = {f}; ", .{try f.fmtIntLiteralDec(.zero_usize)}); - try f.writeCValue(w, local, .Other); - try w.print(" < {f}; ", .{try f.fmtIntLiteralDec(try pt.intValue(.usize, ty.vectorLen(zcu)))}); - try f.writeCValue(w, local, .Other); - try w.print(" += {f}) {{\n", .{try f.fmtIntLiteralDec(.one_usize)}); - f.object.indent(); - try f.object.newline(); - - break :index .{ .index = local }; - } else .{}; + switch (ty.zigTypeTag(zcu)) { + else => return .{ .index = .none }, + .vector => { + const local = try f.allocLocal(inst, .usize); + try w.writeAll("for ("); + try f.writeCValue(w, local, .other); + try w.print(" = {f}; ", 
.{try f.fmtIntLiteralDec(.zero_usize)}); + try f.writeCValue(w, local, .other); + try w.print(" < {f}; ", .{try f.fmtIntLiteralDec(try pt.intValue(.usize, ty.vectorLen(zcu)))}); + try f.writeCValue(w, local, .other); + try w.print(" += {f}) {{", .{try f.fmtIntLiteralDec(.one_usize)}); + f.indent(); + try f.newline(); + return .{ .index = local }; + }, + } } pub fn elem(self: Vectorize, f: *Function, w: *Writer) !void { if (self.index != .none) { - try w.writeByte('['); - try f.writeCValue(w, self.index, .Other); + try w.writeAll(".array["); + try f.writeCValue(w, self.index, .other); try w.writeByte(']'); } } pub fn end(self: Vectorize, f: *Function, inst: Air.Inst.Index, w: *Writer) !void { if (self.index != .none) { - try f.object.outdent(); + try f.outdent(); try w.writeByte('}'); - try f.object.newline(); + try f.newline(); try freeLocal(f, inst, self.index.new_local, null); } } }; -fn lowersToArray(ty: Type, zcu: *Zcu) bool { +fn lowersToBigInt(ty: Type, zcu: *const Zcu) bool { return switch (ty.zigTypeTag(zcu)) { - .array, .vector => return true, - else => return ty.isAbiInt(zcu) and toCIntBits(@as(u32, @intCast(ty.bitSize(zcu)))) == null, + .int, .@"enum", .@"struct", .@"union" => CType.classifyInt(ty, zcu) == .big, + else => false, }; } @@ -8245,8 +7497,8 @@ fn die(f: *Function, inst: Air.Inst.Index, ref: Air.Inst.Ref) !void { } fn freeLocal(f: *Function, inst: ?Air.Inst.Index, local_index: LocalIndex, ref_inst: ?Air.Inst.Index) !void { - const gpa = f.object.dg.gpa; - const local = &f.locals.items[local_index]; + const gpa = f.dg.gpa; + const local = f.locals.items[local_index]; if (inst) |i| { if (ref_inst) |operand| { log.debug("%{d}: freeing t{d} (operand %{d})", .{ @intFromEnum(i), local_index, operand }); @@ -8260,7 +7512,7 @@ fn freeLocal(f: *Function, inst: ?Air.Inst.Index, local_index: LocalIndex, ref_i log.debug("freeing t{d}", .{local_index}); } } - const gop = try f.free_locals_map.getOrPut(gpa, local.getType()); + const gop = try 
f.free_locals_map.getOrPut(gpa, local); if (!gop.found_existing) gop.value_ptr.* = .{}; if (std.debug.runtime_safety) { // If this trips, an unfreeable allocation was attempted to be freed. @@ -8317,3 +7569,28 @@ fn deinitFreeLocalsMap(gpa: Allocator, map: *LocalsMap) void { } map.deinit(gpa); } + +fn renderErrorName(w: *Writer, err_name: []const u8) Writer.Error!void { + try w.print("zig_error_{f}", .{fmtIdentUnsolo(err_name)}); +} + +fn renderNavName(w: *Writer, nav_index: InternPool.Nav.Index, ip: *const InternPool) !void { + const nav = ip.getNav(nav_index); + if (nav.getExtern(ip)) |@"extern"| { + try w.print("{f}", .{ + fmtIdentSolo(ip.getNav(@"extern".owner_nav).name.toSlice(ip)), + }); + } else { + // MSVC has a limit of 4095 character token length limit, and fmtIdent can (worst case), + // expand to 3x the length of its input, but let's cut it off at a much shorter limit. + const fqn_slice = ip.getNav(nav_index).fqn.toSlice(ip); + try w.print("{f}__{d}", .{ + fmtIdentUnsolo(fqn_slice[0..@min(fqn_slice.len, 100)]), + @intFromEnum(nav_index), + }); + } +} + +fn renderUavName(w: *Writer, uav: Value) !void { + try w.print("__anon_{d}", .{@intFromEnum(uav.toIntern())}); +} diff --git a/src/codegen/c/Type.zig b/src/codegen/c/Type.zig deleted file mode 100644 index a7442a1d49..0000000000 --- a/src/codegen/c/Type.zig +++ /dev/null @@ -1,3471 +0,0 @@ -index: CType.Index, - -pub const @"void": CType = .{ .index = .void }; -pub const @"bool": CType = .{ .index = .bool }; -pub const @"i8": CType = .{ .index = .int8_t }; -pub const @"u8": CType = .{ .index = .uint8_t }; -pub const @"i16": CType = .{ .index = .int16_t }; -pub const @"u16": CType = .{ .index = .uint16_t }; -pub const @"i32": CType = .{ .index = .int32_t }; -pub const @"u32": CType = .{ .index = .uint32_t }; -pub const @"i64": CType = .{ .index = .int64_t }; -pub const @"u64": CType = .{ .index = .uint64_t }; -pub const @"i128": CType = .{ .index = .zig_i128 }; -pub const @"u128": CType = .{ .index = 
.zig_u128 }; -pub const @"isize": CType = .{ .index = .intptr_t }; -pub const @"usize": CType = .{ .index = .uintptr_t }; -pub const @"f16": CType = .{ .index = .zig_f16 }; -pub const @"f32": CType = .{ .index = .zig_f32 }; -pub const @"f64": CType = .{ .index = .zig_f64 }; -pub const @"f80": CType = .{ .index = .zig_f80 }; -pub const @"f128": CType = .{ .index = .zig_f128 }; - -pub fn fromPoolIndex(pool_index: usize) CType { - return .{ .index = @enumFromInt(CType.Index.first_pool_index + pool_index) }; -} - -pub fn toPoolIndex(ctype: CType) ?u32 { - const pool_index, const is_null = - @subWithOverflow(@intFromEnum(ctype.index), CType.Index.first_pool_index); - return switch (is_null) { - 0 => pool_index, - 1 => null, - }; -} - -pub fn eql(lhs: CType, rhs: CType) bool { - return lhs.index == rhs.index; -} - -pub fn isBool(ctype: CType) bool { - return switch (ctype.index) { - ._Bool, .bool => true, - else => false, - }; -} - -pub fn isInteger(ctype: CType) bool { - return switch (ctype.index) { - .char, - .@"signed char", - .short, - .int, - .long, - .@"long long", - .@"unsigned char", - .@"unsigned short", - .@"unsigned int", - .@"unsigned long", - .@"unsigned long long", - .size_t, - .ptrdiff_t, - .uint8_t, - .int8_t, - .uint16_t, - .int16_t, - .uint32_t, - .int32_t, - .uint64_t, - .int64_t, - .uintptr_t, - .intptr_t, - .zig_u128, - .zig_i128, - => true, - else => false, - }; -} - -pub fn signedness(ctype: CType, mod: *Module) std.builtin.Signedness { - return switch (ctype.index) { - .char => mod.resolved_target.result.cCharSignedness(), - .@"signed char", - .short, - .int, - .long, - .@"long long", - .ptrdiff_t, - .int8_t, - .int16_t, - .int32_t, - .int64_t, - .intptr_t, - .zig_i128, - => .signed, - .@"unsigned char", - .@"unsigned short", - .@"unsigned int", - .@"unsigned long", - .@"unsigned long long", - .size_t, - .uint8_t, - .uint16_t, - .uint32_t, - .uint64_t, - .uintptr_t, - .zig_u128, - => .unsigned, - else => unreachable, - }; -} - -pub fn 
isFloat(ctype: CType) bool { - return switch (ctype.index) { - .float, - .double, - .@"long double", - .zig_f16, - .zig_f32, - .zig_f64, - .zig_f80, - .zig_f128, - .zig_c_longdouble, - => true, - else => false, - }; -} - -pub fn toSigned(ctype: CType) CType { - return switch (ctype.index) { - .char, .@"signed char", .@"unsigned char" => .{ .index = .@"signed char" }, - .short, .@"unsigned short" => .{ .index = .short }, - .int, .@"unsigned int" => .{ .index = .int }, - .long, .@"unsigned long" => .{ .index = .long }, - .@"long long", .@"unsigned long long" => .{ .index = .@"long long" }, - .size_t, .ptrdiff_t => .{ .index = .ptrdiff_t }, - .uint8_t, .int8_t => .{ .index = .int8_t }, - .uint16_t, .int16_t => .{ .index = .int16_t }, - .uint32_t, .int32_t => .{ .index = .int32_t }, - .uint64_t, .int64_t => .{ .index = .int64_t }, - .uintptr_t, .intptr_t => .{ .index = .intptr_t }, - .zig_u128, .zig_i128 => .{ .index = .zig_i128 }, - .float, - .double, - .@"long double", - .zig_f16, - .zig_f32, - .zig_f80, - .zig_f128, - .zig_c_longdouble, - => ctype, - else => unreachable, - }; -} - -pub fn toUnsigned(ctype: CType) CType { - return switch (ctype.index) { - .char, .@"signed char", .@"unsigned char" => .{ .index = .@"unsigned char" }, - .short, .@"unsigned short" => .{ .index = .@"unsigned short" }, - .int, .@"unsigned int" => .{ .index = .@"unsigned int" }, - .long, .@"unsigned long" => .{ .index = .@"unsigned long" }, - .@"long long", .@"unsigned long long" => .{ .index = .@"unsigned long long" }, - .size_t, .ptrdiff_t => .{ .index = .size_t }, - .uint8_t, .int8_t => .{ .index = .uint8_t }, - .uint16_t, .int16_t => .{ .index = .uint16_t }, - .uint32_t, .int32_t => .{ .index = .uint32_t }, - .uint64_t, .int64_t => .{ .index = .uint64_t }, - .uintptr_t, .intptr_t => .{ .index = .uintptr_t }, - .zig_u128, .zig_i128 => .{ .index = .zig_u128 }, - else => unreachable, - }; -} - -pub fn toSignedness(ctype: CType, s: std.builtin.Signedness) CType { - return switch (s) { - 
.unsigned => ctype.toUnsigned(), - .signed => ctype.toSigned(), - }; -} - -pub fn isAnyChar(ctype: CType) bool { - return switch (ctype.index) { - else => false, - .char, .@"signed char", .@"unsigned char", .uint8_t, .int8_t => true, - }; -} - -pub fn isString(ctype: CType, pool: *const Pool) bool { - return info: switch (ctype.info(pool)) { - .basic, .fwd_decl, .aggregate, .function => false, - .pointer => |pointer_info| pointer_info.elem_ctype.isAnyChar(), - .aligned => |aligned_info| continue :info aligned_info.ctype.info(pool), - .array, .vector => |sequence_info| sequence_info.elem_type.isAnyChar(), - }; -} - -pub fn isNonString(ctype: CType, pool: *const Pool) bool { - var allow_pointer = true; - return info: switch (ctype.info(pool)) { - .basic, .fwd_decl, .aggregate, .function => false, - .pointer => |pointer_info| allow_pointer and pointer_info.nonstring, - .aligned => |aligned_info| continue :info aligned_info.ctype.info(pool), - .array, .vector => |sequence_info| sequence_info.nonstring or { - allow_pointer = false; - continue :info sequence_info.elem_ctype.info(pool); - }, - }; -} - -pub fn getStandardDefineAbbrev(ctype: CType) ?[]const u8 { - return switch (ctype.index) { - .char => "CHAR", - .@"signed char" => "SCHAR", - .short => "SHRT", - .int => "INT", - .long => "LONG", - .@"long long" => "LLONG", - .@"unsigned char" => "UCHAR", - .@"unsigned short" => "USHRT", - .@"unsigned int" => "UINT", - .@"unsigned long" => "ULONG", - .@"unsigned long long" => "ULLONG", - .float => "FLT", - .double => "DBL", - .@"long double" => "LDBL", - .size_t => "SIZE", - .ptrdiff_t => "PTRDIFF", - .uint8_t => "UINT8", - .int8_t => "INT8", - .uint16_t => "UINT16", - .int16_t => "INT16", - .uint32_t => "UINT32", - .int32_t => "INT32", - .uint64_t => "UINT64", - .int64_t => "INT64", - .uintptr_t => "UINTPTR", - .intptr_t => "INTPTR", - else => null, - }; -} - -pub fn renderLiteralPrefix(ctype: CType, w: *Writer, kind: Kind, pool: *const Pool) Writer.Error!void { - switch 
(ctype.info(pool)) { - .basic => |basic_info| switch (basic_info) { - .void => unreachable, - ._Bool, - .char, - .@"signed char", - .short, - .@"unsigned short", - .bool, - .size_t, - .ptrdiff_t, - .uintptr_t, - .intptr_t, - => switch (kind) { - else => try w.print("({s})", .{@tagName(basic_info)}), - .global => {}, - }, - .int, - .long, - .@"long long", - .@"unsigned char", - .@"unsigned int", - .@"unsigned long", - .@"unsigned long long", - .float, - .double, - .@"long double", - => {}, - .uint8_t, - .int8_t, - .uint16_t, - .int16_t, - .uint32_t, - .int32_t, - .uint64_t, - .int64_t, - => try w.print("{s}_C(", .{ctype.getStandardDefineAbbrev().?}), - .zig_u128, - .zig_i128, - .zig_f16, - .zig_f32, - .zig_f64, - .zig_f80, - .zig_f128, - .zig_c_longdouble, - => try w.print("zig_{s}_{s}(", .{ - switch (kind) { - else => "make", - .global => "init", - }, - @tagName(basic_info)["zig_".len..], - }), - .va_list => unreachable, - _ => unreachable, - }, - .array, .vector => try w.writeByte('{'), - else => unreachable, - } -} - -pub fn renderLiteralSuffix(ctype: CType, w: *Writer, pool: *const Pool) Writer.Error!void { - switch (ctype.info(pool)) { - .basic => |basic_info| switch (basic_info) { - .void => unreachable, - ._Bool => {}, - .char, - .@"signed char", - .short, - .int, - => {}, - .long => try w.writeByte('l'), - .@"long long" => try w.writeAll("ll"), - .@"unsigned char", - .@"unsigned short", - .@"unsigned int", - => try w.writeByte('u'), - .@"unsigned long", - .size_t, - .uintptr_t, - => try w.writeAll("ul"), - .@"unsigned long long" => try w.writeAll("ull"), - .float => try w.writeByte('f'), - .double => {}, - .@"long double" => try w.writeByte('l'), - .bool, - .ptrdiff_t, - .intptr_t, - => {}, - .uint8_t, - .int8_t, - .uint16_t, - .int16_t, - .uint32_t, - .int32_t, - .uint64_t, - .int64_t, - .zig_u128, - .zig_i128, - .zig_f16, - .zig_f32, - .zig_f64, - .zig_f80, - .zig_f128, - .zig_c_longdouble, - => try w.writeByte(')'), - .va_list => unreachable, - _ => 
unreachable, - }, - .array, .vector => try w.writeByte('}'), - else => unreachable, - } -} - -pub fn floatActiveBits(ctype: CType, mod: *Module) u16 { - const target = &mod.resolved_target.result; - return switch (ctype.index) { - .float => target.cTypeBitSize(.float), - .double => target.cTypeBitSize(.double), - .@"long double", .zig_c_longdouble => target.cTypeBitSize(.longdouble), - .zig_f16 => 16, - .zig_f32 => 32, - .zig_f64 => 64, - .zig_f80 => 80, - .zig_f128 => 128, - else => unreachable, - }; -} - -pub fn byteSize(ctype: CType, pool: *const Pool, mod: *Module) u64 { - const target = &mod.resolved_target.result; - return switch (ctype.info(pool)) { - .basic => |basic_info| switch (basic_info) { - .void => 0, - .char, .@"signed char", ._Bool, .@"unsigned char", .bool, .uint8_t, .int8_t => 1, - .short => target.cTypeByteSize(.short), - .int => target.cTypeByteSize(.int), - .long => target.cTypeByteSize(.long), - .@"long long" => target.cTypeByteSize(.longlong), - .@"unsigned short" => target.cTypeByteSize(.ushort), - .@"unsigned int" => target.cTypeByteSize(.uint), - .@"unsigned long" => target.cTypeByteSize(.ulong), - .@"unsigned long long" => target.cTypeByteSize(.ulonglong), - .float => target.cTypeByteSize(.float), - .double => target.cTypeByteSize(.double), - .@"long double" => target.cTypeByteSize(.longdouble), - .size_t, - .ptrdiff_t, - .uintptr_t, - .intptr_t, - => @divExact(target.ptrBitWidth(), 8), - .uint16_t, .int16_t, .zig_f16 => 2, - .uint32_t, .int32_t, .zig_f32 => 4, - .uint64_t, .int64_t, .zig_f64 => 8, - .zig_u128, .zig_i128, .zig_f128 => 16, - .zig_f80 => if (target.cTypeBitSize(.longdouble) == 80) - target.cTypeByteSize(.longdouble) - else - 16, - .zig_c_longdouble => target.cTypeByteSize(.longdouble), - .va_list => unreachable, - _ => unreachable, - }, - .pointer => @divExact(target.ptrBitWidth(), 8), - .array, .vector => |sequence_info| sequence_info.elem_ctype.byteSize(pool, mod) * sequence_info.len, - else => unreachable, - }; -} - 
-pub fn info(ctype: CType, pool: *const Pool) Info { - const pool_index = ctype.toPoolIndex() orelse return .{ .basic = ctype.index }; - const item = pool.items.get(pool_index); - switch (item.tag) { - .basic => unreachable, - .pointer => return .{ .pointer = .{ - .elem_ctype = .{ .index = @enumFromInt(item.data) }, - } }, - .pointer_const => return .{ .pointer = .{ - .elem_ctype = .{ .index = @enumFromInt(item.data) }, - .@"const" = true, - } }, - .pointer_volatile => return .{ .pointer = .{ - .elem_ctype = .{ .index = @enumFromInt(item.data) }, - .@"volatile" = true, - } }, - .pointer_const_volatile => return .{ .pointer = .{ - .elem_ctype = .{ .index = @enumFromInt(item.data) }, - .@"const" = true, - .@"volatile" = true, - } }, - .aligned => { - const extra = pool.getExtra(Pool.Aligned, item.data); - return .{ .aligned = .{ - .ctype = .{ .index = extra.ctype }, - .alignas = extra.flags.alignas, - } }; - }, - .array_small => { - const extra = pool.getExtra(Pool.SequenceSmall, item.data); - return .{ .array = .{ - .elem_ctype = .{ .index = extra.elem_ctype }, - .len = extra.len, - } }; - }, - .array_large => { - const extra = pool.getExtra(Pool.SequenceLarge, item.data); - return .{ .array = .{ - .elem_ctype = .{ .index = extra.elem_ctype }, - .len = extra.len(), - } }; - }, - .vector => { - const extra = pool.getExtra(Pool.SequenceSmall, item.data); - return .{ .vector = .{ - .elem_ctype = .{ .index = extra.elem_ctype }, - .len = extra.len, - } }; - }, - .nonstring => { - var child_info = info(.{ .index = @enumFromInt(item.data) }, pool); - switch (child_info) { - else => unreachable, - .pointer => |*pointer_info| pointer_info.nonstring = true, - .array, .vector => |*sequence_info| sequence_info.nonstring = true, - } - return child_info; - }, - .fwd_decl_struct_anon => { - const extra_trail = pool.getExtraTrail(Pool.FwdDeclAnon, item.data); - return .{ .fwd_decl = .{ - .tag = .@"struct", - .name = .{ .anon = .{ - .extra_index = extra_trail.trail.extra_index, - 
.len = extra_trail.extra.fields_len, - } }, - } }; - }, - .fwd_decl_union_anon => { - const extra_trail = pool.getExtraTrail(Pool.FwdDeclAnon, item.data); - return .{ .fwd_decl = .{ - .tag = .@"union", - .name = .{ .anon = .{ - .extra_index = extra_trail.trail.extra_index, - .len = extra_trail.extra.fields_len, - } }, - } }; - }, - .fwd_decl_struct => return .{ .fwd_decl = .{ - .tag = .@"struct", - .name = .{ .index = @enumFromInt(item.data) }, - } }, - .fwd_decl_union => return .{ .fwd_decl = .{ - .tag = .@"union", - .name = .{ .index = @enumFromInt(item.data) }, - } }, - .aggregate_struct_anon => { - const extra_trail = pool.getExtraTrail(Pool.AggregateAnon, item.data); - return .{ .aggregate = .{ - .tag = .@"struct", - .name = .{ .anon = .{ - .index = extra_trail.extra.index, - .id = extra_trail.extra.id, - } }, - .fields = .{ - .extra_index = extra_trail.trail.extra_index, - .len = extra_trail.extra.fields_len, - }, - } }; - }, - .aggregate_union_anon => { - const extra_trail = pool.getExtraTrail(Pool.AggregateAnon, item.data); - return .{ .aggregate = .{ - .tag = .@"union", - .name = .{ .anon = .{ - .index = extra_trail.extra.index, - .id = extra_trail.extra.id, - } }, - .fields = .{ - .extra_index = extra_trail.trail.extra_index, - .len = extra_trail.extra.fields_len, - }, - } }; - }, - .aggregate_struct_packed_anon => { - const extra_trail = pool.getExtraTrail(Pool.AggregateAnon, item.data); - return .{ .aggregate = .{ - .tag = .@"struct", - .@"packed" = true, - .name = .{ .anon = .{ - .index = extra_trail.extra.index, - .id = extra_trail.extra.id, - } }, - .fields = .{ - .extra_index = extra_trail.trail.extra_index, - .len = extra_trail.extra.fields_len, - }, - } }; - }, - .aggregate_union_packed_anon => { - const extra_trail = pool.getExtraTrail(Pool.AggregateAnon, item.data); - return .{ .aggregate = .{ - .tag = .@"union", - .@"packed" = true, - .name = .{ .anon = .{ - .index = extra_trail.extra.index, - .id = extra_trail.extra.id, - } }, - .fields = .{ - 
.extra_index = extra_trail.trail.extra_index, - .len = extra_trail.extra.fields_len, - }, - } }; - }, - .aggregate_struct => { - const extra_trail = pool.getExtraTrail(Pool.Aggregate, item.data); - return .{ .aggregate = .{ - .tag = .@"struct", - .name = .{ .fwd_decl = .{ .index = extra_trail.extra.fwd_decl } }, - .fields = .{ - .extra_index = extra_trail.trail.extra_index, - .len = extra_trail.extra.fields_len, - }, - } }; - }, - .aggregate_union => { - const extra_trail = pool.getExtraTrail(Pool.Aggregate, item.data); - return .{ .aggregate = .{ - .tag = .@"union", - .name = .{ .fwd_decl = .{ .index = extra_trail.extra.fwd_decl } }, - .fields = .{ - .extra_index = extra_trail.trail.extra_index, - .len = extra_trail.extra.fields_len, - }, - } }; - }, - .aggregate_struct_packed => { - const extra_trail = pool.getExtraTrail(Pool.Aggregate, item.data); - return .{ .aggregate = .{ - .tag = .@"struct", - .@"packed" = true, - .name = .{ .fwd_decl = .{ .index = extra_trail.extra.fwd_decl } }, - .fields = .{ - .extra_index = extra_trail.trail.extra_index, - .len = extra_trail.extra.fields_len, - }, - } }; - }, - .aggregate_union_packed => { - const extra_trail = pool.getExtraTrail(Pool.Aggregate, item.data); - return .{ .aggregate = .{ - .tag = .@"union", - .@"packed" = true, - .name = .{ .fwd_decl = .{ .index = extra_trail.extra.fwd_decl } }, - .fields = .{ - .extra_index = extra_trail.trail.extra_index, - .len = extra_trail.extra.fields_len, - }, - } }; - }, - .function => { - const extra_trail = pool.getExtraTrail(Pool.Function, item.data); - return .{ .function = .{ - .return_ctype = .{ .index = extra_trail.extra.return_ctype }, - .param_ctypes = .{ - .extra_index = extra_trail.trail.extra_index, - .len = extra_trail.extra.param_ctypes_len, - }, - .varargs = false, - } }; - }, - .function_varargs => { - const extra_trail = pool.getExtraTrail(Pool.Function, item.data); - return .{ .function = .{ - .return_ctype = .{ .index = extra_trail.extra.return_ctype }, - 
.param_ctypes = .{ - .extra_index = extra_trail.trail.extra_index, - .len = extra_trail.extra.param_ctypes_len, - }, - .varargs = true, - } }; - }, - } -} - -pub fn hash(ctype: CType, pool: *const Pool) Pool.Map.Hash { - return if (ctype.toPoolIndex()) |pool_index| - pool.map.entries.items(.hash)[pool_index] - else - CType.Index.basic_hashes[@intFromEnum(ctype.index)]; -} - -fn toForward(ctype: CType, pool: *Pool, allocator: std.mem.Allocator) !CType { - return switch (ctype.info(pool)) { - .basic, .pointer, .fwd_decl => ctype, - .aligned => |aligned_info| pool.getAligned(allocator, .{ - .ctype = try aligned_info.ctype.toForward(pool, allocator), - .alignas = aligned_info.alignas, - }), - .array => |array_info| pool.getArray(allocator, .{ - .elem_ctype = try array_info.elem_ctype.toForward(pool, allocator), - .len = array_info.len, - .nonstring = array_info.nonstring, - }), - .vector => |vector_info| pool.getVector(allocator, .{ - .elem_ctype = try vector_info.elem_ctype.toForward(pool, allocator), - .len = vector_info.len, - .nonstring = vector_info.nonstring, - }), - .aggregate => |aggregate_info| switch (aggregate_info.name) { - .anon => ctype, - .fwd_decl => |fwd_decl| fwd_decl, - }, - .function => unreachable, - }; -} - -const Index = enum(u32) { - void, - - // C basic types - char, - - @"signed char", - short, - int, - long, - @"long long", - - _Bool, - @"unsigned char", - @"unsigned short", - @"unsigned int", - @"unsigned long", - @"unsigned long long", - - float, - double, - @"long double", - - // C header types - // - stdbool.h - bool, - // - stddef.h - size_t, - ptrdiff_t, - // - stdint.h - uint8_t, - int8_t, - uint16_t, - int16_t, - uint32_t, - int32_t, - uint64_t, - int64_t, - uintptr_t, - intptr_t, - // - stdarg.h - va_list, - - // zig.h types - zig_u128, - zig_i128, - zig_f16, - zig_f32, - zig_f64, - zig_f80, - zig_f128, - zig_c_longdouble, - - _, - - const first_pool_index: u32 = @typeInfo(CType.Index).@"enum".fields.len; - const basic_hashes = init: 
{ - @setEvalBranchQuota(1_600); - var basic_hashes_init: [first_pool_index]Pool.Map.Hash = undefined; - for (&basic_hashes_init, 0..) |*basic_hash, index| { - const ctype_index: CType.Index = @enumFromInt(index); - var hasher = Pool.Hasher.init; - hasher.update(@intFromEnum(ctype_index)); - basic_hash.* = hasher.final(.basic); - } - break :init basic_hashes_init; - }; -}; - -const Slice = struct { - extra_index: Pool.ExtraIndex, - len: u32, - - pub fn at(slice: CType.Slice, index: usize, pool: *const Pool) CType { - var extra: Pool.ExtraTrail = .{ .extra_index = slice.extra_index }; - return .{ .index = extra.next(slice.len, CType.Index, pool)[index] }; - } -}; - -pub const Kind = enum { - forward, - forward_parameter, - complete, - global, - parameter, - - pub fn isForward(kind: Kind) bool { - return switch (kind) { - .forward, .forward_parameter => true, - .complete, .global, .parameter => false, - }; - } - - pub fn isParameter(kind: Kind) bool { - return switch (kind) { - .forward_parameter, .parameter => true, - .forward, .complete, .global => false, - }; - } - - pub fn asParameter(kind: Kind) Kind { - return switch (kind) { - .forward, .forward_parameter => .forward_parameter, - .complete, .parameter, .global => .parameter, - }; - } - - pub fn noParameter(kind: Kind) Kind { - return switch (kind) { - .forward, .forward_parameter => .forward, - .complete, .parameter => .complete, - .global => .global, - }; - } - - pub fn asComplete(kind: Kind) Kind { - return switch (kind) { - .forward, .complete => .complete, - .forward_parameter, .parameter => .parameter, - .global => .global, - }; - } -}; - -pub const Info = union(enum) { - basic: CType.Index, - pointer: Pointer, - aligned: Aligned, - array: Sequence, - vector: Sequence, - fwd_decl: FwdDecl, - aggregate: Aggregate, - function: Function, - - const Tag = @typeInfo(Info).@"union".tag_type.?; - - pub const Pointer = struct { - elem_ctype: CType, - @"const": bool = false, - @"volatile": bool = false, - nonstring: 
bool = false, - - fn tag(pointer_info: Pointer) Pool.Tag { - return @enumFromInt(@intFromEnum(Pool.Tag.pointer) + - @as(u2, @bitCast(packed struct(u2) { - @"const": bool, - @"volatile": bool, - }{ - .@"const" = pointer_info.@"const", - .@"volatile" = pointer_info.@"volatile", - }))); - } - }; - - pub const Aligned = struct { - ctype: CType, - alignas: AlignAs, - }; - - pub const Sequence = struct { - elem_ctype: CType, - len: u64, - nonstring: bool = false, - }; - - pub const AggregateTag = enum { @"enum", @"struct", @"union" }; - - pub const Field = struct { - name: Pool.String, - ctype: CType, - alignas: AlignAs, - - pub const Slice = struct { - extra_index: Pool.ExtraIndex, - len: u32, - - pub fn at(slice: Field.Slice, index: usize, pool: *const Pool) Field { - assert(index < slice.len); - const extra = pool.getExtra(Pool.Field, @intCast(slice.extra_index + - index * @typeInfo(Pool.Field).@"struct".fields.len)); - return .{ - .name = .{ .index = extra.name }, - .ctype = .{ .index = extra.ctype }, - .alignas = extra.flags.alignas, - }; - } - - fn eqlAdapted( - lhs_slice: Field.Slice, - lhs_pool: *const Pool, - rhs_slice: Field.Slice, - rhs_pool: *const Pool, - pool_adapter: anytype, - ) bool { - if (lhs_slice.len != rhs_slice.len) return false; - for (0..lhs_slice.len) |index| { - if (!lhs_slice.at(index, lhs_pool).eqlAdapted( - lhs_pool, - rhs_slice.at(index, rhs_pool), - rhs_pool, - pool_adapter, - )) return false; - } - return true; - } - }; - - fn eqlAdapted( - lhs_field: Field, - lhs_pool: *const Pool, - rhs_field: Field, - rhs_pool: *const Pool, - pool_adapter: anytype, - ) bool { - if (!std.meta.eql(lhs_field.alignas, rhs_field.alignas)) return false; - if (!pool_adapter.eql(lhs_field.ctype, rhs_field.ctype)) return false; - return if (lhs_field.name.toPoolSlice(lhs_pool)) |lhs_name| - if (rhs_field.name.toPoolSlice(rhs_pool)) |rhs_name| - std.mem.eql(u8, lhs_name, rhs_name) - else - false - else - lhs_field.name.index == rhs_field.name.index; - } - }; - - 
pub const FwdDecl = struct { - tag: AggregateTag, - name: union(enum) { - anon: Field.Slice, - index: InternPool.Index, - }, - }; - - pub const Aggregate = struct { - tag: AggregateTag, - @"packed": bool = false, - name: union(enum) { - anon: struct { - index: InternPool.Index, - id: u32, - }, - fwd_decl: CType, - }, - fields: Field.Slice, - }; - - pub const Function = struct { - return_ctype: CType, - param_ctypes: CType.Slice, - varargs: bool = false, - }; - - pub fn eqlAdapted( - lhs_info: Info, - lhs_pool: *const Pool, - rhs_ctype: CType, - rhs_pool: *const Pool, - pool_adapter: anytype, - ) bool { - const rhs_info = rhs_ctype.info(rhs_pool); - if (@as(Info.Tag, lhs_info) != @as(Info.Tag, rhs_info)) return false; - return switch (lhs_info) { - .basic => |lhs_basic_info| lhs_basic_info == rhs_info.basic, - .pointer => |lhs_pointer_info| lhs_pointer_info.@"const" == rhs_info.pointer.@"const" and - lhs_pointer_info.@"volatile" == rhs_info.pointer.@"volatile" and - lhs_pointer_info.nonstring == rhs_info.pointer.nonstring and - pool_adapter.eql(lhs_pointer_info.elem_ctype, rhs_info.pointer.elem_ctype), - .aligned => |lhs_aligned_info| std.meta.eql(lhs_aligned_info.alignas, rhs_info.aligned.alignas) and - pool_adapter.eql(lhs_aligned_info.ctype, rhs_info.aligned.ctype), - .array => |lhs_array_info| lhs_array_info.len == rhs_info.array.len and - lhs_array_info.nonstring == rhs_info.array.nonstring and - pool_adapter.eql(lhs_array_info.elem_ctype, rhs_info.array.elem_ctype), - .vector => |lhs_vector_info| lhs_vector_info.len == rhs_info.vector.len and - lhs_vector_info.nonstring == rhs_info.vector.nonstring and - pool_adapter.eql(lhs_vector_info.elem_ctype, rhs_info.vector.elem_ctype), - .fwd_decl => |lhs_fwd_decl_info| lhs_fwd_decl_info.tag == rhs_info.fwd_decl.tag and - switch (lhs_fwd_decl_info.name) { - .anon => |lhs_anon| rhs_info.fwd_decl.name == .anon and lhs_anon.eqlAdapted( - lhs_pool, - rhs_info.fwd_decl.name.anon, - rhs_pool, - pool_adapter, - ), - .index => 
|lhs_index| rhs_info.fwd_decl.name == .index and - lhs_index == rhs_info.fwd_decl.name.index, - }, - .aggregate => |lhs_aggregate_info| lhs_aggregate_info.tag == rhs_info.aggregate.tag and - lhs_aggregate_info.@"packed" == rhs_info.aggregate.@"packed" and - switch (lhs_aggregate_info.name) { - .anon => |lhs_anon| rhs_info.aggregate.name == .anon and - lhs_anon.index == rhs_info.aggregate.name.anon.index and - lhs_anon.id == rhs_info.aggregate.name.anon.id, - .fwd_decl => |lhs_fwd_decl| rhs_info.aggregate.name == .fwd_decl and - pool_adapter.eql(lhs_fwd_decl, rhs_info.aggregate.name.fwd_decl), - } and lhs_aggregate_info.fields.eqlAdapted( - lhs_pool, - rhs_info.aggregate.fields, - rhs_pool, - pool_adapter, - ), - .function => |lhs_function_info| lhs_function_info.param_ctypes.len == - rhs_info.function.param_ctypes.len and - pool_adapter.eql(lhs_function_info.return_ctype, rhs_info.function.return_ctype) and - for (0..lhs_function_info.param_ctypes.len) |param_index| { - if (!pool_adapter.eql( - lhs_function_info.param_ctypes.at(param_index, lhs_pool), - rhs_info.function.param_ctypes.at(param_index, rhs_pool), - )) break false; - } else true, - }; - } -}; - -pub const Pool = struct { - map: Map, - items: std.MultiArrayList(Item), - extra: std.ArrayList(u32), - - string_map: Map, - string_indices: std.ArrayList(u32), - string_bytes: std.ArrayList(u8), - - const Map = std.AutoArrayHashMapUnmanaged(void, void); - - pub const String = struct { - index: String.Index, - - const FormatData = struct { string: String, pool: *const Pool }; - fn format(data: FormatData, writer: *Writer) Writer.Error!void { - if (data.string.toSlice(data.pool)) |slice| - try writer.writeAll(slice) - else - try writer.print("f{d}", .{@intFromEnum(data.string.index)}); - } - pub fn fmt(str: String, pool: *const Pool) std.fmt.Alt(FormatData, format) { - return .{ .data = .{ .string = str, .pool = pool } }; - } - - fn fromUnnamed(index: u31) String { - return .{ .index = @enumFromInt(index) }; - } 
- - fn isNamed(str: String) bool { - return @intFromEnum(str.index) >= String.Index.first_named_index; - } - - pub fn toSlice(str: String, pool: *const Pool) ?[]const u8 { - return str.toPoolSlice(pool) orelse if (str.isNamed()) @tagName(str.index) else null; - } - - fn toPoolSlice(str: String, pool: *const Pool) ?[]const u8 { - if (str.toPoolIndex()) |pool_index| { - const start = pool.string_indices.items[pool_index + 0]; - const end = pool.string_indices.items[pool_index + 1]; - return pool.string_bytes.items[start..end]; - } else return null; - } - - fn fromPoolIndex(pool_index: usize) String { - return .{ .index = @enumFromInt(String.Index.first_pool_index + pool_index) }; - } - - fn toPoolIndex(str: String) ?u32 { - const pool_index, const is_null = - @subWithOverflow(@intFromEnum(str.index), String.Index.first_pool_index); - return switch (is_null) { - 0 => pool_index, - 1 => null, - }; - } - - const Index = enum(u32) { - array = first_named_index, - @"error", - is_null, - len, - payload, - ptr, - tag, - _, - - const first_named_index: u32 = 1 << 31; - const first_pool_index: u32 = first_named_index + @typeInfo(String.Index).@"enum".fields.len; - }; - - const Adapter = struct { - pool: *const Pool, - pub fn hash(_: @This(), slice: []const u8) Map.Hash { - return @truncate(Hasher.Impl.hash(1, slice)); - } - pub fn eql(string_adapter: @This(), lhs_slice: []const u8, _: void, rhs_index: usize) bool { - const rhs_string = String.fromPoolIndex(rhs_index); - const rhs_slice = rhs_string.toPoolSlice(string_adapter.pool).?; - return std.mem.eql(u8, lhs_slice, rhs_slice); - } - }; - }; - - pub const empty: Pool = .{ - .map = .empty, - .items = .empty, - .extra = .empty, - - .string_map = .empty, - .string_indices = .empty, - .string_bytes = .empty, - }; - - pub fn init(pool: *Pool, allocator: std.mem.Allocator) !void { - if (pool.string_indices.items.len == 0) - try pool.string_indices.append(allocator, 0); - } - - pub fn deinit(pool: *Pool, allocator: 
std.mem.Allocator) void { - pool.map.deinit(allocator); - pool.items.deinit(allocator); - pool.extra.deinit(allocator); - - pool.string_map.deinit(allocator); - pool.string_indices.deinit(allocator); - pool.string_bytes.deinit(allocator); - - pool.* = undefined; - } - - pub fn move(pool: *Pool) Pool { - defer pool.* = empty; - return pool.*; - } - - pub fn clearRetainingCapacity(pool: *Pool) void { - pool.map.clearRetainingCapacity(); - pool.items.shrinkRetainingCapacity(0); - pool.extra.clearRetainingCapacity(); - - pool.string_map.clearRetainingCapacity(); - pool.string_indices.shrinkRetainingCapacity(1); - pool.string_bytes.clearRetainingCapacity(); - } - - pub fn freeUnusedCapacity(pool: *Pool, allocator: std.mem.Allocator) void { - pool.map.shrinkAndFree(allocator, pool.map.count()); - pool.items.shrinkAndFree(allocator, pool.items.len); - pool.extra.shrinkAndFree(allocator, pool.extra.items.len); - - pool.string_map.shrinkAndFree(allocator, pool.string_map.count()); - pool.string_indices.shrinkAndFree(allocator, pool.string_indices.items.len); - pool.string_bytes.shrinkAndFree(allocator, pool.string_bytes.items.len); - } - - pub fn getPointer(pool: *Pool, allocator: std.mem.Allocator, pointer_info: Info.Pointer) !CType { - var hasher = Hasher.init; - hasher.update(pointer_info.elem_ctype.hash(pool)); - return pool.getNonString(allocator, try pool.tagData( - allocator, - hasher, - pointer_info.tag(), - @intFromEnum(pointer_info.elem_ctype.index), - ), pointer_info.nonstring); - } - - pub fn getAligned(pool: *Pool, allocator: std.mem.Allocator, aligned_info: Info.Aligned) !CType { - return pool.tagExtra(allocator, .aligned, Aligned, .{ - .ctype = aligned_info.ctype.index, - .flags = .{ .alignas = aligned_info.alignas }, - }); - } - - pub fn getArray(pool: *Pool, allocator: std.mem.Allocator, array_info: Info.Sequence) !CType { - return pool.getNonString(allocator, if (std.math.cast(u32, array_info.len)) |small_len| - try pool.tagExtra(allocator, .array_small, 
SequenceSmall, .{ - .elem_ctype = array_info.elem_ctype.index, - .len = small_len, - }) - else - try pool.tagExtra(allocator, .array_large, SequenceLarge, .{ - .elem_ctype = array_info.elem_ctype.index, - .len_lo = @truncate(array_info.len >> 0), - .len_hi = @truncate(array_info.len >> 32), - }), array_info.nonstring); - } - - pub fn getVector(pool: *Pool, allocator: std.mem.Allocator, vector_info: Info.Sequence) !CType { - return pool.getNonString(allocator, try pool.tagExtra(allocator, .vector, SequenceSmall, .{ - .elem_ctype = vector_info.elem_ctype.index, - .len = @intCast(vector_info.len), - }), vector_info.nonstring); - } - - pub fn getNonString( - pool: *Pool, - allocator: std.mem.Allocator, - child_ctype: CType, - nonstring: bool, - ) !CType { - if (!nonstring) return child_ctype; - var hasher = Hasher.init; - hasher.update(child_ctype.hash(pool)); - return pool.tagData(allocator, hasher, .nonstring, @intFromEnum(child_ctype.index)); - } - - pub fn getFwdDecl( - pool: *Pool, - allocator: std.mem.Allocator, - fwd_decl_info: struct { - tag: Info.AggregateTag, - name: union(enum) { - anon: []const Info.Field, - index: InternPool.Index, - }, - }, - ) !CType { - var hasher = Hasher.init; - switch (fwd_decl_info.name) { - .anon => |fields| { - const ExpectedContents = [32]CType; - var stack align(@max( - @alignOf(std.heap.StackFallbackAllocator(0)), - @alignOf(ExpectedContents), - )) = std.heap.stackFallback(@sizeOf(ExpectedContents), allocator); - const stack_allocator = stack.get(); - const field_ctypes = try stack_allocator.alloc(CType, fields.len); - defer stack_allocator.free(field_ctypes); - for (field_ctypes, fields) |*field_ctype, field| - field_ctype.* = try field.ctype.toForward(pool, allocator); - const extra: FwdDeclAnon = .{ .fields_len = @intCast(fields.len) }; - const extra_index = try pool.addExtra( - allocator, - FwdDeclAnon, - extra, - fields.len * @typeInfo(Field).@"struct".fields.len, - ); - for (fields, field_ctypes) |field, field_ctype| 
pool.addHashedExtraAssumeCapacity( - &hasher, - Field, - .{ - .name = field.name.index, - .ctype = field_ctype.index, - .flags = .{ .alignas = field.alignas }, - }, - ); - hasher.updateExtra(FwdDeclAnon, extra, pool); - return pool.tagTrailingExtra(allocator, hasher, switch (fwd_decl_info.tag) { - .@"struct" => .fwd_decl_struct_anon, - .@"union" => .fwd_decl_union_anon, - .@"enum" => unreachable, - }, extra_index); - }, - .index => |index| { - hasher.update(index); - return pool.tagData(allocator, hasher, switch (fwd_decl_info.tag) { - .@"struct" => .fwd_decl_struct, - .@"union" => .fwd_decl_union, - .@"enum" => unreachable, - }, @intFromEnum(index)); - }, - } - } - - pub fn getAggregate( - pool: *Pool, - allocator: std.mem.Allocator, - aggregate_info: struct { - tag: Info.AggregateTag, - @"packed": bool = false, - name: union(enum) { - anon: struct { - index: InternPool.Index, - id: u32, - }, - fwd_decl: CType, - }, - fields: []const Info.Field, - }, - ) !CType { - var hasher = Hasher.init; - switch (aggregate_info.name) { - .anon => |anon| { - const extra: AggregateAnon = .{ - .index = anon.index, - .id = anon.id, - .fields_len = @intCast(aggregate_info.fields.len), - }; - const extra_index = try pool.addExtra( - allocator, - AggregateAnon, - extra, - aggregate_info.fields.len * @typeInfo(Field).@"struct".fields.len, - ); - for (aggregate_info.fields) |field| pool.addHashedExtraAssumeCapacity(&hasher, Field, .{ - .name = field.name.index, - .ctype = field.ctype.index, - .flags = .{ .alignas = field.alignas }, - }); - hasher.updateExtra(AggregateAnon, extra, pool); - return pool.tagTrailingExtra(allocator, hasher, switch (aggregate_info.tag) { - .@"struct" => switch (aggregate_info.@"packed") { - false => .aggregate_struct_anon, - true => .aggregate_struct_packed_anon, - }, - .@"union" => switch (aggregate_info.@"packed") { - false => .aggregate_union_anon, - true => .aggregate_union_packed_anon, - }, - .@"enum" => unreachable, - }, extra_index); - }, - .fwd_decl 
=> |fwd_decl| { - const extra: Aggregate = .{ - .fwd_decl = fwd_decl.index, - .fields_len = @intCast(aggregate_info.fields.len), - }; - const extra_index = try pool.addExtra( - allocator, - Aggregate, - extra, - aggregate_info.fields.len * @typeInfo(Field).@"struct".fields.len, - ); - for (aggregate_info.fields) |field| pool.addHashedExtraAssumeCapacity(&hasher, Field, .{ - .name = field.name.index, - .ctype = field.ctype.index, - .flags = .{ .alignas = field.alignas }, - }); - hasher.updateExtra(Aggregate, extra, pool); - return pool.tagTrailingExtra(allocator, hasher, switch (aggregate_info.tag) { - .@"struct" => switch (aggregate_info.@"packed") { - false => .aggregate_struct, - true => .aggregate_struct_packed, - }, - .@"union" => switch (aggregate_info.@"packed") { - false => .aggregate_union, - true => .aggregate_union_packed, - }, - .@"enum" => unreachable, - }, extra_index); - }, - } - } - - pub fn getFunction( - pool: *Pool, - allocator: std.mem.Allocator, - function_info: struct { - return_ctype: CType, - param_ctypes: []const CType, - varargs: bool = false, - }, - ) !CType { - var hasher = Hasher.init; - const extra: Function = .{ - .return_ctype = function_info.return_ctype.index, - .param_ctypes_len = @intCast(function_info.param_ctypes.len), - }; - const extra_index = try pool.addExtra(allocator, Function, extra, function_info.param_ctypes.len); - for (function_info.param_ctypes) |param_ctype| { - hasher.update(param_ctype.hash(pool)); - pool.extra.appendAssumeCapacity(@intFromEnum(param_ctype.index)); - } - hasher.updateExtra(Function, extra, pool); - return pool.tagTrailingExtra(allocator, hasher, switch (function_info.varargs) { - false => .function, - true => .function_varargs, - }, extra_index); - } - - pub fn fromFields( - pool: *Pool, - allocator: std.mem.Allocator, - tag: Info.AggregateTag, - fields: []Info.Field, - kind: Kind, - ) !CType { - sortFields(fields); - const fwd_decl = try pool.getFwdDecl(allocator, .{ - .tag = tag, - .name = .{ 
.anon = fields }, - }); - return if (kind.isForward()) fwd_decl else pool.getAggregate(allocator, .{ - .tag = tag, - .name = .{ .fwd_decl = fwd_decl }, - .fields = fields, - }); - } - - pub fn fromIntInfo( - pool: *Pool, - allocator: std.mem.Allocator, - int_info: std.builtin.Type.Int, - mod: *Module, - kind: Kind, - ) !CType { - switch (int_info.bits) { - 0 => return .void, - 1...8 => switch (int_info.signedness) { - .signed => return .i8, - .unsigned => return .u8, - }, - 9...16 => switch (int_info.signedness) { - .signed => return .i16, - .unsigned => return .u16, - }, - 17...32 => switch (int_info.signedness) { - .signed => return .i32, - .unsigned => return .u32, - }, - 33...64 => switch (int_info.signedness) { - .signed => return .i64, - .unsigned => return .u64, - }, - 65...128 => switch (int_info.signedness) { - .signed => return .i128, - .unsigned => return .u128, - }, - else => { - const target = &mod.resolved_target.result; - const abi_align_bytes = std.zig.target.intAlignment(target, int_info.bits); - const limb_ctype = try pool.fromIntInfo(allocator, .{ - .signedness = .unsigned, - .bits = @intCast(abi_align_bytes * 8), - }, mod, kind.noParameter()); - const array_ctype = try pool.getArray(allocator, .{ - .len = @divExact(std.zig.target.intByteSize(target, int_info.bits), abi_align_bytes), - .elem_ctype = limb_ctype, - .nonstring = limb_ctype.isAnyChar(), - }); - if (!kind.isParameter()) return array_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = array_ctype, - .alignas = AlignAs.fromAbiAlignment(.fromByteUnits(abi_align_bytes)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - } - } - - pub fn fromType( - pool: *Pool, - allocator: std.mem.Allocator, - scratch: *std.ArrayList(u32), - ty: Type, - pt: Zcu.PerThread, - mod: *Module, - kind: Kind, - ) !CType { - const ip = &pt.zcu.intern_pool; - const zcu = pt.zcu; - switch (ty.toIntern()) { - .u0_type, - .i0_type, - .anyopaque_type, - 
.void_type, - .empty_tuple_type, - .type_type, - .comptime_int_type, - .comptime_float_type, - .null_type, - .undefined_type, - .enum_literal_type, - .optional_type_type, - .manyptr_const_type_type, - .slice_const_type_type, - => return .void, - .u1_type, .u8_type => return .u8, - .i8_type => return .i8, - .u16_type => return .u16, - .i16_type => return .i16, - .u29_type, .u32_type => return .u32, - .i32_type => return .i32, - .u64_type => return .u64, - .i64_type => return .i64, - .u80_type, .u128_type => return .u128, - .i128_type => return .i128, - .u256_type => return pool.fromIntInfo(allocator, .{ - .signedness = .unsigned, - .bits = 256, - }, mod, kind), - .usize_type => return .usize, - .isize_type => return .isize, - .c_char_type => return .{ .index = .char }, - .c_short_type => return .{ .index = .short }, - .c_ushort_type => return .{ .index = .@"unsigned short" }, - .c_int_type => return .{ .index = .int }, - .c_uint_type => return .{ .index = .@"unsigned int" }, - .c_long_type => return .{ .index = .long }, - .c_ulong_type => return .{ .index = .@"unsigned long" }, - .c_longlong_type => return .{ .index = .@"long long" }, - .c_ulonglong_type => return .{ .index = .@"unsigned long long" }, - .c_longdouble_type => return .{ .index = .@"long double" }, - .f16_type => return .f16, - .f32_type => return .f32, - .f64_type => return .f64, - .f80_type => return .f80, - .f128_type => return .f128, - .bool_type, .optional_noreturn_type => return .bool, - .noreturn_type, - .anyframe_type, - .generic_poison_type, - => unreachable, - .anyerror_type, - .anyerror_void_error_union_type, - .adhoc_inferred_error_set_type, - => return pool.fromIntInfo(allocator, .{ - .signedness = .unsigned, - .bits = pt.zcu.errorSetBits(), - }, mod, kind), - - .ptr_usize_type => return pool.getPointer(allocator, .{ - .elem_ctype = .usize, - }), - .ptr_const_comptime_int_type => return pool.getPointer(allocator, .{ - .elem_ctype = .void, - .@"const" = true, - }), - .manyptr_u8_type => 
return pool.getPointer(allocator, .{ - .elem_ctype = .u8, - .nonstring = true, - }), - .manyptr_const_u8_type => return pool.getPointer(allocator, .{ - .elem_ctype = .u8, - .@"const" = true, - .nonstring = true, - }), - .manyptr_const_u8_sentinel_0_type => return pool.getPointer(allocator, .{ - .elem_ctype = .u8, - .@"const" = true, - }), - .slice_const_u8_type => { - const target = &mod.resolved_target.result; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .ptr }, - .ctype = try pool.getPointer(allocator, .{ - .elem_ctype = .u8, - .@"const" = true, - .nonstring = true, - }), - .alignas = AlignAs.fromAbiAlignment(Type.ptrAbiAlignment(target)), - }, - .{ - .name = .{ .index = .len }, - .ctype = .usize, - .alignas = AlignAs.fromAbiAlignment( - .fromByteUnits(std.zig.target.intAlignment(target, target.ptrBitWidth())), - ), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .slice_const_u8_sentinel_0_type => { - const target = &mod.resolved_target.result; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .ptr }, - .ctype = try pool.getPointer(allocator, .{ - .elem_ctype = .u8, - .@"const" = true, - }), - .alignas = AlignAs.fromAbiAlignment(Type.ptrAbiAlignment(target)), - }, - .{ - .name = .{ .index = .len }, - .ctype = .usize, - .alignas = AlignAs.fromAbiAlignment( - .fromByteUnits(std.zig.target.intAlignment(target, target.ptrBitWidth())), - ), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - - .manyptr_const_slice_const_u8_type => { - const target = &mod.resolved_target.result; - var fields: [2]Info.Field = .{ - .{ - .name = .{ .index = .ptr }, - .ctype = try pool.getPointer(allocator, .{ - .elem_ctype = .u8, - .@"const" = true, - .nonstring = true, - }), - .alignas = AlignAs.fromAbiAlignment(Type.ptrAbiAlignment(target)), - }, - .{ - .name = .{ .index = .len }, - .ctype = .usize, - .alignas = AlignAs.fromAbiAlignment( - .fromByteUnits(std.zig.target.intAlignment(target, 
target.ptrBitWidth())), - ), - }, - }; - const slice_const_u8 = try pool.fromFields(allocator, .@"struct", &fields, kind); - return pool.getPointer(allocator, .{ - .elem_ctype = slice_const_u8, - .@"const" = true, - }); - }, - .slice_const_slice_const_u8_type => { - const target = &mod.resolved_target.result; - var fields: [2]Info.Field = .{ - .{ - .name = .{ .index = .ptr }, - .ctype = try pool.getPointer(allocator, .{ - .elem_ctype = .u8, - .@"const" = true, - .nonstring = true, - }), - .alignas = AlignAs.fromAbiAlignment(Type.ptrAbiAlignment(target)), - }, - .{ - .name = .{ .index = .len }, - .ctype = .usize, - .alignas = AlignAs.fromAbiAlignment( - .fromByteUnits(std.zig.target.intAlignment(target, target.ptrBitWidth())), - ), - }, - }; - const slice_const_u8 = try pool.fromFields(allocator, .@"struct", &fields, .forward); - fields = .{ - .{ - .name = .{ .index = .ptr }, - .ctype = try pool.getPointer(allocator, .{ - .elem_ctype = slice_const_u8, - .@"const" = true, - }), - .alignas = AlignAs.fromAbiAlignment(Type.ptrAbiAlignment(target)), - }, - .{ - .name = .{ .index = .len }, - .ctype = .usize, - .alignas = AlignAs.fromAbiAlignment( - .fromByteUnits(std.zig.target.intAlignment(target, target.ptrBitWidth())), - ), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - - .vector_8_i8_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .i8, - .len = 8, - .nonstring = true, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.i8.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_16_i8_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .i8, - .len = 16, - .nonstring = true, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = 
.array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.i8.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_32_i8_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .i8, - .len = 32, - .nonstring = true, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.i8.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_64_i8_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .i8, - .len = 64, - .nonstring = true, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.i8.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_1_u8_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .u8, - .len = 1, - .nonstring = true, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.u8.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_2_u8_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .u8, - .len = 2, - .nonstring = true, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.u8.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_4_u8_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .u8, - .len = 
4, - .nonstring = true, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.u8.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_8_u8_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .u8, - .len = 8, - .nonstring = true, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.u8.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_16_u8_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .u8, - .len = 16, - .nonstring = true, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.u8.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_32_u8_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .u8, - .len = 32, - .nonstring = true, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.u8.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_64_u8_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .u8, - .len = 64, - .nonstring = true, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.u8.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, 
.@"struct", &fields, kind); - }, - .vector_2_i16_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .i16, - .len = 2, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.i16.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_4_i16_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .i16, - .len = 4, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.i16.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_8_i16_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .i16, - .len = 8, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.i16.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_16_i16_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .i16, - .len = 16, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.i16.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_32_i16_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .i16, - .len = 32, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.i16.abiAlignment(zcu)), - 
}, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_4_u16_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .u16, - .len = 4, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.u16.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_8_u16_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .u16, - .len = 8, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.u16.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_16_u16_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .u16, - .len = 16, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.u16.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_32_u16_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .u16, - .len = 32, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.u16.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_2_i32_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .i32, - .len = 2, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = 
AlignAs.fromAbiAlignment(Type.i32.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_4_i32_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .i32, - .len = 4, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.i32.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_8_i32_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .i32, - .len = 8, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.i32.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_16_i32_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .i32, - .len = 16, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.i32.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_4_u32_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .u32, - .len = 4, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.u32.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_8_u32_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .u32, - .len = 8, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = 
.array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.u32.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_16_u32_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .u32, - .len = 16, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.u32.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_2_i64_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .i64, - .len = 2, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.i64.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_4_i64_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .i64, - .len = 4, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.i64.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_8_i64_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .i64, - .len = 8, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.i64.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_2_u64_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .u64, - .len = 2, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = 
[_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.u64.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_4_u64_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .u64, - .len = 4, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.u64.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_8_u64_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .u64, - .len = 8, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.u64.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_1_u128_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .u128, - .len = 1, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.u128.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_2_u128_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .u128, - .len = 2, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.u128.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_1_u256_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = try pool.fromIntInfo(allocator, .{ - 
.signedness = .unsigned, - .bits = 256, - }, mod, kind), - .len = 1, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.u256.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_4_f16_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .f16, - .len = 4, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.f16.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_8_f16_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .f16, - .len = 8, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.f16.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_16_f16_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .f16, - .len = 16, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.f16.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_32_f16_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .f16, - .len = 32, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.f16.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - 
.vector_2_f32_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .f32, - .len = 2, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.f32.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_4_f32_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .f32, - .len = 4, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.f32.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_8_f32_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .f32, - .len = 8, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.f32.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_16_f32_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .f32, - .len = 16, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.f32.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_2_f64_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .f64, - .len = 2, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.f64.abiAlignment(zcu)), - }, - }; - return 
pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_4_f64_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .f64, - .len = 4, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.f64.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_8_f64_type => { - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = .f64, - .len = 8, - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(Type.f64.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - - .undef, - .undef_bool, - .undef_usize, - .undef_u1, - .zero, - .zero_usize, - .zero_u1, - .zero_u8, - .one, - .one_usize, - .one_u1, - .one_u8, - .four_u8, - .negative_one, - .void_value, - .unreachable_value, - .null_value, - .bool_true, - .bool_false, - .empty_tuple, - .none, - => unreachable, // values, not types - - _ => |ip_index| switch (ip.indexToKey(ip_index)) { - .int_type => |int_info| return pool.fromIntInfo(allocator, int_info, mod, kind), - .ptr_type => |ptr_info| switch (ptr_info.flags.size) { - .one, .many, .c => { - const elem_ctype = elem_ctype: { - if (ptr_info.packed_offset.host_size > 0 and - ptr_info.flags.vector_index == .none) - break :elem_ctype try pool.fromIntInfo(allocator, .{ - .signedness = .unsigned, - .bits = ptr_info.packed_offset.host_size * 8, - }, mod, .forward); - const elem: Info.Aligned = .{ - .ctype = try pool.fromType( - allocator, - scratch, - Type.fromInterned(ptr_info.child), - pt, - mod, - .forward, - ), - .alignas = AlignAs.fromAlignment(.{ - .@"align" = ptr_info.flags.alignment, - .abi = Type.fromInterned(ptr_info.child).abiAlignment(zcu), - }), - }; - 
break :elem_ctype if (elem.alignas.abiOrder().compare(.gte)) - elem.ctype - else - try pool.getAligned(allocator, elem); - }; - const elem_tag: Info.Tag = switch (elem_ctype.info(pool)) { - .aligned => |aligned_info| aligned_info.ctype.info(pool), - else => |elem_tag| elem_tag, - }; - return pool.getPointer(allocator, .{ - .elem_ctype = elem_ctype, - .@"const" = switch (elem_tag) { - .basic, - .pointer, - .aligned, - .array, - .vector, - .fwd_decl, - .aggregate, - => ptr_info.flags.is_const, - .function => false, - }, - .@"volatile" = ptr_info.flags.is_volatile, - .nonstring = elem_ctype.isAnyChar() and switch (ptr_info.sentinel) { - .none => true, - .zero_u8 => false, - else => |sentinel| !Value.fromInterned(sentinel).compareAllWithZero(.eq, zcu), - }, - }); - }, - .slice => { - const target = &mod.resolved_target.result; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .ptr }, - .ctype = try pool.fromType( - allocator, - scratch, - Type.fromInterned(ip.slicePtrType(ip_index)), - pt, - mod, - kind, - ), - .alignas = AlignAs.fromAbiAlignment(Type.ptrAbiAlignment(target)), - }, - .{ - .name = .{ .index = .len }, - .ctype = .usize, - .alignas = AlignAs.fromAbiAlignment( - .fromByteUnits(std.zig.target.intAlignment(target, target.ptrBitWidth())), - ), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - }, - .array_type => |array_info| { - const len = array_info.lenIncludingSentinel(); - if (len == 0) return .void; - const elem_type = Type.fromInterned(array_info.child); - const elem_ctype = try pool.fromType( - allocator, - scratch, - elem_type, - pt, - mod, - kind.noParameter().asComplete(), - ); - if (elem_ctype.index == .void) return .void; - const array_ctype = try pool.getArray(allocator, .{ - .elem_ctype = elem_ctype, - .len = len, - .nonstring = elem_ctype.isAnyChar() and switch (array_info.sentinel) { - .none => true, - .zero_u8 => false, - else => |sentinel| !Value.fromInterned(sentinel).compareAllWithZero(.eq, zcu), - }, 
- }); - if (!kind.isParameter()) return array_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = array_ctype, - .alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .vector_type => |vector_info| { - if (vector_info.len == 0) return .void; - const elem_type = Type.fromInterned(vector_info.child); - const elem_ctype = try pool.fromType( - allocator, - scratch, - elem_type, - pt, - mod, - kind.noParameter().asComplete(), - ); - if (elem_ctype.index == .void) return .void; - const vector_ctype = try pool.getVector(allocator, .{ - .elem_ctype = elem_ctype, - .len = vector_info.len, - .nonstring = elem_ctype.isAnyChar(), - }); - if (!kind.isParameter()) return vector_ctype; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .array }, - .ctype = vector_ctype, - .alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .opt_type => |payload_type| { - if (Type.fromInterned(payload_type).isNoReturn(zcu)) return .void; - const payload_ctype = try pool.fromType( - allocator, - scratch, - Type.fromInterned(payload_type), - pt, - mod, - kind.noParameter(), - ); - if (payload_ctype.index == .void) return .bool; - switch (payload_type) { - .anyerror_type => return payload_ctype, - else => switch (ip.indexToKey(payload_type)) { - .ptr_type => |payload_ptr_info| if (payload_ptr_info.flags.size != .c and - !payload_ptr_info.flags.is_allowzero) return payload_ctype, - .error_set_type, .inferred_error_set_type => return payload_ctype, - else => {}, - }, - } - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .is_null }, - .ctype = .bool, - .alignas = AlignAs.fromAbiAlignment(.@"1"), - }, - .{ - .name = .{ .index = .payload }, - .ctype = payload_ctype, - .alignas = AlignAs.fromAbiAlignment( - Type.fromInterned(payload_type).abiAlignment(zcu), - ), - }, - }; - 
return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .anyframe_type => unreachable, - .error_union_type => |error_union_info| { - const error_set_bits = pt.zcu.errorSetBits(); - const error_set_ctype = try pool.fromIntInfo(allocator, .{ - .signedness = .unsigned, - .bits = error_set_bits, - }, mod, kind); - if (Type.fromInterned(error_union_info.payload_type).isNoReturn(zcu)) return error_set_ctype; - const payload_type = Type.fromInterned(error_union_info.payload_type); - const payload_ctype = try pool.fromType( - allocator, - scratch, - payload_type, - pt, - mod, - kind.noParameter(), - ); - if (payload_ctype.index == .void) return error_set_ctype; - const target = &mod.resolved_target.result; - var fields = [_]Info.Field{ - .{ - .name = .{ .index = .@"error" }, - .ctype = error_set_ctype, - .alignas = AlignAs.fromAbiAlignment( - .fromByteUnits(std.zig.target.intAlignment(target, error_set_bits)), - ), - }, - .{ - .name = .{ .index = .payload }, - .ctype = payload_ctype, - .alignas = AlignAs.fromAbiAlignment(payload_type.abiAlignment(zcu)), - }, - }; - return pool.fromFields(allocator, .@"struct", &fields, kind); - }, - .simple_type => unreachable, - .struct_type => { - const loaded_struct = ip.loadStructType(ip_index); - switch (loaded_struct.layout) { - .auto, .@"extern" => { - const fwd_decl = try pool.getFwdDecl(allocator, .{ - .tag = .@"struct", - .name = .{ .index = ip_index }, - }); - if (kind.isForward()) return if (ty.hasRuntimeBits(zcu)) - fwd_decl - else - .void; - const scratch_top = scratch.items.len; - defer scratch.shrinkRetainingCapacity(scratch_top); - try scratch.ensureUnusedCapacity( - allocator, - loaded_struct.field_types.len * @typeInfo(Field).@"struct".fields.len, - ); - var hasher = Hasher.init; - var tag: Pool.Tag = .aggregate_struct; - var field_it = loaded_struct.iterateRuntimeOrder(ip); - while (field_it.next()) |field_index| { - const field_type = Type.fromInterned( - loaded_struct.field_types.get(ip)[field_index], - 
); - const field_ctype = try pool.fromType( - allocator, - scratch, - field_type, - pt, - mod, - kind.noParameter(), - ); - if (field_ctype.index == .void) continue; - const field_name = try pool.string(allocator, loaded_struct.field_names.get(ip)[field_index].toSlice(ip)); - const field_alignas = AlignAs.fromAlignment(.{ - .@"align" = loaded_struct.field_aligns.getOrNone(ip, field_index), - .abi = field_type.abiAlignment(zcu), - }); - pool.addHashedExtraAssumeCapacityTo(scratch, &hasher, Field, .{ - .name = field_name.index, - .ctype = field_ctype.index, - .flags = .{ .alignas = field_alignas }, - }); - if (field_alignas.abiOrder().compare(.lt)) - tag = .aggregate_struct_packed; - } - const fields_len: u32 = @intCast(@divExact( - scratch.items.len - scratch_top, - @typeInfo(Field).@"struct".fields.len, - )); - if (fields_len == 0) return .void; - try pool.ensureUnusedCapacity(allocator, 1); - const extra_index = try pool.addHashedExtra(allocator, &hasher, Aggregate, .{ - .fwd_decl = fwd_decl.index, - .fields_len = fields_len, - }, fields_len * @typeInfo(Field).@"struct".fields.len); - pool.extra.appendSliceAssumeCapacity(scratch.items[scratch_top..]); - return pool.tagTrailingExtraAssumeCapacity(hasher, tag, extra_index); - }, - .@"packed" => return pool.fromType( - allocator, - scratch, - .fromInterned(loaded_struct.packed_backing_int_type), - pt, - mod, - kind, - ), - } - }, - .tuple_type => |tuple_info| { - const scratch_top = scratch.items.len; - defer scratch.shrinkRetainingCapacity(scratch_top); - try scratch.ensureUnusedCapacity(allocator, tuple_info.types.len * - @typeInfo(Field).@"struct".fields.len); - var hasher = Hasher.init; - for (0..tuple_info.types.len) |field_index| { - if (tuple_info.values.get(ip)[field_index] != .none) continue; - const field_type = Type.fromInterned( - tuple_info.types.get(ip)[field_index], - ); - const field_ctype = try pool.fromType( - allocator, - scratch, - field_type, - pt, - mod, - kind.noParameter(), - ); - if 
(field_ctype.index == .void) continue; - const field_name = try pool.fmt(allocator, "f{d}", .{field_index}); - pool.addHashedExtraAssumeCapacityTo(scratch, &hasher, Field, .{ - .name = field_name.index, - .ctype = field_ctype.index, - .flags = .{ .alignas = AlignAs.fromAbiAlignment( - field_type.abiAlignment(zcu), - ) }, - }); - } - const fields_len: u32 = @intCast(@divExact( - scratch.items.len - scratch_top, - @typeInfo(Field).@"struct".fields.len, - )); - if (fields_len == 0) return .void; - if (kind.isForward()) { - try pool.ensureUnusedCapacity(allocator, 1); - const extra_index = try pool.addHashedExtra( - allocator, - &hasher, - FwdDeclAnon, - .{ .fields_len = fields_len }, - fields_len * @typeInfo(Field).@"struct".fields.len, - ); - pool.extra.appendSliceAssumeCapacity(scratch.items[scratch_top..]); - return pool.tagTrailingExtra( - allocator, - hasher, - .fwd_decl_struct_anon, - extra_index, - ); - } - const fwd_decl = try pool.fromType(allocator, scratch, ty, pt, mod, .forward); - try pool.ensureUnusedCapacity(allocator, 1); - const extra_index = try pool.addHashedExtra(allocator, &hasher, Aggregate, .{ - .fwd_decl = fwd_decl.index, - .fields_len = fields_len, - }, fields_len * @typeInfo(Field).@"struct".fields.len); - pool.extra.appendSliceAssumeCapacity(scratch.items[scratch_top..]); - return pool.tagTrailingExtraAssumeCapacity(hasher, .aggregate_struct, extra_index); - }, - .union_type => { - const loaded_union = ip.loadUnionType(ip_index); - switch (loaded_union.layout) { - .auto, .@"extern" => { - const fwd_decl = try pool.getFwdDecl(allocator, .{ - .tag = if (loaded_union.has_runtime_tag) .@"struct" else .@"union", - .name = .{ .index = ip_index }, - }); - if (kind.isForward()) return if (ty.hasRuntimeBits(zcu)) - fwd_decl - else - .void; - const loaded_tag = ip.loadEnumType(loaded_union.enum_tag_type); - const scratch_top = scratch.items.len; - defer scratch.shrinkRetainingCapacity(scratch_top); - try scratch.ensureUnusedCapacity( - allocator, - 
loaded_union.field_types.len * @typeInfo(Field).@"struct".fields.len, - ); - var hasher = Hasher.init; - var tag: Pool.Tag = .aggregate_union; - var payload_align: InternPool.Alignment = .@"1"; - for (0..loaded_union.field_types.len) |field_index| { - const field_type = Type.fromInterned( - loaded_union.field_types.get(ip)[field_index], - ); - if (field_type.isNoReturn(zcu)) continue; - const field_ctype = try pool.fromType( - allocator, - scratch, - field_type, - pt, - mod, - kind.noParameter(), - ); - if (field_ctype.index == .void) continue; - const field_name = try pool.string( - allocator, - loaded_tag.field_names.get(ip)[field_index].toSlice(ip), - ); - const field_alignas = AlignAs.fromAlignment(.{ - .@"align" = loaded_union.field_aligns.getOrNone(ip, field_index), - .abi = field_type.abiAlignment(zcu), - }); - pool.addHashedExtraAssumeCapacityTo(scratch, &hasher, Field, .{ - .name = field_name.index, - .ctype = field_ctype.index, - .flags = .{ .alignas = field_alignas }, - }); - if (field_alignas.abiOrder().compare(.lt)) - tag = .aggregate_union_packed; - payload_align = payload_align.maxStrict(field_alignas.@"align"); - } - const fields_len: u32 = @intCast(@divExact( - scratch.items.len - scratch_top, - @typeInfo(Field).@"struct".fields.len, - )); - if (!loaded_union.has_runtime_tag) { - if (fields_len == 0) return .void; - try pool.ensureUnusedCapacity(allocator, 1); - const extra_index = try pool.addHashedExtra( - allocator, - &hasher, - Aggregate, - .{ .fwd_decl = fwd_decl.index, .fields_len = fields_len }, - fields_len * @typeInfo(Field).@"struct".fields.len, - ); - pool.extra.appendSliceAssumeCapacity(scratch.items[scratch_top..]); - return pool.tagTrailingExtraAssumeCapacity(hasher, tag, extra_index); - } - try pool.ensureUnusedCapacity(allocator, 2); - var struct_fields: [2]Info.Field = undefined; - var struct_fields_len: usize = 0; - const tag_type = Type.fromInterned(loaded_tag.int_tag_type); - const tag_ctype: CType = try pool.fromType( - 
allocator, - scratch, - tag_type, - pt, - mod, - kind.noParameter(), - ); - if (tag_ctype.index != .void) { - struct_fields[struct_fields_len] = .{ - .name = .{ .index = .tag }, - .ctype = tag_ctype, - .alignas = AlignAs.fromAbiAlignment(tag_type.abiAlignment(zcu)), - }; - struct_fields_len += 1; - } - if (fields_len > 0) { - const payload_ctype = payload_ctype: { - const extra_index = try pool.addHashedExtra( - allocator, - &hasher, - AggregateAnon, - .{ - .index = ip_index, - .id = 0, - .fields_len = fields_len, - }, - fields_len * @typeInfo(Field).@"struct".fields.len, - ); - pool.extra.appendSliceAssumeCapacity(scratch.items[scratch_top..]); - break :payload_ctype pool.tagTrailingExtraAssumeCapacity( - hasher, - switch (tag) { - .aggregate_union => .aggregate_union_anon, - .aggregate_union_packed => .aggregate_union_packed_anon, - else => unreachable, - }, - extra_index, - ); - }; - if (payload_ctype.index != .void) { - struct_fields[struct_fields_len] = .{ - .name = .{ .index = .payload }, - .ctype = payload_ctype, - .alignas = AlignAs.fromAbiAlignment(payload_align), - }; - struct_fields_len += 1; - } - } - if (struct_fields_len == 0) return .void; - sortFields(struct_fields[0..struct_fields_len]); - return pool.getAggregate(allocator, .{ - .tag = .@"struct", - .name = .{ .fwd_decl = fwd_decl }, - .fields = struct_fields[0..struct_fields_len], - }); - }, - .@"packed" => return pool.fromIntInfo(allocator, .{ - .signedness = .unsigned, - .bits = @intCast(ty.bitSize(zcu)), - }, mod, kind), - } - }, - .opaque_type => return .void, - .enum_type => return pool.fromType( - allocator, - scratch, - .fromInterned(ip.loadEnumType(ip_index).int_tag_type), - pt, - mod, - kind, - ), - .func_type => |func_info| { - if (!ty.fnHasRuntimeBits(zcu)) return .void; - - const scratch_top = scratch.items.len; - defer scratch.shrinkRetainingCapacity(scratch_top); - try scratch.ensureUnusedCapacity(allocator, func_info.param_types.len); - var hasher = Hasher.init; - const return_type 
= Type.fromInterned(func_info.return_type); - const return_ctype: CType = - if (!Type.fromInterned(func_info.return_type).isNoReturn(zcu)) try pool.fromType( - allocator, - scratch, - return_type, - pt, - mod, - kind.asParameter(), - ) else .void; - for (0..func_info.param_types.len) |param_index| { - const param_type = Type.fromInterned( - func_info.param_types.get(ip)[param_index], - ); - const param_ctype = try pool.fromType( - allocator, - scratch, - param_type, - pt, - mod, - kind.asParameter(), - ); - if (param_ctype.index == .void) continue; - hasher.update(param_ctype.hash(pool)); - scratch.appendAssumeCapacity(@intFromEnum(param_ctype.index)); - } - const param_ctypes_len: u32 = @intCast(scratch.items.len - scratch_top); - try pool.ensureUnusedCapacity(allocator, 1); - const extra_index = try pool.addHashedExtra(allocator, &hasher, Function, .{ - .return_ctype = return_ctype.index, - .param_ctypes_len = param_ctypes_len, - }, param_ctypes_len); - pool.extra.appendSliceAssumeCapacity(scratch.items[scratch_top..]); - return pool.tagTrailingExtraAssumeCapacity(hasher, switch (func_info.is_var_args) { - false => .function, - true => .function_varargs, - }, extra_index); - }, - .error_set_type, - .inferred_error_set_type, - => return pool.fromIntInfo(allocator, .{ - .signedness = .unsigned, - .bits = pt.zcu.errorSetBits(), - }, mod, kind), - - .undef, - .simple_value, - .variable, - .@"extern", - .func, - .int, - .err, - .error_union, - .enum_literal, - .enum_tag, - .float, - .ptr, - .slice, - .opt, - .aggregate, - .un, - .bitpack, - .memoized_call, - => unreachable, // values, not types - }, - } - } - - pub fn getOrPutAdapted( - pool: *Pool, - allocator: std.mem.Allocator, - source_pool: *const Pool, - source_ctype: CType, - pool_adapter: anytype, - ) !struct { CType, bool } { - const tag = source_pool.items.items(.tag)[ - source_ctype.toPoolIndex() orelse return .{ source_ctype, true } - ]; - try pool.ensureUnusedCapacity(allocator, 1); - const CTypeAdapter = 
struct { - pool: *const Pool, - source_pool: *const Pool, - source_info: Info, - pool_adapter: @TypeOf(pool_adapter), - pub fn hash(map_adapter: @This(), key_ctype: CType) Map.Hash { - return key_ctype.hash(map_adapter.source_pool); - } - pub fn eql(map_adapter: @This(), _: CType, _: void, pool_index: usize) bool { - return map_adapter.source_info.eqlAdapted( - map_adapter.source_pool, - .fromPoolIndex(pool_index), - map_adapter.pool, - map_adapter.pool_adapter, - ); - } - }; - const source_info = source_ctype.info(source_pool); - const gop = pool.map.getOrPutAssumeCapacityAdapted(source_ctype, CTypeAdapter{ - .pool = pool, - .source_pool = source_pool, - .source_info = source_info, - .pool_adapter = pool_adapter, - }); - errdefer _ = pool.map.pop(); - const ctype: CType = .fromPoolIndex(gop.index); - if (!gop.found_existing) switch (source_info) { - .basic => unreachable, - .pointer => |pointer_info| pool.items.appendAssumeCapacity(switch (pointer_info.nonstring) { - false => .{ - .tag = tag, - .data = @intFromEnum(pool_adapter.copy(pointer_info.elem_ctype).index), - }, - true => .{ - .tag = .nonstring, - .data = @intFromEnum(pool_adapter.copy(.{ .index = @enumFromInt( - source_pool.items.items(.data)[source_ctype.toPoolIndex().?], - ) }).index), - }, - }), - .aligned => |aligned_info| pool.items.appendAssumeCapacity(.{ - .tag = tag, - .data = try pool.addExtra(allocator, Aligned, .{ - .ctype = pool_adapter.copy(aligned_info.ctype).index, - .flags = .{ .alignas = aligned_info.alignas }, - }, 0), - }), - .array, .vector => |sequence_info| pool.items.appendAssumeCapacity(switch (sequence_info.nonstring) { - false => .{ - .tag = tag, - .data = switch (tag) { - .array_small, .vector => try pool.addExtra(allocator, SequenceSmall, .{ - .elem_ctype = pool_adapter.copy(sequence_info.elem_ctype).index, - .len = @intCast(sequence_info.len), - }, 0), - .array_large => try pool.addExtra(allocator, SequenceLarge, .{ - .elem_ctype = 
pool_adapter.copy(sequence_info.elem_ctype).index, - .len_lo = @truncate(sequence_info.len >> 0), - .len_hi = @truncate(sequence_info.len >> 32), - }, 0), - else => unreachable, - }, - }, - true => .{ - .tag = .nonstring, - .data = @intFromEnum(pool_adapter.copy(.{ .index = @enumFromInt( - source_pool.items.items(.data)[source_ctype.toPoolIndex().?], - ) }).index), - }, - }), - .fwd_decl => |fwd_decl_info| switch (fwd_decl_info.name) { - .anon => |fields| { - pool.items.appendAssumeCapacity(.{ - .tag = tag, - .data = try pool.addExtra(allocator, FwdDeclAnon, .{ - .fields_len = fields.len, - }, fields.len * @typeInfo(Field).@"struct".fields.len), - }); - for (0..fields.len) |field_index| { - const field = fields.at(field_index, source_pool); - const field_name = if (field.name.toPoolSlice(source_pool)) |slice| - try pool.string(allocator, slice) - else - field.name; - pool.addExtraAssumeCapacity(Field, .{ - .name = field_name.index, - .ctype = pool_adapter.copy(field.ctype).index, - .flags = .{ .alignas = field.alignas }, - }); - } - }, - .index => |index| pool.items.appendAssumeCapacity(.{ - .tag = tag, - .data = @intFromEnum(index), - }), - }, - .aggregate => |aggregate_info| { - pool.items.appendAssumeCapacity(.{ - .tag = tag, - .data = switch (aggregate_info.name) { - .anon => |anon| try pool.addExtra(allocator, AggregateAnon, .{ - .index = anon.index, - .id = anon.id, - .fields_len = aggregate_info.fields.len, - }, aggregate_info.fields.len * @typeInfo(Field).@"struct".fields.len), - .fwd_decl => |fwd_decl| try pool.addExtra(allocator, Aggregate, .{ - .fwd_decl = pool_adapter.copy(fwd_decl).index, - .fields_len = aggregate_info.fields.len, - }, aggregate_info.fields.len * @typeInfo(Field).@"struct".fields.len), - }, - }); - for (0..aggregate_info.fields.len) |field_index| { - const field = aggregate_info.fields.at(field_index, source_pool); - const field_name = if (field.name.toPoolSlice(source_pool)) |slice| - try pool.string(allocator, slice) - else - 
field.name; - pool.addExtraAssumeCapacity(Field, .{ - .name = field_name.index, - .ctype = pool_adapter.copy(field.ctype).index, - .flags = .{ .alignas = field.alignas }, - }); - } - }, - .function => |function_info| { - pool.items.appendAssumeCapacity(.{ - .tag = tag, - .data = try pool.addExtra(allocator, Function, .{ - .return_ctype = pool_adapter.copy(function_info.return_ctype).index, - .param_ctypes_len = function_info.param_ctypes.len, - }, function_info.param_ctypes.len), - }); - for (0..function_info.param_ctypes.len) |param_index| pool.extra.appendAssumeCapacity( - @intFromEnum(pool_adapter.copy( - function_info.param_ctypes.at(param_index, source_pool), - ).index), - ); - }, - }; - assert(source_info.eqlAdapted(source_pool, ctype, pool, pool_adapter)); - assert(source_ctype.hash(source_pool) == ctype.hash(pool)); - return .{ ctype, gop.found_existing }; - } - - pub fn string(pool: *Pool, allocator: std.mem.Allocator, slice: []const u8) !String { - try pool.string_bytes.appendSlice(allocator, slice); - return pool.trailingString(allocator); - } - - pub fn fmt( - pool: *Pool, - allocator: std.mem.Allocator, - comptime fmt_str: []const u8, - fmt_args: anytype, - ) !String { - try pool.string_bytes.print(allocator, fmt_str, fmt_args); - return pool.trailingString(allocator); - } - - fn ensureUnusedCapacity(pool: *Pool, allocator: std.mem.Allocator, len: u32) !void { - try pool.map.ensureUnusedCapacity(allocator, len); - try pool.items.ensureUnusedCapacity(allocator, len); - } - - const Hasher = struct { - const Impl = std.hash.Wyhash; - impl: Impl, - - const init: Hasher = .{ .impl = Impl.init(0) }; - - fn updateExtra(hasher: *Hasher, comptime Extra: type, extra: Extra, pool: *const Pool) void { - inline for (@typeInfo(Extra).@"struct".fields) |field| { - const value = @field(extra, field.name); - switch (field.type) { - Pool.Tag, String, CType => unreachable, - CType.Index => hasher.update((CType{ .index = value }).hash(pool)), - String.Index => if 
((String{ .index = value }).toPoolSlice(pool)) |slice| - hasher.update(slice) - else - hasher.update(@intFromEnum(value)), - else => hasher.update(value), - } - } - } - fn update(hasher: *Hasher, data: anytype) void { - switch (@TypeOf(data)) { - Pool.Tag => @compileError("pass tag to final"), - CType, CType.Index => @compileError("hash ctype.hash(pool) instead"), - String, String.Index => @compileError("hash string.slice(pool) instead"), - u32, InternPool.Index, Aligned.Flags => hasher.impl.update(std.mem.asBytes(&data)), - []const u8 => hasher.impl.update(data), - else => @compileError("unhandled type: " ++ @typeName(@TypeOf(data))), - } - } - - fn final(hasher: Hasher, tag: Pool.Tag) Map.Hash { - var impl = hasher.impl; - impl.update(std.mem.asBytes(&tag)); - return @truncate(impl.final()); - } - }; - - fn tagData( - pool: *Pool, - allocator: std.mem.Allocator, - hasher: Hasher, - tag: Pool.Tag, - data: u32, - ) !CType { - try pool.ensureUnusedCapacity(allocator, 1); - const Key = struct { hash: Map.Hash, tag: Pool.Tag, data: u32 }; - const CTypeAdapter = struct { - pool: *const Pool, - pub fn hash(_: @This(), key: Key) Map.Hash { - return key.hash; - } - pub fn eql(ctype_adapter: @This(), lhs_key: Key, _: void, rhs_index: usize) bool { - const rhs_item = ctype_adapter.pool.items.get(rhs_index); - return lhs_key.tag == rhs_item.tag and lhs_key.data == rhs_item.data; - } - }; - const gop = pool.map.getOrPutAssumeCapacityAdapted( - Key{ .hash = hasher.final(tag), .tag = tag, .data = data }, - CTypeAdapter{ .pool = pool }, - ); - if (!gop.found_existing) pool.items.appendAssumeCapacity(.{ .tag = tag, .data = data }); - return .fromPoolIndex(gop.index); - } - - fn tagExtra( - pool: *Pool, - allocator: std.mem.Allocator, - tag: Pool.Tag, - comptime Extra: type, - extra: Extra, - ) !CType { - var hasher = Hasher.init; - hasher.updateExtra(Extra, extra, pool); - return pool.tagTrailingExtra( - allocator, - hasher, - tag, - try pool.addExtra(allocator, Extra, extra, 0), 
- ); - } - - fn tagTrailingExtra( - pool: *Pool, - allocator: std.mem.Allocator, - hasher: Hasher, - tag: Pool.Tag, - extra_index: ExtraIndex, - ) !CType { - try pool.ensureUnusedCapacity(allocator, 1); - return pool.tagTrailingExtraAssumeCapacity(hasher, tag, extra_index); - } - - fn tagTrailingExtraAssumeCapacity( - pool: *Pool, - hasher: Hasher, - tag: Pool.Tag, - extra_index: ExtraIndex, - ) CType { - const Key = struct { hash: Map.Hash, tag: Pool.Tag, extra: []const u32 }; - const CTypeAdapter = struct { - pool: *const Pool, - pub fn hash(_: @This(), key: Key) Map.Hash { - return key.hash; - } - pub fn eql(ctype_adapter: @This(), lhs_key: Key, _: void, rhs_index: usize) bool { - const rhs_item = ctype_adapter.pool.items.get(rhs_index); - if (lhs_key.tag != rhs_item.tag) return false; - const rhs_extra = ctype_adapter.pool.extra.items[rhs_item.data..]; - return std.mem.startsWith(u32, rhs_extra, lhs_key.extra); - } - }; - const gop = pool.map.getOrPutAssumeCapacityAdapted( - Key{ .hash = hasher.final(tag), .tag = tag, .extra = pool.extra.items[extra_index..] 
}, - CTypeAdapter{ .pool = pool }, - ); - if (gop.found_existing) - pool.extra.shrinkRetainingCapacity(extra_index) - else - pool.items.appendAssumeCapacity(.{ .tag = tag, .data = extra_index }); - return .fromPoolIndex(gop.index); - } - - fn sortFields(fields: []Info.Field) void { - std.mem.sort(Info.Field, fields, {}, struct { - fn before(_: void, lhs_field: Info.Field, rhs_field: Info.Field) bool { - return lhs_field.alignas.order(rhs_field.alignas).compare(.gt); - } - }.before); - } - - fn trailingString(pool: *Pool, allocator: std.mem.Allocator) !String { - const start = pool.string_indices.getLast(); - const slice: []const u8 = pool.string_bytes.items[start..]; - if (slice.len >= 2 and slice[0] == 'f' and switch (slice[1]) { - '0' => slice.len == 2, - '1'...'9' => true, - else => false, - }) if (std.fmt.parseInt(u31, slice[1..], 10)) |unnamed| { - pool.string_bytes.shrinkRetainingCapacity(start); - return String.fromUnnamed(unnamed); - } else |_| {}; - if (std.meta.stringToEnum(String.Index, slice)) |index| { - pool.string_bytes.shrinkRetainingCapacity(start); - return .{ .index = index }; - } - - try pool.string_map.ensureUnusedCapacity(allocator, 1); - try pool.string_indices.ensureUnusedCapacity(allocator, 1); - - const gop = pool.string_map.getOrPutAssumeCapacityAdapted(slice, String.Adapter{ .pool = pool }); - if (gop.found_existing) - pool.string_bytes.shrinkRetainingCapacity(start) - else - pool.string_indices.appendAssumeCapacity(@intCast(pool.string_bytes.items.len)); - return String.fromPoolIndex(gop.index); - } - - const Item = struct { - tag: Pool.Tag, - data: u32, - }; - - const ExtraIndex = u32; - - const Tag = enum(u8) { - basic, - pointer, - pointer_const, - pointer_volatile, - pointer_const_volatile, - aligned, - array_small, - array_large, - vector, - nonstring, - fwd_decl_struct_anon, - fwd_decl_union_anon, - fwd_decl_struct, - fwd_decl_union, - aggregate_struct_anon, - aggregate_struct_packed_anon, - aggregate_union_anon, - 
aggregate_union_packed_anon, - aggregate_struct, - aggregate_struct_packed, - aggregate_union, - aggregate_union_packed, - function, - function_varargs, - }; - - const Aligned = struct { - ctype: CType.Index, - flags: Flags, - - const Flags = packed struct(u32) { - alignas: AlignAs, - _: u20 = 0, - }; - }; - - const SequenceSmall = struct { - elem_ctype: CType.Index, - len: u32, - }; - - const SequenceLarge = struct { - elem_ctype: CType.Index, - len_lo: u32, - len_hi: u32, - - fn len(extra: SequenceLarge) u64 { - return @as(u64, extra.len_lo) << 0 | - @as(u64, extra.len_hi) << 32; - } - }; - - const Field = struct { - name: String.Index, - ctype: CType.Index, - flags: Flags, - - const Flags = Aligned.Flags; - }; - - const FwdDeclAnon = struct { - fields_len: u32, - }; - - const AggregateAnon = struct { - index: InternPool.Index, - id: u32, - fields_len: u32, - }; - - const Aggregate = struct { - fwd_decl: CType.Index, - fields_len: u32, - }; - - const Function = struct { - return_ctype: CType.Index, - param_ctypes_len: u32, - }; - - fn addExtra( - pool: *Pool, - allocator: std.mem.Allocator, - comptime Extra: type, - extra: Extra, - trailing_len: usize, - ) !ExtraIndex { - try pool.extra.ensureUnusedCapacity( - allocator, - @typeInfo(Extra).@"struct".fields.len + trailing_len, - ); - defer pool.addExtraAssumeCapacity(Extra, extra); - return @intCast(pool.extra.items.len); - } - fn addExtraAssumeCapacity(pool: *Pool, comptime Extra: type, extra: Extra) void { - addExtraAssumeCapacityTo(&pool.extra, Extra, extra); - } - fn addExtraAssumeCapacityTo( - array: *std.ArrayList(u32), - comptime Extra: type, - extra: Extra, - ) void { - inline for (@typeInfo(Extra).@"struct".fields) |field| { - const value = @field(extra, field.name); - array.appendAssumeCapacity(switch (field.type) { - u32 => value, - CType.Index, String.Index, InternPool.Index => @intFromEnum(value), - Aligned.Flags => @bitCast(value), - else => @compileError("bad field type: " ++ field.name ++ ": " ++ - 
@typeName(field.type)), - }); - } - } - - fn addHashedExtra( - pool: *Pool, - allocator: std.mem.Allocator, - hasher: *Hasher, - comptime Extra: type, - extra: Extra, - trailing_len: usize, - ) !ExtraIndex { - hasher.updateExtra(Extra, extra, pool); - return pool.addExtra(allocator, Extra, extra, trailing_len); - } - fn addHashedExtraAssumeCapacity( - pool: *Pool, - hasher: *Hasher, - comptime Extra: type, - extra: Extra, - ) void { - hasher.updateExtra(Extra, extra, pool); - pool.addExtraAssumeCapacity(Extra, extra); - } - fn addHashedExtraAssumeCapacityTo( - pool: *Pool, - array: *std.ArrayList(u32), - hasher: *Hasher, - comptime Extra: type, - extra: Extra, - ) void { - hasher.updateExtra(Extra, extra, pool); - addExtraAssumeCapacityTo(array, Extra, extra); - } - - const ExtraTrail = struct { - extra_index: ExtraIndex, - - fn next( - extra_trail: *ExtraTrail, - len: u32, - comptime Extra: type, - pool: *const Pool, - ) []const Extra { - defer extra_trail.extra_index += @intCast(len); - return @ptrCast(pool.extra.items[extra_trail.extra_index..][0..len]); - } - }; - - fn getExtraTrail( - pool: *const Pool, - comptime Extra: type, - extra_index: ExtraIndex, - ) struct { extra: Extra, trail: ExtraTrail } { - var extra: Extra = undefined; - const fields = @typeInfo(Extra).@"struct".fields; - inline for (fields, pool.extra.items[extra_index..][0..fields.len]) |field, value| - @field(extra, field.name) = switch (field.type) { - u32 => value, - CType.Index, String.Index, InternPool.Index => @enumFromInt(value), - Aligned.Flags => @bitCast(value), - else => @compileError("bad field type: " ++ field.name ++ ": " ++ @typeName(field.type)), - }; - return .{ - .extra = extra, - .trail = .{ .extra_index = extra_index + @as(ExtraIndex, @intCast(fields.len)) }, - }; - } - - fn getExtra(pool: *const Pool, comptime Extra: type, extra_index: ExtraIndex) Extra { - return pool.getExtraTrail(Extra, extra_index).extra; - } -}; - -pub const AlignAs = packed struct { - @"align": 
InternPool.Alignment, - abi: InternPool.Alignment, - - pub fn fromAlignment(alignas: AlignAs) AlignAs { - assert(alignas.abi != .none); - return .{ - .@"align" = if (alignas.@"align" != .none) alignas.@"align" else alignas.abi, - .abi = alignas.abi, - }; - } - pub fn fromAbiAlignment(abi: InternPool.Alignment) AlignAs { - assert(abi != .none); - return .{ .@"align" = abi, .abi = abi }; - } - pub fn fromByteUnits(@"align": u64, abi: u64) AlignAs { - return fromAlignment(.{ - .@"align" = InternPool.Alignment.fromByteUnits(@"align"), - .abi = InternPool.Alignment.fromNonzeroByteUnits(abi), - }); - } - - pub fn order(lhs: AlignAs, rhs: AlignAs) std.math.Order { - return lhs.@"align".order(rhs.@"align"); - } - pub fn abiOrder(alignas: AlignAs) std.math.Order { - return alignas.@"align".order(alignas.abi); - } - pub fn toByteUnits(alignas: AlignAs) u64 { - return alignas.@"align".toByteUnits().?; - } -}; - -const std = @import("std"); -const assert = std.debug.assert; -const Writer = std.Io.Writer; - -const CType = @This(); -const InternPool = @import("../../InternPool.zig"); -const Module = @import("../../Package/Module.zig"); -const Type = @import("../../Type.zig"); -const Value = @import("../../Value.zig"); -const Zcu = @import("../../Zcu.zig"); diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig new file mode 100644 index 0000000000..64e63e4d67 --- /dev/null +++ b/src/codegen/c/type.zig @@ -0,0 +1,1013 @@ +pub const CType = union(enum) { + pub const render_defs = @import("type/render_defs.zig"); + + // The first nodes are primitive types (or standard typedefs). + + void, + bool, + int: Int, + float: Float, + + // These next nodes are all typedefs, structs, or unions. 
+ + @"fn": Type, + @"enum": Type, + bitpack: Type, + @"struct": Type, + union_auto: Type, + union_extern: Type, + slice: Type, + opt: Type, + arr: Type, + vec: Type, + errunion: struct { payload_ty: Type }, + aligned: struct { + ty: Type, + alignment: InternPool.Alignment, + }, + bigint: BigInt, + + // The remaining nodes have children. + + pointer: struct { + @"const": bool, + @"volatile": bool, + elem_ty: *const CType, + nonstring: bool, + }, + array: struct { + len: u64, + elem_ty: *const CType, + nonstring: bool, + }, + function: struct { + param_tys: []const CType, + ret_ty: *const CType, + varargs: bool, + }, + + /// Returns `true` if this node has a postfix operator, meaning an `[...]` or `(...)` appears + /// after the identifier in a declarator with this type. In this case, if this node is wrapped + /// in a pointer type, we will need to add parentheses due to operator precedence. + /// + /// For instance, when lowering a Zig declaration `foo: *const fn (c_int) void`, it would be a + /// bug to write the C declarator as `void *foo(int)`, because the `(int)` suffix declaring the + /// function type has higher precedence than the `*` prefix declaring the pointer type. Instead, + /// this type must be lowered as `void (*foo)(int)`. + fn kind(cty: *const CType) enum { + /// `cty` is just a C type specifier, i.e. a typedef or a named struct/union type. + specifier, + /// `cty` is a C function or array type. It will have a postfix "operator" in its suffix to + /// declare the type, either `(...)` (for a function type) or `[...]` (for an array type). + postfix_op, + /// `cty` is a C pointer type. Its prefix will end with "*". 
+ pointer, + } { + return switch (cty.*) { + .void, + .bool, + .int, + .float, + .@"fn", + .@"enum", + .bitpack, + .@"struct", + .union_auto, + .union_extern, + .slice, + .opt, + .arr, + .vec, + .errunion, + .aligned, + .bigint, + => .specifier, + + .array, + .function, + => .postfix_op, + + .pointer => .pointer, + }; + } + + pub const Int = enum { + char, + + @"unsigned short", + @"unsigned int", + @"unsigned long", + @"unsigned long long", + + @"signed short", + @"signed int", + @"signed long", + @"signed long long", + + uint8_t, + uint16_t, + uint32_t, + uint64_t, + zig_u128, + + int8_t, + int16_t, + int32_t, + int64_t, + zig_i128, + + uintptr_t, + intptr_t, + + pub fn bits(int: Int, target: *const std.Target) u16 { + return switch (int) { + // zig fmt: off + .char => target.cTypeBitSize(.char), + + .@"unsigned short" => target.cTypeBitSize(.ushort), + .@"unsigned int" => target.cTypeBitSize(.uint), + .@"unsigned long" => target.cTypeBitSize(.ulong), + .@"unsigned long long" => target.cTypeBitSize(.ulonglong), + + .@"signed short" => target.cTypeBitSize(.short), + .@"signed int" => target.cTypeBitSize(.int), + .@"signed long" => target.cTypeBitSize(.long), + .@"signed long long" => target.cTypeBitSize(.longlong), + + .uintptr_t, .intptr_t => target.ptrBitWidth(), + + .uint8_t, .int8_t => 8, + .uint16_t, .int16_t => 16, + .uint32_t, .int32_t => 32, + .uint64_t, .int64_t => 64, + .zig_u128, .zig_i128 => 128, + // zig fmt: on + }; + } + }; + + pub const BigInt = struct { + limb_size: LimbSize, + /// Always greater than 1. 
+ limbs_len: u16, + + pub const LimbSize = enum { + @"8", + @"16", + @"32", + @"64", + @"128", + pub fn bits(s: LimbSize) u8 { + return switch (s) { + .@"8" => 8, + .@"16" => 16, + .@"32" => 32, + .@"64" => 64, + .@"128" => 128, + }; + } + pub fn unsigned(s: LimbSize) Int { + return switch (s) { + .@"8" => .uint8_t, + .@"16" => .uint16_t, + .@"32" => .uint32_t, + .@"64" => .uint64_t, + .@"128" => .zig_u128, + }; + } + pub fn signed(s: LimbSize) Int { + return switch (s) { + .@"8" => .int8_t, + .@"16" => .int16_t, + .@"32" => .int32_t, + .@"64" => .int64_t, + .@"128" => .zig_i128, + }; + } + }; + }; + + pub const Float = enum { + @"long double", + zig_f16, + zig_f32, + zig_f64, + zig_f80, + zig_f128, + zig_u128, + zig_i128, + }; + + pub fn isStringElem(cty: CType) bool { + return switch (cty) { + .int => |int| switch (int) { + .char, .int8_t, .uint8_t => true, + else => false, + }, + else => false, + }; + } + + pub fn lower( + ty: Type, + deps: *Dependencies, + arena: Allocator, + zcu: *const Zcu, + ) Allocator.Error!CType { + return lowerInner(ty, false, deps, arena, zcu); + } + fn lowerInner( + start_ty: Type, + allow_incomplete: bool, + deps: *Dependencies, + arena: Allocator, + zcu: *const Zcu, + ) Allocator.Error!CType { + const gpa = zcu.comp.gpa; + const ip = &zcu.intern_pool; + var cur_ty = start_ty; + while (true) { + switch (cur_ty.zigTypeTag(zcu)) { + .type, + .comptime_int, + .comptime_float, + .undefined, + .null, + .enum_literal, + .@"opaque", + .noreturn, + .void, + => return .void, + + .bool => return .bool, + + .int, .error_set => switch (classifyInt(cur_ty, zcu)) { + .void => return .void, + .small => |s| return .{ .int = s }, + .big => |big| { + try deps.bigint.put(gpa, big, {}); + return .{ .bigint = big }; + }, + }, + + .float => return .{ .float = switch (cur_ty.toIntern()) { + .c_longdouble_type => .@"long double", + .f16_type => .zig_f16, + .f32_type => .zig_f32, + .f64_type => .zig_f64, + .f80_type => .zig_f80, + .f128_type => .zig_f128, + 
else => unreachable, + } }, + .vector => { + try deps.addType(gpa, cur_ty, allow_incomplete); + return .{ .vec = cur_ty }; + }, + .array => { + try deps.addType(gpa, cur_ty, allow_incomplete); + return .{ .arr = cur_ty }; + }, + + .pointer => { + const ptr = cur_ty.ptrInfo(zcu); + switch (ptr.flags.size) { + .slice => { + try deps.addType(gpa, cur_ty, allow_incomplete); + return .{ .slice = cur_ty }; + }, + .one, .many, .c => { + const elem_ty: Type = .fromInterned(ptr.child); + const is_fn_ptr = elem_ty.zigTypeTag(zcu) == .@"fn"; + const elem_cty: CType = elem_cty: { + if (ptr.packed_offset.host_size > 0 and ptr.flags.vector_index == .none) { + switch (classifyBitInt(.unsigned, ptr.packed_offset.host_size * 8, zcu)) { + .void => break :elem_cty .void, + .small => |s| break :elem_cty .{ .int = s }, + .big => |big| { + try deps.bigint.put(gpa, big, {}); + break :elem_cty .{ .bigint = big }; + }, + } + } + if (ptr.flags.alignment != .none and !is_fn_ptr) { + // The pointer has an explicit alignment---if it's an underalignment + // then we need to use an "aligned" typedef. 
+ const ptr_align = ptr.flags.alignment; + if (!alwaysHasLayout(elem_ty, ip) or + ptr_align.compareStrict(.lt, elem_ty.abiAlignment(zcu))) + { + const gop = try deps.aligned_type_fwd.getOrPut(gpa, elem_ty.toIntern()); + if (!gop.found_existing) gop.value_ptr.* = 0; + gop.value_ptr.* |= @as(u64, 1) << ptr_align.toLog2Units(); + break :elem_cty .{ .aligned = .{ + .ty = elem_ty, + .alignment = ptr_align, + } }; + } + } + break :elem_cty try .lowerInner(elem_ty, true, deps, arena, zcu); + }; + const elem_cty_buf = try arena.create(CType); + elem_cty_buf.* = elem_cty; + return .{ .pointer = .{ + .@"const" = ptr.flags.is_const and !is_fn_ptr, + .@"volatile" = ptr.flags.is_volatile and !is_fn_ptr, + .elem_ty = elem_cty_buf, + .nonstring = nonstring: { + if (!elem_cty.isStringElem()) break :nonstring false; + if (ptr.sentinel == .none) break :nonstring true; + break :nonstring Value.compareHetero( + .fromInterned(ptr.sentinel), + .neq, + .zero_comptime_int, + zcu, + ); + }, + } }; + }, + } + }, + + .@"fn" => { + const func_type = ip.indexToKey(cur_ty.toIntern()).func_type; + direct: { + const ret_ty: Type = .fromInterned(func_type.return_type); + if (!alwaysHasLayout(ret_ty, ip)) break :direct; + var params_len: usize = 0; // only counts parameter types with runtime bits + for (func_type.param_types.get(ip)) |param_ty_ip| { + const param_ty: Type = .fromInterned(param_ty_ip); + if (!alwaysHasLayout(param_ty, ip)) break :direct; + if (param_ty.hasRuntimeBits(zcu)) params_len += 1; + } + // We can actually write this function type directly! + if (!cur_ty.fnHasRuntimeBits(zcu)) return .void; + const ret_cty_buf = try arena.create(CType); + if (!ret_ty.hasRuntimeBits(zcu)) { + // Incomplete function return types must always be `void`. 
+ ret_cty_buf.* = .void; + } else { + ret_cty_buf.* = try .lowerInner(ret_ty, allow_incomplete, deps, arena, zcu); + } + const param_cty_buf = try arena.alloc(CType, params_len); + var param_index: usize = 0; + for (func_type.param_types.get(ip)) |param_ty_ip| { + const param_ty: Type = .fromInterned(param_ty_ip); + if (!param_ty.hasRuntimeBits(zcu)) continue; + param_cty_buf[param_index] = try .lowerInner(param_ty, allow_incomplete, deps, arena, zcu); + param_index += 1; + } + assert(param_index == params_len); + return .{ .function = .{ + .ret_ty = ret_cty_buf, + .param_tys = param_cty_buf, + .varargs = func_type.is_var_args, + } }; + } + try deps.addType(gpa, cur_ty, allow_incomplete); + return .{ .@"fn" = cur_ty }; + }, + + .@"struct" => { + try deps.addType(gpa, cur_ty, allow_incomplete); + switch (cur_ty.containerLayout(zcu)) { + .auto, .@"extern" => return .{ .@"struct" = cur_ty }, + .@"packed" => return .{ .bitpack = cur_ty }, + } + }, + .@"union" => { + try deps.addType(gpa, cur_ty, allow_incomplete); + switch (cur_ty.containerLayout(zcu)) { + .auto => return .{ .union_auto = cur_ty }, + .@"extern" => return .{ .union_extern = cur_ty }, + .@"packed" => return .{ .bitpack = cur_ty }, + } + }, + .@"enum" => { + try deps.addType(gpa, cur_ty, allow_incomplete); + return .{ .@"enum" = cur_ty }; + }, + + .optional => { + // This query does not require any type resolution. + if (cur_ty.optionalReprIsPayload(zcu)) { + // Either a pointer-like optional, or an optional error set. Just lower the payload. 
+ cur_ty = cur_ty.optionalChild(zcu); + continue; + } + if (alwaysHasLayout(cur_ty, ip)) switch (classifyOptional(cur_ty, zcu)) { + .error_set, .ptr_like, .slice_like => unreachable, // handled above + .npv_payload => return .void, + .opv_payload, .@"struct" => {}, + }; + try deps.addType(gpa, cur_ty, allow_incomplete); + return .{ .opt = cur_ty }; + }, + + .error_union => { + const payload_ty = cur_ty.errorUnionPayload(zcu); + if (allow_incomplete) { + try deps.errunion_type_fwd.put(gpa, payload_ty.toIntern(), {}); + } else { + try deps.errunion_type.put(gpa, payload_ty.toIntern(), {}); + } + return .{ .errunion = .{ + .payload_ty = payload_ty, + } }; + }, + + .frame, + .@"anyframe", + => unreachable, + } + comptime unreachable; + } + } + + pub fn classifyOptional(opt_ty: Type, zcu: *const Zcu) enum { + /// The optional is something like `?noreturn`; it lowers to `void`. + npv_payload, + /// The payload type is an error set; the representation matches that of the error set, with + /// the value 0 representing `null`. + error_set, + /// The payload type is a non-optional pointer; the NULL pointer is used for `null`. + ptr_like, + /// The payload type is a non-optional slice; a NULL pointer field is used for `null`. + slice_like, + /// The optional is something like `?void`; it lowers to a struct, but one containing only + /// one field `is_null` (the payload is omitted). 
+ opv_payload, + /// The optional uses the "default" lowering of a struct with two fields, like this: + /// struct optional_1234 { payload_ty payload; bool is_null; } + @"struct", + } { + const payload_ty = opt_ty.optionalChild(zcu); + if (opt_ty.optionalReprIsPayload(zcu)) { + return switch (payload_ty.zigTypeTag(zcu)) { + .error_set => .error_set, + .pointer => if (payload_ty.isSlice(zcu)) .slice_like else .ptr_like, + else => unreachable, + }; + } else { + return switch (payload_ty.classify(zcu)) { + .no_possible_value => .npv_payload, + .one_possible_value => .opv_payload, + else => .@"struct", + }; + } + } + + pub const IntClass = union(enum) { + /// The integer type is zero-bit, so lowers to `void`. + void, + /// The integer is under 128 bits long, so lowers to this C integer type. + small: Int, + /// The integer is over 128 bits long, so lowers to an array of limbs. + big: BigInt, + }; + + /// Asserts that `ty` is an integer, enum, bitpack, or error set. + pub fn classifyInt(ty: Type, zcu: *const Zcu) IntClass { + const int_ty: Type = switch (ty.zigTypeTag(zcu)) { + .error_set => return classifyBitInt(.unsigned, zcu.errorSetBits(), zcu), + .@"enum" => ty.intTagType(zcu), + .@"struct", .@"union" => ty.bitpackBackingInt(zcu), + .int => ty, + else => unreachable, + }; + switch (int_ty.toIntern()) { + // zig fmt: off + .usize_type => return .{ .small = .uintptr_t }, + .isize_type => return .{ .small = .intptr_t }, + + .c_char_type => return .{ .small = .char }, + + .c_short_type => return .{ .small = .@"signed short" }, + .c_int_type => return .{ .small = .@"signed int" }, + .c_long_type => return .{ .small = .@"signed long" }, + .c_longlong_type => return .{ .small = .@"signed long long" }, + + .c_ushort_type => return .{ .small = .@"unsigned short" }, + .c_uint_type => return .{ .small = .@"unsigned int" }, + .c_ulong_type => return .{ .small = .@"unsigned long" }, + .c_ulonglong_type => return .{ .small = .@"unsigned long long" }, + // zig fmt: on + + else => 
{ + const int = ty.intInfo(zcu); + return classifyBitInt(int.signedness, int.bits, zcu); + }, + } + } + fn classifyBitInt(signedness: std.builtin.Signedness, bits: u16, zcu: *const Zcu) IntClass { + return switch (bits) { + 0 => .void, + 1...8 => switch (signedness) { + .unsigned => .{ .small = .uint8_t }, + .signed => .{ .small = .int8_t }, + }, + 9...16 => switch (signedness) { + .unsigned => .{ .small = .uint16_t }, + .signed => .{ .small = .int16_t }, + }, + 17...32 => switch (signedness) { + .unsigned => .{ .small = .uint32_t }, + .signed => .{ .small = .int32_t }, + }, + 33...64 => switch (signedness) { + .unsigned => .{ .small = .uint64_t }, + .signed => .{ .small = .int64_t }, + }, + 65...128 => switch (signedness) { + .unsigned => .{ .small = .zig_u128 }, + .signed => .{ .small = .zig_i128 }, + }, + else => { + @branchHint(.unlikely); + const target = zcu.getTarget(); + const limb_bytes = std.zig.target.intAlignment(target, bits); + return .{ .big = .{ + .limb_size = switch (limb_bytes) { + 1 => .@"8", + 2 => .@"16", + 4 => .@"32", + 8 => .@"64", + 16 => .@"128", + else => unreachable, + }, + .limbs_len = @divExact( + std.zig.target.intByteSize(target, bits), + limb_bytes, + ), + } }; + }, + }; + } + + /// Describes a set of types which must be declared or completed in the C source file before + /// some string of rendered C code (such as a function), due to said C code using these types. + pub const Dependencies = struct { + /// Key is any Zig type which corresponds to a C `struct`, `union`, or `typedef`. That C + /// type must be declared and complete. + type: std.AutoArrayHashMapUnmanaged(InternPool.Index, void), + + /// Key is a Zig type which is the *payload* of an error union. The C `struct` type + /// corresponding to such an error union must be declared and complete. 
+ /// + /// These are separate from `type` to avoid redundant types for every different error set + /// used with the same payload type---for instance a different C type for every `E!void`. + errunion_type: std.AutoArrayHashMapUnmanaged(InternPool.Index, void), + + /// Like `type`, but the type does not necessarily need to be completed yet: a forward + /// declaration is sufficient. + type_fwd: std.AutoArrayHashMapUnmanaged(InternPool.Index, void), + + /// Like `errunion_type`, but the type does not necessarily need to be completed yet: a + /// forward declaration is sufficient. + errunion_type_fwd: std.AutoArrayHashMapUnmanaged(InternPool.Index, void), + + /// Key is a Zig type; value is a bitmask of alignments. For every bit which is set, an + /// aligned typedef is required. For instance, if bit 3 is set, the C type 'aligned__8_foo' + /// must be declared through `typedef` (but not necessarily completed yet). + aligned_type_fwd: std.AutoArrayHashMapUnmanaged(InternPool.Index, u64), + + /// Key specifies a big-int type whose C `struct` must be declared and complete. 
+ bigint: std.AutoArrayHashMapUnmanaged(BigInt, void), + + pub const empty: Dependencies = .{ + .type = .empty, + .errunion_type = .empty, + .type_fwd = .empty, + .errunion_type_fwd = .empty, + .aligned_type_fwd = .empty, + .bigint = .empty, + }; + + pub fn deinit(deps: *Dependencies, gpa: Allocator) void { + deps.type.deinit(gpa); + deps.errunion_type.deinit(gpa); + deps.type_fwd.deinit(gpa); + deps.errunion_type_fwd.deinit(gpa); + deps.aligned_type_fwd.deinit(gpa); + deps.bigint.deinit(gpa); + } + + pub fn clearRetainingCapacity(deps: *Dependencies) void { + deps.type.clearRetainingCapacity(); + deps.errunion_type.clearRetainingCapacity(); + deps.type_fwd.clearRetainingCapacity(); + deps.errunion_type_fwd.clearRetainingCapacity(); + deps.aligned_type_fwd.clearRetainingCapacity(); + deps.bigint.clearRetainingCapacity(); + } + + pub fn move(deps: *Dependencies) Dependencies { + const moved = deps.*; + deps.* = .empty; + return moved; + } + + fn addType(deps: *Dependencies, gpa: Allocator, ty: Type, allow_incomplete: bool) Allocator.Error!void { + if (allow_incomplete) { + try deps.type_fwd.put(gpa, ty.toIntern(), {}); + } else { + try deps.type.put(gpa, ty.toIntern(), {}); + } + } + }; + + /// Formats the bytes which appear *before* the identifier in a declarator. This includes the + /// type specifier and all "prefix type operators" in the declarator. e.g: + /// * for the declarator "int foo", writes "int " + /// * for the declarator "struct thing *foo", writes "struct thing *" + /// * for the declarator "void *(*foo)(int)", writes "void *(*" + pub fn fmtDeclaratorPrefix(cty: CType, zcu: *const Zcu) Formatter { + return .{ + .cty = cty, + .zcu = zcu, + .kind = .declarator_prefix, + }; + } + /// Formats the bytes which appear *after* the identifier in a declarator. This includes + /// all "postfix type operators" in the declarator.
e.g: + /// * for the declarator "int foo", writes "" + /// * for the declarator "struct thing *foo", writes "" + /// * for the declarator "void *(*foo)(int)", writes ")(int)" + pub fn fmtDeclaratorSuffix(cty: CType, zcu: *const Zcu) Formatter { + return .{ + .cty = cty, + .zcu = zcu, + .kind = .declarator_suffix, + }; + } + /// Like `fmtDeclaratorSuffix`, except never emits a `zig_nonstring` annotation. + pub fn fmtDeclaratorSuffixIgnoreNonstring(cty: CType, zcu: *const Zcu) Formatter { + return .{ + .cty = cty, + .zcu = zcu, + .kind = .declarator_suffix_ignore_nonstring, + }; + } + /// Formats a type's full name, e.g. "int", "struct foo *", "void *(uint32_t)". + /// + /// This is almost identical to `fmtDeclaratorPrefix` followed by `fmtDeclaratorSuffix`, but + /// that sequence of calls may emit trailing whitespace where this one does not---for instance, + /// those calls would write the type "void" as "void ". + pub fn fmtTypeName(cty: CType, zcu: *const Zcu) Formatter { + return .{ + .cty = cty, + .zcu = zcu, + .kind = .type_name, + }; + } + + const Formatter = struct { + cty: CType, + zcu: *const Zcu, + kind: enum { type_name, declarator_prefix, declarator_suffix, declarator_suffix_ignore_nonstring }, + + pub fn format(ctx: Formatter, w: *Writer) Writer.Error!void { + switch (ctx.kind) { + .type_name => { + try ctx.cty.writeTypePrefix(w, ctx.zcu); + try ctx.cty.writeTypeSuffix(w, ctx.zcu); + }, + .declarator_prefix => { + try ctx.cty.writeTypePrefix(w, ctx.zcu); + switch (ctx.cty.kind()) { + .specifier => try w.writeByte(' '), // write "int " rather than "int" + .pointer => {}, // we already have something like "foo *" + .postfix_op => {}, // we already have something like "ret_ty " + } + }, + .declarator_suffix => { + try ctx.cty.writeTypeSuffix(w, ctx.zcu); + const nonstring = switch (ctx.cty) { + .array => |arr| arr.nonstring, + .pointer => |ptr| ptr.nonstring, + else => false, + }; + if (nonstring) try w.writeAll(" zig_nonstring"); + }, + 
.declarator_suffix_ignore_nonstring => { + try ctx.cty.writeTypeSuffix(w, ctx.zcu); + }, + } + } + }; + + fn writeTypePrefix(cty: CType, w: *Writer, zcu: *const Zcu) Writer.Error!void { + switch (cty) { + .void => try w.writeAll("void"), + .bool => try w.writeAll("bool"), + .int => |int| try w.writeAll(@tagName(int)), + .float => |float| try w.writeAll(@tagName(float)), + .@"fn" => |ty| try w.print("{f}_{d}", .{ fmtZigType(ty, zcu), ty.toIntern() }), + .@"enum" => |ty| try w.print("enum__{f}_{d}", .{ fmtZigType(ty, zcu), ty.toIntern() }), + .bitpack => |ty| try w.print("bitpack__{f}_{d}", .{ fmtZigType(ty, zcu), ty.toIntern() }), + .@"struct" => |ty| try w.print("struct {f}_{d}", .{ fmtZigType(ty, zcu), ty.toIntern() }), + .union_auto => |ty| try w.print("struct {f}_{d}", .{ fmtZigType(ty, zcu), ty.toIntern() }), + .union_extern => |ty| try w.print("union {f}_{d}", .{ fmtZigType(ty, zcu), ty.toIntern() }), + .slice => |ty| try w.print("struct {f}_{d}", .{ fmtZigType(ty, zcu), ty.toIntern() }), + .opt => |ty| try w.print("struct {f}_{d}", .{ fmtZigType(ty, zcu), ty.toIntern() }), + .arr => |ty| try w.print("struct {f}_{d}", .{ fmtZigType(ty, zcu), ty.toIntern() }), + .vec => |ty| try w.print("struct {f}_{d}", .{ fmtZigType(ty, zcu), ty.toIntern() }), + .errunion => |eu| try w.print("struct errunion_{f}_{d}", .{ + fmtZigType(eu.payload_ty, zcu), + eu.payload_ty.toIntern(), + }), + .aligned => |aligned| try w.print("aligned__{d}_{f}_{d}", .{ + aligned.alignment.toByteUnits().?, + fmtZigType(aligned.ty, zcu), + aligned.ty.toIntern(), + }), + .bigint => |bigint| try w.print("struct int_{d}x{d}", .{ + bigint.limb_size.bits(), + bigint.limbs_len, + }), + + .pointer => |ptr| { + try ptr.elem_ty.writeTypePrefix(w, zcu); + switch (ptr.elem_ty.kind()) { + .pointer, .postfix_op => {}, + .specifier => { + // We want "foo *" or "foo const *" rather than "foo*" or "fooconst *". 
+ try w.writeByte(' '); + }, + } + if (ptr.@"const") try w.writeAll("const "); + if (ptr.@"volatile") try w.writeAll("volatile "); + switch (ptr.elem_ty.kind()) { + .specifier, .pointer => {}, + .postfix_op => { + // Prefix "*" is lower precedence than postfix "(x)" or "[x]" so use parens + // to disambiguate; e.g. "void (*foo)(int)" instead of "void *foo(int)". + try w.writeByte('('); + }, + } + try w.writeByte('*'); + }, + + .array => |array| { + try array.elem_ty.writeTypePrefix(w, zcu); + switch (array.elem_ty.kind()) { + .pointer, .postfix_op => {}, + .specifier => { + // We want e.g. "struct foo [5]" rather than "struct foo[5]". + try w.writeByte(' '); + }, + } + }, + + .function => |function| { + try function.ret_ty.writeTypePrefix(w, zcu); + switch (function.ret_ty.kind()) { + .pointer, .postfix_op => {}, + .specifier => { + // We want e.g. "struct foo (void)" rather than "struct foo(void)". + try w.writeByte(' '); + }, + } + }, + } + } + fn writeTypeSuffix(cty: CType, w: *Writer, zcu: *const Zcu) Writer.Error!void { + switch (cty) { + // simple type specifiers + .void, + .bool, + .int, + .float, + .@"fn", + .@"enum", + .bitpack, + .@"struct", + .union_auto, + .union_extern, + .slice, + .opt, + .arr, + .vec, + .errunion, + .aligned, + .bigint, + => {}, + + .pointer => |ptr| { + // Match the opening paren "(" written by `writeTypePrefix`. + switch (ptr.elem_ty.kind()) { + .specifier, .pointer => {}, + .postfix_op => try w.writeByte(')'), + } + try ptr.elem_ty.writeTypeSuffix(w, zcu); + }, + + .array => |array| { + try w.print("[{d}]", .{array.len}); + try array.elem_ty.writeTypeSuffix(w, zcu); + }, + + .function => |function| { + if (function.param_tys.len == 0 and !function.varargs) { + try w.writeAll("(void)"); + } else { + try w.writeByte('('); + for (function.param_tys, 0..) 
|param_ty, param_index| { + if (param_index > 0) try w.writeAll(", "); + try param_ty.writeTypePrefix(w, zcu); + try param_ty.writeTypeSuffix(w, zcu); + } + if (function.varargs) { + if (function.param_tys.len > 0) try w.writeAll(", "); + try w.writeAll("..."); + } + try w.writeByte(')'); + } + try function.ret_ty.writeTypeSuffix(w, zcu); + }, + } + } + + /// Renders Zig types using only bytes allowed in C identifiers in a somewhat-understandable + /// way. The output is *not* guaranteed to be unique. + fn fmtZigType(ty: Type, zcu: *const Zcu) FormatZigType { + return .{ .ty = ty, .zcu = zcu }; + } + const FormatZigType = struct { + ty: Type, + zcu: *const Zcu, + pub fn format(ctx: FormatZigType, w: *Writer) Writer.Error!void { + const ty = ctx.ty; + const zcu = ctx.zcu; + const ip = &zcu.intern_pool; + switch (ty.zigTypeTag(zcu)) { + .frame => unreachable, + .@"anyframe" => unreachable, + + .type => try w.writeAll("type"), + .void => try w.writeAll("void"), + .bool => try w.writeAll("bool"), + .noreturn => try w.writeAll("noreturn"), + .comptime_int => try w.writeAll("comptime_int"), + .comptime_float => try w.writeAll("comptime_float"), + .enum_literal => try w.writeAll("enum_literal"), + .undefined => try w.writeAll("undefined"), + .null => try w.writeAll("null"), + + .int => switch (ty.toIntern()) { + .usize_type => try w.writeAll("usize"), + .isize_type => try w.writeAll("isize"), + .c_char_type => try w.writeAll("c_char"), + .c_short_type => try w.writeAll("c_short"), + .c_ushort_type => try w.writeAll("c_ushort"), + .c_int_type => try w.writeAll("c_int"), + .c_uint_type => try w.writeAll("c_uint"), + .c_long_type => try w.writeAll("c_long"), + .c_ulong_type => try w.writeAll("c_ulong"), + .c_longlong_type => try w.writeAll("c_longlong"), + .c_ulonglong_type => try w.writeAll("c_ulonglong"), + else => { + const info = ty.intInfo(zcu); + switch (info.signedness) { + .unsigned => try w.print("u{d}", .{info.bits}), + .signed => try w.print("i{d}", .{info.bits}), 
+ } + }, + }, + .float => switch (ty.toIntern()) { + .c_longdouble_type => try w.writeAll("c_longdouble"), + .f16_type => try w.writeAll("f16"), + .f32_type => try w.writeAll("f32"), + .f64_type => try w.writeAll("f64"), + .f80_type => try w.writeAll("f80"), + .f128_type => try w.writeAll("f128"), + else => unreachable, + }, + .error_set => switch (ty.toIntern()) { + .anyerror_type => try w.writeAll("anyerror"), + else => try w.print("error_{d}", .{@intFromEnum(ty.toIntern())}), + }, + .optional => try w.print("opt_{f}", .{fmtZigType(ty.optionalChild(zcu), zcu)}), + .error_union => try w.print("errunion_{f}", .{fmtZigType(ty.errorUnionPayload(zcu), zcu)}), + + .pointer => switch (ty.ptrSize(zcu)) { + .one, .many, .c => try w.print("ptr_{f}", .{fmtZigType(ty.childType(zcu), zcu)}), + .slice => try w.print("slice_{f}", .{fmtZigType(ty.childType(zcu), zcu)}), + }, + .@"fn" => { + const func_type = ip.indexToKey(ty.toIntern()).func_type; + try w.writeAll("fn_"); // intentional double underscore to start + for (func_type.param_types.get(ip)) |param_ty_ip| { + const param_ty: Type = .fromInterned(param_ty_ip); + try w.print("_P{f}", .{fmtZigType(param_ty, zcu)}); + } + if (func_type.is_var_args) { + try w.writeAll("_VA"); + } + const ret_ty: Type = .fromInterned(func_type.return_type); + try w.print("_R{f}", .{fmtZigType(ret_ty, zcu)}); + }, + + .vector => try w.print("vec_{d}_{f}", .{ + ty.arrayLen(zcu), + fmtZigType(ty.childType(zcu), zcu), + }), + + .array => if (ty.sentinel(zcu)) |s| try w.print("arr_{d}s{d}_{f}", .{ + ty.arrayLen(zcu), + @intFromEnum(s.toIntern()), + fmtZigType(ty.childType(zcu), zcu), + }) else try w.print("arr_{d}_{f}", .{ + ty.arrayLen(zcu), + fmtZigType(ty.childType(zcu), zcu), + }), + + .@"struct" => if (ty.isTuple(zcu)) { + const len = ty.structFieldCount(zcu); + try w.print("tuple_{d}", .{len}); + for (0..len) |field_index| { + const field_ty = ty.fieldType(field_index, zcu); + try w.print("_{f}", .{fmtZigType(field_ty, zcu)}); + } + } else { 
+ const name = ty.containerTypeName(ip).toSlice(ip); + try w.print("{f}", .{@import("../c.zig").fmtIdentUnsolo(name)}); + }, + .@"opaque" => if (ty.toIntern() == .anyopaque_type) { + try w.writeAll("anyopaque"); + } else { + const name = ty.containerTypeName(ip).toSlice(ip); + try w.print("{f}", .{@import("../c.zig").fmtIdentUnsolo(name)}); + }, + .@"union", .@"enum" => { + const name = ty.containerTypeName(ip).toSlice(ip); + try w.print("{f}", .{@import("../c.zig").fmtIdentUnsolo(name)}); + }, + } + } + }; + + /// Returns `true` if the layout of `ty` is known without any type resolution required. This + /// allows some types to be lowered directly where 'typedef' would otherwise be necessary. + fn alwaysHasLayout(ty: Type, ip: *const InternPool) bool { + return switch (ip.indexToKey(ty.toIntern())) { + .int_type, + .ptr_type, + .anyframe_type, + .simple_type, + .opaque_type, + .error_set_type, + .inferred_error_set_type, + => true, + + .struct_type, + .union_type, + .enum_type, + => false, + + .array_type => |arr| alwaysHasLayout(.fromInterned(arr.child), ip), + .vector_type => |vec| alwaysHasLayout(.fromInterned(vec.child), ip), + .opt_type => |child| alwaysHasLayout(.fromInterned(child), ip), + .error_union_type => |eu| alwaysHasLayout(.fromInterned(eu.payload_type), ip), + + .tuple_type => |tuple| for (tuple.types.get(ip)) |field_ty| { + if (!alwaysHasLayout(.fromInterned(field_ty), ip)) break false; + } else true, + + .func_type => |f| for (f.param_types.get(ip)) |param_ty| { + if (!alwaysHasLayout(.fromInterned(param_ty), ip)) break false; + } else alwaysHasLayout(.fromInterned(f.return_type), ip), + + // values, not types + .undef, + .simple_value, + .variable, + .@"extern", + .func, + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .float, + .ptr, + .slice, + .opt, + .aggregate, + .un, + .bitpack, + // memoization, not types + .memoized_call, + => unreachable, + }; + } +}; + +const Zcu = @import("../../Zcu.zig"); +const Type = 
@import("../../Type.zig"); +const Value = @import("../../Value.zig"); +const InternPool = @import("../../InternPool.zig"); + +const std = @import("std"); +const assert = std.debug.assert; +const Allocator = std.mem.Allocator; +const Writer = std.Io.Writer; diff --git a/src/codegen/c/type/render_defs.zig b/src/codegen/c/type/render_defs.zig new file mode 100644 index 0000000000..a34b0d6d3c --- /dev/null +++ b/src/codegen/c/type/render_defs.zig @@ -0,0 +1,651 @@ +/// Renders the `typedef` for an aligned type. +pub fn defineAligned( + ty: Type, + alignment: Alignment, + complete: bool, + deps: *CType.Dependencies, + arena: Allocator, + w: *Writer, + pt: Zcu.PerThread, +) (Allocator.Error || Writer.Error)!void { + const zcu = pt.zcu; + + const name_cty: CType = .{ .aligned = .{ + .ty = ty, + .alignment = alignment, + } }; + + const cty: CType = try .lower(ty, deps, arena, zcu); + + try w.writeAll("typedef "); + if (complete and alignment.compareStrict(.lt, ty.abiAlignment(zcu))) { + try w.print("zig_under_align({d}) ", .{alignment.toByteUnits().?}); + } + try w.print("{f}{f}{f}; /* align({d}) {f} */\n", .{ + cty.fmtDeclaratorPrefix(zcu), + name_cty.fmtTypeName(zcu), + cty.fmtDeclaratorSuffix(zcu), + alignment.toByteUnits().?, + ty.fmt(pt), + }); +} +/// Renders the definition of a big-int `struct`. 
+pub fn defineBigInt(big: CType.BigInt, w: *Writer, zcu: *const Zcu) Writer.Error!void { + const name_cty: CType = .{ .bigint = .{ + .limb_size = big.limb_size, + .limbs_len = big.limbs_len, + } }; + const limb_cty: CType = .{ .int = big.limb_size.unsigned() }; + const array_cty: CType = .{ .array = .{ + .len = big.limbs_len, + .elem_ty = &limb_cty, + .nonstring = limb_cty.isStringElem(), + } }; + try w.print("{f} {{ {f}limbs{f}; }}; /* {d} bits */\n", .{ + name_cty.fmtTypeName(zcu), + array_cty.fmtDeclaratorPrefix(zcu), + array_cty.fmtDeclaratorSuffix(zcu), + big.limb_size.bits() * @as(u17, big.limbs_len), + }); +} + +/// Renders a forward declaration of the `struct` which represents an error union whose payload type +/// is `payload_ty` (the error set type is unspecified). +pub fn errunionFwdDecl(payload_ty: Type, w: *Writer, zcu: *const Zcu) Writer.Error!void { + const name_cty: CType = .{ .errunion = .{ + .payload_ty = payload_ty, + } }; + try w.print("{f};\n", .{name_cty.fmtTypeName(zcu)}); +} +/// Renders the definition of the `struct` which represents an error union whose payload type is +/// `payload_ty` (the error set type is unspecified). +/// +/// Asserts that the layout of `payload_ty` is resolved. 
+pub fn errunionDefineComplete( + payload_ty: Type, + deps: *CType.Dependencies, + arena: Allocator, + w: *Writer, + pt: Zcu.PerThread, +) (Allocator.Error || Writer.Error)!void { + const zcu = pt.zcu; + + payload_ty.assertHasLayout(zcu); + + const name_cty: CType = .{ .errunion = .{ + .payload_ty = payload_ty, + } }; + + const error_cty: CType = try .lower(.anyerror, deps, arena, zcu); + + if (payload_ty.hasRuntimeBits(zcu)) { + const payload_cty: CType = try .lower(payload_ty, deps, arena, zcu); + try w.print( + \\{f} {{ /* anyerror!{f} */ + \\ {f}payload{f}; + \\ {f}error{f}; + \\}}; + \\ + , .{ + name_cty.fmtTypeName(zcu), + payload_ty.fmt(pt), + payload_cty.fmtDeclaratorPrefix(zcu), + payload_cty.fmtDeclaratorSuffix(zcu), + error_cty.fmtDeclaratorPrefix(zcu), + error_cty.fmtDeclaratorSuffix(zcu), + }); + } else { + try w.print("{f} {{ {f}error{f}; }}; /* anyerror!{f} */\n", .{ + name_cty.fmtTypeName(zcu), + error_cty.fmtDeclaratorPrefix(zcu), + error_cty.fmtDeclaratorSuffix(zcu), + payload_ty.fmt(pt), + }); + } +} + +/// If the Zig type `ty` lowers to a `struct` or `union` type, renders a forward declaration of that +/// type. Does not write anything for error union types, because their forward declarations are +/// instead rendered by `errunionFwdDecl`. 
+pub fn fwdDecl(ty: Type, w: *Writer, zcu: *const Zcu) Writer.Error!void { + const name_cty: CType = switch (ty.zigTypeTag(zcu)) { + .@"struct" => switch (ty.containerLayout(zcu)) { + .auto, .@"extern" => .{ .@"struct" = ty }, + .@"packed" => return, + }, + .@"union" => switch (ty.containerLayout(zcu)) { + .auto => .{ .union_auto = ty }, + .@"extern" => .{ .union_extern = ty }, + .@"packed" => return, + }, + .pointer => if (ty.isSlice(zcu)) .{ .slice = ty } else return, + .optional => .{ .opt = ty }, + .array => .{ .arr = ty }, + .vector => .{ .vec = ty }, + else => return, + }; + try w.print("{f};\n", .{name_cty.fmtTypeName(zcu)}); +} + +/// If the Zig type `ty` lowers to a `typedef`, renders a typedef of that type to `void`, because +/// the type's layout is not resolved. This is only necessary for `typedef`s because a `struct` or +/// `union` which is never defined is already an incomplete type, just like `void`. +pub fn defineIncomplete(ty: Type, w: *Writer, pt: Zcu.PerThread) Writer.Error!void { + const zcu = pt.zcu; + const name_cty: CType = switch (ty.zigTypeTag(zcu)) { + .@"fn" => .{ .@"fn" = ty }, + .@"enum" => .{ .@"enum" = ty }, + .@"struct", .@"union" => switch (ty.containerLayout(zcu)) { + .auto, .@"extern" => return, + .@"packed" => .{ .bitpack = ty }, + }, + else => return, + }; + try w.print("typedef void {f}; /* {f} */\n", .{ + name_cty.fmtTypeName(zcu), + ty.fmt(pt), + }); +} + +/// If the Zig type `ty` lowers to a `struct` or `union` type, or to a `typedef`, renders the +/// definition of that type. Does not write anything for error union types, because their +/// definitions are instead rendered by `errunionDefineComplete`. +/// +/// Asserts that the layout of `ty` is resolved. 
+pub fn defineComplete( + ty: Type, + deps: *CType.Dependencies, + arena: Allocator, + w: *Writer, + pt: Zcu.PerThread, +) (Allocator.Error || Writer.Error)!void { + const zcu = pt.zcu; + + ty.assertHasLayout(zcu); + + switch (ty.zigTypeTag(zcu)) { + .@"fn" => if (!ty.fnHasRuntimeBits(zcu)) { + const name_cty: CType = .{ .@"fn" = ty }; + try w.print("typedef void {f}; /* {f} */\n", .{ + name_cty.fmtTypeName(zcu), + ty.fmt(pt), + }); + } else { + const ip = &zcu.intern_pool; + const func_type = ip.indexToKey(ty.toIntern()).func_type; + + // While incomplete types are usually an acceptable substitute for "void", this is not + // true in function return types, where "void" is the only incomplete type permitted. + const actual_ret_ty: Type = .fromInterned(func_type.return_type); + const effective_ret_ty: Type = switch (actual_ret_ty.classify(zcu)) { + .no_possible_value => .noreturn, + .one_possible_value, .fully_comptime => .void, // no runtime bits + .partially_comptime, .runtime => actual_ret_ty, // yes runtime bits + }; + + const name_cty: CType = .{ .@"fn" = ty }; + const ret_cty: CType = try .lower(effective_ret_ty, deps, arena, zcu); + + try w.print("typedef {f}{f}(", .{ + ret_cty.fmtDeclaratorPrefix(zcu), + name_cty.fmtTypeName(zcu), + }); + var any_params = false; + for (func_type.param_types.get(ip)) |param_ty_ip| { + const param_ty: Type = .fromInterned(param_ty_ip); + if (!param_ty.hasRuntimeBits(zcu)) continue; + if (any_params) try w.writeAll(", "); + any_params = true; + const param_cty: CType = try .lower(param_ty, deps, arena, zcu); + try w.print("{f}", .{param_cty.fmtTypeName(zcu)}); + } + if (func_type.is_var_args) { + if (any_params) try w.writeAll(", "); + try w.writeAll("..."); + } else if (!any_params) { + try w.writeAll("void"); + } + try w.print("){f}; /* {f} */\n", .{ + ret_cty.fmtDeclaratorSuffixIgnoreNonstring(zcu), + ty.fmt(pt), + }); + }, + .@"enum" => { + const name_cty: CType = .{ .@"enum" = ty }; + const cty: CType = try 
.lower(ty.intTagType(zcu), deps, arena, zcu); + try w.print("typedef {f}{f}{f}; /* {f} */\n", .{ + cty.fmtDeclaratorPrefix(zcu), + name_cty.fmtTypeName(zcu), + cty.fmtDeclaratorSuffix(zcu), + ty.fmt(pt), + }); + }, + .@"struct" => if (ty.isTuple(zcu)) { + try defineTuple(ty, deps, arena, w, pt); + } else switch (ty.containerLayout(zcu)) { + .auto, .@"extern" => try defineStruct(ty, deps, arena, w, pt), + .@"packed" => try defineBitpack(ty, deps, arena, w, pt), + }, + .@"union" => switch (ty.containerLayout(zcu)) { + .auto => try defineUnionAuto(ty, deps, arena, w, pt), + .@"extern" => try defineUnionExtern(ty, deps, arena, w, pt), + .@"packed" => try defineBitpack(ty, deps, arena, w, pt), + }, + .pointer => if (ty.isSlice(zcu)) { + const name_cty: CType = .{ .slice = ty }; + const ptr_cty: CType = try .lower(ty.slicePtrFieldType(zcu), deps, arena, zcu); + try w.print( + \\{f} {{ /* {f} */ + \\ {f}ptr{f}; + \\ size_t len; + \\}}; + \\ + , .{ + name_cty.fmtTypeName(zcu), + ty.fmt(pt), + ptr_cty.fmtDeclaratorPrefix(zcu), + ptr_cty.fmtDeclaratorSuffix(zcu), + }); + }, + .optional => switch (CType.classifyOptional(ty, zcu)) { + .error_set, + .ptr_like, + .slice_like, + .npv_payload, + => {}, + + .opv_payload => { + const name_cty: CType = .{ .opt = ty }; + try w.print("{f} {{ bool is_null; }}; /* {f} */\n", .{ + name_cty.fmtTypeName(zcu), + ty.fmt(pt), + }); + }, + + .@"struct" => { + const name_cty: CType = .{ .opt = ty }; + const payload_cty: CType = try .lower(ty.optionalChild(zcu), deps, arena, zcu); + try w.print( + \\{f} {{ /* {f} */ + \\ {f}payload{f}; + \\ bool is_null; + \\}}; + \\ + , .{ + name_cty.fmtTypeName(zcu), + ty.fmt(pt), + payload_cty.fmtDeclaratorPrefix(zcu), + payload_cty.fmtDeclaratorSuffix(zcu), + }); + }, + }, + .array => if (ty.hasRuntimeBits(zcu)) { + const name_cty: CType = .{ .arr = ty }; + const elem_cty: CType = try .lower(ty.childType(zcu), deps, arena, zcu); + const array_cty: CType = .{ .array = .{ + .len = 
ty.arrayLenIncludingSentinel(zcu), + .elem_ty = &elem_cty, + .nonstring = nonstring: { + if (!elem_cty.isStringElem()) break :nonstring false; + const s = ty.sentinel(zcu) orelse break :nonstring true; + break :nonstring Value.compareHetero(s, .neq, .zero_comptime_int, zcu); + }, + } }; + try w.print("{f} {{ {f}array{f}; }}; /* {f} */\n", .{ + name_cty.fmtTypeName(zcu), + array_cty.fmtDeclaratorPrefix(zcu), + array_cty.fmtDeclaratorSuffix(zcu), + ty.fmt(pt), + }); + }, + .vector => if (ty.hasRuntimeBits(zcu)) { + const name_cty: CType = .{ .vec = ty }; + const elem_cty: CType = try .lower(ty.childType(zcu), deps, arena, zcu); + const array_cty: CType = .{ .array = .{ + .len = ty.arrayLenIncludingSentinel(zcu), + .elem_ty = &elem_cty, + .nonstring = elem_cty.isStringElem(), + } }; + try w.print("{f} {{ {f}array{f}; }}; /* {f} */\n", .{ + name_cty.fmtTypeName(zcu), + array_cty.fmtDeclaratorPrefix(zcu), + array_cty.fmtDeclaratorSuffix(zcu), + ty.fmt(pt), + }); + }, + else => {}, + } +} +fn defineBitpack( + ty: Type, + deps: *CType.Dependencies, + arena: Allocator, + w: *Writer, + pt: Zcu.PerThread, +) (Allocator.Error || Writer.Error)!void { + const zcu = pt.zcu; + const name_cty: CType = .{ .bitpack = ty }; + const cty: CType = try .lower(ty.bitpackBackingInt(zcu), deps, arena, zcu); + try w.print("typedef {f}{f}{f}; /* {f} */\n", .{ + cty.fmtDeclaratorPrefix(zcu), + name_cty.fmtTypeName(zcu), + cty.fmtDeclaratorSuffix(zcu), + ty.fmt(pt), + }); +} +fn defineTuple( + ty: Type, + deps: *CType.Dependencies, + arena: Allocator, + w: *Writer, + pt: Zcu.PerThread, +) (Allocator.Error || Writer.Error)!void { + const zcu = pt.zcu; + if (!ty.hasRuntimeBits(zcu)) return; + const ip = &zcu.intern_pool; + const tuple = ip.indexToKey(ty.toIntern()).tuple_type; + + // Fields cannot be underaligned, because tuple fields cannot have specified alignments. + // However, overaligned fields are possible thanks to intermediate zero-bit fields. 
+ + const tuple_align = ty.abiAlignment(zcu); + + // If the alignment of other fields would not give the tuple sufficient alignment, we + // need to align the first field (which does not affect its offset, because 0 is always + // well-aligned) to indirectly specify the tuple alignment. + const overalign: bool = for (tuple.types.get(ip)) |field_ty_ip| { + const field_ty: Type = .fromInterned(field_ty_ip); + if (!field_ty.hasRuntimeBits(zcu)) continue; + const natural_align = field_ty.defaultStructFieldAlignment(.auto, zcu); + if (natural_align.compareStrict(.gte, tuple_align)) break false; + } else true; + + const name_cty: CType = .{ .@"struct" = ty }; + try w.print("{f} {{ /* {f} */\n", .{ + name_cty.fmtTypeName(zcu), + ty.fmt(pt), + }); + var zig_offset: u64 = 0; + var c_offset: u64 = 0; + for (tuple.types.get(ip), tuple.values.get(ip), 0..) |field_ty_ip, field_val_ip, field_index| { + if (field_val_ip != .none) continue; // `comptime` field + const field_ty: Type = .fromInterned(field_ty_ip); + const field_align = field_ty.abiAlignment(zcu); + zig_offset = field_align.forward(zig_offset); + if (!field_ty.hasRuntimeBits(zcu)) continue; + c_offset = field_align.forward(c_offset); + if (zig_offset == 0 and overalign) { + // This is the first field; specify its alignment to align the tuple. + try w.print(" zig_align({d})", .{tuple_align.toByteUnits().?}); + } else if (zig_offset > c_offset) { + // This field needs to be overaligned compared to what its offset would otherwise be. 
+ const need_align: Alignment = .fromLog2Units(@ctz(zig_offset)); + try w.print(" zig_align({d})", .{need_align.toByteUnits().?}); + c_offset = need_align.forward(c_offset); + assert(c_offset == zig_offset); + } + const field_cty: CType = try .lower(field_ty, deps, arena, zcu); + try w.print(" {f}f{d}{f};\n", .{ + field_cty.fmtDeclaratorPrefix(zcu), + field_index, + field_cty.fmtDeclaratorSuffix(zcu), + }); + const field_size = field_ty.abiSize(zcu); + zig_offset += field_size; + c_offset += field_size; + } + try w.writeAll("};\n"); +} +fn defineStruct( + ty: Type, + deps: *CType.Dependencies, + arena: Allocator, + w: *Writer, + pt: Zcu.PerThread, +) (Allocator.Error || Writer.Error)!void { + const zcu = pt.zcu; + if (!ty.hasRuntimeBits(zcu)) return; + const ip = &zcu.intern_pool; + + const struct_type = ip.loadStructType(ty.toIntern()); + + // If there are any underaligned fields, we need to byte-pack the struct. + const pack: bool = pack: { + var it = struct_type.iterateRuntimeOrder(ip); + var offset: u64 = 0; + while (it.next()) |field_index| { + const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[field_index]); + if (!field_ty.hasRuntimeBits(zcu)) continue; + const natural_align = field_ty.defaultStructFieldAlignment(struct_type.layout, zcu); + const natural_offset = natural_align.forward(offset); + const actual_offset = struct_type.field_offsets.get(ip)[field_index]; + if (actual_offset < natural_offset) break :pack true; + offset = actual_offset + field_ty.abiSize(zcu); + } + break :pack false; + }; + + // If the alignment of other fields would not give the struct sufficient alignment, we + // need to align the first field (which does not affect its offset, because 0 is always + // well-aligned) to indirectly specify the struct alignment. 
+ const overalign: bool = switch (pack) { + true => struct_type.alignment.compareStrict(.gt, .@"1"), + false => overalign: { + var it = struct_type.iterateRuntimeOrder(ip); + while (it.next()) |field_index| { + const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[field_index]); + if (!field_ty.hasRuntimeBits(zcu)) continue; + const natural_align = field_ty.defaultStructFieldAlignment(struct_type.layout, zcu); + if (natural_align.compareStrict(.gte, struct_type.alignment)) break :overalign false; + } + break :overalign true; + }, + }; + + if (pack) try w.writeAll("zig_packed("); + const name_cty: CType = .{ .@"struct" = ty }; + try w.print("{f} {{ /* {f} */\n", .{ + name_cty.fmtTypeName(zcu), + ty.fmt(pt), + }); + var it = struct_type.iterateRuntimeOrder(ip); + var offset: u64 = 0; + while (it.next()) |field_index| { + const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[field_index]); + if (!field_ty.hasRuntimeBits(zcu)) continue; + const natural_align = field_ty.defaultStructFieldAlignment(struct_type.layout, zcu); + const natural_offset = switch (pack) { + true => offset, + false => natural_align.forward(offset), + }; + const actual_offset = struct_type.field_offsets.get(ip)[field_index]; + if (actual_offset == 0 and overalign) { + // This is the first field; specify its alignment to align the struct. + try w.print(" zig_align({d})", .{struct_type.alignment.toByteUnits().?}); + } else if (actual_offset > natural_offset) { + // This field needs to be underaligned or overaligned compared to what its + // offset would otherwise be. 
+ const need_align: Alignment = .fromLog2Units(@ctz(actual_offset)); + if (need_align.compareStrict(.lt, natural_align)) { + try w.print(" zig_under_align({d})", .{need_align.toByteUnits().?}); + } else { + try w.print(" zig_align({d})", .{need_align.toByteUnits().?}); + } + } + const field_cty: CType = try .lower(field_ty, deps, arena, zcu); + const field_name = struct_type.field_names.get(ip)[field_index].toSlice(ip); + try w.print(" {f}{f}{f};\n", .{ + field_cty.fmtDeclaratorPrefix(zcu), + fmtIdentSolo(field_name), + field_cty.fmtDeclaratorSuffix(zcu), + }); + offset = actual_offset + field_ty.abiSize(zcu); + } + assert(struct_type.alignment.forward(offset) == struct_type.size); + try w.writeByte('}'); + if (pack) try w.writeByte(')'); + try w.writeAll(";\n"); +} +fn defineUnionAuto( + ty: Type, + deps: *CType.Dependencies, + arena: Allocator, + w: *Writer, + pt: Zcu.PerThread, +) (Allocator.Error || Writer.Error)!void { + const zcu = pt.zcu; + if (!ty.hasRuntimeBits(zcu)) return; + const ip = &zcu.intern_pool; + + const union_type = ip.loadUnionType(ty.toIntern()); + const enum_tag_ty: Type = .fromInterned(union_type.enum_tag_type); + + // If there are any underaligned fields, we need to byte-pack the union. + const pack: bool = for (union_type.field_types.get(ip)) |field_ty_ip| { + const field_ty: Type = .fromInterned(field_ty_ip); + if (!field_ty.hasRuntimeBits(zcu)) continue; + const natural_align = field_ty.abiAlignment(zcu); + if (natural_align.compareStrict(.gt, union_type.alignment)) break true; + } else false; + + // If the alignment of other fields would not give the union sufficient alignment, we + // need to align the first field (which does not affect its offset, because 0 is always + // well-aligned) to indirectly specify the union alignment. 
+ const overalign: bool = switch (pack) { + true => union_type.alignment.compareStrict(.gt, .@"1"), + false => for (union_type.field_types.get(ip)) |field_ty_ip| { + const field_ty: Type = .fromInterned(field_ty_ip); + if (!field_ty.hasRuntimeBits(zcu)) continue; + const natural_align = field_ty.abiAlignment(zcu); + if (natural_align.compareStrict(.gte, union_type.alignment)) break false; + } else overalign: { + if (union_type.has_runtime_tag) { + const tag_align = enum_tag_ty.abiAlignment(zcu); + if (tag_align.compareStrict(.gte, union_type.alignment)) break :overalign false; + } + break :overalign true; + }, + }; + + const payload_has_bits = !union_type.has_runtime_tag or union_type.size > enum_tag_ty.abiSize(zcu); + + const name_cty: CType = .{ .union_auto = ty }; + try w.print("{f} {{ /* {f} */\n", .{ + name_cty.fmtTypeName(zcu), + ty.fmt(pt), + }); + if (payload_has_bits) { + try w.writeByte(' '); + if (pack) try w.writeAll("zig_packed("); + try w.writeAll("union {\n"); + for (0..enum_tag_ty.enumFieldCount(zcu)) |field_index| { + const field_ty = ty.fieldType(field_index, zcu); + if (!field_ty.hasRuntimeBits(zcu)) continue; + const field_name = enum_tag_ty.enumFieldName(field_index, zcu).toSlice(ip); + const field_cty: CType = try .lower(field_ty, deps, arena, zcu); + try w.writeAll(" "); + if (overalign and field_index == 0) { + // This is the first field; specify its alignment to align the union. 
+ try w.print("zig_align({d}) ", .{union_type.alignment.toByteUnits().?}); + } + try w.print("{f}{f}{f};\n", .{ + field_cty.fmtDeclaratorPrefix(zcu), + fmtIdentSolo(field_name), + field_cty.fmtDeclaratorSuffix(zcu), + }); + } + try w.writeAll(" }"); + if (pack) try w.writeByte(')'); + try w.writeAll(" payload;\n"); + } + if (union_type.has_runtime_tag) { + const tag_cty: CType = try .lower(enum_tag_ty, deps, arena, zcu); + try w.print(" {f}tag{f};\n", .{ + tag_cty.fmtDeclaratorPrefix(zcu), + tag_cty.fmtDeclaratorSuffix(zcu), + }); + } + try w.writeAll("};\n"); +} +fn defineUnionExtern( + ty: Type, + deps: *CType.Dependencies, + arena: Allocator, + w: *Writer, + pt: Zcu.PerThread, +) (Allocator.Error || Writer.Error)!void { + const zcu = pt.zcu; + if (!ty.hasRuntimeBits(zcu)) return; + const ip = &zcu.intern_pool; + + const union_type = ip.loadUnionType(ty.toIntern()); + assert(!union_type.has_runtime_tag); + const enum_tag_ty: Type = .fromInterned(union_type.enum_tag_type); + + // If there are any underaligned fields, we need to byte-pack the union. + const pack: bool = for (union_type.field_types.get(ip)) |field_ty_ip| { + const field_ty: Type = .fromInterned(field_ty_ip); + if (!field_ty.hasRuntimeBits(zcu)) continue; + const natural_align = field_ty.abiAlignment(zcu); + if (natural_align.compareStrict(.gt, union_type.alignment)) break true; + } else false; + + // If the alignment of other fields would not give the union sufficient alignment, we + // need to align the first field (which does not affect its offset, because 0 is always + // well-aligned) to indirectly specify the union alignment. 
+ const overalign: bool = switch (pack) { + true => union_type.alignment.compareStrict(.gt, .@"1"), + false => for (union_type.field_types.get(ip)) |field_ty_ip| { + const field_ty: Type = .fromInterned(field_ty_ip); + if (!field_ty.hasRuntimeBits(zcu)) continue; + const natural_align = field_ty.abiAlignment(zcu); + if (natural_align.compareStrict(.gte, union_type.alignment)) break false; + } else overalign: { + if (union_type.has_runtime_tag) { + const tag_align = enum_tag_ty.abiAlignment(zcu); + if (tag_align.compareStrict(.gte, union_type.alignment)) break :overalign false; + } + break :overalign true; + }, + }; + + if (pack) try w.writeAll("zig_packed("); + + const name_cty: CType = .{ .union_extern = ty }; + try w.print("{f} {{ /* {f} */\n", .{ + name_cty.fmtTypeName(zcu), + ty.fmt(pt), + }); + + for (0..enum_tag_ty.enumFieldCount(zcu)) |field_index| { + const field_ty = ty.fieldType(field_index, zcu); + if (!field_ty.hasRuntimeBits(zcu)) continue; + const field_name = enum_tag_ty.enumFieldName(field_index, zcu).toSlice(ip); + const field_cty: CType = try .lower(field_ty, deps, arena, zcu); + if (overalign and field_index == 0) { + // This is the first field; specify its alignment to align the union. 
+ try w.print(" zig_align({d})", .{union_type.alignment.toByteUnits().?}); + } + try w.print(" {f}{f}{f};\n", .{ + field_cty.fmtDeclaratorPrefix(zcu), + fmtIdentSolo(field_name), + field_cty.fmtDeclaratorSuffix(zcu), + }); + } + try w.writeByte('}'); + if (pack) try w.writeByte(')'); + try w.writeAll(";\n"); +} + +const std = @import("std"); +const assert = std.debug.assert; +const Writer = std.Io.Writer; +const Allocator = std.mem.Allocator; + +const Zcu = @import("../../../Zcu.zig"); +const Type = @import("../../../Type.zig"); +const Value = @import("../../../Value.zig"); +const CType = @import("../type.zig").CType; +const Alignment = @import("../../../InternPool.zig").Alignment; + +const fmtIdentSolo = @import("../../c.zig").fmtIdentSolo; diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 5735fe5e51..f8d1310b1c 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -23,7 +23,6 @@ const Package = @import("../Package.zig"); const Air = @import("../Air.zig"); const Value = @import("../Value.zig"); const Type = @import("../Type.zig"); -const DebugConstPool = link.DebugConstPool; const codegen = @import("../codegen.zig"); const x86_64_abi = @import("x86_64/abi.zig"); const wasm_c_abi = @import("wasm/abi.zig"); @@ -532,8 +531,8 @@ pub const Object = struct { debug_file_map: std.AutoHashMapUnmanaged(Zcu.File.Index, Builder.Metadata), /// This pool *only* contains types (and does not contain `@as(type, undefined)`). - debug_type_pool: DebugConstPool, - /// Keyed on `DebugConstPool.Index`. + debug_type_pool: link.ConstPool, + /// Keyed on `link.ConstPool.Index`. debug_types: std.ArrayList(Builder.Metadata), /// Initially `.none`, set if the type `anyerror` is lowered to a debug type. 
The type will not /// actually be created until `emit`, which must resolve this reference with an appropriate enum @@ -1622,10 +1621,7 @@ pub const Object = struct { } fn flushPendingDebugTypes(o: *Object, pt: Zcu.PerThread) Allocator.Error!void { - o.debug_type_pool.flushPending(pt, .{ .llvm = o }) catch |err| switch (err) { - error.OutOfMemory => |e| return e, - else => unreachable, // TODO: stop self-hosted backends from returning all of this crap! - }; + try o.debug_type_pool.flushPending(pt, .{ .llvm = o }); } pub fn updateExports( @@ -1823,17 +1819,14 @@ pub const Object = struct { pub fn updateContainerType(o: *Object, pt: Zcu.PerThread, ty: InternPool.Index, success: bool) Allocator.Error!void { if (!o.builder.strip) { - o.debug_type_pool.updateContainerType(pt, .{ .llvm = o }, ty, success) catch |err| switch (err) { - error.OutOfMemory => |e| return e, - else => unreachable, // TODO: stop self-hosted backends from returning all of this crap! - }; + try o.debug_type_pool.updateContainerType(pt, .{ .llvm = o }, ty, success); } } - /// Should only be called by the `DebugConstPool` implementation. + /// Should only be called by the `link.ConstPool` implementation. /// /// `val` is always a type because `o.debug_type_pool` only contains types. - pub fn addConst(o: *Object, pt: Zcu.PerThread, index: DebugConstPool.Index, val: InternPool.Index) Allocator.Error!void { + pub fn addConst(o: *Object, pt: Zcu.PerThread, index: link.ConstPool.Index, val: InternPool.Index) Allocator.Error!void { const zcu = pt.zcu; const gpa = zcu.comp.gpa; assert(zcu.intern_pool.typeOf(val) == .type_type); @@ -1846,10 +1839,10 @@ pub const Object = struct { o.debug_anyerror_fwd_ref = fwd_ref.toOptional(); } } - /// Should only be called by the `DebugConstPool` implementation. + /// Should only be called by the `link.ConstPool` implementation. /// /// `val` is always a type because `o.debug_type_pool` only contains types. 
- pub fn updateConstIncomplete(o: *Object, pt: Zcu.PerThread, index: DebugConstPool.Index, val: InternPool.Index) Allocator.Error!void { + pub fn updateConstIncomplete(o: *Object, pt: Zcu.PerThread, index: link.ConstPool.Index, val: InternPool.Index) Allocator.Error!void { assert(pt.zcu.intern_pool.typeOf(val) == .type_type); const fwd_ref = o.debug_types.items[@intFromEnum(index)]; assert(val != .anyerror_type); @@ -1857,10 +1850,10 @@ pub const Object = struct { const debug_incomplete_type = try o.builder.debugSignedType(name_str, 0); o.builder.resolveDebugForwardReference(fwd_ref, debug_incomplete_type); } - /// Should only be called by the `DebugConstPool` implementation. + /// Should only be called by the `link.ConstPool` implementation. /// /// `val` is always a type because `o.debug_type_pool` only contains types. - pub fn updateConst(o: *Object, pt: Zcu.PerThread, index: DebugConstPool.Index, val: InternPool.Index) Allocator.Error!void { + pub fn updateConst(o: *Object, pt: Zcu.PerThread, index: link.ConstPool.Index, val: InternPool.Index) Allocator.Error!void { assert(pt.zcu.intern_pool.typeOf(val) == .type_type); const fwd_ref = o.debug_types.items[@intFromEnum(index)]; if (val == .anyerror_type) { @@ -1890,10 +1883,7 @@ pub const Object = struct { fn getDebugType(o: *Object, pt: Zcu.PerThread, ty: Type) Allocator.Error!Builder.Metadata { assert(!o.builder.strip); - const index = o.debug_type_pool.get(pt, .{ .llvm = o }, ty.toIntern()) catch |err| switch (err) { - error.OutOfMemory => |e| return e, - else => unreachable, // TODO: stop self-hosted backends from returning all of this crap! 
- }; + const index = try o.debug_type_pool.get(pt, .{ .llvm = o }, ty.toIntern()); return o.debug_types.items[@intFromEnum(index)]; } diff --git a/src/link.zig b/src/link.zig index c81737484e..3eb4add772 100644 --- a/src/link.zig +++ b/src/link.zig @@ -29,7 +29,7 @@ const codegen = @import("codegen.zig"); pub const aarch64 = @import("link/aarch64.zig"); pub const LdScript = @import("link/LdScript.zig"); pub const Queue = @import("link/Queue.zig"); -pub const DebugConstPool = @import("link/DebugConstPool.zig"); +pub const ConstPool = @import("link/ConstPool.zig"); pub const Diags = struct { /// Stored here so that function definitions can distinguish between @@ -804,7 +804,7 @@ pub const File = struct { switch (base.tag) { .lld => unreachable, else => {}, - inline .elf => |tag| { + inline .elf, .c => |tag| { dev.check(tag.devFeature()); return @as(*tag.Type(), @fieldParentPtr("base", base)).updateContainerType(pt, ty, success); }, diff --git a/src/link/C.zig b/src/link/C.zig index 93e771ebfc..3c71206cc7 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -1,3 +1,9 @@ +/// Unlike other linker implementations, `link.C` does not attempt to incrementally link its output, +/// because C has many language rules which make that impractical. Instead, we individually generate +/// each declaration (NAV), and the output is stitched together (alongside types and UAVs) in an +/// appropriate order in `flush`. 
+const C = @This(); + const std = @import("std"); const mem = std.mem; const assert = std.debug.assert; @@ -5,7 +11,6 @@ const Allocator = std.mem.Allocator; const fs = std.fs; const Path = std.Build.Cache.Path; -const C = @This(); const build_options = @import("build_options"); const Zcu = @import("../Zcu.zig"); const Module = @import("../Package/Module.zig"); @@ -19,40 +24,45 @@ const Type = @import("../Type.zig"); const Value = @import("../Value.zig"); const AnyMir = @import("../codegen.zig").AnyMir; -pub const zig_h = "#include \"zig.h\"\n"; - base: link.File, -/// This linker backend does not try to incrementally link output C source code. -/// Instead, it tracks all declarations in this table, and iterates over it -/// in the flush function, stitching pre-rendered pieces of C code together. -navs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, AvBlock), -/// All the string bytes of rendered C code, all squished into one array. -/// While in progress, a separate buffer is used, and then when finished, the -/// buffer is copied into this one. + +/// All the string bytes of rendered C code, all squished into one array. `String` is used to refer +/// to specific slices of this array, used for the rendered C code of an individual UAV/NAV/type. +/// +/// During code generation for functions, a separate buffer is used, and the contents of that buffer +/// are copied into `string_bytes` when the function is emitted by `updateFunc`. string_bytes: std.ArrayList(u8), -/// Tracks all the anonymous decls that are used by all the decls so they can -/// be rendered during flush(). -uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, AvBlock), -/// Sparse set of uavs that are overaligned. Underaligned anon decls are -/// lowered the same as ABI-aligned anon decls. The keys here are a subset of -/// the keys of `uavs`. 
-aligned_uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, Alignment), -exported_navs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, ExportedBlock), -exported_uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, ExportedBlock), +/// Like with `string_bytes`, we concatenate all type dependencies into one array, and slice into it +/// for specific groups of dependencies. These values are indices into `type_pool`, and thus also +/// into `types`. We store these instead of `InternPool.Index` because it lets us avoid some hash +/// map lookups in `flush`. +type_dependencies: std.ArrayList(link.ConstPool.Index), +/// For storing dependencies on "aligned" versions of types, we must associate each type with a +/// bitmask of required alignments. As with `type_dependencies`, we concatenate all such masks into +/// one array. +align_dependency_masks: std.ArrayList(u64), -/// Optimization, `updateDecl` reuses this buffer rather than creating a new -/// one with every call. -fwd_decl_buf: []u8, -/// Optimization, `updateDecl` reuses this buffer rather than creating a new -/// one with every call. -code_header_buf: []u8, -/// Optimization, `updateDecl` reuses this buffer rather than creating a new -/// one with every call. -code_buf: []u8, -/// Optimization, `flush` reuses this buffer rather than creating a new -/// one with every call. -scratch_buf: []u32, +/// All NAVs, regardless of whether they are functions or simple constants, are put in this map. +navs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, RenderedDecl), +/// All UAVs which may be referenced are in this map. The UAV alignment is not included in the +/// rendered C code stored here, because we don't know the alignment a UAV needs until `flush`. +uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, RenderedDecl), +/// Contains all types which are needed by some other rendered code. Does not contain any constants +/// other than types. 
+type_pool: link.ConstPool,
+/// Indices are `link.ConstPool.Index` from `type_pool`. Contains rendered C code for every type
+/// which may be referenced. Logic in `flush` will perform the appropriate topological sort to emit
+/// these type definitions in an order which C allows.
+types: std.ArrayList(RenderedType),
+
+/// The set of big int types required by *any* generated code so far. These are always safe to emit,
+/// so they do not participate in the dependency graph traversal in `flush`. Therefore, redundant
+/// big-int types may be emitted under incremental compilation.
+bigint_types: std.AutoArrayHashMapUnmanaged(codegen.CType.BigInt, void),
+
+exported_navs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, String),
+exported_uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, String),
 
 /// A reference into `string_bytes`.
 const String = extern struct {
@@ -64,50 +74,320 @@ const String = extern struct {
         .len = 0,
     };
 
-    fn concat(lhs: String, rhs: String) String {
-        assert(lhs.start + lhs.len == rhs.start);
+    fn get(s: String, c: *C) []const u8 {
+        return c.string_bytes.items[s.start..][0..s.len];
+    }
+};
+
+const CTypeDependencies = struct {
+    len: u32,
+    errunion_len: u32,
+    fwd_len: u32,
+    errunion_fwd_len: u32,
+    aligned_fwd_len: u32,
+
+    /// Index into `C.type_dependencies`. Starting at this index are:
+    /// * `len` dependencies on complete types
+    /// * `errunion_len` dependencies on complete error union types
+    /// * `fwd_len` dependencies on forward-declared types
+    /// * `errunion_fwd_len` dependencies on forward-declared error union types
+    /// * `aligned_fwd_len` dependencies on aligned types
+    type_start: u32,
+    /// Index into `C.align_dependency_masks`. Starting at this index are `aligned_fwd_len`
+    /// items containing the bitmasks for each aligned type (in `C.type_dependencies`).
+ align_mask_start: u32, + + const Resolved = struct { + type: []const link.ConstPool.Index, + errunion_type: []const link.ConstPool.Index, + type_fwd: []const link.ConstPool.Index, + errunion_type_fwd: []const link.ConstPool.Index, + aligned_type_fwd: []const link.ConstPool.Index, + aligned_type_masks: []const u64, + }; + + fn get(td: *const CTypeDependencies, c: *const C) Resolved { + const types_overlong = c.type_dependencies.items[td.type_start..]; return .{ - .start = lhs.start, - .len = lhs.len + rhs.len, + .type = types_overlong[0..td.len], + .errunion_type = types_overlong[td.len..][0..td.errunion_len], + .type_fwd = types_overlong[td.len + td.errunion_len ..][0..td.fwd_len], + .errunion_type_fwd = types_overlong[td.len + td.errunion_len + td.fwd_len ..][0..td.errunion_fwd_len], + .aligned_type_fwd = types_overlong[td.len + td.errunion_len + td.fwd_len + td.errunion_fwd_len ..][0..td.aligned_fwd_len], + .aligned_type_masks = c.align_dependency_masks.items[td.align_mask_start..][0..td.aligned_fwd_len], }; } + + const empty: CTypeDependencies = .{ + .len = 0, + .errunion_len = 0, + .fwd_len = 0, + .errunion_fwd_len = 0, + .aligned_fwd_len = 0, + .type_start = 0, + .align_mask_start = 0, + }; }; -/// Per-declaration data. -pub const AvBlock = struct { - fwd_decl: String = .empty, - code: String = .empty, - /// Each `Decl` stores a set of used `CType`s. In `flush()`, we iterate - /// over each `Decl` and generate the definition for each used `CType` once. 
-    ctype_pool: codegen.CType.Pool = .empty,
-    /// May contain string references to ctype_pool
-    lazy_fns: codegen.LazyFnMap = .{},
+const RenderedDecl = struct {
+    fwd_decl: String,
+    code: String,
+    ctype_deps: CTypeDependencies,
+    need_uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, Alignment),
+    need_tag_name_funcs: std.AutoArrayHashMapUnmanaged(InternPool.Index, void),
+    need_never_tail_funcs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, void),
+    need_never_inline_funcs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, void),
 
-    fn deinit(ab: *AvBlock, gpa: Allocator) void {
-        ab.lazy_fns.deinit(gpa);
-        ab.ctype_pool.deinit(gpa);
-        ab.* = undefined;
+    const init: RenderedDecl = .{
+        .fwd_decl = .empty,
+        .code = .empty,
+        .ctype_deps = .empty,
+        .need_uavs = .empty,
+        .need_tag_name_funcs = .empty,
+        .need_never_tail_funcs = .empty,
+        .need_never_inline_funcs = .empty,
+    };
+
+    fn deinit(rd: *RenderedDecl, gpa: Allocator) void {
+        rd.need_uavs.deinit(gpa);
+        rd.need_tag_name_funcs.deinit(gpa);
+        rd.need_never_tail_funcs.deinit(gpa);
+        rd.need_never_inline_funcs.deinit(gpa);
+        rd.* = undefined;
+    }
+
+    /// We are about to re-render this declaration, but we want to reuse the existing buffers, so
+    /// call `clearRetainingCapacity` on the containers. Sets `fwd_decl` and `code` to `undefined`,
+    /// because we shouldn't be using the old values any longer.
+    fn clearRetainingCapacity(rd: *RenderedDecl) void {
+        rd.fwd_decl = undefined;
+        rd.code = undefined;
+        rd.need_uavs.clearRetainingCapacity();
+        rd.need_tag_name_funcs.clearRetainingCapacity();
+        rd.need_never_tail_funcs.clearRetainingCapacity();
+        rd.need_never_inline_funcs.clearRetainingCapacity();
+    }
 };
 
-/// Per-exported-symbol data.
-pub const ExportedBlock = struct {
-    fwd_decl: String = .empty,
+const RenderedType = struct {
+    /// If this type lowers to an aggregate, this is a forward declaration of its struct/union tag.
+    /// Otherwise, this is `.empty`.
+ /// + /// Populated immediately and never changes. + fwd_decl: String, + + /// A forward declaration of an error union type with this type as its *payload*. + /// + /// Populated immediately and never changes. + errunion_fwd_decl: String, + + /// If this type lowers to an aggregate, this is the struct/union definition. + /// If this type lowers to a typedef, this is that typedef. + /// Otherwise, this is `.empty`. + definition: String, + /// The `struct` definition for an error union type with this type as its *payload*. + /// + /// This string is empty iff the payload type does not have a resolved layout. If the layout is + /// resolved, the error union struct is defined, even if the payload type lacks runtime bits. + errunion_definition: String, + + /// Dependencies which must be satisfied before emitting the name of this type. As such, they + /// must be satisfied before emitting `errunion_definition` or any aligned typedef. + /// + /// Populated immediately and never changes. + deps: CTypeDependencies, + + /// Dependencies which must be satisfied before emitting `definition`. + definition_deps: CTypeDependencies, }; -pub fn getString(this: C, s: String) []const u8 { - return this.string_bytes.items[s.start..][0..s.len]; +/// Only called by `link.ConstPool` due to `c.type_pool`, so `val` is always a type. 
+pub fn addConst( + c: *C, + pt: Zcu.PerThread, + pool_index: link.ConstPool.Index, + val: InternPool.Index, +) Allocator.Error!void { + const zcu = pt.zcu; + const gpa = zcu.comp.gpa; + assert(zcu.intern_pool.typeOf(val) == .type_type); + assert(@intFromEnum(pool_index) == c.types.items.len); + + const ty: Type = .fromInterned(val); + + const fwd_decl: String = fwd_decl: { + var aw: std.Io.Writer.Allocating = .fromArrayList(gpa, &c.string_bytes); + defer c.string_bytes = aw.toArrayList(); + const start = aw.written().len; + codegen.CType.render_defs.fwdDecl(ty, &aw.writer, zcu) catch |err| switch (err) { + error.WriteFailed => return error.OutOfMemory, + }; + break :fwd_decl .{ + .start = @intCast(start), + .len = @intCast(aw.written().len - start), + }; + }; + + const errunion_fwd_decl: String = errunion_fwd_decl: { + var aw: std.Io.Writer.Allocating = .fromArrayList(gpa, &c.string_bytes); + defer c.string_bytes = aw.toArrayList(); + const start = aw.written().len; + codegen.CType.render_defs.errunionFwdDecl(ty, &aw.writer, zcu) catch |err| switch (err) { + error.WriteFailed => return error.OutOfMemory, + }; + break :errunion_fwd_decl .{ + .start = @intCast(start), + .len = @intCast(aw.written().len - start), + }; + }; + + try c.types.append(gpa, .{ + .fwd_decl = fwd_decl, + .errunion_fwd_decl = errunion_fwd_decl, + // This field will be populated just below. + .deps = undefined, + // The remaining fields will be populated later by either `updateConstIncomplete` or + // `updateConstComplete` (it is guaranteed that at least one will be called). + .definition = undefined, + .errunion_definition = undefined, + .definition_deps = undefined, + }); + + { + // Find the dependencies required to just render the type `ty`. 
+ var arena: std.heap.ArenaAllocator = .init(gpa); + defer arena.deinit(); + var deps: codegen.CType.Dependencies = .empty; + defer deps.deinit(gpa); + _ = try codegen.CType.lower(ty, &deps, arena.allocator(), zcu); + // This call may add more items to `c.types`. + const type_deps = try c.addCTypeDependencies(pt, &deps); + c.types.items[@intFromEnum(pool_index)].deps = type_deps; + } } -pub fn addString(this: *C, s: []const u8) Allocator.Error!String { - const comp = this.base.comp; - const gpa = comp.gpa; - try this.string_bytes.appendSlice(gpa, s); - return .{ - .start = @intCast(this.string_bytes.items.len - s.len), - .len = @intCast(s.len), +/// Only called by `link.ConstPool` due to `c.type_pool`, so `val` is always a type. +pub fn updateConstIncomplete( + c: *C, + pt: Zcu.PerThread, + index: link.ConstPool.Index, + val: InternPool.Index, +) Allocator.Error!void { + const zcu = pt.zcu; + const gpa = zcu.comp.gpa; + + assert(zcu.intern_pool.typeOf(val) == .type_type); + const ty: Type = .fromInterned(val); + + const rendered: *RenderedType = &c.types.items[@intFromEnum(index)]; + + rendered.errunion_definition = .empty; + rendered.definition_deps = .empty; + rendered.definition = definition: { + if (rendered.fwd_decl.len != 0) { + // This is a struct or union type. We will never complete it, but we must forward + // declare it to ensure that its first usage does not appear in a different scope. + break :definition rendered.fwd_decl; + } + // Otherwise, we might need to `typedef` to `void`. 
+ var aw: std.Io.Writer.Allocating = .fromArrayList(gpa, &c.string_bytes); + defer c.string_bytes = aw.toArrayList(); + const start = aw.written().len; + codegen.CType.render_defs.defineIncomplete(ty, &aw.writer, pt) catch |err| switch (err) { + error.WriteFailed => return error.OutOfMemory, + }; + break :definition .{ + .start = @intCast(start), + .len = @intCast(aw.written().len - start), + }; }; } +/// Only called by `link.ConstPool` due to `c.type_pool`, so `val` is always a type. +pub fn updateConst( + c: *C, + pt: Zcu.PerThread, + index: link.ConstPool.Index, + val: InternPool.Index, +) Allocator.Error!void { + const zcu = pt.zcu; + const gpa = zcu.comp.gpa; + + assert(zcu.intern_pool.typeOf(val) == .type_type); + const ty: Type = .fromInterned(val); + + const rendered: *RenderedType = &c.types.items[@intFromEnum(index)]; + + var arena: std.heap.ArenaAllocator = .init(gpa); + defer arena.deinit(); + + var deps: codegen.CType.Dependencies = .empty; + defer deps.deinit(gpa); + + { + var aw: std.Io.Writer.Allocating = .fromArrayList(gpa, &c.string_bytes); + defer c.string_bytes = aw.toArrayList(); + const start = aw.written().len; + codegen.CType.render_defs.errunionDefineComplete( + ty, + &deps, + arena.allocator(), + &aw.writer, + pt, + ) catch |err| switch (err) { + error.WriteFailed => return error.OutOfMemory, + error.OutOfMemory => |e| return e, + }; + rendered.errunion_definition = .{ + .start = @intCast(start), + .len = @intCast(aw.written().len - start), + }; + } + + deps.clearRetainingCapacity(); + + { + var aw: std.Io.Writer.Allocating = .fromArrayList(gpa, &c.string_bytes); + defer c.string_bytes = aw.toArrayList(); + const start = aw.written().len; + codegen.CType.render_defs.defineComplete( + ty, + &deps, + arena.allocator(), + &aw.writer, + pt, + ) catch |err| switch (err) { + error.WriteFailed => return error.OutOfMemory, + error.OutOfMemory => |e| return e, + }; + // Remove dependency on a forward declaration of ourselves; we're defining this 
type so that + // forward declaration obviously exists! + _ = deps.type_fwd.swapRemove(ty.toIntern()); + rendered.definition = .{ + .start = @intCast(start), + .len = @intCast(aw.written().len - start), + }; + } + + { + // This call invalidates `rendered`. + const definition_deps = try c.addCTypeDependencies(pt, &deps); + c.types.items[@intFromEnum(index)].definition_deps = definition_deps; + } +} + +fn addString(c: *C, vec: []const []const u8) Allocator.Error!String { + const gpa = c.base.comp.gpa; + + var len: u32 = 0; + for (vec) |s| len += @intCast(s.len); + try c.string_bytes.ensureUnusedCapacity(gpa, len); + + const start: u32 = @intCast(c.string_bytes.items.len); + for (vec) |s| c.string_bytes.appendSliceAssumeCapacity(s); + assert(c.string_bytes.items.len == start + len); + + return .{ .start = start, .len = len }; +} pub fn open( arena: Allocator, @@ -156,267 +436,622 @@ pub fn createEmpty( .file = file, .build_id = options.build_id, }, - .navs = .empty, .string_bytes = .empty, + .type_dependencies = .empty, + .align_dependency_masks = .empty, + .navs = .empty, .uavs = .empty, - .aligned_uavs = .empty, + .type_pool = .empty, + .types = .empty, + .bigint_types = .empty, .exported_navs = .empty, .exported_uavs = .empty, - .fwd_decl_buf = &.{}, - .code_header_buf = &.{}, - .code_buf = &.{}, - .scratch_buf = &.{}, }; return c_file; } -pub fn deinit(self: *C) void { - const gpa = self.base.comp.gpa; +pub fn deinit(c: *C) void { + const gpa = c.base.comp.gpa; - for (self.navs.values()) |*db| { - db.deinit(gpa); - } - self.navs.deinit(gpa); + for (c.navs.values()) |*r| r.deinit(gpa); + for (c.uavs.values()) |*r| r.deinit(gpa); - for (self.uavs.values()) |*db| { - db.deinit(gpa); - } - self.uavs.deinit(gpa); - self.aligned_uavs.deinit(gpa); + c.string_bytes.deinit(gpa); + c.type_dependencies.deinit(gpa); + c.align_dependency_masks.deinit(gpa); + c.navs.deinit(gpa); + c.uavs.deinit(gpa); + c.type_pool.deinit(gpa); + c.types.deinit(gpa); + 
c.bigint_types.deinit(gpa); + c.exported_navs.deinit(gpa); + c.exported_uavs.deinit(gpa); +} - self.exported_navs.deinit(gpa); - self.exported_uavs.deinit(gpa); - - self.string_bytes.deinit(gpa); - gpa.free(self.fwd_decl_buf); - gpa.free(self.code_header_buf); - gpa.free(self.code_buf); - gpa.free(self.scratch_buf); +pub fn updateContainerType( + c: *C, + pt: Zcu.PerThread, + ty: InternPool.Index, + success: bool, +) link.File.UpdateContainerTypeError!void { + try c.type_pool.updateContainerType(pt, .{ .c = c }, ty, success); } pub fn updateFunc( - self: *C, + c: *C, pt: Zcu.PerThread, func_index: InternPool.Index, mir: *AnyMir, -) link.File.UpdateNavError!void { +) Allocator.Error!void { const zcu = pt.zcu; const gpa = zcu.gpa; - const func = zcu.funcInfo(func_index); + const nav = zcu.funcInfo(func_index).owner_nav; - const gop = try self.navs.getOrPut(gpa, func.owner_nav); - if (gop.found_existing) gop.value_ptr.deinit(gpa); - gop.value_ptr.* = .{ - .code = .empty, - .fwd_decl = .empty, - .ctype_pool = mir.c.ctype_pool.move(), - .lazy_fns = mir.c.lazy_fns.move(), + const rendered_decl: *RenderedDecl = rd: { + const gop = try c.navs.getOrPut(gpa, nav); + if (gop.found_existing) gop.value_ptr.deinit(gpa); + break :rd gop.value_ptr; }; - gop.value_ptr.fwd_decl = try self.addString(mir.c.fwd_decl); - const code_header = try self.addString(mir.c.code_header); - const code = try self.addString(mir.c.code); - gop.value_ptr.code = code_header.concat(code); - try self.addUavsFromCodegen(&mir.c.uavs); -} + c.navs.lockPointers(); + defer c.navs.unlockPointers(); -fn updateUav(self: *C, pt: Zcu.PerThread, i: usize) link.File.FlushError!void { - const gpa = self.base.comp.gpa; - const uav = self.uavs.keys()[i]; - - var object: codegen.Object = .{ - .dg = .{ - .gpa = gpa, - .pt = pt, - .mod = pt.zcu.root_mod, - .error_msg = null, - .pass = .{ .uav = uav }, - .is_naked_fn = false, - .expected_block = null, - .fwd_decl = undefined, - .ctype_pool = .empty, - .scratch = 
.initBuffer(self.scratch_buf), - .uavs = .empty, - }, - .code_header = undefined, - .code = undefined, - .indent_counter = 0, + rendered_decl.* = .{ + .fwd_decl = try c.addString(&.{mir.c.fwd_decl}), + .code = try c.addString(&.{ mir.c.code_header, mir.c.code }), + .ctype_deps = try c.addCTypeDependencies(pt, &mir.c.ctype_deps), + .need_uavs = mir.c.need_uavs.move(), + .need_tag_name_funcs = mir.c.need_tag_name_funcs.move(), + .need_never_tail_funcs = mir.c.need_never_tail_funcs.move(), + .need_never_inline_funcs = mir.c.need_never_inline_funcs.move(), }; - object.dg.fwd_decl = .initOwnedSlice(gpa, self.fwd_decl_buf); - object.code = .initOwnedSlice(gpa, self.code_buf); - defer { - object.dg.uavs.deinit(gpa); - object.dg.ctype_pool.deinit(object.dg.gpa); - self.fwd_decl_buf = object.dg.fwd_decl.toArrayList().allocatedSlice(); - self.code_buf = object.code.toArrayList().allocatedSlice(); - self.scratch_buf = object.dg.scratch.allocatedSlice(); + const old_uavs_len = c.uavs.count(); + try c.uavs.ensureUnusedCapacity(gpa, rendered_decl.need_uavs.count()); + for (rendered_decl.need_uavs.keys()) |val| { + const gop = c.uavs.getOrPutAssumeCapacity(val); + if (gop.found_existing) { + assert(gop.index < old_uavs_len); + } else { + assert(gop.index >= old_uavs_len); + } } - try object.dg.ctype_pool.init(gpa); + try c.updateNewUavs(pt, old_uavs_len); - const c_value: codegen.CValue = .{ .constant = Value.fromInterned(uav) }; - const alignment: Alignment = self.aligned_uavs.get(uav) orelse .none; - codegen.genDeclValue(&object, c_value.constant, c_value, alignment, .none) catch |err| switch (err) { - error.AnalysisFail => { - @panic("TODO: C backend AnalysisFail on anonymous decl"); - //try zcu.failed_decls.put(gpa, decl_index, object.dg.error_msg.?); - //return; - }, - error.WriteFailed, error.OutOfMemory => return error.OutOfMemory, - }; - - try self.addUavsFromCodegen(&object.dg.uavs); - - object.dg.ctype_pool.freeUnusedCapacity(gpa); - self.uavs.values()[i] = .{ - 
.fwd_decl = try self.addString(object.dg.fwd_decl.written()), - .code = try self.addString(object.code.written()), - .ctype_pool = object.dg.ctype_pool.move(), - }; + try c.type_pool.flushPending(pt, .{ .c = c }); } -pub fn updateNav(self: *C, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) link.File.UpdateNavError!void { +pub fn updateNav( + c: *C, + pt: Zcu.PerThread, + nav_index: InternPool.Nav.Index, +) Allocator.Error!void { const tracy = trace(@src()); defer tracy.end(); - const gpa = self.base.comp.gpa; + const gpa = c.base.comp.gpa; const zcu = pt.zcu; const ip = &zcu.intern_pool; const nav = ip.getNav(nav_index); - const nav_init = switch (ip.indexToKey(nav.status.fully_resolved.val)) { + switch (ip.indexToKey(nav.status.fully_resolved.val)) { .func => return, - .@"extern" => .none, - .variable => |variable| variable.init, - else => nav.status.fully_resolved.val, + .@"extern" => {}, + else => { + const nav_ty: Type = .fromInterned(nav.typeOf(ip)); + if (!nav_ty.hasRuntimeBits(zcu)) { + if (c.navs.fetchSwapRemove(nav_index)) |kv| { + var old_rendered = kv.value; + old_rendered.deinit(gpa); + } + return; + } + }, + } + + const rendered_decl: *RenderedDecl = rd: { + const gop = try c.navs.getOrPut(gpa, nav_index); + if (gop.found_existing) { + gop.value_ptr.clearRetainingCapacity(); + } else { + gop.value_ptr.* = .init; + } + break :rd gop.value_ptr; }; - if (nav_init != .none and !Value.fromInterned(nav_init).typeOf(zcu).hasRuntimeBits(zcu)) return; + c.navs.lockPointers(); + defer c.navs.unlockPointers(); - const gop = try self.navs.getOrPut(gpa, nav_index); - errdefer _ = self.navs.pop(); - if (!gop.found_existing) gop.value_ptr.* = .{}; - const ctype_pool = &gop.value_ptr.ctype_pool; - try ctype_pool.init(gpa); - ctype_pool.clearRetainingCapacity(); + { + var arena: std.heap.ArenaAllocator = .init(gpa); + defer arena.deinit(); - var object: codegen.Object = .{ - .dg = .{ + var dg: codegen.DeclGen = .{ .gpa = gpa, + .arena = arena.allocator(), .pt = 
pt, .mod = zcu.navFileScope(nav_index).mod.?, .error_msg = null, - .pass = .{ .nav = nav_index }, + .owner_nav = nav_index.toOptional(), .is_naked_fn = false, .expected_block = null, - .fwd_decl = undefined, - .ctype_pool = ctype_pool.*, - .scratch = .initBuffer(self.scratch_buf), - .uavs = .empty, - }, - .code_header = undefined, - .code = undefined, - .indent_counter = 0, - }; - object.dg.fwd_decl = .initOwnedSlice(gpa, self.fwd_decl_buf); - object.code = .initOwnedSlice(gpa, self.code_buf); - defer { - object.dg.uavs.deinit(gpa); - ctype_pool.* = object.dg.ctype_pool.move(); - ctype_pool.freeUnusedCapacity(gpa); + .ctype_deps = .empty, + .uavs = rendered_decl.need_uavs.move(), + }; - self.fwd_decl_buf = object.dg.fwd_decl.toArrayList().allocatedSlice(); - self.code_buf = object.code.toArrayList().allocatedSlice(); - self.scratch_buf = object.dg.scratch.allocatedSlice(); + defer { + rendered_decl.need_uavs = dg.uavs.move(); + dg.ctype_deps.deinit(gpa); + } + + rendered_decl.fwd_decl = fwd_decl: { + var aw: std.Io.Writer.Allocating = .fromArrayList(gpa, &c.string_bytes); + defer c.string_bytes = aw.toArrayList(); + const start = aw.written().len; + codegen.genDeclFwd(&dg, &aw.writer) catch |err| switch (err) { + error.AnalysisFail => switch (zcu.codegenFailMsg(nav_index, dg.error_msg.?)) { + error.CodegenFail => return, + error.OutOfMemory => |e| return e, + }, + error.WriteFailed, error.OutOfMemory => return error.OutOfMemory, + }; + break :fwd_decl .{ + .start = @intCast(start), + .len = @intCast(aw.written().len - start), + }; + }; + + rendered_decl.code = code: { + var aw: std.Io.Writer.Allocating = .fromArrayList(gpa, &c.string_bytes); + defer c.string_bytes = aw.toArrayList(); + const start = aw.written().len; + codegen.genDecl(&dg, &aw.writer) catch |err| switch (err) { + error.AnalysisFail => switch (zcu.codegenFailMsg(nav_index, dg.error_msg.?)) { + error.CodegenFail => return, + error.OutOfMemory => |e| return e, + }, + error.WriteFailed, 
error.OutOfMemory => return error.OutOfMemory, + }; + break :code .{ + .start = @intCast(start), + .len = @intCast(aw.written().len - start), + }; + }; + + rendered_decl.ctype_deps = try c.addCTypeDependencies(pt, &dg.ctype_deps); } - codegen.genDecl(&object) catch |err| switch (err) { - error.AnalysisFail => switch (zcu.codegenFailMsg(nav_index, object.dg.error_msg.?)) { - error.CodegenFail => return, - error.OutOfMemory => |e| return e, - }, - error.WriteFailed, error.OutOfMemory => return error.OutOfMemory, - }; - gop.value_ptr.fwd_decl = try self.addString(object.dg.fwd_decl.written()); - gop.value_ptr.code = try self.addString(object.code.written()); - try self.addUavsFromCodegen(&object.dg.uavs); + const old_uavs_len = c.uavs.count(); + try c.uavs.ensureUnusedCapacity(gpa, rendered_decl.need_uavs.count()); + for (rendered_decl.need_uavs.keys()) |val| { + const gop = c.uavs.getOrPutAssumeCapacity(val); + if (gop.found_existing) { + assert(gop.index < old_uavs_len); + } else { + assert(gop.index >= old_uavs_len); + } + } + try c.updateNewUavs(pt, old_uavs_len); + + try c.type_pool.flushPending(pt, .{ .c = c }); } -pub fn updateLineNumber(self: *C, pt: Zcu.PerThread, ti_id: InternPool.TrackedInst.Index) !void { - // The C backend does not have the ability to fix line numbers without re-generating - // the entire Decl. - _ = self; +/// Unlike `updateNav` and `updateFunc`, this does *not* add newly-discovered UAVs to `c.uavs`. The +/// caller is instead responsible for doing that (by iterating `rendered_decl.need_uavs`). However, +/// this function *does* still add newly-discovered *types* to `c.type_pool`. +/// +/// This function does not accept an alignment for the UAV, because the alignment needed on a UAV is +/// not known until `flush` (since we need to have seen all uses of the UAV first). Instead, `flush` +/// will prefix the UAV definition with an appropriate alignment annotation if necessary. 
+fn updateUav( + c: *C, + pt: Zcu.PerThread, + val: Value, + rendered_decl: *RenderedDecl, +) Allocator.Error!void { + const tracy = trace(@src()); + defer tracy.end(); + + const gpa = c.base.comp.gpa; + + var arena: std.heap.ArenaAllocator = .init(gpa); + defer arena.deinit(); + + var dg: codegen.DeclGen = .{ + .gpa = gpa, + .arena = arena.allocator(), + .pt = pt, + .mod = pt.zcu.root_mod, + .error_msg = null, + .owner_nav = .none, + .is_naked_fn = false, + .expected_block = null, + .ctype_deps = .empty, + .uavs = .empty, + }; + defer { + rendered_decl.need_uavs = dg.uavs.move(); + dg.ctype_deps.deinit(gpa); + } + + rendered_decl.fwd_decl = fwd_decl: { + var aw: std.Io.Writer.Allocating = .fromArrayList(gpa, &c.string_bytes); + defer c.string_bytes = aw.toArrayList(); + const start = aw.written().len; + codegen.genDeclValueFwd(&dg, &aw.writer, .{ + .name = .{ .constant = val }, + .@"const" = true, + .@"threadlocal" = false, + .init_val = val, + }) catch |err| switch (err) { + error.AnalysisFail => { + @panic("TODO: CBE error.AnalysisFail on uav"); + }, + error.WriteFailed, error.OutOfMemory => return error.OutOfMemory, + }; + break :fwd_decl .{ + .start = @intCast(start), + .len = @intCast(aw.written().len - start), + }; + }; + + rendered_decl.code = code: { + var aw: std.Io.Writer.Allocating = .fromArrayList(gpa, &c.string_bytes); + defer c.string_bytes = aw.toArrayList(); + const start = aw.written().len; + codegen.genDeclValue(&dg, &aw.writer, .{ + .name = .{ .constant = val }, + .@"const" = true, + .@"threadlocal" = false, + .init_val = val, + }) catch |err| switch (err) { + error.AnalysisFail => { + @panic("TODO: CBE error.AnalysisFail on uav"); + }, + error.WriteFailed, error.OutOfMemory => return error.OutOfMemory, + }; + break :code .{ + .start = @intCast(start), + .len = @intCast(aw.written().len - start), + }; + }; + + rendered_decl.ctype_deps = try c.addCTypeDependencies(pt, &dg.ctype_deps); +} + +pub fn updateLineNumber(c: *C, pt: Zcu.PerThread, ti_id: 
InternPool.TrackedInst.Index) error{}!void { + // The C backend does not currently emit "#line" directives. Even if it did, it would not be + // capable of updating those line numbers without re-generating the entire declaration. + _ = c; _ = pt; _ = ti_id; } -fn abiDefines(w: *std.Io.Writer, target: *const std.Target) !void { - switch (target.abi) { - .msvc, .itanium => try w.writeAll("#define ZIG_TARGET_ABI_MSVC\n"), - else => {}, - } - try w.print("#define ZIG_TARGET_MAX_INT_ALIGNMENT {d}\n", .{ - target.cMaxIntAlignment(), - }); -} - -pub fn flush(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void { - _ = arena; // Has the same lifetime as the call to Compilation.update. - +pub fn flush(c: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void { const tracy = trace(@src()); defer tracy.end(); const sub_prog_node = prog_node.start("Flush Module", 0); defer sub_prog_node.end(); - const comp = self.base.comp; + const comp = c.base.comp; const diags = &comp.link_diags; const gpa = comp.gpa; const io = comp.io; - const zcu = self.base.comp.zcu.?; + const zcu = c.base.comp.zcu.?; const ip = &zcu.intern_pool; + const target = zcu.getTarget(); const pt: Zcu.PerThread = .activate(zcu, tid); defer pt.deactivate(); + // If it's somehow not made it into the pool, we need to generate the type `[:0]const u8` for + // error names. + const slice_const_u8_sentinel_0_pool_index = try c.type_pool.get( + pt, + .{ .c = c }, + .slice_const_u8_sentinel_0_type, + ); + try c.type_pool.flushPending(pt, .{ .c = c }); + + // Find the set of referenced NAVs; these are the ones we'll emit. It is important in this + // backend that we only emit referenced NAVs, because other ones may contain code from past + // incremental updates which is invalid C (due to e.g. types changing). 
Machine code backends
+    // don't have this problem because there is, of course, no type checking performed when you
+    // *execute* a binary!
+    var need_navs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, void) = .empty;
+    defer need_navs.deinit(gpa);
     {
-        var i: usize = 0;
-        while (i < self.uavs.count()) : (i += 1) {
-            try self.updateUav(pt, i);
+        const unit_references = try zcu.resolveReferences();
+        for (c.navs.keys()) |nav| {
+            const nav_val = ip.getNav(nav).status.fully_resolved.val;
+            const check_unit: ?InternPool.AnalUnit = switch (ip.indexToKey(nav_val)) {
+                else => .wrap(.{ .nav_val = nav }),
+                .func => .wrap(.{ .func = nav_val }),
+                // TODO: this is a hack to deal with the fact that there's currently no good way to
+                // know which `extern`s are alive. This can and will break in certain patterns of
+                // incremental update. We kind of need to think a bit more about how the frontend
+                // actually represents `extern`, it's a bit awkward right now.
+                .@"extern" => null,
+            };
+            if (check_unit) |u| {
+                if (!unit_references.contains(u)) continue;
+            }
+            try need_navs.putNoClobber(gpa, nav, {});
+        }
     }
 
-    // This code path happens exclusively with -ofmt=c. The flush logic for
-    // emit-h is in `flushEmitH` below.
+    // Using our knowledge of which NAVs are referenced, we now need to discover the set of UAVs and
+    // C types which are referenced (and hence must be emitted). As above, this is necessary to make
+    // sure we only emit valid C code.
+    //
+    // At the same time, we will discover the set of lazy functions which are referenced.
 
- var f: Flush = .{ - .ctype_pool = .empty, - .ctype_global_from_decl_map = .empty, - .ctypes = .empty, + var need_uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, Alignment) = .empty; + defer need_uavs.deinit(gpa); - .lazy_ctype_pool = .empty, - .lazy_fns = .empty, - .lazy_fwd_decl = .empty, - .lazy_code = .empty, + var need_types: std.AutoArrayHashMapUnmanaged(link.ConstPool.Index, void) = .empty; + defer need_types.deinit(gpa); + var need_errunion_types: std.AutoArrayHashMapUnmanaged(link.ConstPool.Index, void) = .empty; + defer need_errunion_types.deinit(gpa); + var need_aligned_types: std.AutoArrayHashMapUnmanaged(link.ConstPool.Index, u64) = .empty; + defer need_aligned_types.deinit(gpa); - .all_buffers = .empty, - .file_size = 0, - }; + var need_tag_name_funcs: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .empty; + defer need_tag_name_funcs.deinit(gpa); + + var need_never_tail_funcs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, void) = .empty; + defer need_never_tail_funcs.deinit(gpa); + + var need_never_inline_funcs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, void) = .empty; + defer need_never_inline_funcs.deinit(gpa); + + // As mentioned above, we need this type for error names. + try need_types.put(gpa, slice_const_u8_sentinel_0_pool_index, {}); + + // Every exported NAV should have been discovered via `zcu.resolveReferences`... + for (c.exported_navs.keys()) |nav| assert(need_navs.contains(nav)); + // ...but we *do* need to add exported UAVs to the set. + try need_uavs.ensureUnusedCapacity(gpa, c.exported_uavs.count()); + for (c.exported_uavs.keys()) |uav| { + const gop = need_uavs.getOrPutAssumeCapacity(uav); + if (!gop.found_existing) gop.value_ptr.* = .none; + } + + // For every referenced NAV, some UAVs, C types, and lazy functions may be referenced. 
+ for (need_navs.keys()) |nav| { + const rendered = c.navs.getPtr(nav).?; + try mergeNeededCTypes( + c, + &need_types, + &need_errunion_types, + &need_aligned_types, + &rendered.ctype_deps, + ); + try mergeNeededUavs(zcu, &need_uavs, &rendered.need_uavs); + + try need_tag_name_funcs.ensureUnusedCapacity(gpa, rendered.need_tag_name_funcs.count()); + for (rendered.need_tag_name_funcs.keys()) |enum_type| { + need_tag_name_funcs.putAssumeCapacity(enum_type, {}); + } + + try need_never_tail_funcs.ensureUnusedCapacity(gpa, rendered.need_never_tail_funcs.count()); + for (rendered.need_never_tail_funcs.keys()) |fn_nav| { + need_never_tail_funcs.putAssumeCapacity(fn_nav, {}); + } + + try need_never_inline_funcs.ensureUnusedCapacity(gpa, rendered.need_never_inline_funcs.count()); + for (rendered.need_never_inline_funcs.keys()) |fn_nav| { + need_never_inline_funcs.putAssumeCapacity(fn_nav, {}); + } + } + + // UAVs may reference other UAVs or C types. + { + var index: usize = 0; + while (need_uavs.count() > index) : (index += 1) { + const val = need_uavs.keys()[index]; + const rendered = c.uavs.getPtr(val).?; + try mergeNeededCTypes( + c, + &need_types, + &need_errunion_types, + &need_aligned_types, + &rendered.ctype_deps, + ); + try mergeNeededUavs(zcu, &need_uavs, &rendered.need_uavs); + } + } + + // Finally, C types may reference other C types. 
+ { + var index: usize = 0; + var errunion_index: usize = 0; + var aligned_index: usize = 0; + while (true) { + if (index < need_types.count()) { + const pool_index = need_types.keys()[index]; + const rendered = &c.types.items[@intFromEnum(pool_index)]; + try mergeNeededCTypes( + c, + &need_types, + &need_errunion_types, + &need_aligned_types, + &rendered.definition_deps, // we're tasked with emitting the *definition* of this type + ); + index += 1; + continue; + } + + if (errunion_index < need_errunion_types.count()) { + const payload_pool_index = need_errunion_types.keys()[errunion_index]; + const rendered = &c.types.items[@intFromEnum(payload_pool_index)]; + try mergeNeededCTypes( + c, + &need_types, + &need_errunion_types, + &need_aligned_types, + &rendered.deps, // the error union type requires emitting this type's *name* + ); + errunion_index += 1; + continue; + } + + if (aligned_index < need_aligned_types.count()) { + const pool_index = need_aligned_types.keys()[aligned_index]; + const rendered = &c.types.items[@intFromEnum(pool_index)]; + try mergeNeededCTypes( + c, + &need_types, + &need_errunion_types, + &need_aligned_types, + &rendered.deps, // an aligned typedef requires emitting this type's *name* + ); + aligned_index += 1; + continue; + } + + break; + } + } + + // Now that we know which types are required, generate aligned typedefs. One buffer per aligned + // type, with *all* aligned typedefs for that type. 
+    const aligned_type_strings = try arena.alloc([]const u8, need_aligned_types.count());
+    {
+        var aw: std.Io.Writer.Allocating = .init(gpa);
+        defer aw.deinit();
+        var unused_deps: codegen.CType.Dependencies = .empty;
+        defer unused_deps.deinit(gpa);
+        for (
+            need_aligned_types.keys(),
+            need_aligned_types.values(),
+            aligned_type_strings,
+        ) |pool_index, align_mask, *str_out| {
+            const ty: Type = .fromInterned(pool_index.val(&c.type_pool));
+            const has_layout = c.types.items[@intFromEnum(pool_index)].errunion_definition.len > 0;
+            for (0..@bitSizeOf(@TypeOf(align_mask))) |bit_index| {
+                switch (@as(u1, @truncate(align_mask >> @intCast(bit_index)))) {
+                    0 => continue,
+                    1 => {},
+                }
+                codegen.CType.render_defs.defineAligned(
+                    ty,
+                    .fromLog2Units(@intCast(bit_index)),
+                    has_layout,
+                    &unused_deps,
+                    arena,
+                    &aw.writer,
+                    pt,
+                ) catch |err| switch (err) {
+                    error.WriteFailed => return error.OutOfMemory,
+                    error.OutOfMemory => |e| return e,
+                };
+            }
+            str_out.* = try arena.dupe(u8, aw.written());
+            aw.clearRetainingCapacity();
+        }
+    }
+
+    // We have discovered the full set of NAVs, UAVs, and types we need to emit, and will now begin
+    // to build the output buffer. Our strategy is to emit the C source in this order:
+    //
+    // * ABI defines and `#include "zig.h"`
+    // * Big-int type definitions
+    // * Other CType definitions (traversing the dependency graph to sort topologically)
+    // * Global assembly
+    // * UAV exports
+    // * NAV exports
+    // * UAV forward declarations
+    // * NAV forward declarations
+    // * Lazy declarations (error names; @tagName functions; never_tail/never_inline wrappers)
+    // * UAV definitions
+    // * NAV definitions
+    //
+    // Most of these sections are order-independent within themselves, with the exception of the
+    // type definitions, which must be ordered to prevent a struct/union from embedding a type which
+    // is currently incomplete.
+ // + // When emitting UAV forward declarations, if the UAV requires alignment, we must prefix it with + // an alignment annotation. We couldn't emit the alignment into the UAV's `RenderedDecl` because + // we couldn't have known the required alignment until now! + + var f: Flush = .{ .all_buffers = .empty, .file_size = 0 }; defer f.deinit(gpa); - var abi_defines_aw: std.Io.Writer.Allocating = .init(gpa); - defer abi_defines_aw.deinit(); - abiDefines(&abi_defines_aw.writer, zcu.getTarget()) catch |err| switch (err) { - error.WriteFailed => return error.OutOfMemory, - }; + // We know exactly what we'll be emitting, so can reserve capacity for all of our buffers! - // Covers defines, zig.h, ctypes, asm, lazy fwd. - try f.all_buffers.ensureUnusedCapacity(gpa, 5); + try f.all_buffers.ensureUnusedCapacity(gpa, 3 + // ABI defines and `#include "zig.h"` + 1 + // Big-int type definitions + need_types.count() + // `RenderedType.fwd_decl` (worst-case) + need_types.count() + // `RenderedType.definition` + need_errunion_types.count() + // `RenderedType.errunion_fwd_decl` (worst-case) + need_errunion_types.count() + // `RenderedType.errunion_definition` + need_aligned_types.count() + // `aligned_type_strings` + 1 + // Global assembly + c.exported_uavs.count() + // UAV export block + c.exported_navs.count() + // NAV export block + need_uavs.count() + // UAV forward declarations + need_navs.count() + // NAV forward declarations + 1 + // Lazy declarations + need_uavs.count() * 3 + // UAV definitions ("static ", "zig_align(4)", "") + need_navs.count() * 2); // NAV definitions ("static ", "") - f.appendBufAssumeCapacity(abi_defines_aw.written()); - f.appendBufAssumeCapacity(zig_h); + // ABI defines and `#include "zig.h"` + switch (target.abi) { + .msvc, .itanium => f.appendBufAssumeCapacity("#define ZIG_TARGET_ABI_MSVC\n"), + else => {}, + } + f.appendBufAssumeCapacity(try std.fmt.allocPrint( + arena, + "#define ZIG_TARGET_MAX_INT_ALIGNMENT {d}\n", + .{target.cMaxIntAlignment()}, + 
)); + f.appendBufAssumeCapacity( + \\#include "zig.h" + \\ + ); - const ctypes_index = f.all_buffers.items.len; - f.all_buffers.items.len += 1; + // Big-int type definitions + var bigint_aw: std.Io.Writer.Allocating = .init(gpa); + defer bigint_aw.deinit(); + for (c.bigint_types.keys()) |bigint| { + codegen.CType.render_defs.defineBigInt(bigint, &bigint_aw.writer, zcu) catch |err| switch (err) { + error.WriteFailed => return error.OutOfMemory, + }; + } + f.appendBufAssumeCapacity(bigint_aw.written()); + // CType definitions + { + var ft: FlushTypes = .{ + .c = c, + .f = &f, + .aligned_types = &need_aligned_types, + .aligned_type_strings = aligned_type_strings, + .status = .empty, + .errunion_status = .empty, + .aligned_status = .empty, + }; + defer { + ft.status.deinit(gpa); + ft.errunion_status.deinit(gpa); + ft.aligned_status.deinit(gpa); + } + try ft.status.ensureUnusedCapacity(gpa, need_types.count()); + try ft.errunion_status.ensureUnusedCapacity(gpa, need_errunion_types.count()); + try ft.aligned_status.ensureUnusedCapacity(gpa, need_aligned_types.count()); + + for (need_types.keys()) |pool_index| { + ft.doType(pool_index); + } + for (need_errunion_types.keys()) |pool_index| { + ft.doErrunionType(pool_index); + } + for (need_aligned_types.keys()) |pool_index| { + ft.doAlignedTypeFwd(pool_index); + } + } + + // Global assembly var asm_aw: std.Io.Writer.Allocating = .init(gpa); defer asm_aw.deinit(); codegen.genGlobalAsm(zcu, &asm_aw.writer) catch |err| switch (err) { @@ -424,435 +1059,228 @@ pub fn flush(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.P }; f.appendBufAssumeCapacity(asm_aw.written()); - const lazy_index = f.all_buffers.items.len; - f.all_buffers.items.len += 1; - - try f.lazy_ctype_pool.init(gpa); - try self.flushErrDecls(pt, &f); - - // Unlike other backends, the .c code we are emitting has order-dependent decls. - // `CType`s, forward decls, and non-functions first. 
- - { - var export_names: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .empty; - defer export_names.deinit(gpa); - try export_names.ensureTotalCapacity(gpa, @intCast(zcu.single_exports.count())); - for (zcu.single_exports.values()) |export_index| { - export_names.putAssumeCapacity(export_index.ptr(zcu).opts.name, {}); - } - for (zcu.multi_exports.values()) |info| { - try export_names.ensureUnusedCapacity(gpa, info.len); - for (zcu.all_exports.items[info.index..][0..info.len]) |@"export"| { - export_names.putAssumeCapacity(@"export".opts.name, {}); - } - } - - for (self.uavs.keys(), self.uavs.values()) |uav, *av_block| try self.flushAvBlock( - pt, - zcu.root_mod, - &f, - av_block, - self.exported_uavs.getPtr(uav), - export_names, - .none, - ); - - for (self.navs.keys(), self.navs.values()) |nav, *av_block| try self.flushAvBlock( - pt, - zcu.navFileScope(nav).mod.?, - &f, - av_block, - self.exported_navs.getPtr(nav), - export_names, - if (ip.getNav(nav).getExtern(ip) != null) - ip.getNav(nav).name.toOptional() - else - .none, - ); + var export_names: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .empty; + defer export_names.deinit(gpa); + try export_names.ensureTotalCapacity(gpa, @intCast(zcu.single_exports.count())); + for (zcu.single_exports.values()) |export_index| { + export_names.putAssumeCapacity(export_index.ptr(zcu).opts.name, {}); } - - { - // We need to flush lazy ctypes after flushing all decls but before flushing any decl ctypes. - // This ensures that every lazy CType.Index exactly matches the global CType.Index. 
- try f.ctype_pool.init(gpa); - try self.flushCTypes(zcu, &f, .flush, &f.lazy_ctype_pool); - - for (self.uavs.keys(), self.uavs.values()) |uav, av_block| { - try self.flushCTypes(zcu, &f, .{ .uav = uav }, &av_block.ctype_pool); - } - - for (self.navs.keys(), self.navs.values()) |nav, av_block| { - try self.flushCTypes(zcu, &f, .{ .nav = nav }, &av_block.ctype_pool); + for (zcu.multi_exports.values()) |info| { + try export_names.ensureUnusedCapacity(gpa, info.len); + for (zcu.all_exports.items[info.index..][0..info.len]) |@"export"| { + export_names.putAssumeCapacity(@"export".opts.name, {}); } } - f.all_buffers.items[ctypes_index] = f.ctypes.items; - f.file_size += f.ctypes.items.len; + // UAV export block + for (c.exported_uavs.values()) |code| { + f.appendBufAssumeCapacity(code.get(c)); + } - f.all_buffers.items[lazy_index] = f.lazy_fwd_decl.items; - f.file_size += f.lazy_fwd_decl.items.len; + // NAV export block + for (c.exported_navs.values()) |code| { + f.appendBufAssumeCapacity(code.get(c)); + } - // Now the code. 
- try f.all_buffers.ensureUnusedCapacity(gpa, 1 + (self.uavs.count() + self.navs.count()) * 2); - f.appendBufAssumeCapacity(f.lazy_code.items); - for (self.uavs.keys(), self.uavs.values()) |uav, av_block| f.appendCodeAssumeCapacity( - if (self.exported_uavs.contains(uav)) .default else switch (ip.indexToKey(uav)) { - .@"extern" => .zig_extern, - else => .static, - }, - self.getString(av_block.code), - ); - for (self.navs.keys(), self.navs.values()) |nav, av_block| f.appendCodeAssumeCapacity(storage: { - if (self.exported_navs.contains(nav)) break :storage .default; - if (ip.getNav(nav).getExtern(ip) != null) break :storage .zig_extern; - break :storage .static; - }, self.getString(av_block.code)); + // UAV forward declarations + for (need_uavs.keys()) |val| { + if (c.exported_uavs.contains(val)) continue; // the export was the declaration + const fwd_decl = c.uavs.getPtr(val).?.fwd_decl; + f.appendBufAssumeCapacity(fwd_decl.get(c)); + } - const file = self.base.file.?; + // NAV forward declarations + for (need_navs.keys()) |nav| { + if (c.exported_navs.contains(nav)) continue; // the export was the declaration + if (ip.getNav(nav).getExtern(ip)) |e| { + if (export_names.contains(e.name)) continue; + } + const fwd_decl = c.navs.getPtr(nav).?.fwd_decl; + f.appendBufAssumeCapacity(fwd_decl.get(c)); + } + + // Lazy declarations + var lazy_decls_aw: std.Io.Writer.Allocating = .init(gpa); + defer lazy_decls_aw.deinit(); + { + var lazy_dg: codegen.DeclGen = .{ + .gpa = gpa, + .arena = arena, + .pt = pt, + .mod = pt.zcu.root_mod, + .owner_nav = .none, + .is_naked_fn = false, + .expected_block = null, + .error_msg = null, + .ctype_deps = .empty, + .uavs = .empty, + }; + defer { + assert(lazy_dg.uavs.count() == 0); + lazy_dg.ctype_deps.deinit(gpa); + } + const slice_const_u8_sentinel_0_cty: codegen.CType = try .lower( + .slice_const_u8_sentinel_0, + &lazy_dg.ctype_deps, + arena, + zcu, + ); + const slice_const_u8_sentinel_0_name = try std.fmt.allocPrint( + arena, + "{f}", + 
.{slice_const_u8_sentinel_0_cty.fmtTypeName(zcu)}, + ); + codegen.genErrDecls(zcu, &lazy_decls_aw.writer, slice_const_u8_sentinel_0_name) catch |err| switch (err) { + error.WriteFailed => return error.OutOfMemory, + }; + for (need_tag_name_funcs.keys()) |enum_ty_ip| { + const enum_ty: Type = .fromInterned(enum_ty_ip); + const enum_cty: codegen.CType = try .lower( + enum_ty, + &lazy_dg.ctype_deps, + arena, + zcu, + ); + codegen.genTagNameFn( + zcu, + &lazy_decls_aw.writer, + slice_const_u8_sentinel_0_name, + enum_ty, + try std.fmt.allocPrint(arena, "{f}", .{enum_cty.fmtTypeName(zcu)}), + ) catch |err| switch (err) { + error.WriteFailed => return error.OutOfMemory, + }; + } + for (need_never_tail_funcs.keys()) |fn_nav| { + codegen.genLazyCallModifierFn(&lazy_dg, fn_nav, .never_tail, &lazy_decls_aw.writer) catch |err| switch (err) { + error.WriteFailed => return error.OutOfMemory, + error.OutOfMemory => |e| return e, + error.AnalysisFail => unreachable, + }; + } + for (need_never_inline_funcs.keys()) |fn_nav| { + codegen.genLazyCallModifierFn(&lazy_dg, fn_nav, .never_inline, &lazy_decls_aw.writer) catch |err| switch (err) { + error.WriteFailed => return error.OutOfMemory, + error.OutOfMemory => |e| return e, + error.AnalysisFail => unreachable, + }; + } + } + f.appendBufAssumeCapacity(lazy_decls_aw.written()); + + // UAV definitions + for (need_uavs.keys(), need_uavs.values()) |val, overalign| { + const code = c.uavs.getPtr(val).?.code; + if (code.len == 0) continue; + if (!c.exported_uavs.contains(val)) { + f.appendBufAssumeCapacity("static "); + } + if (overalign != .none) { + // As long as `Alignment` isn't too big, it's reasonable to just generate all possible + // alignment annotations statically into a LUT, which avoids allocating strings on this + // path. 
+ comptime assert(@bitSizeOf(Alignment) < 8); + const table_len = (1 << @bitSizeOf(Alignment)) - 1; + const table: [table_len][]const u8 = comptime table: { + @setEvalBranchQuota(16_000); + var table: [table_len][]const u8 = undefined; + for (&table, 0..) |*str, log2_align| { + const byte_align = Alignment.fromLog2Units(log2_align).toByteUnits().?; + str.* = std.fmt.comptimePrint("zig_align({d}) ", .{byte_align}); + } + break :table table; + }; + f.appendBufAssumeCapacity(table[overalign.toLog2Units()]); + } + f.appendBufAssumeCapacity(code.get(c)); + } + + // NAV definitions + for (need_navs.keys()) |nav| { + const code = c.navs.getPtr(nav).?.code; + if (code.len == 0) continue; + if (!c.exported_navs.contains(nav)) { + const is_extern = ip.getNav(nav).getExtern(ip) != null; + f.appendBufAssumeCapacity(if (is_extern) "zig_extern " else "static "); + } + f.appendBufAssumeCapacity(code.get(c)); + } + + // We've collected all of our buffers; it's now time to actually write the file! + const file = c.base.file.?; file.setLength(io, f.file_size) catch |err| return diags.fail("failed to allocate file: {t}", .{err}); var fw = file.writer(io, &.{}); var w = &fw.interface; w.writeVecAll(f.all_buffers.items) catch |err| switch (err) { error.WriteFailed => return diags.fail("failed to write to '{f}': {s}", .{ - std.fmt.alt(self.base.emit, .formatEscapeChar), @errorName(fw.err.?), + std.fmt.alt(c.base.emit, .formatEscapeChar), @errorName(fw.err.?), }), }; } const Flush = struct { - ctype_pool: codegen.CType.Pool, - ctype_global_from_decl_map: std.ArrayList(codegen.CType), - ctypes: std.ArrayList(u8), - - lazy_ctype_pool: codegen.CType.Pool, - lazy_fns: LazyFns, - lazy_fwd_decl: std.ArrayList(u8), - lazy_code: std.ArrayList(u8), - /// We collect a list of buffers to write, and write them all at once with pwritev 😎 all_buffers: std.ArrayList([]const u8), /// Keeps track of the total bytes of `all_buffers`. 
file_size: u64, - const LazyFns = std.AutoHashMapUnmanaged(codegen.LazyFnKey, void); - fn appendBufAssumeCapacity(f: *Flush, buf: []const u8) void { if (buf.len == 0) return; f.all_buffers.appendAssumeCapacity(buf); f.file_size += buf.len; } - fn appendCodeAssumeCapacity(f: *Flush, storage: enum { default, zig_extern, static }, code: []const u8) void { - if (code.len == 0) return; - f.appendBufAssumeCapacity(switch (storage) { - .default => "\n", - .zig_extern => "\nzig_extern ", - .static => "\nstatic ", - }); - f.appendBufAssumeCapacity(code); - } - fn deinit(f: *Flush, gpa: Allocator) void { - f.ctype_pool.deinit(gpa); - assert(f.ctype_global_from_decl_map.items.len == 0); - f.ctype_global_from_decl_map.deinit(gpa); - f.ctypes.deinit(gpa); - f.lazy_ctype_pool.deinit(gpa); - f.lazy_fns.deinit(gpa); - f.lazy_fwd_decl.deinit(gpa); - f.lazy_code.deinit(gpa); f.all_buffers.deinit(gpa); } }; -const FlushDeclError = error{ - OutOfMemory, -}; - -fn flushCTypes( - self: *C, - zcu: *Zcu, - f: *Flush, - pass: codegen.DeclGen.Pass, - decl_ctype_pool: *const codegen.CType.Pool, -) FlushDeclError!void { - const gpa = self.base.comp.gpa; - const global_ctype_pool = &f.ctype_pool; - - const global_from_decl_map = &f.ctype_global_from_decl_map; - assert(global_from_decl_map.items.len == 0); - try global_from_decl_map.ensureTotalCapacity(gpa, decl_ctype_pool.items.len); - defer global_from_decl_map.clearRetainingCapacity(); - - var ctypes_aw: std.Io.Writer.Allocating = .fromArrayList(gpa, &f.ctypes); - const ctypes_bw = &ctypes_aw.writer; - defer f.ctypes = ctypes_aw.toArrayList(); - - for (0..decl_ctype_pool.items.len) |decl_ctype_pool_index| { - const PoolAdapter = struct { - global_from_decl_map: []const codegen.CType, - pub fn eql(pool_adapter: @This(), decl_ctype: codegen.CType, global_ctype: codegen.CType) bool { - return if (decl_ctype.toPoolIndex()) |decl_pool_index| - decl_pool_index < pool_adapter.global_from_decl_map.len and - 
pool_adapter.global_from_decl_map[decl_pool_index].eql(global_ctype) - else - decl_ctype.index == global_ctype.index; - } - pub fn copy(pool_adapter: @This(), decl_ctype: codegen.CType) codegen.CType { - return if (decl_ctype.toPoolIndex()) |decl_pool_index| - pool_adapter.global_from_decl_map[decl_pool_index] - else - decl_ctype; - } - }; - const decl_ctype = codegen.CType.fromPoolIndex(decl_ctype_pool_index); - const global_ctype, const found_existing = try global_ctype_pool.getOrPutAdapted( - gpa, - decl_ctype_pool, - decl_ctype, - PoolAdapter{ .global_from_decl_map = global_from_decl_map.items }, - ); - global_from_decl_map.appendAssumeCapacity(global_ctype); - codegen.genTypeDecl( - zcu, - ctypes_bw, - global_ctype_pool, - global_ctype, - pass, - decl_ctype_pool, - decl_ctype, - found_existing, - ) catch |err| switch (err) { - error.WriteFailed => return error.OutOfMemory, - }; - } -} - -fn flushErrDecls(self: *C, pt: Zcu.PerThread, f: *Flush) FlushDeclError!void { - const gpa = self.base.comp.gpa; - - var object: codegen.Object = .{ - .dg = .{ - .gpa = gpa, - .pt = pt, - .mod = pt.zcu.root_mod, - .error_msg = null, - .pass = .flush, - .is_naked_fn = false, - .expected_block = null, - .fwd_decl = undefined, - .ctype_pool = f.lazy_ctype_pool, - .scratch = .initBuffer(self.scratch_buf), - .uavs = .empty, - }, - .code_header = undefined, - .code = undefined, - .indent_counter = 0, - }; - object.dg.fwd_decl = .fromArrayList(gpa, &f.lazy_fwd_decl); - object.code = .fromArrayList(gpa, &f.lazy_code); - defer { - object.dg.uavs.deinit(gpa); - f.lazy_ctype_pool = object.dg.ctype_pool.move(); - f.lazy_ctype_pool.freeUnusedCapacity(gpa); - - f.lazy_fwd_decl = object.dg.fwd_decl.toArrayList(); - f.lazy_code = object.code.toArrayList(); - self.scratch_buf = object.dg.scratch.allocatedSlice(); - } - - codegen.genErrDecls(&object) catch |err| switch (err) { - error.AnalysisFail => unreachable, - error.WriteFailed, error.OutOfMemory => return error.OutOfMemory, - }; - - try 
self.addUavsFromCodegen(&object.dg.uavs); -} - -fn flushLazyFn( - self: *C, - pt: Zcu.PerThread, - mod: *Module, - f: *Flush, - lazy_ctype_pool: *const codegen.CType.Pool, - lazy_fn: codegen.LazyFnMap.Entry, -) FlushDeclError!void { - const gpa = self.base.comp.gpa; - - var object: codegen.Object = .{ - .dg = .{ - .gpa = gpa, - .pt = pt, - .mod = mod, - .error_msg = null, - .pass = .flush, - .is_naked_fn = false, - .expected_block = null, - .fwd_decl = undefined, - .ctype_pool = f.lazy_ctype_pool, - .scratch = .initBuffer(self.scratch_buf), - .uavs = .empty, - }, - .code_header = undefined, - .code = undefined, - .indent_counter = 0, - }; - object.dg.fwd_decl = .fromArrayList(gpa, &f.lazy_fwd_decl); - object.code = .fromArrayList(gpa, &f.lazy_code); - defer { - // If this assert trips just handle the anon_decl_deps the same as - // `updateFunc()` does. - assert(object.dg.uavs.count() == 0); - f.lazy_ctype_pool = object.dg.ctype_pool.move(); - f.lazy_ctype_pool.freeUnusedCapacity(gpa); - - f.lazy_fwd_decl = object.dg.fwd_decl.toArrayList(); - f.lazy_code = object.code.toArrayList(); - self.scratch_buf = object.dg.scratch.allocatedSlice(); - } - - codegen.genLazyFn(&object, lazy_ctype_pool, lazy_fn) catch |err| switch (err) { - error.AnalysisFail => unreachable, - error.WriteFailed, error.OutOfMemory => return error.OutOfMemory, - }; -} - -fn flushLazyFns( - self: *C, - pt: Zcu.PerThread, - mod: *Module, - f: *Flush, - lazy_ctype_pool: *const codegen.CType.Pool, - lazy_fns: codegen.LazyFnMap, -) FlushDeclError!void { - const gpa = self.base.comp.gpa; - try f.lazy_fns.ensureUnusedCapacity(gpa, @intCast(lazy_fns.count())); - - var it = lazy_fns.iterator(); - while (it.next()) |entry| { - const gop = f.lazy_fns.getOrPutAssumeCapacity(entry.key_ptr.*); - if (gop.found_existing) continue; - gop.value_ptr.* = {}; - try self.flushLazyFn(pt, mod, f, lazy_ctype_pool, entry); - } -} - -fn flushAvBlock( - self: *C, - pt: Zcu.PerThread, - mod: *Module, - f: *Flush, - av_block: 
*const AvBlock, - exported_block: ?*const ExportedBlock, - export_names: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void), - extern_name: InternPool.OptionalNullTerminatedString, -) FlushDeclError!void { - const gpa = self.base.comp.gpa; - try self.flushLazyFns(pt, mod, f, &av_block.ctype_pool, av_block.lazy_fns); - try f.all_buffers.ensureUnusedCapacity(gpa, 1); - // avoid emitting extern decls that are already exported - if (extern_name.unwrap()) |name| if (export_names.contains(name)) return; - f.appendBufAssumeCapacity(self.getString(if (exported_block) |exported| - exported.fwd_decl - else - av_block.fwd_decl)); -} - -pub fn flushEmitH(zcu: *Zcu) !void { - const tracy = trace(@src()); - defer tracy.end(); - - if (true) return; // emit-h is regressed - - const emit_h = zcu.emit_h orelse return; - const io = zcu.comp.io; - - // We collect a list of buffers to write, and write them all at once with pwritev 😎 - const num_buffers = emit_h.decl_table.count() + 1; - var all_buffers = try std.array_list.Managed(std.posix.iovec_const).initCapacity(zcu.gpa, num_buffers); - defer all_buffers.deinit(); - - var file_size: u64 = zig_h.len; - if (zig_h.len != 0) { - all_buffers.appendAssumeCapacity(.{ - .base = zig_h, - .len = zig_h.len, - }); - } - - for (emit_h.decl_table.keys()) |decl_index| { - const decl_emit_h = emit_h.declPtr(decl_index); - const buf = decl_emit_h.fwd_decl.items; - if (buf.len != 0) { - all_buffers.appendAssumeCapacity(.{ - .base = buf.ptr, - .len = buf.len, - }); - file_size += buf.len; - } - } - - const directory = emit_h.loc.directory orelse zcu.comp.local_cache_directory; - const file = try directory.handle.createFile(io, emit_h.loc.basename, .{ - // We set the end position explicitly below; by not truncating the file, we possibly - // make it easier on the file system by doing 1 reallocation instead of two. 
- .truncate = false, - }); - defer file.close(io); - - try file.setLength(io, file_size); - try file.pwritevAll(all_buffers.items, 0); -} - pub fn updateExports( - self: *C, + c: *C, pt: Zcu.PerThread, exported: Zcu.Exported, export_indices: []const Zcu.Export.Index, -) !void { +) Allocator.Error!void { const zcu = pt.zcu; const gpa = zcu.gpa; - const mod, const pass: codegen.DeclGen.Pass, const decl_block, const exported_block = switch (exported) { - .nav => |nav| .{ - zcu.navFileScope(nav).mod.?, - .{ .nav = nav }, - self.navs.getPtr(nav).?, - (try self.exported_navs.getOrPut(gpa, nav)).value_ptr, - }, - .uav => |uav| .{ - zcu.root_mod, - .{ .uav = uav }, - self.uavs.getPtr(uav).?, - (try self.exported_uavs.getOrPut(gpa, uav)).value_ptr, - }, - }; - const ctype_pool = &decl_block.ctype_pool; + + var arena: std.heap.ArenaAllocator = .init(gpa); + defer arena.deinit(); + var dg: codegen.DeclGen = .{ .gpa = gpa, + .arena = arena.allocator(), .pt = pt, - .mod = mod, - .error_msg = null, - .pass = pass, + .mod = zcu.root_mod, + .owner_nav = .none, .is_naked_fn = false, .expected_block = null, - .fwd_decl = undefined, - .ctype_pool = decl_block.ctype_pool, - .scratch = .initBuffer(self.scratch_buf), + .error_msg = null, + .ctype_deps = .empty, .uavs = .empty, }; - dg.fwd_decl = .initOwnedSlice(gpa, self.fwd_decl_buf); defer { assert(dg.uavs.count() == 0); - ctype_pool.* = dg.ctype_pool.move(); - ctype_pool.freeUnusedCapacity(gpa); - - self.fwd_decl_buf = dg.fwd_decl.toArrayList().allocatedSlice(); - self.scratch_buf = dg.scratch.allocatedSlice(); + dg.ctype_deps.deinit(gpa); } - codegen.genExports(&dg, exported, export_indices) catch |err| switch (err) { - error.WriteFailed, error.OutOfMemory => return error.OutOfMemory, + + const code: String = code: { + var aw: std.Io.Writer.Allocating = .fromArrayList(gpa, &c.string_bytes); + defer c.string_bytes = aw.toArrayList(); + const start = aw.written().len; + codegen.genExports(&dg, &aw.writer, exported, export_indices) 
catch |err| switch (err) { + error.WriteFailed => return error.OutOfMemory, + error.OutOfMemory => |e| return e, + }; + break :code .{ + .start = @intCast(start), + .len = @intCast(aw.written().len - start), + }; }; - exported_block.* = .{ .fwd_decl = try self.addString(dg.fwd_decl.written()) }; + switch (exported) { + .nav => |nav| try c.exported_navs.put(gpa, nav, code), + .uav => |uav| try c.exported_uavs.put(gpa, uav, code), + } } pub fn deleteExport( @@ -866,20 +1294,237 @@ pub fn deleteExport( } } -fn addUavsFromCodegen(c: *C, uavs: *const std.AutoArrayHashMapUnmanaged(InternPool.Index, Alignment)) Allocator.Error!void { +fn mergeNeededCTypes( + c: *C, + need_types: *std.AutoArrayHashMapUnmanaged(link.ConstPool.Index, void), + need_errunion_types: *std.AutoArrayHashMapUnmanaged(link.ConstPool.Index, void), + need_aligned_types: *std.AutoArrayHashMapUnmanaged(link.ConstPool.Index, u64), + deps: *const CTypeDependencies, +) Allocator.Error!void { const gpa = c.base.comp.gpa; - try c.uavs.ensureUnusedCapacity(gpa, uavs.count()); - try c.aligned_uavs.ensureUnusedCapacity(gpa, uavs.count()); - for (uavs.keys(), uavs.values()) |uav_val, uav_align| { - { - const gop = c.uavs.getOrPutAssumeCapacity(uav_val); - if (!gop.found_existing) gop.value_ptr.* = .{}; - } - if (uav_align != .none) { - const gop = c.aligned_uavs.getOrPutAssumeCapacity(uav_val); - gop.value_ptr.* = if (gop.found_existing) max: { - break :max gop.value_ptr.*.maxStrict(uav_align); - } else uav_align; + + const resolved = deps.get(c); + + try need_types.ensureUnusedCapacity(gpa, resolved.type.len + resolved.type_fwd.len); + try need_errunion_types.ensureUnusedCapacity(gpa, resolved.errunion_type.len + resolved.errunion_type_fwd.len); + try need_aligned_types.ensureUnusedCapacity(gpa, resolved.aligned_type_fwd.len); + + for (resolved.type) |index| need_types.putAssumeCapacity(index, {}); + for (resolved.type_fwd) |index| need_types.putAssumeCapacity(index, {}); + + for (resolved.errunion_type) 
|index| need_errunion_types.putAssumeCapacity(index, {}); + for (resolved.errunion_type_fwd) |index| need_errunion_types.putAssumeCapacity(index, {}); + + for (resolved.aligned_type_fwd, resolved.aligned_type_masks) |ty_index, align_mask| { + const gop = need_aligned_types.getOrPutAssumeCapacity(ty_index); + if (!gop.found_existing) gop.value_ptr.* = 0; + gop.value_ptr.* |= align_mask; + } +} + +fn mergeNeededUavs( + zcu: *const Zcu, + global: *std.AutoArrayHashMapUnmanaged(InternPool.Index, Alignment), + new: *const std.AutoArrayHashMapUnmanaged(InternPool.Index, Alignment), +) Allocator.Error!void { + const gpa = zcu.comp.gpa; + + try global.ensureUnusedCapacity(gpa, new.count()); + for (new.keys(), new.values()) |uav_val, need_align| { + const gop = global.getOrPutAssumeCapacity(uav_val); + if (!gop.found_existing) gop.value_ptr.* = .none; + + if (need_align != .none) { + const cur_align = switch (gop.value_ptr.*) { + .none => Value.fromInterned(uav_val).typeOf(zcu).abiAlignment(zcu), + else => |a| a, + }; + if (need_align.compareStrict(.gt, cur_align)) { + gop.value_ptr.* = need_align; + } } } } + +fn addCTypeDependencies( + c: *C, + pt: Zcu.PerThread, + deps: *const codegen.CType.Dependencies, +) Allocator.Error!CTypeDependencies { + const gpa = pt.zcu.comp.gpa; + + try c.bigint_types.ensureUnusedCapacity(gpa, deps.bigint.count()); + for (deps.bigint.keys()) |bigint| c.bigint_types.putAssumeCapacity(bigint, {}); + + const type_start = c.type_dependencies.items.len; + const errunion_type_start = type_start + deps.type.count(); + const type_fwd_start = errunion_type_start + deps.errunion_type.count(); + const errunion_type_fwd_start = type_fwd_start + deps.type_fwd.count(); + const aligned_type_fwd_start = errunion_type_fwd_start + deps.errunion_type_fwd.count(); + try c.type_dependencies.appendNTimes(gpa, undefined, deps.type.count() + + deps.errunion_type.count() + + deps.type_fwd.count() + + deps.errunion_type_fwd.count() + + deps.aligned_type_fwd.count()); + 
+ const align_mask_start = c.align_dependency_masks.items.len; + try c.align_dependency_masks.appendSlice(gpa, deps.aligned_type_fwd.values()); + + for (deps.type.keys(), type_start..) |ty, i| { + const pool_index = try c.type_pool.get(pt, .{ .c = c }, ty); + c.type_dependencies.items[i] = pool_index; + } + + for (deps.errunion_type.keys(), errunion_type_start..) |ty, i| { + const pool_index = try c.type_pool.get(pt, .{ .c = c }, ty); + c.type_dependencies.items[i] = pool_index; + } + + for (deps.type_fwd.keys(), type_fwd_start..) |ty, i| { + const pool_index = try c.type_pool.get(pt, .{ .c = c }, ty); + c.type_dependencies.items[i] = pool_index; + } + + for (deps.errunion_type_fwd.keys(), errunion_type_fwd_start..) |ty, i| { + const pool_index = try c.type_pool.get(pt, .{ .c = c }, ty); + c.type_dependencies.items[i] = pool_index; + } + + for (deps.aligned_type_fwd.keys(), aligned_type_fwd_start..) |ty, i| { + const pool_index = try c.type_pool.get(pt, .{ .c = c }, ty); + c.type_dependencies.items[i] = pool_index; + } + + return .{ + .len = @intCast(deps.type.count()), + .errunion_len = @intCast(deps.errunion_type.count()), + .fwd_len = @intCast(deps.type_fwd.count()), + .errunion_fwd_len = @intCast(deps.errunion_type_fwd.count()), + .aligned_fwd_len = @intCast(deps.aligned_type_fwd.count()), + .type_start = @intCast(type_start), + .align_mask_start = @intCast(align_mask_start), + }; +} + +fn updateNewUavs(c: *C, pt: Zcu.PerThread, old_uavs_len: usize) Allocator.Error!void { + const gpa = pt.zcu.comp.gpa; + var index = old_uavs_len; + while (index < c.uavs.count()) : (index += 1) { + // `new_uavs` is UAVs discovered while lowering *this* UAV. 
+ const new_uavs: []const InternPool.Index = new: { + c.uavs.lockPointers(); + defer c.uavs.unlockPointers(); + const val: Value = .fromInterned(c.uavs.keys()[index]); + const rendered_decl = &c.uavs.values()[index]; + rendered_decl.* = .init; + try c.updateUav(pt, val, rendered_decl); + break :new rendered_decl.need_uavs.keys(); + }; + try c.uavs.ensureUnusedCapacity(gpa, new_uavs.len); + for (new_uavs) |val| { + const gop = c.uavs.getOrPutAssumeCapacity(val); + if (!gop.found_existing) { + assert(gop.index > index); + } + } + } +} + +const FlushTypes = struct { + c: *C, + f: *Flush, + + aligned_types: *const std.AutoArrayHashMapUnmanaged(link.ConstPool.Index, u64), + aligned_type_strings: []const []const u8, + + status: std.AutoArrayHashMapUnmanaged(link.ConstPool.Index, bool), + errunion_status: std.AutoArrayHashMapUnmanaged(link.ConstPool.Index, bool), + aligned_status: std.AutoArrayHashMapUnmanaged(link.ConstPool.Index, void), + + fn processDeps(ft: *FlushTypes, deps: *const CTypeDependencies) void { + const resolved = deps.get(ft.c); + for (resolved.type) |pool_index| ft.doType(pool_index); + for (resolved.type_fwd) |pool_index| ft.doTypeFwd(pool_index); + for (resolved.errunion_type) |pool_index| ft.doErrunionType(pool_index); + for (resolved.errunion_type_fwd) |pool_index| ft.doErrunionTypeFwd(pool_index); + for (resolved.aligned_type_fwd) |pool_index| ft.doAlignedTypeFwd(pool_index); + } + fn processDepsAsFwd(ft: *FlushTypes, deps: *const CTypeDependencies) void { + const resolved = deps.get(ft.c); + for (resolved.type) |pool_index| ft.doTypeFwd(pool_index); + for (resolved.type_fwd) |pool_index| ft.doTypeFwd(pool_index); + for (resolved.errunion_type) |pool_index| ft.doErrunionTypeFwd(pool_index); + for (resolved.errunion_type_fwd) |pool_index| ft.doErrunionTypeFwd(pool_index); + for (resolved.aligned_type_fwd) |pool_index| ft.doAlignedTypeFwd(pool_index); + } + + fn doAlignedTypeFwd(ft: *FlushTypes, pool_index: link.ConstPool.Index) void { + const c = 
ft.c; + if (ft.aligned_status.contains(pool_index)) return; + if (ft.aligned_types.getIndex(pool_index)) |i| { + const rendered = &c.types.items[@intFromEnum(pool_index)]; + ft.processDepsAsFwd(&rendered.deps); + ft.f.appendBufAssumeCapacity(ft.aligned_type_strings[i]); + } + ft.aligned_status.putAssumeCapacity(pool_index, {}); + } + fn doTypeFwd(ft: *FlushTypes, pool_index: link.ConstPool.Index) void { + const c = ft.c; + if (ft.status.contains(pool_index)) return; + const rendered = &c.types.items[@intFromEnum(pool_index)]; + if (rendered.fwd_decl.len > 0) { + ft.f.appendBufAssumeCapacity(rendered.fwd_decl.get(c)); + ft.status.putAssumeCapacityNoClobber(pool_index, false); + } else { + ft.processDepsAsFwd(&rendered.definition_deps); + const gop = ft.status.getOrPutAssumeCapacity(pool_index); + if (!gop.found_existing) { + gop.value_ptr.* = false; + ft.f.appendBufAssumeCapacity(rendered.definition.get(c)); + } + } + } + fn doType(ft: *FlushTypes, pool_index: link.ConstPool.Index) void { + const c = ft.c; + if (ft.status.get(pool_index)) |completed| { + if (completed) return; + } + const rendered = &c.types.items[@intFromEnum(pool_index)]; + ft.processDeps(&rendered.definition_deps); + if (rendered.fwd_decl.len == 0 and ft.status.contains(pool_index)) { + // `doTypeFwd` already rendered the definition, we just had to complete the type by + // fully resolving its dependencies. + } else if (rendered.definition.len > 0) { + ft.f.appendBufAssumeCapacity(rendered.definition.get(c)); + } else if (!ft.status.contains(pool_index)) { + // The type will never be completed, but it must be forward declared to avoid it being + // declared in the wrong scope. 
+ ft.f.appendBufAssumeCapacity(rendered.fwd_decl.get(c)); + } + ft.status.putAssumeCapacity(pool_index, true); + } + fn doErrunionTypeFwd(ft: *FlushTypes, pool_index: link.ConstPool.Index) void { + const c = ft.c; + const gop = ft.errunion_status.getOrPutAssumeCapacity(pool_index); + if (gop.found_existing) return; + const rendered = &c.types.items[@intFromEnum(pool_index)]; + ft.f.appendBufAssumeCapacity(rendered.errunion_fwd_decl.get(c)); + gop.value_ptr.* = false; + } + fn doErrunionType(ft: *FlushTypes, pool_index: link.ConstPool.Index) void { + const c = ft.c; + if (ft.errunion_status.get(pool_index)) |completed| { + if (completed) return; + } + const rendered = &c.types.items[@intFromEnum(pool_index)]; + ft.processDeps(&rendered.deps); + if (rendered.errunion_definition.len > 0) { + ft.f.appendBufAssumeCapacity(rendered.errunion_definition.get(c)); + } else { + // The error union type will never be completed, but forward declare it to avoid the + // type being first declared in a different scope. + ft.f.appendBufAssumeCapacity(rendered.errunion_fwd_decl.get(c)); + } + ft.errunion_status.putAssumeCapacity(pool_index, true); + } +}; diff --git a/src/link/DebugConstPool.zig b/src/link/ConstPool.zig similarity index 83% rename from src/link/DebugConstPool.zig rename to src/link/ConstPool.zig index eb8c60b6f9..1282ad67c4 100644 --- a/src/link/DebugConstPool.zig +++ b/src/link/ConstPool.zig @@ -9,14 +9,11 @@ /// Indices into the pool are dense, and constants are never removed from the pool, so the debug /// info implementation can store information for each one with a simple `ArrayList`. 
/// -/// To use `DebugConstPool`, the debug info implementation is required to: -/// * forward `updateContainerType` calls to its `DebugConstPool` -/// * expose some callback functions---see functions in `DebugInfo` +/// To use `ConstPool`, the debug info implementation is required to: +/// * forward `updateContainerType` calls to its `ConstPool` +/// * expose some callback functions---see functions in `User` /// * ensure that any `get` call is eventually followed by a `flushPending` call -/// -/// TODO: everything in this file should have the error set 'Allocator.Error', but right now the -/// self-hosted linkers can return all kinds of crap for some reason. This needs fixing. -const DebugConstPool = @This(); +const ConstPool = @This(); values: std.AutoArrayHashMapUnmanaged(InternPool.Index, void), pending: std.ArrayList(Index), @@ -24,7 +21,7 @@ complete_containers: std.AutoArrayHashMapUnmanaged(InternPool.Index, void), container_deps: std.AutoArrayHashMapUnmanaged(InternPool.Index, ContainerDepEntry.Index), container_dep_entries: std.ArrayList(ContainerDepEntry), -pub const empty: DebugConstPool = .{ +pub const empty: ConstPool = .{ .values = .empty, .pending = .empty, .complete_containers = .empty, @@ -32,7 +29,7 @@ pub const empty: DebugConstPool = .{ .container_dep_entries = .empty, }; -pub fn deinit(pool: *DebugConstPool, gpa: Allocator) void { +pub fn deinit(pool: *ConstPool, gpa: Allocator) void { pool.values.deinit(gpa); pool.pending.deinit(gpa); pool.complete_containers.deinit(gpa); @@ -42,13 +39,14 @@ pub fn deinit(pool: *DebugConstPool, gpa: Allocator) void { pub const Index = enum(u32) { _, - pub fn val(i: Index, pool: *const DebugConstPool) InternPool.Index { + pub fn val(i: Index, pool: *const ConstPool) InternPool.Index { return pool.values.keys()[@intFromEnum(i)]; } }; -pub const DebugInfo = union(enum) { +pub const User = union(enum) { dwarf: *@import("Dwarf.zig"), + c: *@import("C.zig"), llvm: @import("../codegen/llvm.zig").Object.Ptr, /// 
Inform the debug info implementation that the new constant `val` was added to the pool at @@ -56,12 +54,12 @@ pub const DebugInfo = union(enum) { /// that there will eventually be a call to either `updateConst` or `updateConstIncomplete` /// following the `addConst` call, to actually populate the constant's debug info. fn addConst( - di: DebugInfo, + user: User, pt: Zcu.PerThread, index: Index, val: InternPool.Index, - ) !void { - switch (di) { + ) Allocator.Error!void { + switch (user) { inline else => |impl| return impl.addConst(pt, index, val), } } @@ -71,12 +69,12 @@ pub const DebugInfo = union(enum) { /// * If it is a type, its layout is known. /// * Otherwise, the layout of its type is known. fn updateConst( - di: DebugInfo, + user: User, pt: Zcu.PerThread, index: Index, val: InternPool.Index, - ) !void { - switch (di) { + ) Allocator.Error!void { + switch (user) { inline else => |impl| return impl.updateConst(pt, index, val), } } @@ -87,12 +85,12 @@ pub const DebugInfo = union(enum) { /// initialized so never had its layout resolved). Instead, the implementation must emit some /// form of placeholder entry representing an incomplete/unknown constant. 
fn updateConstIncomplete( - di: DebugInfo, + user: User, pt: Zcu.PerThread, index: Index, val: InternPool.Index, - ) !void { - switch (di) { + ) Allocator.Error!void { + switch (user) { inline else => |impl| return impl.updateConstIncomplete(pt, index, val), } } @@ -100,7 +98,7 @@ pub const DebugInfo = union(enum) { const ContainerDepEntry = extern struct { next: ContainerDepEntry.Index.Optional, - depender: DebugConstPool.Index, + depender: ConstPool.Index, const Index = enum(u32) { _, const Optional = enum(u32) { @@ -116,7 +114,7 @@ const ContainerDepEntry = extern struct { fn toOptional(i: ContainerDepEntry.Index) Optional { return @enumFromInt(@intFromEnum(i)); } - fn ptr(i: ContainerDepEntry.Index, pool: *DebugConstPool) *ContainerDepEntry { + fn ptr(i: ContainerDepEntry.Index, pool: *ConstPool) *ContainerDepEntry { return &pool.container_dep_entries.items[@intFromEnum(i)]; } }; @@ -125,12 +123,12 @@ const ContainerDepEntry = extern struct { /// Calls to `link.File.updateContainerType` must be forwarded to this function so that the debug /// constant pool has up-to-date information about the resolution status of types. pub fn updateContainerType( - pool: *DebugConstPool, + pool: *ConstPool, pt: Zcu.PerThread, - di: DebugInfo, + user: User, container_ty: InternPool.Index, success: bool, -) !void { +) Allocator.Error!void { if (success) { const gpa = pt.zcu.comp.gpa; try pool.complete_containers.put(gpa, container_ty, {}); @@ -139,18 +137,18 @@ pub fn updateContainerType( } var opt_dep = pool.container_deps.get(container_ty); while (opt_dep) |dep| : (opt_dep = dep.ptr(pool).next.unwrap()) { - try pool.update(pt, di, dep.ptr(pool).depender); + try pool.update(pt, user, dep.ptr(pool).depender); } } /// After this is called, there may be a constant for which debug information (complete or not) has /// not yet been emitted, so the user must call `flushPending` at some point after this call. 
-pub fn get(pool: *DebugConstPool, pt: Zcu.PerThread, di: DebugInfo, val: InternPool.Index) !DebugConstPool.Index { +pub fn get(pool: *ConstPool, pt: Zcu.PerThread, user: User, val: InternPool.Index) Allocator.Error!ConstPool.Index { const zcu = pt.zcu; const ip = &zcu.intern_pool; const gpa = zcu.comp.gpa; const gop = try pool.values.getOrPut(gpa, val); - const index: DebugConstPool.Index = @enumFromInt(gop.index); + const index: ConstPool.Index = @enumFromInt(gop.index); if (!gop.found_existing) { const ty: Type = switch (ip.typeOf(val)) { .type_type => if (ip.isUndef(val)) .type else .fromInterned(val), @@ -158,17 +156,17 @@ pub fn get(pool: *DebugConstPool, pt: Zcu.PerThread, di: DebugInfo, val: InternP }; try pool.registerTypeDeps(index, ty, zcu); try pool.pending.append(gpa, index); - try di.addConst(pt, index, val); + try user.addConst(pt, index, val); } return index; } -pub fn flushPending(pool: *DebugConstPool, pt: Zcu.PerThread, di: DebugInfo) !void { +pub fn flushPending(pool: *ConstPool, pt: Zcu.PerThread, user: User) Allocator.Error!void { while (pool.pending.pop()) |pending_ty| { - try pool.update(pt, di, pending_ty); + try pool.update(pt, user, pending_ty); } } -fn update(pool: *DebugConstPool, pt: Zcu.PerThread, di: DebugInfo, index: DebugConstPool.Index) !void { +fn update(pool: *ConstPool, pt: Zcu.PerThread, user: User, index: ConstPool.Index) Allocator.Error!void { const zcu = pt.zcu; const ip = &zcu.intern_pool; const val = index.val(pool); @@ -177,12 +175,12 @@ fn update(pool: *DebugConstPool, pt: Zcu.PerThread, di: DebugInfo, index: DebugC else => |ty| .fromInterned(ty), }; if (pool.checkType(ty, zcu)) { - try di.updateConst(pt, index, val); + try user.updateConst(pt, index, val); } else { - try di.updateConstIncomplete(pt, index, val); + try user.updateConstIncomplete(pt, index, val); } } -fn checkType(pool: *const DebugConstPool, ty: Type, zcu: *const Zcu) bool { +fn checkType(pool: *const ConstPool, ty: Type, zcu: *const Zcu) bool { if 
(ty.isGenericPoison()) return true; return switch (ty.zigTypeTag(zcu)) { .type, @@ -227,7 +225,7 @@ fn checkType(pool: *const DebugConstPool, ty: Type, zcu: *const Zcu) bool { }, }; } -fn registerTypeDeps(pool: *DebugConstPool, root: Index, ty: Type, zcu: *const Zcu) Allocator.Error!void { +fn registerTypeDeps(pool: *ConstPool, root: Index, ty: Type, zcu: *const Zcu) Allocator.Error!void { if (ty.isGenericPoison()) return; switch (ty.zigTypeTag(zcu)) { .type, diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index e64346c17c..38021aa42f 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -18,7 +18,6 @@ const codegen = @import("../codegen.zig"); const dev = @import("../dev.zig"); const link = @import("../link.zig"); const target_info = @import("../target.zig"); -const DebugConstPool = link.DebugConstPool; gpa: Allocator, bin_file: *link.File, @@ -26,10 +25,10 @@ format: DW.Format, endian: std.builtin.Endian, address_size: AddressSize, -const_pool: DebugConstPool, +const_pool: link.ConstPool, mods: std.AutoArrayHashMapUnmanaged(*Module, ModInfo), -/// Indices are `DebugConstPool.Index`. +/// Indices are `link.ConstPool.Index`. 
values: std.ArrayList(struct { Unit.Index, Entry.Index }), navs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, Entry.Index), decls: std.AutoArrayHashMapUnmanaged(InternPool.TrackedInst.Index, Entry.Index), @@ -1038,7 +1037,7 @@ const Entry = struct { const zcu = dwarf.bin_file.comp.zcu.?; const ip = &zcu.intern_pool; for (0.., dwarf.values.items) |raw_index, unit_and_entry| { - const index: DebugConstPool.Index = @enumFromInt(raw_index); + const index: link.ConstPool.Index = @enumFromInt(raw_index); const val = index.val(&dwarf.const_pool); const val_unit, const val_entry = unit_and_entry; if (sec.getUnit(val_unit) == unit and unit.getEntry(val_entry) == entry) @@ -3291,8 +3290,14 @@ pub fn updateContainerType( ) !void { try dwarf.const_pool.updateContainerType(pt, .{ .dwarf = dwarf }, ty, success); } -/// Should only be called by the `DebugConstPool` implementation. -pub fn addConst(dwarf: *Dwarf, pt: Zcu.PerThread, index: DebugConstPool.Index, val: InternPool.Index) !void { +/// Should only be called by the `link.ConstPool` implementation. +pub fn addConst(dwarf: *Dwarf, pt: Zcu.PerThread, index: link.ConstPool.Index, val: InternPool.Index) Allocator.Error!void { + addConstInner(dwarf, pt, index, val) catch |err| switch (err) { + error.OutOfMemory => |e| return e, + else => |e| std.debug.panic("DWARF TODO: '{t}' while registering constant\n", .{e}), + }; +} +fn addConstInner(dwarf: *Dwarf, pt: Zcu.PerThread, index: link.ConstPool.Index, val: InternPool.Index) !void { const zcu = pt.zcu; const ip = &zcu.intern_pool; @@ -3321,11 +3326,17 @@ pub fn addConst(dwarf: *Dwarf, pt: Zcu.PerThread, index: DebugConstPool.Index, v assert(@intFromEnum(index) == dwarf.values.items.len); try dwarf.values.append(dwarf.gpa, .{ unit, entry }); } -/// Should only be called by the `DebugConstPool` implementation. +/// Should only be called by the `link.ConstPool` implementation. /// /// Emits a "dummy" DIE for the given comptime-only value (which may be a type). 
For types, this is /// an opaque type. Otherwise, it is an undefined value of the value's type. -pub fn updateConstIncomplete(dwarf: *Dwarf, pt: Zcu.PerThread, debug_const_index: DebugConstPool.Index, value_index: InternPool.Index) !void { +pub fn updateConstIncomplete(dwarf: *Dwarf, pt: Zcu.PerThread, debug_const_index: link.ConstPool.Index, value_index: InternPool.Index) Allocator.Error!void { + updateConstIncompleteInner(dwarf, pt, debug_const_index, value_index) catch |err| switch (err) { + error.OutOfMemory => |e| return e, + else => |e| std.debug.panic("DWARF TODO: '{t}' while updating incomplete constant\n", .{e}), + }; +} +fn updateConstIncompleteInner(dwarf: *Dwarf, pt: Zcu.PerThread, debug_const_index: link.ConstPool.Index, value_index: InternPool.Index) !void { const zcu = pt.zcu; const val: Value = .fromInterned(value_index); @@ -3380,10 +3391,16 @@ pub fn updateConstIncomplete(dwarf: *Dwarf, pt: Zcu.PerThread, debug_const_index try dwarf.debug_info.section.replaceEntry(unit, entry, dwarf, wip_nav.debug_info.written()); try dwarf.debug_loclists.section.replaceEntry(unit, entry, dwarf, wip_nav.debug_loclists.written()); } -/// Should only be called by the `DebugConstPool` implementation. +/// Should only be called by the `link.ConstPool` implementation. /// /// Emits a DIE for the given comptime-only value (which may be a type). 
-pub fn updateConst(dwarf: *Dwarf, pt: Zcu.PerThread, debug_const_index: DebugConstPool.Index, value_index: InternPool.Index) !void { +pub fn updateConst(dwarf: *Dwarf, pt: Zcu.PerThread, debug_const_index: link.ConstPool.Index, value_index: InternPool.Index) Allocator.Error!void { + updateConstInner(dwarf, pt, debug_const_index, value_index) catch |err| switch (err) { + error.OutOfMemory => |e| return e, + else => |e| std.debug.panic("DWARF TODO: '{t}' while updating constant\n", .{e}), + }; +} +fn updateConstInner(dwarf: *Dwarf, pt: Zcu.PerThread, debug_const_index: link.ConstPool.Index, value_index: InternPool.Index) !void { const zcu = pt.zcu; const ip = &zcu.intern_pool; diff --git a/src/link/Elf.zig b/src/link/Elf.zig index dd4c2abd24..c45f73d97a 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -1716,19 +1716,8 @@ pub fn updateContainerType( if (build_options.skip_non_native and builtin.object_format != .elf) { @panic("Attempted to compile for object format that was disabled by build configuration"); } - const zcu = pt.zcu; - const gpa = zcu.gpa; return self.zigObjectPtr().?.updateContainerType(pt, ty, success) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, - else => |e| { - try zcu.failed_types.putNoClobber(gpa, ty, try Zcu.ErrorMsg.create( - gpa, - zcu.typeSrcLoc(ty), - "failed to update container type: {s}", - .{@errorName(e)}, - )); - return error.TypeFailureReported; - }, }; }