mirror of
https://codeberg.org/ziglang/zig.git
synced 2026-03-08 01:04:43 +01:00
cbe: rework CType and other major refactors
The goal of these changes is to allow the C backend to support the new lazier type resolution system implemented by the frontend. This required a full rewrite of the `CType` abstraction, and major changes to the C backend "linker". The `DebugConstPool` abstraction introduced in a previous commit turns out to be useful for the C backend to codegen types. Because this use case is not debug information but rather general linking (albeit when targeting an unusual object format), I have renamed the abstraction to `ConstPool`. With it, the C linker is told when a type's layout becomes known, and can at that point generate the corresponding C definitions, rather than deferring this work until `flush`. The work done in `flush` is now more-or-less *solely* focused on collecting all of the buffers into a big array for a vectored write. This does unfortunately involve a non-trivial graph traversal to emit type definitions in an appropriate order, but it's still quite fast in practice, and it operates on fairly compact dependency data. We don't generate the actual type *definitions* in `flush`; that happens during compilation using `ConstPool` as discussed above. (We do generate the typedefs for underaligned types in `flush`, but that's a trivial amount of work in most cases.) `CType` is now an ephemeral type: it is created only when we render a type (the logic for which has been pushed into just 2 or 3 functions in `codegen.c`---most of the backend now operates on unmolested Zig `Type`s instead). C types are no longer stored in a "pool", although the type "dependencies" of generated C code (that is, the structs, unions, and typedefs which the generated code references) are tracked (in some simple hash sets) and given to the linker so it can codegen the types.
This commit is contained in:
parent
e88727390f
commit
319548c772
16 changed files with 5312 additions and 7206 deletions
|
|
@ -259,7 +259,7 @@
|
|||
#endif
|
||||
|
||||
#if zig_has_attribute(packed) || defined(zig_tinyc)
|
||||
#define zig_packed(definition) __attribute__((packed)) definition
|
||||
#define zig_packed(definition) definition __attribute__((packed))
|
||||
#elif defined(zig_msvc)
|
||||
#define zig_packed(definition) __pragma(pack(1)) definition __pragma(pack())
|
||||
#else
|
||||
|
|
|
|||
|
|
@ -3382,9 +3382,6 @@ fn flush(comp: *Compilation, arena: Allocator, tid: Zcu.PerThread.Id) (Io.Cancel
|
|||
error.OutOfMemory, error.Canceled => |e| return e,
|
||||
};
|
||||
}
|
||||
if (comp.zcu) |zcu| {
|
||||
try link.File.C.flushEmitH(zcu);
|
||||
}
|
||||
}
|
||||
|
||||
/// This function is called by the frontend before flush(). It communicates that
|
||||
|
|
|
|||
|
|
@ -3403,7 +3403,7 @@ pub const LoadedStructType = struct {
|
|||
/// Iterates over non-comptime fields in the order they are laid out in memory at runtime.
|
||||
/// May or may not include zero-bit fields.
|
||||
/// Asserts the struct is not packed.
|
||||
pub fn iterateRuntimeOrder(s: *const LoadedStructType, ip: *InternPool) RuntimeOrderIterator {
|
||||
pub fn iterateRuntimeOrder(s: *const LoadedStructType, ip: *const InternPool) RuntimeOrderIterator {
|
||||
switch (s.layout) {
|
||||
.auto => {
|
||||
const ro = std.mem.sliceTo(s.field_runtime_order.get(ip), .omitted);
|
||||
|
|
|
|||
|
|
@ -789,7 +789,7 @@ pub fn hasWellDefinedLayout(ty: Type, zcu: *const Zcu) bool {
|
|||
/// Determines whether a function type has runtime bits, i.e. whether a
|
||||
/// function with this type can exist at runtime.
|
||||
/// Asserts that `ty` is a function type.
|
||||
pub fn fnHasRuntimeBits(fn_ty: Type, zcu: *Zcu) bool {
|
||||
pub fn fnHasRuntimeBits(fn_ty: Type, zcu: *const Zcu) bool {
|
||||
assertHasLayout(fn_ty, zcu);
|
||||
const fn_info = zcu.typeToFunc(fn_ty).?;
|
||||
if (fn_info.comptime_bits != 0) return false;
|
||||
|
|
@ -830,7 +830,7 @@ pub fn fnHasRuntimeBits(fn_ty: Type, zcu: *Zcu) bool {
|
|||
}
|
||||
|
||||
/// Like `hasRuntimeBits`, but also returns `true` for runtime functions.
|
||||
pub fn isRuntimeFnOrHasRuntimeBits(ty: Type, zcu: *Zcu) bool {
|
||||
pub fn isRuntimeFnOrHasRuntimeBits(ty: Type, zcu: *const Zcu) bool {
|
||||
switch (ty.zigTypeTag(zcu)) {
|
||||
.@"fn" => return ty.fnHasRuntimeBits(zcu),
|
||||
else => return ty.hasRuntimeBits(zcu),
|
||||
|
|
|
|||
|
|
@ -151,7 +151,7 @@ pub fn intFromEnum(val: Value, zcu: *const Zcu) Value {
|
|||
}
|
||||
|
||||
/// Asserts that `val` is an integer.
|
||||
pub fn toBigInt(val: Value, space: *BigIntSpace, zcu: *Zcu) BigIntConst {
|
||||
pub fn toBigInt(val: Value, space: *BigIntSpace, zcu: *const Zcu) BigIntConst {
|
||||
if (val.getUnsignedInt(zcu)) |x| {
|
||||
return BigIntMutable.init(&space.limbs, x).toConst();
|
||||
}
|
||||
|
|
@ -669,7 +669,7 @@ pub fn floatCast(val: Value, dest_ty: Type, pt: Zcu.PerThread) !Value {
|
|||
}
|
||||
|
||||
/// Asserts the value is comparable. Supports comparisons between heterogeneous types.
|
||||
pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, zcu: *Zcu) bool {
|
||||
pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, zcu: *const Zcu) bool {
|
||||
if (lhs.pointerNav(zcu)) |lhs_nav| {
|
||||
if (rhs.pointerNav(zcu)) |rhs_nav| {
|
||||
switch (op) {
|
||||
|
|
@ -695,7 +695,7 @@ pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, zcu:
|
|||
return order(lhs, rhs, zcu).compare(op);
|
||||
}
|
||||
|
||||
pub fn order(lhs: Value, rhs: Value, zcu: *Zcu) std.math.Order {
|
||||
pub fn order(lhs: Value, rhs: Value, zcu: *const Zcu) std.math.Order {
|
||||
if (lhs.isFloat(zcu) or rhs.isFloat(zcu)) {
|
||||
const lhs_f128 = lhs.toFloat(f128, zcu);
|
||||
const rhs_f128 = rhs.toFloat(f128, zcu);
|
||||
|
|
@ -805,7 +805,7 @@ pub fn canMutateComptimeVarState(val: Value, zcu: *Zcu) bool {
|
|||
/// Gets the `Nav` referenced by this pointer. If the pointer does not point
|
||||
/// to a `Nav`, or if it points to some part of one (like a field or element),
|
||||
/// returns null.
|
||||
pub fn pointerNav(val: Value, zcu: *Zcu) ?InternPool.Nav.Index {
|
||||
pub fn pointerNav(val: Value, zcu: *const Zcu) ?InternPool.Nav.Index {
|
||||
return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
|
||||
// TODO: these 3 cases are weird; these aren't pointer values!
|
||||
.variable => |v| v.owner_nav,
|
||||
|
|
|
|||
|
|
@ -4113,13 +4113,13 @@ pub const ResolvedReference = struct {
|
|||
/// If an `AnalUnit` is not in the returned map, it is unreferenced.
|
||||
/// The returned hashmap is owned by the `Zcu`, so should not be freed by the caller.
|
||||
/// This hashmap is cached, so repeated calls to this function are cheap.
|
||||
pub fn resolveReferences(zcu: *Zcu) !*const std.AutoArrayHashMapUnmanaged(AnalUnit, ?ResolvedReference) {
|
||||
pub fn resolveReferences(zcu: *Zcu) Allocator.Error!*const std.AutoArrayHashMapUnmanaged(AnalUnit, ?ResolvedReference) {
|
||||
if (zcu.resolved_references == null) {
|
||||
zcu.resolved_references = try zcu.resolveReferencesInner();
|
||||
}
|
||||
return &zcu.resolved_references.?;
|
||||
}
|
||||
fn resolveReferencesInner(zcu: *Zcu) !std.AutoArrayHashMapUnmanaged(AnalUnit, ?ResolvedReference) {
|
||||
fn resolveReferencesInner(zcu: *Zcu) Allocator.Error!std.AutoArrayHashMapUnmanaged(AnalUnit, ?ResolvedReference) {
|
||||
const gpa = zcu.gpa;
|
||||
const comp = zcu.comp;
|
||||
const ip = &zcu.intern_pool;
|
||||
|
|
|
|||
5297
src/codegen/c.zig
5297
src/codegen/c.zig
File diff suppressed because it is too large
Load diff
File diff suppressed because it is too large
Load diff
1013
src/codegen/c/type.zig
Normal file
1013
src/codegen/c/type.zig
Normal file
File diff suppressed because it is too large
Load diff
651
src/codegen/c/type/render_defs.zig
Normal file
651
src/codegen/c/type/render_defs.zig
Normal file
|
|
@ -0,0 +1,651 @@
|
|||
/// Renders the `typedef` for an aligned type.
pub fn defineAligned(
    ty: Type,
    alignment: Alignment,
    complete: bool,
    deps: *CType.Dependencies,
    arena: Allocator,
    w: *Writer,
    pt: Zcu.PerThread,
) (Allocator.Error || Writer.Error)!void {
    const zcu = pt.zcu;

    // Lower the underlying type; the typedef's own name is the `.aligned` CType.
    const lowered: CType = try .lower(ty, deps, arena, zcu);
    const typedef_name: CType = .{ .aligned = .{
        .ty = ty,
        .alignment = alignment,
    } };

    try w.writeAll("typedef ");
    // Only emit an underalignment annotation when the layout is complete and the requested
    // alignment is strictly below the type's ABI alignment.
    const underaligned = complete and alignment.compareStrict(.lt, ty.abiAlignment(zcu));
    if (underaligned) {
        try w.print("zig_under_align({d}) ", .{alignment.toByteUnits().?});
    }
    try w.print("{f}{f}{f}; /* align({d}) {f} */\n", .{
        lowered.fmtDeclaratorPrefix(zcu),
        typedef_name.fmtTypeName(zcu),
        lowered.fmtDeclaratorSuffix(zcu),
        alignment.toByteUnits().?,
        ty.fmt(pt),
    });
}
|
||||
/// Renders the definition of a big-int `struct`.
pub fn defineBigInt(big: CType.BigInt, w: *Writer, zcu: *const Zcu) Writer.Error!void {
    // The struct's name, its limb element type, and the limbs array member.
    const struct_name: CType = .{ .bigint = .{
        .limb_size = big.limb_size,
        .limbs_len = big.limbs_len,
    } };
    const limb: CType = .{ .int = big.limb_size.unsigned() };
    const limbs_array: CType = .{ .array = .{
        .len = big.limbs_len,
        .elem_ty = &limb,
        .nonstring = limb.isStringElem(),
    } };
    // Total bit width, for the trailing comment.
    const total_bits = big.limb_size.bits() * @as(u17, big.limbs_len);
    try w.print("{f} {{ {f}limbs{f}; }}; /* {d} bits */\n", .{
        struct_name.fmtTypeName(zcu),
        limbs_array.fmtDeclaratorPrefix(zcu),
        limbs_array.fmtDeclaratorSuffix(zcu),
        total_bits,
    });
}
|
||||
|
||||
/// Renders a forward declaration of the `struct` which represents an error union whose payload type
/// is `payload_ty` (the error set type is unspecified).
pub fn errunionFwdDecl(payload_ty: Type, w: *Writer, zcu: *const Zcu) Writer.Error!void {
    const fwd: CType = .{ .errunion = .{ .payload_ty = payload_ty } };
    try w.print("{f};\n", .{fwd.fmtTypeName(zcu)});
}
|
||||
/// Renders the definition of the `struct` which represents an error union whose payload type is
/// `payload_ty` (the error set type is unspecified).
///
/// Asserts that the layout of `payload_ty` is resolved.
pub fn errunionDefineComplete(
    payload_ty: Type,
    deps: *CType.Dependencies,
    arena: Allocator,
    w: *Writer,
    pt: Zcu.PerThread,
) (Allocator.Error || Writer.Error)!void {
    const zcu = pt.zcu;

    payload_ty.assertHasLayout(zcu);

    // The struct's name identifies the error union by its payload type alone.
    const name_cty: CType = .{ .errunion = .{
        .payload_ty = payload_ty,
    } };

    // All error sets share one representation, so the error member is lowered as `anyerror`.
    const error_cty: CType = try .lower(.anyerror, deps, arena, zcu);

    if (payload_ty.hasRuntimeBits(zcu)) {
        // Payload carries runtime bits: emit both a payload member and an error member.
        const payload_cty: CType = try .lower(payload_ty, deps, arena, zcu);
        try w.print(
            \\{f} {{ /* anyerror!{f} */
            \\ {f}payload{f};
            \\ {f}error{f};
            \\}};
            \\
        , .{
            name_cty.fmtTypeName(zcu),
            payload_ty.fmt(pt),
            payload_cty.fmtDeclaratorPrefix(zcu),
            payload_cty.fmtDeclaratorSuffix(zcu),
            error_cty.fmtDeclaratorPrefix(zcu),
            error_cty.fmtDeclaratorSuffix(zcu),
        });
    } else {
        // Zero-bit payload: the struct holds only the error member.
        try w.print("{f} {{ {f}error{f}; }}; /* anyerror!{f} */\n", .{
            name_cty.fmtTypeName(zcu),
            error_cty.fmtDeclaratorPrefix(zcu),
            error_cty.fmtDeclaratorSuffix(zcu),
            payload_ty.fmt(pt),
        });
    }
}
|
||||
|
||||
/// If the Zig type `ty` lowers to a `struct` or `union` type, renders a forward declaration of that
/// type. Does not write anything for error union types, because their forward declarations are
/// instead rendered by `errunionFwdDecl`.
pub fn fwdDecl(ty: Type, w: *Writer, zcu: *const Zcu) Writer.Error!void {
    const fwd: CType = switch (ty.zigTypeTag(zcu)) {
        .@"struct" => switch (ty.containerLayout(zcu)) {
            .@"packed" => return, // lowers to a typedef, not a container
            .auto, .@"extern" => .{ .@"struct" = ty },
        },
        .@"union" => switch (ty.containerLayout(zcu)) {
            .@"packed" => return, // lowers to a typedef, not a container
            .auto => .{ .union_auto = ty },
            .@"extern" => .{ .union_extern = ty },
        },
        .pointer => if (!ty.isSlice(zcu)) return else .{ .slice = ty },
        .optional => .{ .opt = ty },
        .array => .{ .arr = ty },
        .vector => .{ .vec = ty },
        else => return,
    };
    try w.print("{f};\n", .{fwd.fmtTypeName(zcu)});
}
|
||||
|
||||
/// If the Zig type `ty` lowers to a `typedef`, renders a typedef of that type to `void`, because
/// the type's layout is not resolved. This is only necessary for `typedef`s because a `struct` or
/// `union` which is never defined is already an incomplete type, just like `void`.
pub fn defineIncomplete(ty: Type, w: *Writer, pt: Zcu.PerThread) Writer.Error!void {
    const zcu = pt.zcu;
    // Only type kinds which lower to C typedefs need a `void` placeholder.
    const typedef_name: CType = switch (ty.zigTypeTag(zcu)) {
        .@"fn" => .{ .@"fn" = ty },
        .@"enum" => .{ .@"enum" = ty },
        .@"struct", .@"union" => switch (ty.containerLayout(zcu)) {
            .@"packed" => .{ .bitpack = ty }, // packed containers lower to a typedef
            .auto, .@"extern" => return, // lower to C containers, which may stay incomplete
        },
        else => return,
    };
    try w.print("typedef void {f}; /* {f} */\n", .{
        typedef_name.fmtTypeName(zcu),
        ty.fmt(pt),
    });
}
|
||||
|
||||
/// If the Zig type `ty` lowers to a `struct` or `union` type, or to a `typedef`, renders the
/// definition of that type. Does not write anything for error union types, because their
/// definitions are instead rendered by `errunionDefineComplete`.
///
/// Asserts that the layout of `ty` is resolved.
pub fn defineComplete(
    ty: Type,
    deps: *CType.Dependencies,
    arena: Allocator,
    w: *Writer,
    pt: Zcu.PerThread,
) (Allocator.Error || Writer.Error)!void {
    const zcu = pt.zcu;

    ty.assertHasLayout(zcu);

    switch (ty.zigTypeTag(zcu)) {
        .@"fn" => if (!ty.fnHasRuntimeBits(zcu)) {
            // Zero-bit function type: lower its typedef to `void`.
            const name_cty: CType = .{ .@"fn" = ty };
            try w.print("typedef void {f}; /* {f} */\n", .{
                name_cty.fmtTypeName(zcu),
                ty.fmt(pt),
            });
        } else {
            const ip = &zcu.intern_pool;
            const func_type = ip.indexToKey(ty.toIntern()).func_type;

            // While incomplete types are usually an acceptable substitute for "void", this is not
            // true in function return types, where "void" is the only incomplete type permitted.
            const actual_ret_ty: Type = .fromInterned(func_type.return_type);
            const effective_ret_ty: Type = switch (actual_ret_ty.classify(zcu)) {
                .no_possible_value => .noreturn,
                .one_possible_value, .fully_comptime => .void, // no runtime bits
                .partially_comptime, .runtime => actual_ret_ty, // yes runtime bits
            };

            const name_cty: CType = .{ .@"fn" = ty };
            const ret_cty: CType = try .lower(effective_ret_ty, deps, arena, zcu);

            try w.print("typedef {f}{f}(", .{
                ret_cty.fmtDeclaratorPrefix(zcu),
                name_cty.fmtTypeName(zcu),
            });
            // Emit only parameters with runtime bits, comma-separated.
            var any_params = false;
            for (func_type.param_types.get(ip)) |param_ty_ip| {
                const param_ty: Type = .fromInterned(param_ty_ip);
                if (!param_ty.hasRuntimeBits(zcu)) continue;
                if (any_params) try w.writeAll(", ");
                any_params = true;
                const param_cty: CType = try .lower(param_ty, deps, arena, zcu);
                try w.print("{f}", .{param_cty.fmtTypeName(zcu)});
            }
            if (func_type.is_var_args) {
                if (any_params) try w.writeAll(", ");
                try w.writeAll("...");
            } else if (!any_params) {
                // C requires an explicit `void` for a non-variadic empty parameter list.
                try w.writeAll("void");
            }
            try w.print("){f}; /* {f} */\n", .{
                ret_cty.fmtDeclaratorSuffixIgnoreNonstring(zcu),
                ty.fmt(pt),
            });
        },
        .@"enum" => {
            // Enums lower to a typedef of their integer tag type.
            const name_cty: CType = .{ .@"enum" = ty };
            const cty: CType = try .lower(ty.intTagType(zcu), deps, arena, zcu);
            try w.print("typedef {f}{f}{f}; /* {f} */\n", .{
                cty.fmtDeclaratorPrefix(zcu),
                name_cty.fmtTypeName(zcu),
                cty.fmtDeclaratorSuffix(zcu),
                ty.fmt(pt),
            });
        },
        .@"struct" => if (ty.isTuple(zcu)) {
            try defineTuple(ty, deps, arena, w, pt);
        } else switch (ty.containerLayout(zcu)) {
            .auto, .@"extern" => try defineStruct(ty, deps, arena, w, pt),
            .@"packed" => try defineBitpack(ty, deps, arena, w, pt),
        },
        .@"union" => switch (ty.containerLayout(zcu)) {
            .auto => try defineUnionAuto(ty, deps, arena, w, pt),
            .@"extern" => try defineUnionExtern(ty, deps, arena, w, pt),
            .@"packed" => try defineBitpack(ty, deps, arena, w, pt),
        },
        .pointer => if (ty.isSlice(zcu)) {
            // Slices lower to a { ptr, len } struct.
            const name_cty: CType = .{ .slice = ty };
            const ptr_cty: CType = try .lower(ty.slicePtrFieldType(zcu), deps, arena, zcu);
            try w.print(
                \\{f} {{ /* {f} */
                \\ {f}ptr{f};
                \\ size_t len;
                \\}};
                \\
            , .{
                name_cty.fmtTypeName(zcu),
                ty.fmt(pt),
                ptr_cty.fmtDeclaratorPrefix(zcu),
                ptr_cty.fmtDeclaratorSuffix(zcu),
            });
        },
        .optional => switch (CType.classifyOptional(ty, zcu)) {
            // These representations do not lower to a dedicated struct; nothing to define.
            .error_set,
            .ptr_like,
            .slice_like,
            .npv_payload,
            => {},

            .opv_payload => {
                // Payload has one possible value, so only the null flag needs storage.
                const name_cty: CType = .{ .opt = ty };
                try w.print("{f} {{ bool is_null; }}; /* {f} */\n", .{
                    name_cty.fmtTypeName(zcu),
                    ty.fmt(pt),
                });
            },

            .@"struct" => {
                // General case: { payload, is_null } struct.
                const name_cty: CType = .{ .opt = ty };
                const payload_cty: CType = try .lower(ty.optionalChild(zcu), deps, arena, zcu);
                try w.print(
                    \\{f} {{ /* {f} */
                    \\ {f}payload{f};
                    \\ bool is_null;
                    \\}};
                    \\
                , .{
                    name_cty.fmtTypeName(zcu),
                    ty.fmt(pt),
                    payload_cty.fmtDeclaratorPrefix(zcu),
                    payload_cty.fmtDeclaratorSuffix(zcu),
                });
            },
        },
        .array => if (ty.hasRuntimeBits(zcu)) {
            // Arrays are wrapped in a struct so they have value semantics in C.
            const name_cty: CType = .{ .arr = ty };
            const elem_cty: CType = try .lower(ty.childType(zcu), deps, arena, zcu);
            const array_cty: CType = .{ .array = .{
                .len = ty.arrayLenIncludingSentinel(zcu),
                .elem_ty = &elem_cty,
                .nonstring = nonstring: {
                    // Mark the array "nonstring" only for string-like element types whose
                    // sentinel is absent or nonzero (i.e. the array is not NUL-terminated).
                    if (!elem_cty.isStringElem()) break :nonstring false;
                    const s = ty.sentinel(zcu) orelse break :nonstring true;
                    break :nonstring Value.compareHetero(s, .neq, .zero_comptime_int, zcu);
                },
            } };
            try w.print("{f} {{ {f}array{f}; }}; /* {f} */\n", .{
                name_cty.fmtTypeName(zcu),
                array_cty.fmtDeclaratorPrefix(zcu),
                array_cty.fmtDeclaratorSuffix(zcu),
                ty.fmt(pt),
            });
        },
        .vector => if (ty.hasRuntimeBits(zcu)) {
            // Vectors are lowered like arrays: a struct wrapping a C array member.
            const name_cty: CType = .{ .vec = ty };
            const elem_cty: CType = try .lower(ty.childType(zcu), deps, arena, zcu);
            const array_cty: CType = .{ .array = .{
                .len = ty.arrayLenIncludingSentinel(zcu),
                .elem_ty = &elem_cty,
                .nonstring = elem_cty.isStringElem(),
            } };
            try w.print("{f} {{ {f}array{f}; }}; /* {f} */\n", .{
                name_cty.fmtTypeName(zcu),
                array_cty.fmtDeclaratorPrefix(zcu),
                array_cty.fmtDeclaratorSuffix(zcu),
                ty.fmt(pt),
            });
        },
        else => {},
    }
}
|
||||
/// Renders the `typedef` which defines a packed container (`packed struct`/`packed union`) as
/// its backing integer type.
fn defineBitpack(
    ty: Type,
    deps: *CType.Dependencies,
    arena: Allocator,
    w: *Writer,
    pt: Zcu.PerThread,
) (Allocator.Error || Writer.Error)!void {
    const zcu = pt.zcu;
    const typedef_name: CType = .{ .bitpack = ty };
    const backing: CType = try .lower(ty.bitpackBackingInt(zcu), deps, arena, zcu);
    try w.print("typedef {f}{f}{f}; /* {f} */\n", .{
        backing.fmtDeclaratorPrefix(zcu),
        typedef_name.fmtTypeName(zcu),
        backing.fmtDeclaratorSuffix(zcu),
        ty.fmt(pt),
    });
}
|
||||
/// Renders the `struct` definition for a tuple type. Comptime and zero-bit fields are skipped;
/// explicit `zig_align` annotations are emitted where the natural C layout would otherwise
/// diverge from the Zig layout.
fn defineTuple(
    ty: Type,
    deps: *CType.Dependencies,
    arena: Allocator,
    w: *Writer,
    pt: Zcu.PerThread,
) (Allocator.Error || Writer.Error)!void {
    const zcu = pt.zcu;
    if (!ty.hasRuntimeBits(zcu)) return;
    const ip = &zcu.intern_pool;
    const tuple = ip.indexToKey(ty.toIntern()).tuple_type;

    // Fields cannot be underaligned, because tuple fields cannot have specified alignments.
    // However, overaligned fields are possible thanks to intermediate zero-bit fields.

    const tuple_align = ty.abiAlignment(zcu);

    // If the alignment of other fields would not give the tuple sufficient alignment, we
    // need to align the first field (which does not affect its offset, because 0 is always
    // well-aligned) to indirectly specify the tuple alignment.
    const overalign: bool = for (tuple.types.get(ip)) |field_ty_ip| {
        const field_ty: Type = .fromInterned(field_ty_ip);
        if (!field_ty.hasRuntimeBits(zcu)) continue;
        const natural_align = field_ty.defaultStructFieldAlignment(.auto, zcu);
        if (natural_align.compareStrict(.gte, tuple_align)) break false;
    } else true;

    const name_cty: CType = .{ .@"struct" = ty };
    try w.print("{f} {{ /* {f} */\n", .{
        name_cty.fmtTypeName(zcu),
        ty.fmt(pt),
    });
    // `zig_offset` tracks each field's offset in the Zig layout; `c_offset` tracks the offset
    // the C compiler would naturally assign. `zig_align` annotations reconcile the two.
    var zig_offset: u64 = 0;
    var c_offset: u64 = 0;
    for (tuple.types.get(ip), tuple.values.get(ip), 0..) |field_ty_ip, field_val_ip, field_index| {
        if (field_val_ip != .none) continue; // `comptime` field
        const field_ty: Type = .fromInterned(field_ty_ip);
        const field_align = field_ty.abiAlignment(zcu);
        // Advance the Zig offset even for zero-bit fields; their alignment still affects it.
        zig_offset = field_align.forward(zig_offset);
        if (!field_ty.hasRuntimeBits(zcu)) continue;
        c_offset = field_align.forward(c_offset);
        if (zig_offset == 0 and overalign) {
            // This is the first field; specify its alignment to align the tuple.
            try w.print(" zig_align({d})", .{tuple_align.toByteUnits().?});
        } else if (zig_offset > c_offset) {
            // This field needs to be overaligned compared to what its offset would otherwise be.
            const need_align: Alignment = .fromLog2Units(@ctz(zig_offset));
            try w.print(" zig_align({d})", .{need_align.toByteUnits().?});
            c_offset = need_align.forward(c_offset);
            assert(c_offset == zig_offset);
        }
        // Tuple fields are named `f0`, `f1`, ... after their index.
        const field_cty: CType = try .lower(field_ty, deps, arena, zcu);
        try w.print(" {f}f{d}{f};\n", .{
            field_cty.fmtDeclaratorPrefix(zcu),
            field_index,
            field_cty.fmtDeclaratorSuffix(zcu),
        });
        const field_size = field_ty.abiSize(zcu);
        zig_offset += field_size;
        c_offset += field_size;
    }
    try w.writeAll("};\n");
}
|
||||
/// Renders the `struct` definition for an auto- or extern-layout struct type. Zero-bit fields
/// are skipped; the struct is byte-packed (`zig_packed`) if any field is underaligned, and
/// per-field `zig_align`/`zig_under_align` annotations reproduce the Zig field offsets.
fn defineStruct(
    ty: Type,
    deps: *CType.Dependencies,
    arena: Allocator,
    w: *Writer,
    pt: Zcu.PerThread,
) (Allocator.Error || Writer.Error)!void {
    const zcu = pt.zcu;
    if (!ty.hasRuntimeBits(zcu)) return;
    const ip = &zcu.intern_pool;

    const struct_type = ip.loadStructType(ty.toIntern());

    // If there are any underaligned fields, we need to byte-pack the struct.
    const pack: bool = pack: {
        var it = struct_type.iterateRuntimeOrder(ip);
        var offset: u64 = 0;
        while (it.next()) |field_index| {
            const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[field_index]);
            if (!field_ty.hasRuntimeBits(zcu)) continue;
            const natural_align = field_ty.defaultStructFieldAlignment(struct_type.layout, zcu);
            const natural_offset = natural_align.forward(offset);
            const actual_offset = struct_type.field_offsets.get(ip)[field_index];
            // A field placed before its natural offset can only be expressed by packing.
            if (actual_offset < natural_offset) break :pack true;
            offset = actual_offset + field_ty.abiSize(zcu);
        }
        break :pack false;
    };

    // If the alignment of other fields would not give the struct sufficient alignment, we
    // need to align the first field (which does not affect its offset, because 0 is always
    // well-aligned) to indirectly specify the struct alignment.
    const overalign: bool = switch (pack) {
        true => struct_type.alignment.compareStrict(.gt, .@"1"),
        false => overalign: {
            var it = struct_type.iterateRuntimeOrder(ip);
            while (it.next()) |field_index| {
                const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[field_index]);
                if (!field_ty.hasRuntimeBits(zcu)) continue;
                const natural_align = field_ty.defaultStructFieldAlignment(struct_type.layout, zcu);
                if (natural_align.compareStrict(.gte, struct_type.alignment)) break :overalign false;
            }
            break :overalign true;
        },
    };

    if (pack) try w.writeAll("zig_packed(");
    const name_cty: CType = .{ .@"struct" = ty };
    try w.print("{f} {{ /* {f} */\n", .{
        name_cty.fmtTypeName(zcu),
        ty.fmt(pt),
    });
    // Emit fields in runtime order, tracking the running offset to detect where annotations
    // are needed to match the Zig layout.
    var it = struct_type.iterateRuntimeOrder(ip);
    var offset: u64 = 0;
    while (it.next()) |field_index| {
        const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[field_index]);
        if (!field_ty.hasRuntimeBits(zcu)) continue;
        const natural_align = field_ty.defaultStructFieldAlignment(struct_type.layout, zcu);
        // When packed, the C compiler places fields back-to-back, so the natural offset is
        // simply the running offset.
        const natural_offset = switch (pack) {
            true => offset,
            false => natural_align.forward(offset),
        };
        const actual_offset = struct_type.field_offsets.get(ip)[field_index];
        if (actual_offset == 0 and overalign) {
            // This is the first field; specify its alignment to align the struct.
            try w.print(" zig_align({d})", .{struct_type.alignment.toByteUnits().?});
        } else if (actual_offset > natural_offset) {
            // This field needs to be underaligned or overaligned compared to what its
            // offset would otherwise be.
            const need_align: Alignment = .fromLog2Units(@ctz(actual_offset));
            if (need_align.compareStrict(.lt, natural_align)) {
                try w.print(" zig_under_align({d})", .{need_align.toByteUnits().?});
            } else {
                try w.print(" zig_align({d})", .{need_align.toByteUnits().?});
            }
        }
        const field_cty: CType = try .lower(field_ty, deps, arena, zcu);
        const field_name = struct_type.field_names.get(ip)[field_index].toSlice(ip);
        try w.print(" {f}{f}{f};\n", .{
            field_cty.fmtDeclaratorPrefix(zcu),
            fmtIdentSolo(field_name),
            field_cty.fmtDeclaratorSuffix(zcu),
        });
        offset = actual_offset + field_ty.abiSize(zcu);
    }
    // Sanity check: the emitted layout rounds up to exactly the struct's ABI size.
    assert(struct_type.alignment.forward(offset) == struct_type.size);
    try w.writeByte('}');
    if (pack) try w.writeByte(')');
    try w.writeAll(";\n");
}
|
||||
/// Renders the definition of the C type for an auto-layout union: an outer struct containing an
/// inner `union` of the payload fields (when any payload has runtime bits) and, when the union
/// has a runtime tag, a trailing `tag` member.
fn defineUnionAuto(
    ty: Type,
    deps: *CType.Dependencies,
    arena: Allocator,
    w: *Writer,
    pt: Zcu.PerThread,
) (Allocator.Error || Writer.Error)!void {
    const zcu = pt.zcu;
    if (!ty.hasRuntimeBits(zcu)) return;
    const ip = &zcu.intern_pool;

    const union_type = ip.loadUnionType(ty.toIntern());
    const enum_tag_ty: Type = .fromInterned(union_type.enum_tag_type);

    // If there are any underaligned fields, we need to byte-pack the union.
    const pack: bool = for (union_type.field_types.get(ip)) |field_ty_ip| {
        const field_ty: Type = .fromInterned(field_ty_ip);
        if (!field_ty.hasRuntimeBits(zcu)) continue;
        const natural_align = field_ty.abiAlignment(zcu);
        if (natural_align.compareStrict(.gt, union_type.alignment)) break true;
    } else false;

    // If the alignment of other fields would not give the union sufficient alignment, we
    // need to align the first field (which does not affect its offset, because 0 is always
    // well-aligned) to indirectly specify the union alignment.
    const overalign: bool = switch (pack) {
        true => union_type.alignment.compareStrict(.gt, .@"1"),
        false => for (union_type.field_types.get(ip)) |field_ty_ip| {
            const field_ty: Type = .fromInterned(field_ty_ip);
            if (!field_ty.hasRuntimeBits(zcu)) continue;
            const natural_align = field_ty.abiAlignment(zcu);
            if (natural_align.compareStrict(.gte, union_type.alignment)) break false;
        } else overalign: {
            // The tag's alignment may also be what aligns the union.
            if (union_type.has_runtime_tag) {
                const tag_align = enum_tag_ty.abiAlignment(zcu);
                if (tag_align.compareStrict(.gte, union_type.alignment)) break :overalign false;
            }
            break :overalign true;
        },
    };

    // The payload union member is omitted when the tag alone accounts for the union's size.
    const payload_has_bits = !union_type.has_runtime_tag or union_type.size > enum_tag_ty.abiSize(zcu);

    const name_cty: CType = .{ .union_auto = ty };
    try w.print("{f} {{ /* {f} */\n", .{
        name_cty.fmtTypeName(zcu),
        ty.fmt(pt),
    });
    if (payload_has_bits) {
        try w.writeByte(' ');
        if (pack) try w.writeAll("zig_packed(");
        try w.writeAll("union {\n");
        // One union member per field with runtime bits, named after the enum tag field.
        for (0..enum_tag_ty.enumFieldCount(zcu)) |field_index| {
            const field_ty = ty.fieldType(field_index, zcu);
            if (!field_ty.hasRuntimeBits(zcu)) continue;
            const field_name = enum_tag_ty.enumFieldName(field_index, zcu).toSlice(ip);
            const field_cty: CType = try .lower(field_ty, deps, arena, zcu);
            try w.writeAll(" ");
            if (overalign and field_index == 0) {
                // This is the first field; specify its alignment to align the union.
                try w.print("zig_align({d}) ", .{union_type.alignment.toByteUnits().?});
            }
            try w.print("{f}{f}{f};\n", .{
                field_cty.fmtDeclaratorPrefix(zcu),
                fmtIdentSolo(field_name),
                field_cty.fmtDeclaratorSuffix(zcu),
            });
        }
        try w.writeAll(" }");
        if (pack) try w.writeByte(')');
        try w.writeAll(" payload;\n");
    }
    if (union_type.has_runtime_tag) {
        const tag_cty: CType = try .lower(enum_tag_ty, deps, arena, zcu);
        try w.print(" {f}tag{f};\n", .{
            tag_cty.fmtDeclaratorPrefix(zcu),
            tag_cty.fmtDeclaratorSuffix(zcu),
        });
    }
    try w.writeAll("};\n");
}
|
||||
/// Renders the `union` definition for an extern-layout union type. Zero-bit fields are skipped;
/// the union is byte-packed (`zig_packed`) if any field's natural alignment exceeds the union's
/// alignment, and the first field may carry a `zig_align` annotation to align the union.
///
/// Asserts that the union has no runtime tag (extern unions are untagged at runtime).
fn defineUnionExtern(
    ty: Type,
    deps: *CType.Dependencies,
    arena: Allocator,
    w: *Writer,
    pt: Zcu.PerThread,
) (Allocator.Error || Writer.Error)!void {
    const zcu = pt.zcu;
    if (!ty.hasRuntimeBits(zcu)) return;
    const ip = &zcu.intern_pool;

    const union_type = ip.loadUnionType(ty.toIntern());
    assert(!union_type.has_runtime_tag);
    // The tag enum type is still needed for field names and the field count.
    const enum_tag_ty: Type = .fromInterned(union_type.enum_tag_type);

    // If there are any underaligned fields, we need to byte-pack the union.
    const pack: bool = for (union_type.field_types.get(ip)) |field_ty_ip| {
        const field_ty: Type = .fromInterned(field_ty_ip);
        if (!field_ty.hasRuntimeBits(zcu)) continue;
        const natural_align = field_ty.abiAlignment(zcu);
        if (natural_align.compareStrict(.gt, union_type.alignment)) break true;
    } else false;

    // If the alignment of other fields would not give the union sufficient alignment, we
    // need to align the first field (which does not affect its offset, because 0 is always
    // well-aligned) to indirectly specify the union alignment.
    //
    // Unlike `defineUnionAuto`, there is no runtime tag to consider here: the assertion
    // above guarantees this union is untagged, so the old `has_runtime_tag` check in the
    // else-prong was dead code and has been removed.
    const overalign: bool = switch (pack) {
        true => union_type.alignment.compareStrict(.gt, .@"1"),
        false => for (union_type.field_types.get(ip)) |field_ty_ip| {
            const field_ty: Type = .fromInterned(field_ty_ip);
            if (!field_ty.hasRuntimeBits(zcu)) continue;
            const natural_align = field_ty.abiAlignment(zcu);
            if (natural_align.compareStrict(.gte, union_type.alignment)) break false;
        } else true,
    };

    if (pack) try w.writeAll("zig_packed(");

    const name_cty: CType = .{ .union_extern = ty };
    try w.print("{f} {{ /* {f} */\n", .{
        name_cty.fmtTypeName(zcu),
        ty.fmt(pt),
    });

    // One member per field with runtime bits, named after the corresponding enum tag field.
    for (0..enum_tag_ty.enumFieldCount(zcu)) |field_index| {
        const field_ty = ty.fieldType(field_index, zcu);
        if (!field_ty.hasRuntimeBits(zcu)) continue;
        const field_name = enum_tag_ty.enumFieldName(field_index, zcu).toSlice(ip);
        const field_cty: CType = try .lower(field_ty, deps, arena, zcu);
        if (overalign and field_index == 0) {
            // This is the first field; specify its alignment to align the union.
            try w.print(" zig_align({d})", .{union_type.alignment.toByteUnits().?});
        }
        try w.print(" {f}{f}{f};\n", .{
            field_cty.fmtDeclaratorPrefix(zcu),
            fmtIdentSolo(field_name),
            field_cty.fmtDeclaratorSuffix(zcu),
        });
    }
    try w.writeByte('}');
    if (pack) try w.writeByte(')');
    try w.writeAll(";\n");
}
|
||||
|
||||
const std = @import("std");
|
||||
const assert = std.debug.assert;
|
||||
const Writer = std.Io.Writer;
|
||||
const Allocator = std.mem.Allocator;
|
||||
|
||||
const Zcu = @import("../../../Zcu.zig");
|
||||
const Type = @import("../../../Type.zig");
|
||||
const Value = @import("../../../Value.zig");
|
||||
const CType = @import("../type.zig").CType;
|
||||
const Alignment = @import("../../../InternPool.zig").Alignment;
|
||||
|
||||
const fmtIdentSolo = @import("../../c.zig").fmtIdentSolo;
|
||||
|
|
@ -23,7 +23,6 @@ const Package = @import("../Package.zig");
|
|||
const Air = @import("../Air.zig");
|
||||
const Value = @import("../Value.zig");
|
||||
const Type = @import("../Type.zig");
|
||||
const DebugConstPool = link.DebugConstPool;
|
||||
const codegen = @import("../codegen.zig");
|
||||
const x86_64_abi = @import("x86_64/abi.zig");
|
||||
const wasm_c_abi = @import("wasm/abi.zig");
|
||||
|
|
@ -532,8 +531,8 @@ pub const Object = struct {
|
|||
debug_file_map: std.AutoHashMapUnmanaged(Zcu.File.Index, Builder.Metadata),
|
||||
|
||||
/// This pool *only* contains types (and does not contain `@as(type, undefined)`).
|
||||
debug_type_pool: DebugConstPool,
|
||||
/// Keyed on `DebugConstPool.Index`.
|
||||
debug_type_pool: link.ConstPool,
|
||||
/// Keyed on `link.ConstPool.Index`.
|
||||
debug_types: std.ArrayList(Builder.Metadata),
|
||||
/// Initially `.none`, set if the type `anyerror` is lowered to a debug type. The type will not
|
||||
/// actually be created until `emit`, which must resolve this reference with an appropriate enum
|
||||
|
|
@ -1622,10 +1621,7 @@ pub const Object = struct {
|
|||
}
|
||||
|
||||
fn flushPendingDebugTypes(o: *Object, pt: Zcu.PerThread) Allocator.Error!void {
|
||||
o.debug_type_pool.flushPending(pt, .{ .llvm = o }) catch |err| switch (err) {
|
||||
error.OutOfMemory => |e| return e,
|
||||
else => unreachable, // TODO: stop self-hosted backends from returning all of this crap!
|
||||
};
|
||||
try o.debug_type_pool.flushPending(pt, .{ .llvm = o });
|
||||
}
|
||||
|
||||
pub fn updateExports(
|
||||
|
|
@ -1823,17 +1819,14 @@ pub const Object = struct {
|
|||
|
||||
pub fn updateContainerType(o: *Object, pt: Zcu.PerThread, ty: InternPool.Index, success: bool) Allocator.Error!void {
|
||||
if (!o.builder.strip) {
|
||||
o.debug_type_pool.updateContainerType(pt, .{ .llvm = o }, ty, success) catch |err| switch (err) {
|
||||
error.OutOfMemory => |e| return e,
|
||||
else => unreachable, // TODO: stop self-hosted backends from returning all of this crap!
|
||||
};
|
||||
try o.debug_type_pool.updateContainerType(pt, .{ .llvm = o }, ty, success);
|
||||
}
|
||||
}
|
||||
|
||||
/// Should only be called by the `DebugConstPool` implementation.
|
||||
/// Should only be called by the `link.ConstPool` implementation.
|
||||
///
|
||||
/// `val` is always a type because `o.debug_type_pool` only contains types.
|
||||
pub fn addConst(o: *Object, pt: Zcu.PerThread, index: DebugConstPool.Index, val: InternPool.Index) Allocator.Error!void {
|
||||
pub fn addConst(o: *Object, pt: Zcu.PerThread, index: link.ConstPool.Index, val: InternPool.Index) Allocator.Error!void {
|
||||
const zcu = pt.zcu;
|
||||
const gpa = zcu.comp.gpa;
|
||||
assert(zcu.intern_pool.typeOf(val) == .type_type);
|
||||
|
|
@ -1846,10 +1839,10 @@ pub const Object = struct {
|
|||
o.debug_anyerror_fwd_ref = fwd_ref.toOptional();
|
||||
}
|
||||
}
|
||||
/// Should only be called by the `DebugConstPool` implementation.
|
||||
/// Should only be called by the `link.ConstPool` implementation.
|
||||
///
|
||||
/// `val` is always a type because `o.debug_type_pool` only contains types.
|
||||
pub fn updateConstIncomplete(o: *Object, pt: Zcu.PerThread, index: DebugConstPool.Index, val: InternPool.Index) Allocator.Error!void {
|
||||
pub fn updateConstIncomplete(o: *Object, pt: Zcu.PerThread, index: link.ConstPool.Index, val: InternPool.Index) Allocator.Error!void {
|
||||
assert(pt.zcu.intern_pool.typeOf(val) == .type_type);
|
||||
const fwd_ref = o.debug_types.items[@intFromEnum(index)];
|
||||
assert(val != .anyerror_type);
|
||||
|
|
@ -1857,10 +1850,10 @@ pub const Object = struct {
|
|||
const debug_incomplete_type = try o.builder.debugSignedType(name_str, 0);
|
||||
o.builder.resolveDebugForwardReference(fwd_ref, debug_incomplete_type);
|
||||
}
|
||||
/// Should only be called by the `DebugConstPool` implementation.
|
||||
/// Should only be called by the `link.ConstPool` implementation.
|
||||
///
|
||||
/// `val` is always a type because `o.debug_type_pool` only contains types.
|
||||
pub fn updateConst(o: *Object, pt: Zcu.PerThread, index: DebugConstPool.Index, val: InternPool.Index) Allocator.Error!void {
|
||||
pub fn updateConst(o: *Object, pt: Zcu.PerThread, index: link.ConstPool.Index, val: InternPool.Index) Allocator.Error!void {
|
||||
assert(pt.zcu.intern_pool.typeOf(val) == .type_type);
|
||||
const fwd_ref = o.debug_types.items[@intFromEnum(index)];
|
||||
if (val == .anyerror_type) {
|
||||
|
|
@ -1890,10 +1883,7 @@ pub const Object = struct {
|
|||
|
||||
fn getDebugType(o: *Object, pt: Zcu.PerThread, ty: Type) Allocator.Error!Builder.Metadata {
|
||||
assert(!o.builder.strip);
|
||||
const index = o.debug_type_pool.get(pt, .{ .llvm = o }, ty.toIntern()) catch |err| switch (err) {
|
||||
error.OutOfMemory => |e| return e,
|
||||
else => unreachable, // TODO: stop self-hosted backends from returning all of this crap!
|
||||
};
|
||||
const index = try o.debug_type_pool.get(pt, .{ .llvm = o }, ty.toIntern());
|
||||
return o.debug_types.items[@intFromEnum(index)];
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -29,7 +29,7 @@ const codegen = @import("codegen.zig");
|
|||
pub const aarch64 = @import("link/aarch64.zig");
|
||||
pub const LdScript = @import("link/LdScript.zig");
|
||||
pub const Queue = @import("link/Queue.zig");
|
||||
pub const DebugConstPool = @import("link/DebugConstPool.zig");
|
||||
pub const ConstPool = @import("link/ConstPool.zig");
|
||||
|
||||
pub const Diags = struct {
|
||||
/// Stored here so that function definitions can distinguish between
|
||||
|
|
@ -804,7 +804,7 @@ pub const File = struct {
|
|||
switch (base.tag) {
|
||||
.lld => unreachable,
|
||||
else => {},
|
||||
inline .elf => |tag| {
|
||||
inline .elf, .c => |tag| {
|
||||
dev.check(tag.devFeature());
|
||||
return @as(*tag.Type(), @fieldParentPtr("base", base)).updateContainerType(pt, ty, success);
|
||||
},
|
||||
|
|
|
|||
1909
src/link/C.zig
1909
src/link/C.zig
File diff suppressed because it is too large
Load diff
|
|
@ -9,14 +9,11 @@
|
|||
/// Indices into the pool are dense, and constants are never removed from the pool, so the debug
|
||||
/// info implementation can store information for each one with a simple `ArrayList`.
|
||||
///
|
||||
/// To use `DebugConstPool`, the debug info implementation is required to:
|
||||
/// * forward `updateContainerType` calls to its `DebugConstPool`
|
||||
/// * expose some callback functions---see functions in `DebugInfo`
|
||||
/// To use `ConstPool`, the debug info implementation is required to:
|
||||
/// * forward `updateContainerType` calls to its `ConstPool`
|
||||
/// * expose some callback functions---see functions in `User`
|
||||
/// * ensure that any `get` call is eventually followed by a `flushPending` call
|
||||
///
|
||||
/// TODO: everything in this file should have the error set 'Allocator.Error', but right now the
|
||||
/// self-hosted linkers can return all kinds of crap for some reason. This needs fixing.
|
||||
const DebugConstPool = @This();
|
||||
const ConstPool = @This();
|
||||
|
||||
values: std.AutoArrayHashMapUnmanaged(InternPool.Index, void),
|
||||
pending: std.ArrayList(Index),
|
||||
|
|
@ -24,7 +21,7 @@ complete_containers: std.AutoArrayHashMapUnmanaged(InternPool.Index, void),
|
|||
container_deps: std.AutoArrayHashMapUnmanaged(InternPool.Index, ContainerDepEntry.Index),
|
||||
container_dep_entries: std.ArrayList(ContainerDepEntry),
|
||||
|
||||
pub const empty: DebugConstPool = .{
|
||||
pub const empty: ConstPool = .{
|
||||
.values = .empty,
|
||||
.pending = .empty,
|
||||
.complete_containers = .empty,
|
||||
|
|
@ -32,7 +29,7 @@ pub const empty: DebugConstPool = .{
|
|||
.container_dep_entries = .empty,
|
||||
};
|
||||
|
||||
pub fn deinit(pool: *DebugConstPool, gpa: Allocator) void {
|
||||
pub fn deinit(pool: *ConstPool, gpa: Allocator) void {
|
||||
pool.values.deinit(gpa);
|
||||
pool.pending.deinit(gpa);
|
||||
pool.complete_containers.deinit(gpa);
|
||||
|
|
@ -42,13 +39,14 @@ pub fn deinit(pool: *DebugConstPool, gpa: Allocator) void {
|
|||
|
||||
pub const Index = enum(u32) {
|
||||
_,
|
||||
pub fn val(i: Index, pool: *const DebugConstPool) InternPool.Index {
|
||||
pub fn val(i: Index, pool: *const ConstPool) InternPool.Index {
|
||||
return pool.values.keys()[@intFromEnum(i)];
|
||||
}
|
||||
};
|
||||
|
||||
pub const DebugInfo = union(enum) {
|
||||
pub const User = union(enum) {
|
||||
dwarf: *@import("Dwarf.zig"),
|
||||
c: *@import("C.zig"),
|
||||
llvm: @import("../codegen/llvm.zig").Object.Ptr,
|
||||
|
||||
/// Inform the debug info implementation that the new constant `val` was added to the pool at
|
||||
|
|
@ -56,12 +54,12 @@ pub const DebugInfo = union(enum) {
|
|||
/// that there will eventually be a call to either `updateConst` or `updateConstIncomplete`
|
||||
/// following the `addConst` call, to actually populate the constant's debug info.
|
||||
fn addConst(
|
||||
di: DebugInfo,
|
||||
user: User,
|
||||
pt: Zcu.PerThread,
|
||||
index: Index,
|
||||
val: InternPool.Index,
|
||||
) !void {
|
||||
switch (di) {
|
||||
) Allocator.Error!void {
|
||||
switch (user) {
|
||||
inline else => |impl| return impl.addConst(pt, index, val),
|
||||
}
|
||||
}
|
||||
|
|
@ -71,12 +69,12 @@ pub const DebugInfo = union(enum) {
|
|||
/// * If it is a type, its layout is known.
|
||||
/// * Otherwise, the layout of its type is known.
|
||||
fn updateConst(
|
||||
di: DebugInfo,
|
||||
user: User,
|
||||
pt: Zcu.PerThread,
|
||||
index: Index,
|
||||
val: InternPool.Index,
|
||||
) !void {
|
||||
switch (di) {
|
||||
) Allocator.Error!void {
|
||||
switch (user) {
|
||||
inline else => |impl| return impl.updateConst(pt, index, val),
|
||||
}
|
||||
}
|
||||
|
|
@ -87,12 +85,12 @@ pub const DebugInfo = union(enum) {
|
|||
/// initialized so never had its layout resolved). Instead, the implementation must emit some
|
||||
/// form of placeholder entry representing an incomplete/unknown constant.
|
||||
fn updateConstIncomplete(
|
||||
di: DebugInfo,
|
||||
user: User,
|
||||
pt: Zcu.PerThread,
|
||||
index: Index,
|
||||
val: InternPool.Index,
|
||||
) !void {
|
||||
switch (di) {
|
||||
) Allocator.Error!void {
|
||||
switch (user) {
|
||||
inline else => |impl| return impl.updateConstIncomplete(pt, index, val),
|
||||
}
|
||||
}
|
||||
|
|
@ -100,7 +98,7 @@ pub const DebugInfo = union(enum) {
|
|||
|
||||
const ContainerDepEntry = extern struct {
|
||||
next: ContainerDepEntry.Index.Optional,
|
||||
depender: DebugConstPool.Index,
|
||||
depender: ConstPool.Index,
|
||||
const Index = enum(u32) {
|
||||
_,
|
||||
const Optional = enum(u32) {
|
||||
|
|
@ -116,7 +114,7 @@ const ContainerDepEntry = extern struct {
|
|||
fn toOptional(i: ContainerDepEntry.Index) Optional {
|
||||
return @enumFromInt(@intFromEnum(i));
|
||||
}
|
||||
fn ptr(i: ContainerDepEntry.Index, pool: *DebugConstPool) *ContainerDepEntry {
|
||||
fn ptr(i: ContainerDepEntry.Index, pool: *ConstPool) *ContainerDepEntry {
|
||||
return &pool.container_dep_entries.items[@intFromEnum(i)];
|
||||
}
|
||||
};
|
||||
|
|
@ -125,12 +123,12 @@ const ContainerDepEntry = extern struct {
|
|||
/// Calls to `link.File.updateContainerType` must be forwarded to this function so that the debug
|
||||
/// constant pool has up-to-date information about the resolution status of types.
|
||||
pub fn updateContainerType(
|
||||
pool: *DebugConstPool,
|
||||
pool: *ConstPool,
|
||||
pt: Zcu.PerThread,
|
||||
di: DebugInfo,
|
||||
user: User,
|
||||
container_ty: InternPool.Index,
|
||||
success: bool,
|
||||
) !void {
|
||||
) Allocator.Error!void {
|
||||
if (success) {
|
||||
const gpa = pt.zcu.comp.gpa;
|
||||
try pool.complete_containers.put(gpa, container_ty, {});
|
||||
|
|
@ -139,18 +137,18 @@ pub fn updateContainerType(
|
|||
}
|
||||
var opt_dep = pool.container_deps.get(container_ty);
|
||||
while (opt_dep) |dep| : (opt_dep = dep.ptr(pool).next.unwrap()) {
|
||||
try pool.update(pt, di, dep.ptr(pool).depender);
|
||||
try pool.update(pt, user, dep.ptr(pool).depender);
|
||||
}
|
||||
}
|
||||
|
||||
/// After this is called, there may be a constant for which debug information (complete or not) has
|
||||
/// not yet been emitted, so the user must call `flushPending` at some point after this call.
|
||||
pub fn get(pool: *DebugConstPool, pt: Zcu.PerThread, di: DebugInfo, val: InternPool.Index) !DebugConstPool.Index {
|
||||
pub fn get(pool: *ConstPool, pt: Zcu.PerThread, user: User, val: InternPool.Index) Allocator.Error!ConstPool.Index {
|
||||
const zcu = pt.zcu;
|
||||
const ip = &zcu.intern_pool;
|
||||
const gpa = zcu.comp.gpa;
|
||||
const gop = try pool.values.getOrPut(gpa, val);
|
||||
const index: DebugConstPool.Index = @enumFromInt(gop.index);
|
||||
const index: ConstPool.Index = @enumFromInt(gop.index);
|
||||
if (!gop.found_existing) {
|
||||
const ty: Type = switch (ip.typeOf(val)) {
|
||||
.type_type => if (ip.isUndef(val)) .type else .fromInterned(val),
|
||||
|
|
@ -158,17 +156,17 @@ pub fn get(pool: *DebugConstPool, pt: Zcu.PerThread, di: DebugInfo, val: InternP
|
|||
};
|
||||
try pool.registerTypeDeps(index, ty, zcu);
|
||||
try pool.pending.append(gpa, index);
|
||||
try di.addConst(pt, index, val);
|
||||
try user.addConst(pt, index, val);
|
||||
}
|
||||
return index;
|
||||
}
|
||||
pub fn flushPending(pool: *DebugConstPool, pt: Zcu.PerThread, di: DebugInfo) !void {
|
||||
pub fn flushPending(pool: *ConstPool, pt: Zcu.PerThread, user: User) Allocator.Error!void {
|
||||
while (pool.pending.pop()) |pending_ty| {
|
||||
try pool.update(pt, di, pending_ty);
|
||||
try pool.update(pt, user, pending_ty);
|
||||
}
|
||||
}
|
||||
|
||||
fn update(pool: *DebugConstPool, pt: Zcu.PerThread, di: DebugInfo, index: DebugConstPool.Index) !void {
|
||||
fn update(pool: *ConstPool, pt: Zcu.PerThread, user: User, index: ConstPool.Index) Allocator.Error!void {
|
||||
const zcu = pt.zcu;
|
||||
const ip = &zcu.intern_pool;
|
||||
const val = index.val(pool);
|
||||
|
|
@ -177,12 +175,12 @@ fn update(pool: *DebugConstPool, pt: Zcu.PerThread, di: DebugInfo, index: DebugC
|
|||
else => |ty| .fromInterned(ty),
|
||||
};
|
||||
if (pool.checkType(ty, zcu)) {
|
||||
try di.updateConst(pt, index, val);
|
||||
try user.updateConst(pt, index, val);
|
||||
} else {
|
||||
try di.updateConstIncomplete(pt, index, val);
|
||||
try user.updateConstIncomplete(pt, index, val);
|
||||
}
|
||||
}
|
||||
fn checkType(pool: *const DebugConstPool, ty: Type, zcu: *const Zcu) bool {
|
||||
fn checkType(pool: *const ConstPool, ty: Type, zcu: *const Zcu) bool {
|
||||
if (ty.isGenericPoison()) return true;
|
||||
return switch (ty.zigTypeTag(zcu)) {
|
||||
.type,
|
||||
|
|
@ -227,7 +225,7 @@ fn checkType(pool: *const DebugConstPool, ty: Type, zcu: *const Zcu) bool {
|
|||
},
|
||||
};
|
||||
}
|
||||
fn registerTypeDeps(pool: *DebugConstPool, root: Index, ty: Type, zcu: *const Zcu) Allocator.Error!void {
|
||||
fn registerTypeDeps(pool: *ConstPool, root: Index, ty: Type, zcu: *const Zcu) Allocator.Error!void {
|
||||
if (ty.isGenericPoison()) return;
|
||||
switch (ty.zigTypeTag(zcu)) {
|
||||
.type,
|
||||
|
|
@ -18,7 +18,6 @@ const codegen = @import("../codegen.zig");
|
|||
const dev = @import("../dev.zig");
|
||||
const link = @import("../link.zig");
|
||||
const target_info = @import("../target.zig");
|
||||
const DebugConstPool = link.DebugConstPool;
|
||||
|
||||
gpa: Allocator,
|
||||
bin_file: *link.File,
|
||||
|
|
@ -26,10 +25,10 @@ format: DW.Format,
|
|||
endian: std.builtin.Endian,
|
||||
address_size: AddressSize,
|
||||
|
||||
const_pool: DebugConstPool,
|
||||
const_pool: link.ConstPool,
|
||||
|
||||
mods: std.AutoArrayHashMapUnmanaged(*Module, ModInfo),
|
||||
/// Indices are `DebugConstPool.Index`.
|
||||
/// Indices are `link.ConstPool.Index`.
|
||||
values: std.ArrayList(struct { Unit.Index, Entry.Index }),
|
||||
navs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, Entry.Index),
|
||||
decls: std.AutoArrayHashMapUnmanaged(InternPool.TrackedInst.Index, Entry.Index),
|
||||
|
|
@ -1038,7 +1037,7 @@ const Entry = struct {
|
|||
const zcu = dwarf.bin_file.comp.zcu.?;
|
||||
const ip = &zcu.intern_pool;
|
||||
for (0.., dwarf.values.items) |raw_index, unit_and_entry| {
|
||||
const index: DebugConstPool.Index = @enumFromInt(raw_index);
|
||||
const index: link.ConstPool.Index = @enumFromInt(raw_index);
|
||||
const val = index.val(&dwarf.const_pool);
|
||||
const val_unit, const val_entry = unit_and_entry;
|
||||
if (sec.getUnit(val_unit) == unit and unit.getEntry(val_entry) == entry)
|
||||
|
|
@ -3291,8 +3290,14 @@ pub fn updateContainerType(
|
|||
) !void {
|
||||
try dwarf.const_pool.updateContainerType(pt, .{ .dwarf = dwarf }, ty, success);
|
||||
}
|
||||
/// Should only be called by the `DebugConstPool` implementation.
|
||||
pub fn addConst(dwarf: *Dwarf, pt: Zcu.PerThread, index: DebugConstPool.Index, val: InternPool.Index) !void {
|
||||
/// Should only be called by the `link.ConstPool` implementation.
|
||||
pub fn addConst(dwarf: *Dwarf, pt: Zcu.PerThread, index: link.ConstPool.Index, val: InternPool.Index) Allocator.Error!void {
|
||||
addConstInner(dwarf, pt, index, val) catch |err| switch (err) {
|
||||
error.OutOfMemory => |e| return e,
|
||||
else => |e| std.debug.panic("DWARF TODO: '{t}' while registering constant\n", .{e}),
|
||||
};
|
||||
}
|
||||
fn addConstInner(dwarf: *Dwarf, pt: Zcu.PerThread, index: link.ConstPool.Index, val: InternPool.Index) !void {
|
||||
const zcu = pt.zcu;
|
||||
const ip = &zcu.intern_pool;
|
||||
|
||||
|
|
@ -3321,11 +3326,17 @@ pub fn addConst(dwarf: *Dwarf, pt: Zcu.PerThread, index: DebugConstPool.Index, v
|
|||
assert(@intFromEnum(index) == dwarf.values.items.len);
|
||||
try dwarf.values.append(dwarf.gpa, .{ unit, entry });
|
||||
}
|
||||
/// Should only be called by the `DebugConstPool` implementation.
|
||||
/// Should only be called by the `link.ConstPool` implementation.
|
||||
///
|
||||
/// Emits a "dummy" DIE for the given comptime-only value (which may be a type). For types, this is
|
||||
/// an opaque type. Otherwise, it is an undefined value of the value's type.
|
||||
pub fn updateConstIncomplete(dwarf: *Dwarf, pt: Zcu.PerThread, debug_const_index: DebugConstPool.Index, value_index: InternPool.Index) !void {
|
||||
pub fn updateConstIncomplete(dwarf: *Dwarf, pt: Zcu.PerThread, debug_const_index: link.ConstPool.Index, value_index: InternPool.Index) Allocator.Error!void {
|
||||
updateConstIncompleteInner(dwarf, pt, debug_const_index, value_index) catch |err| switch (err) {
|
||||
error.OutOfMemory => |e| return e,
|
||||
else => |e| std.debug.panic("DWARF TODO: '{t}' while updating incomplete constant\n", .{e}),
|
||||
};
|
||||
}
|
||||
fn updateConstIncompleteInner(dwarf: *Dwarf, pt: Zcu.PerThread, debug_const_index: link.ConstPool.Index, value_index: InternPool.Index) !void {
|
||||
const zcu = pt.zcu;
|
||||
|
||||
const val: Value = .fromInterned(value_index);
|
||||
|
|
@ -3380,10 +3391,16 @@ pub fn updateConstIncomplete(dwarf: *Dwarf, pt: Zcu.PerThread, debug_const_index
|
|||
try dwarf.debug_info.section.replaceEntry(unit, entry, dwarf, wip_nav.debug_info.written());
|
||||
try dwarf.debug_loclists.section.replaceEntry(unit, entry, dwarf, wip_nav.debug_loclists.written());
|
||||
}
|
||||
/// Should only be called by the `DebugConstPool` implementation.
|
||||
/// Should only be called by the `link.ConstPool` implementation.
|
||||
///
|
||||
/// Emits a DIE for the given comptime-only value (which may be a type).
|
||||
pub fn updateConst(dwarf: *Dwarf, pt: Zcu.PerThread, debug_const_index: DebugConstPool.Index, value_index: InternPool.Index) !void {
|
||||
pub fn updateConst(dwarf: *Dwarf, pt: Zcu.PerThread, debug_const_index: link.ConstPool.Index, value_index: InternPool.Index) Allocator.Error!void {
|
||||
updateConstInner(dwarf, pt, debug_const_index, value_index) catch |err| switch (err) {
|
||||
error.OutOfMemory => |e| return e,
|
||||
else => |e| std.debug.panic("DWARF TODO: '{t}' while updating constant\n", .{e}),
|
||||
};
|
||||
}
|
||||
fn updateConstInner(dwarf: *Dwarf, pt: Zcu.PerThread, debug_const_index: link.ConstPool.Index, value_index: InternPool.Index) !void {
|
||||
const zcu = pt.zcu;
|
||||
const ip = &zcu.intern_pool;
|
||||
|
||||
|
|
|
|||
|
|
@ -1716,19 +1716,8 @@ pub fn updateContainerType(
|
|||
if (build_options.skip_non_native and builtin.object_format != .elf) {
|
||||
@panic("Attempted to compile for object format that was disabled by build configuration");
|
||||
}
|
||||
const zcu = pt.zcu;
|
||||
const gpa = zcu.gpa;
|
||||
return self.zigObjectPtr().?.updateContainerType(pt, ty, success) catch |err| switch (err) {
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
else => |e| {
|
||||
try zcu.failed_types.putNoClobber(gpa, ty, try Zcu.ErrorMsg.create(
|
||||
gpa,
|
||||
zcu.typeSrcLoc(ty),
|
||||
"failed to update container type: {s}",
|
||||
.{@errorName(e)},
|
||||
));
|
||||
return error.TypeFailureReported;
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue