compiler: make Dwarf and self-hosted x86_64 happy

Introduces a small abstraction, `link.DebugConstPool`, to deal with
lowering type/value information into debug info when the required layout
information may not be known until type resolution (which in some cases
will *never* happen). It is
currently only used by self-hosted DWARF logic, but it will also be of
use to the LLVM backend (which is my next focus).
This commit is contained in:
Matthew Lugg 2026-02-11 15:05:33 +00:00
parent 44f6de0fb5
commit 6b185e94b9
No known key found for this signature in database
GPG key ID: 3F5B7DCCBF4AF02E
8 changed files with 1029 additions and 1464 deletions

View file

@ -956,8 +956,7 @@ pub const RcSourceFile = struct {
const Job = union(enum) {
/// Given the generated AIR for a function, put it onto the code generation queue.
/// This `Job` exists (instead of the `link.ZcuTask` being directly queued) to ensure that
/// all types are resolved before the linker task is queued.
/// MLUGG TODO: because type resolution is no longer necessary, we can remove this now
/// If the backend does not support `Zcu.Feature.separate_thread`, codegen and linking happen immediately.
/// Before queueing this `Job`, increase the estimated total item count for both
/// `comp.zcu.?.codegen_prog_node` and `comp.link_prog_node`.
@ -967,17 +966,10 @@ const Job = union(enum) {
air: Air,
},
/// Queue a `link.ZcuTask` to emit this non-function `Nav` into the output binary.
/// This `Job` exists (instead of the `link.ZcuTask` being directly queued) to ensure that
/// all types are resolved before the linker task is queued.
/// MLUGG TODO: because type resolution is no longer necessary, we can remove this now
/// If the backend does not support `Zcu.Feature.separate_thread`, the task is run immediately.
/// Before queueing this `Job`, increase the estimated total item count for `comp.link_prog_node`.
link_nav: InternPool.Nav.Index,
/// Queue a `link.ZcuTask` to emit debug information for this container type.
/// This `Job` exists (instead of the `link.ZcuTask` being directly queued) to ensure that
/// all types are resolved before the linker task is queued.
/// If the backend does not support `Zcu.Feature.separate_thread`, the task is run immediately.
/// Before queueing this `Job`, increase the estimated total item count for `comp.link_prog_node`.
link_type: InternPool.Index,
/// Before queueing this `Job`, increase the estimated total item count for `comp.link_prog_node`.
update_line_number: InternPool.TrackedInst.Index,
/// The `AnalUnit`, which is *not* a `func`, must be semantically analyzed.
@ -5058,26 +5050,6 @@ fn processOneJob(tid: Zcu.PerThread.Id, comp: *Compilation, job: Job) JobError!v
var owned_air: ?Air = func.air;
defer if (owned_air) |*air| air.deinit(gpa);
{
const pt: Zcu.PerThread = .activate(zcu, @enumFromInt(tid));
defer pt.deactivate();
pt.resolveAirTypesForCodegen(&owned_air.?) catch |err| switch (err) {
error.OutOfMemory,
error.Canceled,
=> |e| return e,
error.AnalysisFail => {
// Type resolution failed, making codegen of this function impossible. This
// is a transitive failure, but it doesn't need recording, because this
// function semantically depends on the failed type, so when it is changed
// the function will be updated.
zcu.codegen_prog_node.completeOne();
comp.link_prog_node.completeOne();
return;
},
};
}
// Some linkers need to refer to the AIR. In that case, the linker is not running
// concurrently, so we'll just keep ownership of the AIR for ourselves instead of
// letting the codegen job destroy it.
@ -5101,51 +5073,10 @@ fn processOneJob(tid: Zcu.PerThread.Id, comp: *Compilation, job: Job) JobError!v
}
}
assert(nav.status == .fully_resolved);
{
const pt: Zcu.PerThread = .activate(zcu, @enumFromInt(tid));
defer pt.deactivate();
pt.resolveValueTypesForCodegen(zcu.navValue(nav_index)) catch |err| switch (err) {
error.OutOfMemory,
error.Canceled,
=> |e| return e,
error.AnalysisFail => {
// Type resolution failed, making codegen of this `Nav` impossible. This is
// a transitive failure, but it doesn't need recording, because this `Nav`
// semantically depends on the failed type, so when it is changed the value
// of the `Nav` will be updated.
comp.link_prog_node.completeOne();
return;
},
};
}
try comp.link_queue.enqueueZcu(comp, tid, .{ .link_nav = nav_index });
},
.link_type => |ty| {
const zcu = comp.zcu.?;
if (zcu.failed_types.fetchSwapRemove(ty)) |*entry| entry.value.deinit(zcu.gpa);
{
const pt: Zcu.PerThread = .activate(zcu, @enumFromInt(tid));
defer pt.deactivate();
pt.resolveTypeForCodegen(.fromInterned(ty)) catch |err| switch (err) {
error.OutOfMemory,
error.Canceled,
=> |e| return e,
error.AnalysisFail => {
// Type resolution failed, making codegen of this type impossible. This is
// a transitive failure, but it doesn't need recording, because this type
// semantically depends on the failed type, so when it is changed the type
// will be updated appropriately.
comp.link_prog_node.completeOne();
return;
},
};
}
try comp.link_queue.enqueueZcu(comp, tid, .{ .link_type = ty });
},
.update_line_number => |tracked_inst| {
try comp.link_queue.enqueueZcu(comp, tid, .{ .update_line_number = tracked_inst });
try comp.link_queue.enqueueZcu(comp, tid, .{ .debug_update_line_number = tracked_inst });
},
.analyze_unit => |unit| {
const tracy_trace = traceNamed(@src(), "analyze_unit");

View file

@ -998,10 +998,10 @@ fn analyzeComptimeUnit(pt: Zcu.PerThread, cu_id: InternPool.ComptimeUnit.Id) Zcu
try sema.flushExports();
}
/// Ensures that the layout of the given `struct` or `union` type is fully up-to-date, performing
/// re-analysis if necessary. Asserts that `ty` is a struct (not a tuple!) or union. Returns
/// `error.AnalysisFail` if an analysis error is encountered during type resolution; the caller is
/// free to ignore this, since the error is already registered.
/// Ensures that the layout of the given `struct`, `union`, or `enum` type is fully up-to-date,
/// performing re-analysis if necessary. Asserts that `ty` is a struct (not a tuple!), union, or
/// enum type. Returns `error.AnalysisFail` if an analysis error is encountered during type
/// resolution; the caller is free to ignore this, since the error is already registered.
pub fn ensureTypeLayoutUpToDate(
pt: Zcu.PerThread,
ty: Type,
@ -1012,7 +1012,8 @@ pub fn ensureTypeLayoutUpToDate(
defer tracy.end();
const zcu = pt.zcu;
const gpa = zcu.gpa;
const comp = zcu.comp;
const gpa = comp.gpa;
const anal_unit: AnalUnit = .wrap(.{ .type_layout = ty.toIntern() });
@ -1021,7 +1022,7 @@ pub fn ensureTypeLayoutUpToDate(
assert(!zcu.analysis_in_progress.contains(anal_unit));
const was_outdated = zcu.clearOutdatedState(anal_unit) or
zcu.intern_pool.setWantTypeLayout(zcu.comp.io, ty.toIntern());
zcu.intern_pool.setWantTypeLayout(comp.io, ty.toIntern());
if (was_outdated) {
// `was_outdated` is true in the initial update, so this isn't a `dev.check`.
@ -1038,7 +1039,7 @@ pub fn ensureTypeLayoutUpToDate(
return;
}
if (zcu.comp.debugIncremental()) {
if (comp.debugIncremental()) {
const info = try zcu.incremental_debug_state.getUnitInfo(gpa, anal_unit);
info.last_update_gen = zcu.generation;
info.deps.clearRetainingCapacity();
@ -1078,15 +1079,17 @@ pub fn ensureTypeLayoutUpToDate(
.@"union" => Sema.type_resolution.resolveUnionLayout(&sema, ty),
else => unreachable,
};
result catch |err| switch (err) {
error.AnalysisFail => {
const new_success: bool = if (result) s: {
break :s true;
} else |err| switch (err) {
error.AnalysisFail => success: {
if (!zcu.failed_analysis.contains(anal_unit)) {
// If this unit caused the error, it would have an entry in `failed_analysis`.
// Since it does not, this must be a transitive failure.
try zcu.transitive_failed_analysis.put(gpa, anal_unit, {});
log.debug("mark transitive analysis failure for {f}", .{zcu.fmtAnalUnit(anal_unit)});
}
return error.AnalysisFail;
break :success false;
},
error.OutOfMemory,
error.Canceled,
@ -1098,6 +1101,15 @@ pub fn ensureTypeLayoutUpToDate(
sema.flushExports() catch |err| switch (err) {
error.OutOfMemory => |e| return e,
};
// We don't need to `markDependeeOutdated`/`markPoDependeeUpToDate` here, because we already
// marked the layout as outdated at the top of this function. However, we do need to tell the
// debug info logic in the backend about this type.
comp.link_prog_node.increaseEstimatedTotalItems(1);
try comp.link_queue.enqueueZcu(comp, pt.tid, .{ .debug_update_container_type = .{
.ty = ty.toIntern(),
.success = new_success,
} });
}
/// Ensures that the resolved value of the given `Nav` is fully up-to-date, performing re-analysis
@ -4102,463 +4114,3 @@ fn printVerboseAir(
try air.write(w, pt, liveness);
try w.print("# End Function AIR: {f}\n\n", .{fqn.fmt(ip)});
}
// MLUGG TODO: these functions are all blatant hacks. See if I can remove them!
/// Ensures that every type reachable from `ty` which codegen needs layout information for has
/// an up-to-date layout. Container types (structs, unions, enums) go through
/// `ensureTypeLayoutUpToDate`; structural types recurse into their component types.
/// Returns `error.AnalysisFail` if resolving the layout of a reachable container fails.
pub fn resolveTypeForCodegen(pt: Zcu.PerThread, ty: Type) Zcu.SemaError!void {
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    if (ty.isGenericPoison()) return;
    switch (ty.zigTypeTag(zcu)) {
        // These have no layout dependencies to resolve.
        .type,
        .void,
        .bool,
        .noreturn,
        .int,
        .float,
        .error_set,
        .@"opaque",
        .comptime_float,
        .comptime_int,
        .undefined,
        .null,
        .enum_literal,
        => {},
        .frame, .@"anyframe" => @panic("TODO resolveTypeForCodegen async frames"),
        // Structural types whose layout depends only on the child type.
        .optional,
        .pointer,
        .array,
        .vector,
        => try pt.resolveTypeForCodegen(ty.childType(zcu)),
        .error_union => try pt.resolveTypeForCodegen(ty.errorUnionPayload(zcu)),
        .@"fn" => {
            const info = zcu.typeToFunc(ty).?;
            for (info.param_types.get(ip)) |param_ty| {
                try pt.resolveTypeForCodegen(.fromInterned(param_ty));
            }
            try pt.resolveTypeForCodegen(.fromInterned(info.return_type));
        },
        .@"struct" => switch (ip.indexToKey(ty.toIntern())) {
            // Real structs have a layout of their own to resolve.
            .struct_type => try pt.ensureTypeLayoutUpToDate(ty, null),
            // Tuples have no independent layout; resolve each runtime field's type.
            .tuple_type => |tuple| for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, field_val| {
                // A non-`none` value means the field is comptime-known, so it has no runtime layout.
                if (field_val != .none) continue;
                try pt.resolveTypeForCodegen(.fromInterned(field_ty));
            },
            else => unreachable,
        },
        .@"union", .@"enum" => try pt.ensureTypeLayoutUpToDate(ty, null),
    }
}
/// Resolves type layouts needed to lower the comptime-known value `val` in codegen.
/// If `val` is itself a (defined) type, that type's layout is resolved; otherwise, the
/// layout of the value's own type is resolved. An undefined value of type `type` needs
/// no resolution at all.
pub fn resolveValueTypesForCodegen(pt: Zcu.PerThread, val: Value) Zcu.SemaError!void {
    const zcu = pt.zcu;
    if (val.typeOf(zcu).toIntern() == .type_type) {
        if (val.isUndef(zcu)) return;
        return pt.resolveTypeForCodegen(val.toType());
    }
    return pt.resolveTypeForCodegen(val.typeOf(zcu));
}
/// Resolves all type layouts required to run codegen on the given function's AIR.
/// Delegates to `resolveBodyTypesForCodegen` starting from the main body, which recurses
/// into all nested bodies.
pub fn resolveAirTypesForCodegen(pt: Zcu.PerThread, air: *const Air) Zcu.SemaError!void {
return pt.resolveBodyTypesForCodegen(air, air.getMainBody());
}
/// Walks every instruction in `body`, recursing into nested bodies (blocks, loops, branches),
/// and resolves the layout of every type that codegen for these instructions may require:
/// result types, operand types, and the types of comptime-known (interned) operand values.
/// Returns `error.AnalysisFail` if resolving any reachable container type's layout fails.
fn resolveBodyTypesForCodegen(pt: Zcu.PerThread, air: *const Air, body: []const Air.Inst.Index) Zcu.SemaError!void {
const zcu = pt.zcu;
const tags = air.instructions.items(.tag);
const datas = air.instructions.items(.data);
for (body) |inst| {
const data = datas[@intFromEnum(inst)];
// Instructions are grouped below by which `Air.Inst.Data` payload field they use.
switch (tags[@intFromEnum(inst)]) {
// These pseudo-instructions never survive to codegen.
.inferred_alloc, .inferred_alloc_comptime => unreachable,
.arg => try pt.resolveTypeForCodegen(data.arg.ty.toType()),
// `data.bin_op`: resolve the types of both operands.
.add,
.add_safe,
.add_optimized,
.add_wrap,
.add_sat,
.sub,
.sub_safe,
.sub_optimized,
.sub_wrap,
.sub_sat,
.mul,
.mul_safe,
.mul_optimized,
.mul_wrap,
.mul_sat,
.div_float,
.div_float_optimized,
.div_trunc,
.div_trunc_optimized,
.div_floor,
.div_floor_optimized,
.div_exact,
.div_exact_optimized,
.rem,
.rem_optimized,
.mod,
.mod_optimized,
.max,
.min,
.bit_and,
.bit_or,
.shr,
.shr_exact,
.shl,
.shl_exact,
.shl_sat,
.xor,
.cmp_lt,
.cmp_lt_optimized,
.cmp_lte,
.cmp_lte_optimized,
.cmp_eq,
.cmp_eq_optimized,
.cmp_gte,
.cmp_gte_optimized,
.cmp_gt,
.cmp_gt_optimized,
.cmp_neq,
.cmp_neq_optimized,
.bool_and,
.bool_or,
.store,
.store_safe,
.set_union_tag,
.array_elem_val,
.slice_elem_val,
.ptr_elem_val,
.memset,
.memset_safe,
.memcpy,
.memmove,
.atomic_store_unordered,
.atomic_store_monotonic,
.atomic_store_release,
.atomic_store_seq_cst,
.legalize_vec_elem_val,
=> {
try pt.resolveRefTypesForCodegen(data.bin_op.lhs);
try pt.resolveRefTypesForCodegen(data.bin_op.rhs);
},
// `data.ty_op`: resolve the result type and the single operand.
.not,
.bitcast,
.clz,
.ctz,
.popcount,
.byte_swap,
.bit_reverse,
.abs,
.load,
.fptrunc,
.fpext,
.intcast,
.intcast_safe,
.trunc,
.optional_payload,
.optional_payload_ptr,
.optional_payload_ptr_set,
.wrap_optional,
.unwrap_errunion_payload,
.unwrap_errunion_err,
.unwrap_errunion_payload_ptr,
.unwrap_errunion_err_ptr,
.errunion_payload_ptr_set,
.wrap_errunion_payload,
.wrap_errunion_err,
.struct_field_ptr_index_0,
.struct_field_ptr_index_1,
.struct_field_ptr_index_2,
.struct_field_ptr_index_3,
.get_union_tag,
.slice_len,
.slice_ptr,
.ptr_slice_len_ptr,
.ptr_slice_ptr_ptr,
.array_to_slice,
.int_from_float,
.int_from_float_optimized,
.int_from_float_safe,
.int_from_float_optimized_safe,
.float_from_int,
.splat,
.error_set_has_value,
.addrspace_cast,
.c_va_arg,
.c_va_copy,
=> {
try pt.resolveTypeForCodegen(data.ty_op.ty.toType());
try pt.resolveRefTypesForCodegen(data.ty_op.operand);
},
// `data.ty`: the result type alone is sufficient.
.alloc,
.ret_ptr,
.c_va_start,
=> try pt.resolveTypeForCodegen(data.ty),
// `data.ty_pl` with an `Air.Bin` payload: result type plus two operands.
.ptr_add,
.ptr_sub,
.add_with_overflow,
.sub_with_overflow,
.mul_with_overflow,
.shl_with_overflow,
.slice,
.slice_elem_ptr,
.ptr_elem_ptr,
=> {
const bin = air.extraData(Air.Bin, data.ty_pl.payload).data;
try pt.resolveTypeForCodegen(data.ty_pl.ty.toType());
try pt.resolveRefTypesForCodegen(bin.lhs);
try pt.resolveRefTypesForCodegen(bin.rhs);
},
// Nested bodies: resolve the block's result type, then recurse.
.block,
.loop,
=> {
const block = air.unwrapBlock(inst);
try pt.resolveTypeForCodegen(block.ty);
try pt.resolveBodyTypesForCodegen(air, block.body);
},
.dbg_inline_block => {
const block = air.unwrapDbgBlock(inst);
try pt.resolveTypeForCodegen(block.ty);
try pt.resolveBodyTypesForCodegen(air, block.body);
},
// `data.un_op`: a single operand reference.
.sqrt,
.sin,
.cos,
.tan,
.exp,
.exp2,
.log,
.log2,
.log10,
.floor,
.ceil,
.round,
.trunc_float,
.neg,
.neg_optimized,
.is_null,
.is_non_null,
.is_null_ptr,
.is_non_null_ptr,
.is_err,
.is_non_err,
.is_err_ptr,
.is_non_err_ptr,
.ret,
.ret_safe,
.ret_load,
.is_named_enum_value,
.tag_name,
.error_name,
.cmp_lt_errors_len,
.c_va_end,
.set_err_return_trace,
=> try pt.resolveRefTypesForCodegen(data.un_op),
.br, .switch_dispatch => try pt.resolveRefTypesForCodegen(data.br.operand),
.cmp_vector,
.cmp_vector_optimized,
=> {
const extra = air.extraData(Air.VectorCmp, data.ty_pl.payload).data;
try pt.resolveTypeForCodegen(data.ty_pl.ty.toType());
try pt.resolveRefTypesForCodegen(extra.lhs);
try pt.resolveRefTypesForCodegen(extra.rhs);
},
.reduce,
.reduce_optimized,
=> try pt.resolveRefTypesForCodegen(data.reduce.operand),
.struct_field_ptr,
.struct_field_val,
=> {
const extra = air.extraData(Air.StructField, data.ty_pl.payload).data;
try pt.resolveTypeForCodegen(data.ty_pl.ty.toType());
try pt.resolveRefTypesForCodegen(extra.struct_operand);
},
.shuffle_one => {
const unwrapped = air.unwrapShuffleOne(zcu, inst);
try pt.resolveTypeForCodegen(unwrapped.result_ty);
try pt.resolveRefTypesForCodegen(unwrapped.operand);
// Mask elements may embed comptime-known values; resolve those too.
for (unwrapped.mask) |m| switch (m.unwrap()) {
.elem => {},
.value => |val| try pt.resolveValueTypesForCodegen(.fromInterned(val)),
};
},
.shuffle_two => {
const unwrapped = air.unwrapShuffleTwo(zcu, inst);
try pt.resolveTypeForCodegen(unwrapped.result_ty);
try pt.resolveRefTypesForCodegen(unwrapped.operand_a);
try pt.resolveRefTypesForCodegen(unwrapped.operand_b);
// No values to check because there are no comptime-known values other than undef
},
.cmpxchg_weak,
.cmpxchg_strong,
=> {
const extra = air.extraData(Air.Cmpxchg, data.ty_pl.payload).data;
try pt.resolveTypeForCodegen(data.ty_pl.ty.toType());
try pt.resolveRefTypesForCodegen(extra.ptr);
try pt.resolveRefTypesForCodegen(extra.expected_value);
try pt.resolveRefTypesForCodegen(extra.new_value);
},
.aggregate_init => {
const ty = data.ty_pl.ty.toType();
const elems_len: usize = @intCast(ty.arrayLen(zcu));
const elems: []const Air.Inst.Ref = @ptrCast(air.extra.items[data.ty_pl.payload..][0..elems_len]);
try pt.resolveTypeForCodegen(ty);
if (ty.zigTypeTag(zcu) == .@"struct") {
// Comptime fields have no runtime representation, so skip their operands.
for (elems, 0..) |elem, elem_idx| {
if (ty.structFieldIsComptime(elem_idx, zcu)) continue;
try pt.resolveRefTypesForCodegen(elem);
}
} else {
for (elems) |elem| {
try pt.resolveRefTypesForCodegen(elem);
}
}
},
.union_init => {
const extra = air.extraData(Air.UnionInit, data.ty_pl.payload).data;
try pt.resolveTypeForCodegen(data.ty_pl.ty.toType());
try pt.resolveRefTypesForCodegen(extra.init);
},
.field_parent_ptr => {
const extra = air.extraData(Air.FieldParentPtr, data.ty_pl.payload).data;
try pt.resolveTypeForCodegen(data.ty_pl.ty.toType());
try pt.resolveRefTypesForCodegen(extra.field_ptr);
},
.atomic_load => try pt.resolveRefTypesForCodegen(data.atomic_load.ptr),
.prefetch => try pt.resolveRefTypesForCodegen(data.prefetch.ptr),
.runtime_nav_ptr => try pt.resolveTypeForCodegen(.fromInterned(data.ty_nav.ty)),
// `data.pl_op` with an `Air.Bin` payload: three operand references in total.
.select,
.mul_add,
.legalize_vec_store_elem,
=> {
const bin = air.extraData(Air.Bin, data.pl_op.payload).data;
try pt.resolveRefTypesForCodegen(data.pl_op.operand);
try pt.resolveRefTypesForCodegen(bin.lhs);
try pt.resolveRefTypesForCodegen(bin.rhs);
},
.atomic_rmw => {
const extra = air.extraData(Air.AtomicRmw, data.pl_op.payload).data;
try pt.resolveRefTypesForCodegen(data.pl_op.operand);
try pt.resolveRefTypesForCodegen(extra.operand);
},
// Calls: resolve callee and every argument.
.call,
.call_always_tail,
.call_never_tail,
.call_never_inline,
=> {
const call = air.unwrapCall(inst);
try pt.resolveRefTypesForCodegen(call.callee);
for (call.args) |arg| try pt.resolveRefTypesForCodegen(arg);
},
.dbg_var_ptr,
.dbg_var_val,
.dbg_arg_inline,
=> try pt.resolveRefTypesForCodegen(data.pl_op.operand),
// Control flow with nested bodies: resolve operands, then recurse into each body.
.@"try", .try_cold => {
const @"try" = air.unwrapTry(inst);
try pt.resolveRefTypesForCodegen(@"try".error_union);
try pt.resolveBodyTypesForCodegen(air, @"try".else_body);
},
.try_ptr, .try_ptr_cold => {
const try_ptr = air.unwrapTryPtr(inst);
try pt.resolveTypeForCodegen(try_ptr.error_union_payload_ptr_ty.toType());
try pt.resolveRefTypesForCodegen(try_ptr.error_union_ptr);
try pt.resolveBodyTypesForCodegen(air, try_ptr.else_body);
},
.cond_br => {
const cond_br = air.unwrapCondBr(inst);
try pt.resolveRefTypesForCodegen(cond_br.condition);
try pt.resolveBodyTypesForCodegen(air, cond_br.then_body);
try pt.resolveBodyTypesForCodegen(air, cond_br.else_body);
},
.switch_br, .loop_switch_br => {
const switch_br = air.unwrapSwitch(inst);
try pt.resolveRefTypesForCodegen(switch_br.operand);
var it = switch_br.iterateCases();
while (it.next()) |case| {
for (case.items) |item| {
try pt.resolveRefTypesForCodegen(item);
}
for (case.ranges) |range| {
try pt.resolveRefTypesForCodegen(range[0]);
try pt.resolveRefTypesForCodegen(range[1]);
}
try pt.resolveBodyTypesForCodegen(air, case.body);
}
try pt.resolveBodyTypesForCodegen(air, it.elseBody());
},
.assembly => {
const @"asm" = air.unwrapAsm(inst);
try pt.resolveTypeForCodegen(data.ty_pl.ty.toType());
for (@"asm".outputs) |output| if (output != .none) try pt.resolveRefTypesForCodegen(output);
for (@"asm".inputs) |input| if (input != .none) try pt.resolveRefTypesForCodegen(input);
},
.legalize_compiler_rt_call => {
const compiler_rt_call = air.unwrapCompilerRtCall(inst);
for (compiler_rt_call.args) |arg| try pt.resolveRefTypesForCodegen(arg);
},
// Instructions with no type or operand information relevant here.
.trap,
.breakpoint,
.ret_addr,
.frame_addr,
.unreach,
.wasm_memory_size,
.wasm_memory_grow,
.work_item_id,
.work_group_size,
.work_group_id,
.dbg_stmt,
.dbg_empty_stmt,
.err_return_trace,
.save_err_return_trace_index,
.repeat,
=> {},
}
}
}
/// Resolves type layouts for an AIR operand reference. Only interned (comptime-known)
/// operands need any work here; a reference to a prior instruction was already handled
/// when that instruction itself was visited.
fn resolveRefTypesForCodegen(pt: Zcu.PerThread, ref: Air.Inst.Ref) Zcu.SemaError!void {
    const interned = ref.toInterned() orelse return;
    return pt.resolveValueTypesForCodegen(.fromInterned(interned));
}

View file

@ -700,7 +700,14 @@ fn lowerPtr(
};
return lowerPtr(bin_file, pt, src_loc, field.base, w, reloc_parent, offset + field_off);
},
.arr_elem, .comptime_field, .comptime_alloc => unreachable,
.arr_elem => |arr_elem| {
const base_ptr_ty = Value.fromInterned(arr_elem.base).typeOf(zcu);
assert(base_ptr_ty.ptrSize(zcu) == .many);
const elem_size = base_ptr_ty.childType(zcu).abiSize(zcu);
return lowerPtr(bin_file, pt, src_loc, arr_elem.base, w, reloc_parent, offset + elem_size * arr_elem.index);
},
.comptime_alloc => unreachable,
.comptime_field => unreachable,
};
}
@ -781,9 +788,8 @@ fn lowerNavRef(
const ptr_width_bytes = @divExact(target.ptrBitWidth(), 8);
const is_obj = lf.comp.config.output_mode == .Obj;
const nav_ty = Type.fromInterned(ip.getNav(nav_index).typeOf(ip));
const is_fn_body = nav_ty.zigTypeTag(zcu) == .@"fn";
if (!is_fn_body and !nav_ty.hasRuntimeBits(zcu)) {
if (!nav_ty.isRuntimeFnOrHasRuntimeBits(zcu) and ip.getNav(nav_index).getExtern(ip) == null) {
try w.splatByteAll(0xaa, ptr_width_bytes);
return;
}
@ -795,7 +801,7 @@ fn lowerNavRef(
dev.check(link.File.Tag.wasm.devFeature());
const wasm = lf.cast(.wasm).?;
assert(reloc_parent == .none);
if (is_fn_body) {
if (nav_ty.zigTypeTag(zcu) == .@"fn") {
const gop = try wasm.zcu_indirect_function_set.getOrPut(gpa, nav_index);
if (!gop.found_existing) gop.value_ptr.* = {};
if (is_obj) {
@ -1025,23 +1031,26 @@ pub fn lowerValue(pt: Zcu.PerThread, val: Value, target: *const std.Target) Allo
.pointer => switch (ty.ptrSize(zcu)) {
.slice => {},
.one, .many, .c => {
const elem_ty = ty.childType(zcu);
const ptr = ip.indexToKey(val.toIntern()).ptr;
if (ptr.base_addr == .int) return .{ .immediate = ptr.byte_offset };
if (ptr.byte_offset == 0) switch (ptr.base_addr) {
.int => unreachable, // handled above
.nav => |nav| if (elem_ty.isRuntimeFnOrHasRuntimeBits(zcu)) {
return .{ .lea_nav = nav };
} else {
// Create the 0xaa bit pattern...
const undef_ptr_bits: u64 = @intCast((@as(u66, 1) << @intCast(target.ptrBitWidth() + 1)) / 3);
// ...but align the pointer
const alignment = zcu.navAlignment(nav);
return .{ .immediate = alignment.forward(undef_ptr_bits) };
.nav => |nav_index| {
const nav = ip.getNav(nav_index);
const nav_ty: Type = .fromInterned(nav.typeOf(ip));
if (nav_ty.isRuntimeFnOrHasRuntimeBits(zcu) or nav.getExtern(ip) != null) {
return .{ .lea_nav = nav_index };
} else {
// Create the 0xaa bit pattern...
const undef_ptr_bits: u64 = @intCast((@as(u66, 1) << @intCast(target.ptrBitWidth() + 1)) / 3);
// ...but align the pointer
const alignment = zcu.navAlignment(nav_index);
return .{ .immediate = alignment.forward(undef_ptr_bits) };
}
},
.uav => |uav| if (elem_ty.isRuntimeFnOrHasRuntimeBits(zcu)) {
.uav => |uav| if (Value.fromInterned(uav.val).typeOf(zcu).isRuntimeFnOrHasRuntimeBits(zcu)) {
return .{ .lea_uav = uav };
} else {
// Create the 0xaa bit pattern...

View file

@ -798,14 +798,27 @@ pub const File = struct {
};
/// Never called when LLVM is codegenning the ZCU.
fn updateContainerType(base: *File, pt: Zcu.PerThread, ty: InternPool.Index) UpdateContainerTypeError!void {
fn updateContainerType(base: *File, pt: Zcu.PerThread, ty: InternPool.Index, success: bool) UpdateContainerTypeError!void {
assert(base.comp.zcu.?.llvm_object == null);
switch (base.tag) {
.lld => unreachable,
else => {},
inline .elf => |tag| {
dev.check(tag.devFeature());
return @as(*tag.Type(), @fieldParentPtr("base", base)).updateContainerType(pt, ty);
return @as(*tag.Type(), @fieldParentPtr("base", base)).updateContainerType(pt, ty, success);
},
}
}
/// Never called when LLVM is codegenning the ZCU.
/// Dispatches `clearContainerType` for `ty` to the concrete linker implementation.
/// NOTE(review): only the ELF linker implements this; all other tags (except `.lld`,
/// which is unreachable here) are silently no-ops — presumably it undoes previously
/// emitted debug info for the type; confirm against `Elf.clearContainerType`.
fn clearContainerType(base: *File, pt: Zcu.PerThread, ty: InternPool.Index) UpdateContainerTypeError!void {
assert(base.comp.zcu.?.llvm_object == null);
switch (base.tag) {
.lld => unreachable,
else => {},
inline .elf => |tag| {
// `dev.check` gates this code path on the elf dev feature being enabled.
dev.check(tag.devFeature());
return @as(*tag.Type(), @fieldParentPtr("base", base)).clearContainerType(pt, ty);
},
}
}
@ -1375,8 +1388,14 @@ pub const ZcuTask = union(enum) {
link_nav: InternPool.Nav.Index,
/// Write the machine code for a function to the output file.
link_func: Zcu.CodegenTaskPool.Index,
link_type: InternPool.Index,
update_line_number: InternPool.TrackedInst.Index,
/// This struct/union/enum type has finished type resolution (successfully or otherwise), so the
/// linker can now lower debug information for this type (and any structural types which depend
/// on it, such as `?T`, `struct { T }`, `[2]T`, etc).
debug_update_container_type: struct {
ty: InternPool.Index,
success: bool,
},
debug_update_line_number: InternPool.TrackedInst.Index,
};
pub fn doPrelinkTask(comp: *Compilation, task: PrelinkTask) void {
@ -1563,21 +1582,24 @@ pub fn doZcuTask(comp: *Compilation, tid: Zcu.PerThread.Id, task: ZcuTask) void
}
break :nav ip.indexToKey(func).func.owner_nav;
},
.link_type => |ty| nav: {
const name = Type.fromInterned(ty).containerTypeName(ip).toSlice(ip);
const nav_prog_node = comp.link_prog_node.start(name, 0);
defer nav_prog_node.end();
if (zcu.llvm_object == null) {
.debug_update_container_type => |container_update| nav: {
const name = Type.fromInterned(container_update.ty).containerTypeName(ip).toSlice(ip);
const ty_prog_node = comp.link_prog_node.start(name, 0);
defer ty_prog_node.end();
if (zcu.llvm_object) |llvm_object| {
_ = llvm_object;
@compileError("MLUGG TODO");
} else {
if (comp.bin_file) |lf| {
lf.updateContainerType(pt, ty) catch |err| switch (err) {
lf.updateContainerType(pt, container_update.ty, container_update.success) catch |err| switch (err) {
error.OutOfMemory => diags.setAllocFailure(),
error.TypeFailureReported => assert(zcu.failed_types.contains(ty)),
error.TypeFailureReported => assert(zcu.failed_types.contains(container_update.ty)),
};
}
}
break :nav null;
},
.update_line_number => |ti| nav: {
.debug_update_line_number => |ti| nav: {
const nav_prog_node = comp.link_prog_node.start("Update line number", 0);
defer nav_prog_node.end();
if (pt.zcu.llvm_object == null) {

287
src/link/DebugConstPool.zig Normal file
View file

@ -0,0 +1,287 @@
/// Helper type for debug information implementations (such as `link.Dwarf`) to help them emit
/// information about comptime-known values (constants), including types.
///
/// Every constant with associated debug information is assigned an `Index` by calling `get`. The
/// pool will track which container types do and do not have a resolved layout, as well as which
/// constants in the pool depend on which types, and call into the implementation to emit debug
/// information for a constant only when all information is available.
///
/// Indices into the pool are dense, and constants are never removed from the pool, so the debug
/// info implementation can store information for each one with a simple `ArrayList`.
///
/// To use `DebugConstPool`, the debug info implementation is required to:
/// * forward `updateContainerType` calls to its `DebugConstPool`
/// * expose some callback functions---see functions in `DebugInfo`
/// * ensure that any `get` call is eventually followed by a `flushPending` call
const DebugConstPool = @This();
/// Every constant ever added to the pool, keyed by interned value. A constant's `Index`
/// is its position in this map; entries are never removed, so indices are stable and dense.
values: std.AutoArrayHashMapUnmanaged(InternPool.Index, void),
/// Constants added by `get` whose debug info has not yet been emitted; drained by `flushPending`.
pending: std.ArrayList(Index),
/// The container types (struct/union/enum) currently known to have a resolved layout,
/// maintained by `updateContainerType`.
complete_containers: std.AutoArrayHashMapUnmanaged(InternPool.Index, void),
/// For each container type, the head of a singly-linked list (stored in
/// `container_dep_entries`) of pooled constants which depend on that type's layout.
container_deps: std.AutoArrayHashMapUnmanaged(InternPool.Index, ContainerDepEntry.Index),
/// Backing storage for the dependency lists headed in `container_deps`.
container_dep_entries: std.ArrayList(ContainerDepEntry),
/// An initialized pool containing no constants; requires no deinit until used.
pub const empty: DebugConstPool = .{
.values = .empty,
.pending = .empty,
.complete_containers = .empty,
.container_deps = .empty,
.container_dep_entries = .empty,
};
/// Frees all memory owned by the pool. The pool must not be used afterwards.
pub fn deinit(pool: *DebugConstPool, gpa: Allocator) void {
pool.values.deinit(gpa);
pool.pending.deinit(gpa);
pool.complete_containers.deinit(gpa);
pool.container_deps.deinit(gpa);
pool.container_dep_entries.deinit(gpa);
}
/// A dense, stable index identifying a constant in the pool. Equal to the constant's
/// position in `values`, so debug info implementations can mirror per-constant state
/// with a simple array list.
pub const Index = enum(u32) {
_,
/// Returns the interned value that this pool index refers to.
pub fn val(i: Index, pool: *const DebugConstPool) InternPool.Index {
return pool.values.keys()[@intFromEnum(i)];
}
};
/// The debug info implementation driven by this pool. Each callback dispatches via
/// `inline else` to the active backend (self-hosted DWARF or the LLVM object), which
/// must define methods of the same names.
pub const DebugInfo = union(enum) {
dwarf: *@import("Dwarf.zig"),
llvm: @import("../codegen/llvm.zig").Object.Ptr,
/// Inform the debug info implementation that the new constant `val` was added to the pool at
/// the given index (which equals the current pool length) due to a `get` call. It is guaranteed
/// that there will eventually be a call to either `updateConst` or `updateConstIncomplete`
/// following the `addConst` call, to actually populate the constant's debug info.
fn addConst(
di: DebugInfo,
pt: Zcu.PerThread,
index: Index,
val: InternPool.Index,
) !void {
switch (di) {
inline else => |impl| return impl.addConst(pt, index, val),
}
}
/// Tell the debug info implementation to emit information for the constant `val`, which is in
/// the pool at the given index. `val` is "complete", which means:
/// * If it is a type, its layout is known.
/// * Otherwise, the layout of its type is known.
fn updateConst(
di: DebugInfo,
pt: Zcu.PerThread,
index: Index,
val: InternPool.Index,
) !void {
switch (di) {
inline else => |impl| return impl.updateConst(pt, index, val),
}
}
/// Tell the debug info implementation to emit information for the constant `val`, which is in
/// the pool at the given index. `val` is "incomplete", meaning the implementation cannot emit
/// full information for it (for instance, perhaps it is a struct type which was never actually
/// initialized so never had its layout resolved). Instead, the implementation must emit some
/// form of placeholder entry representing an incomplete/unknown constant.
fn updateConstIncomplete(
di: DebugInfo,
pt: Zcu.PerThread,
index: Index,
val: InternPool.Index,
) !void {
switch (di) {
inline else => |impl| return impl.updateConstIncomplete(pt, index, val),
}
}
};
/// One node in a singly-linked list of pooled constants that depend on a particular
/// container type's layout. Lists are headed by `container_deps` and all nodes live in
/// `container_dep_entries`; `updateContainerType` walks them to re-emit dependents.
const ContainerDepEntry = extern struct {
// Next node in this container's dependency list, or `.none` at the end.
next: ContainerDepEntry.Index.Optional,
// The pooled constant that depends on the container type.
depender: DebugConstPool.Index,
/// Index of an entry within `container_dep_entries`.
const Index = enum(u32) {
_,
/// Optional form of `Index`, using `maxInt(u32)` as the null sentinel so the
/// struct stays 4 bytes.
const Optional = enum(u32) {
none = std.math.maxInt(u32),
_,
fn unwrap(o: Optional) ?ContainerDepEntry.Index {
return switch (o) {
.none => null,
else => @enumFromInt(@intFromEnum(o)),
};
}
};
fn toOptional(i: ContainerDepEntry.Index) Optional {
return @enumFromInt(@intFromEnum(i));
}
/// Returns a pointer to the entry; invalidated if `container_dep_entries` grows.
fn ptr(i: ContainerDepEntry.Index, pool: *DebugConstPool) *ContainerDepEntry {
return &pool.container_dep_entries.items[@intFromEnum(i)];
}
};
};
/// Calls to `link.File.updateContainerType` must be forwarded to this function so that the debug
/// constant pool has up-to-date information about the resolution status of types.
/// Records the new resolution status of `container_ty` and re-emits debug info for every
/// pooled constant that depends on it. `success` indicates whether the container's layout
/// was resolved successfully; on failure the type is treated as incomplete again.
pub fn updateContainerType(
pool: *DebugConstPool,
pt: Zcu.PerThread,
di: DebugInfo,
container_ty: InternPool.Index,
success: bool,
) !void {
if (success) {
const gpa = pt.zcu.comp.gpa;
try pool.complete_containers.put(gpa, container_ty, {});
} else {
_ = pool.complete_containers.fetchSwapRemove(container_ty);
}
// Walk the dependency list headed in `container_deps`, updating each dependent constant.
// NOTE(review): `dep.ptr(pool)` re-derefs into `container_dep_entries.items` each
// iteration, which is only safe if `update` cannot indirectly grow that list mid-walk —
// confirm that `updateConst`/`updateConstIncomplete` never re-enter `get`.
var opt_dep = pool.container_deps.get(container_ty);
while (opt_dep) |dep| : (opt_dep = dep.ptr(pool).next.unwrap()) {
try pool.update(pt, di, dep.ptr(pool).depender);
}
}
/// After this is called, there may be a constant for which debug information (complete or not) has
/// not yet been emitted, so the user must call `flushPending` at some point after this call.
pub fn get(pool: *DebugConstPool, pt: Zcu.PerThread, di: DebugInfo, val: InternPool.Index) !DebugConstPool.Index {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const gpa = zcu.comp.gpa;
const gop = try pool.values.getOrPut(gpa, val);
// The pool index is the dense array-hash-map index; it is stable because entries are
// never removed from `values`.
const index: DebugConstPool.Index = @enumFromInt(gop.index);
if (!gop.found_existing) {
// The type whose layout gates this constant's debug info: `val` itself if it is a
// (defined) type, otherwise the type of `val`. An undefined `type` value is treated
// as the `type` type, which needs no layout.
const ty: Type = switch (ip.typeOf(val)) {
.type_type => if (ip.isUndef(val)) .type else .fromInterned(val),
else => |ty| .fromInterned(ty),
};
// Register container-type dependencies first, so a later `updateContainerType` call
// knows to re-emit this constant's debug info.
try pool.registerTypeDeps(index, ty, zcu);
try pool.pending.append(gpa, index);
try di.addConst(pt, index, val);
}
return index;
}
/// Emits debug information (complete or placeholder) for every constant added via `get`
/// since the last flush. Must eventually be called after any sequence of `get` calls.
pub fn flushPending(pool: *DebugConstPool, pt: Zcu.PerThread, di: DebugInfo) !void {
    while (true) {
        const pending_index = pool.pending.pop() orelse break;
        try pool.update(pt, di, pending_index);
    }
}
/// Emits debug info for the constant at `index`: complete info via `updateConst` if every
/// container type it structurally depends on has a resolved layout (per `checkType`),
/// otherwise a placeholder via `updateConstIncomplete`.
fn update(pool: *DebugConstPool, pt: Zcu.PerThread, di: DebugInfo, index: DebugConstPool.Index) !void {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const val = index.val(pool);
// Same type selection as in `get`: the value itself if it is a (defined) type,
// otherwise the value's own type.
const ty: Type = switch (ip.typeOf(val)) {
.type_type => if (ip.isUndef(val)) .type else .fromInterned(val),
else => |ty| .fromInterned(ty),
};
if (pool.checkType(ty, zcu)) {
try di.updateConst(pt, index, val);
} else {
try di.updateConstIncomplete(pt, index, val);
}
}
/// Reports whether debug info for `ty` can be emitted in its complete form, i.e. every
/// container type reachable from `ty` has been marked complete via `updateContainerType`.
fn checkType(pool: *const DebugConstPool, ty: Type, zcu: *const Zcu) bool {
    // Generic poison stands in for a type that will never be known; nothing to wait on.
    if (ty.isGenericPoison()) return true;
    switch (ty.zigTypeTag(zcu)) {
        // Leaf types never depend on container resolution.
        .type,
        .void,
        .bool,
        .noreturn,
        .int,
        .float,
        .pointer,
        .comptime_float,
        .comptime_int,
        .undefined,
        .null,
        .error_set,
        .@"opaque",
        .frame,
        .@"anyframe",
        .enum_literal,
        => return true,
        // Wrapper types are exactly as resolved as their payload.
        .array, .vector => return pool.checkType(ty.childType(zcu), zcu),
        .optional => return pool.checkType(ty.optionalChild(zcu), zcu),
        .error_union => return pool.checkType(ty.errorUnionPayload(zcu), zcu),
        .@"fn" => {
            const ip = &zcu.intern_pool;
            const fn_info = ip.indexToKey(ty.toIntern()).func_type;
            for (fn_info.param_types.get(ip)) |param_ty| {
                if (!pool.checkType(.fromInterned(param_ty), zcu)) return false;
            }
            return pool.checkType(.fromInterned(fn_info.return_type), zcu);
        },
        .@"struct" => {
            // Non-tuple structs are containers: complete only once resolved.
            if (!ty.isTuple(zcu)) return pool.complete_containers.contains(ty.toIntern());
            // Tuples are structural; check every field type.
            for (0..ty.structFieldCount(zcu)) |field_index| {
                if (!pool.checkType(ty.fieldType(field_index, zcu), zcu)) return false;
            }
            return true;
        },
        .@"union", .@"enum" => return pool.complete_containers.contains(ty.toIntern()),
    }
}
/// Records, for every not-yet-resolved container type reachable from `ty`, that the
/// constant `root` must be re-emitted when that container's resolution status changes
/// (see `updateContainerType`). The traversal mirrors `checkType`.
fn registerTypeDeps(pool: *DebugConstPool, root: Index, ty: Type, zcu: *const Zcu) Allocator.Error!void {
    // Generic poison stands in for a type that will never be known; no dependency to record.
    if (ty.isGenericPoison()) return;
    switch (ty.zigTypeTag(zcu)) {
        // Leaf types carry no container dependencies.
        .type,
        .void,
        .bool,
        .noreturn,
        .int,
        .float,
        .pointer,
        .comptime_float,
        .comptime_int,
        .undefined,
        .null,
        .error_set,
        .@"opaque",
        .frame,
        .@"anyframe",
        .enum_literal,
        => {},
        // Wrapper types: dependencies come from the payload type.
        .array, .vector => try pool.registerTypeDeps(root, ty.childType(zcu), zcu),
        .optional => try pool.registerTypeDeps(root, ty.optionalChild(zcu), zcu),
        .error_union => try pool.registerTypeDeps(root, ty.errorUnionPayload(zcu), zcu),
        // Function types: recurse into all parameter types and the return type.
        .@"fn" => {
            const ip = &zcu.intern_pool;
            const func = ip.indexToKey(ty.toIntern()).func_type;
            for (func.param_types.get(ip)) |param_ty_ip| {
                try pool.registerTypeDeps(root, .fromInterned(param_ty_ip), zcu);
            }
            try pool.registerTypeDeps(root, .fromInterned(func.return_type), zcu);
        },
        // NOTE(review): unlike `checkType`, the tuple branch here also covers unions and
        // enums; presumably `isTuple` is always false for those — confirm.
        .@"struct", .@"union", .@"enum" => if (ty.isTuple(zcu)) {
            // Tuples are structural; recurse into every field type.
            for (0..ty.structFieldCount(zcu)) |field_index| {
                try pool.registerTypeDeps(root, ty.fieldType(field_index, zcu), zcu);
            }
        } else {
            // `ty` is a container; register the dependency.
            // Reserve capacity for both insertions up front so the mutations below cannot
            // fail partway through — `errdefer comptime unreachable` asserts (at compile
            // time) that no error can occur past this point.
            const gpa = zcu.comp.gpa;
            try pool.container_deps.ensureUnusedCapacity(gpa, 1);
            try pool.container_dep_entries.ensureUnusedCapacity(gpa, 1);
            errdefer comptime unreachable;
            const gop = pool.container_deps.getOrPutAssumeCapacity(ty.toIntern());
            // Prepend a new entry to this container's singly-linked list of dependers;
            // the map value always points at the most recently added entry.
            const entry: ContainerDepEntry.Index = @enumFromInt(pool.container_dep_entries.items.len);
            pool.container_dep_entries.appendAssumeCapacity(.{
                .next = if (gop.found_existing) gop.value_ptr.toOptional() else .none,
                .depender = root,
            });
            gop.value_ptr.* = entry;
        },
    }
}
const std = @import("std");
const Allocator = std.mem.Allocator;
const InternPool = @import("../InternPool.zig");
const Type = @import("../Type.zig");
const Zcu = @import("../Zcu.zig");

File diff suppressed because it is too large Load diff

View file

@ -1711,13 +1711,14 @@ pub fn updateContainerType(
self: *Elf,
pt: Zcu.PerThread,
ty: InternPool.Index,
success: bool,
) link.File.UpdateContainerTypeError!void {
if (build_options.skip_non_native and builtin.object_format != .elf) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
const zcu = pt.zcu;
const gpa = zcu.gpa;
return self.zigObjectPtr().?.updateContainerType(pt, ty) catch |err| switch (err) {
return self.zigObjectPtr().?.updateContainerType(pt, ty, success) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => |e| {
try zcu.failed_types.putNoClobber(gpa, ty, try Zcu.ErrorMsg.create(

View file

@ -1719,11 +1719,12 @@ pub fn updateContainerType(
self: *ZigObject,
pt: Zcu.PerThread,
ty: InternPool.Index,
success: bool,
) !void {
const tracy = trace(@src());
defer tracy.end();
if (self.dwarf) |*dwarf| try dwarf.updateContainerType(pt, ty);
if (self.dwarf) |*dwarf| try dwarf.updateContainerType(pt, ty, success);
}
fn updateLazySymbol(