compiler: various lil' fixes

This commit is contained in:
Matthew Lugg 2026-03-01 07:32:09 +00:00
parent 2877a60969
commit 3ffa8d83a3
No known key found for this signature in database
GPG key ID: 3F5B7DCCBF4AF02E
16 changed files with 312 additions and 203 deletions

View file

@ -388,8 +388,8 @@ const BinaryElfOutput = struct {
pub fn parse(allocator: Allocator, in: *File.Reader, elf_hdr: elf.Header) !Self {
var self: Self = .{
.segments = .{},
.sections = .{},
.segments = .empty,
.sections = .empty,
.allocator = allocator,
.shstrtab = null,
};

View file

@ -332,7 +332,7 @@ pub const ProcSym = extern struct {
name: [1]u8, // null-terminated
};
pub const ProcSymFlags = packed struct {
pub const ProcSymFlags = packed struct(u8) {
has_fp: bool,
has_iret: bool,
has_fret: bool,
@ -373,7 +373,7 @@ pub const LineFragmentHeader = extern struct {
code_size: u32,
};
pub const LineFlags = packed struct {
pub const LineFlags = packed struct(u16) {
/// CV_LINES_HAVE_COLUMNS
have_columns: bool,
unused: u15,

View file

@ -5513,6 +5513,11 @@ fn containerDecl(
if (next_field_idx != fields_len) {
return astgen.failNode(member_node, "'_' field of non-exhaustive enum must be last", .{});
}
if (tag_type_body_len == null) {
return astgen.failNodeNotes(node, "non-exhaustive enum missing integer tag type", .{}, &.{
try astgen.errNoteNode(member_node, "marked non-exhaustive here", .{}),
});
}
opt_nonexhaustive_node = member_node.toOptional();
continue;
}

View file

@ -4180,7 +4180,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) error{OutOfMemory}!ErrorBundle {
if (!refs.contains(logging_unit)) continue;
try messages.append(gpa, .{
.src_loc = compile_log.src(),
.msg = undefined, // populated later
.msg = "", // populated later, but must be valid for `sort` call below
.notes = &.{},
// We actually clear this later for most of these, but we populate
// this field for now to avoid having to allocate more data to track
@ -4221,6 +4221,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) error{OutOfMemory}!ErrorBundle {
break :compile_log_text try log_text.toOwnedSlice(gpa);
};
defer gpa.free(compile_log_text);
// TODO: eventually, this should be behind `std.debug.runtime_safety`. But right now, this is a
// very common way for incremental compilation bugs to manifest, so let's always check it.

View file

@ -416,7 +416,7 @@ pub const Block = struct {
return block.comptime_reason != null;
}
fn builtinCallArgSrc(block: *Block, builtin_call_node: std.zig.Ast.Node.Offset, arg_index: u32) LazySrcLoc {
pub fn builtinCallArgSrc(block: *Block, builtin_call_node: std.zig.Ast.Node.Offset, arg_index: u32) LazySrcLoc {
return block.src(.{ .node_offset_builtin_call_arg = .{
.builtin_call_node = builtin_call_node,
.arg_index = arg_index,
@ -4654,47 +4654,14 @@ fn zirValidateDeref(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr
.slice => return sema.fail(block, src, "index syntax required for slice type '{f}'", .{operand_ty.fmt(pt)}),
}
const elem_ty = operand_ty.childType(zcu);
try sema.ensureLayoutResolved(elem_ty, src, .ptr_access);
const need_comptime = switch (elem_ty.classify(zcu)) {
.no_possible_value => return sema.fail(block, src, "cannot load {s} type '{f}'", .{
if (elem_ty.zigTypeTag(zcu) == .@"opaque") "opaque" else "uninstantiable",
elem_ty.fmt(pt),
}),
.one_possible_value => return, // no need to validate the actual pointer value!
.runtime => false,
.partially_comptime, .fully_comptime => true,
};
if (sema.resolveValue(operand)) |val| {
if (val.isUndef(zcu)) {
// Error for deref of undef pointer, unless the pointee is OPV in which case it's legal.
if (val.isUndef(zcu) and operand_ty.childType(zcu).classify(zcu) != .one_possible_value) {
return sema.fail(block, src, "cannot dereference undefined value", .{});
}
} else if (need_comptime) {
const msg = msg: {
const msg = try sema.errMsg(
src,
"values of type '{f}' must be comptime-known, but operand value is runtime-known",
.{elem_ty.fmt(pt)},
);
errdefer msg.destroy(sema.gpa);
try sema.explainWhyTypeIsComptime(msg, src, elem_ty);
break :msg msg;
};
return sema.failWithOwnedErrorMsg(block, msg);
}
}
fn typeIsDestructurable(ty: Type, zcu: *const Zcu) bool {
return switch (ty.zigTypeTag(zcu)) {
.array, .vector => true,
.@"struct" => ty.isTuple(zcu),
else => false,
};
}
fn zirValidateDestructure(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
const pt = sema.pt;
const zcu = pt.zcu;
@ -4705,14 +4672,14 @@ fn zirValidateDestructure(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp
const operand = sema.resolveInst(extra.operand);
const operand_ty = sema.typeOf(operand);
if (!typeIsDestructurable(operand_ty, zcu)) {
if (!operand_ty.destructurable(zcu)) {
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(src, "type '{f}' cannot be destructured", .{operand_ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
try sema.errNote(destructure_src, msg, "result destructured here", .{});
if (operand_ty.zigTypeTag(pt.zcu) == .error_union) {
const base_op_ty = operand_ty.errorUnionPayload(zcu);
if (typeIsDestructurable(base_op_ty, zcu))
if (base_op_ty.destructurable(zcu))
try sema.errNote(src, msg, "consider using 'try', 'catch', or 'if'", .{});
}
break :msg msg;
@ -8247,8 +8214,15 @@ fn zirOptionalPayload(
else => return sema.failWithExpectedOptionalType(block, src, operand_ty),
};
if (try sema.resolveDefinedValue(block, src, operand)) |val| {
if (val.optionalValue(zcu)) |payload| return Air.internedToRef(payload.toIntern());
ct: {
if (try sema.resolveDefinedValue(block, src, operand)) |val| {
if (val.optionalValue(zcu)) |payload| return .fromValue(payload); // comptime-known payload
} else if (try sema.resolveIsNullFromType(block, src, operand_ty)) |is_null| {
if (!is_null) break :ct; // fully runtime-known
} else {
break :ct; // fully runtime-known
}
// Comptime-known to be `null`.
if (block.isComptime()) return sema.fail(block, src, "unable to unwrap null", .{});
if (safety_check and block.wantSafety()) {
try sema.safetyPanic(block, src, .unwrap_null);
@ -21085,7 +21059,8 @@ fn zirErrorCast(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData
continue :check ip.funcIesResolvedUnordered(func_index);
},
.error_set_type => |dest| {
if (operand_err_ty.isAnyError(zcu)) break :check .superset;
if (dest.names.len == 0) break :check .disjoint; // dest is 'error{}'
if (operand_err_ty.isAnyError(zcu)) break :check .overlap; // anyerror -> error{...} (non-empty)
var dest_has_all = true;
var dest_has_any = false;
for (operand_err_ty.errorSetNames(zcu).get(ip)) |operand_err_name| {
@ -24741,15 +24716,30 @@ fn zirBuiltinExtern(
const ty_src = block.builtinCallArgSrc(extra.node, 0);
const options_src = block.builtinCallArgSrc(extra.node, 1);
var ty = try sema.resolveType(block, ty_src, extra.lhs);
if (!ty.isPtrAtRuntime(zcu)) {
const ptr_ty = try sema.resolveType(block, ty_src, extra.lhs);
if (!ptr_ty.isPtrAtRuntime(zcu)) {
return sema.fail(block, ty_src, "expected (optional) pointer", .{});
}
if (!ty.validateExtern(.other, zcu)) {
const ptr_info = ptr_ty.ptrInfo(zcu);
const elem_ty: Type = .fromInterned(ptr_info.child);
try sema.ensureLayoutResolved(elem_ty, src, .@"extern");
if (!elem_ty.validateExtern(.other, zcu)) {
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(ty_src, "extern symbol cannot have type '{f}'", .{ty.fmt(pt)});
const msg = try sema.errMsg(ty_src, "extern symbol cannot have type '{f}'", .{ptr_ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
try sema.explainWhyTypeIsNotExtern(msg, ty_src, ty, .other);
try sema.errNote(ty_src, msg, "pointer element type '{f}' is not extern compatible", .{elem_ty.fmt(pt)});
try sema.explainWhyTypeIsNotExtern(msg, ty_src, elem_ty, .other);
break :msg msg;
});
}
if (elem_ty.zigTypeTag(zcu) == .@"fn" and !ptr_info.flags.is_const) {
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(ty_src, "extern symbol cannot have type '{f}'", .{ptr_ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
try sema.errNote(ty_src, msg, "pointer to extern function must be 'const'", .{});
break :msg msg;
});
}
@ -24769,14 +24759,9 @@ fn zirBuiltinExtern(
// TODO: error for threadlocal functions, non-const functions, etc
if (options.linkage == .weak and !ty.ptrAllowsZero(zcu)) {
ty = try pt.optionalType(ty.toIntern());
}
const ptr_info = ty.ptrInfo(zcu);
const extern_val = try pt.getExtern(.{
.name = options.name,
.ty = ptr_info.child,
.ty = elem_ty.toIntern(),
.lib_name = options.library_name,
.linkage = options.linkage,
.visibility = options.visibility,
@ -24807,13 +24792,17 @@ fn zirBuiltinExtern(
.source = .builtin,
});
// For a weak symbol where the given type is not nullable, make the pointer optional.
const result_ptr_ty: Type = if (options.linkage == .weak and !ptr_ty.ptrAllowsZero(zcu)) ty: {
break :ty try pt.optionalType(ptr_ty.toIntern());
} else ptr_ty;
const uncasted_ptr = try sema.analyzeNavRef(block, src, ip.indexToKey(extern_val).@"extern".owner_nav);
// We want to cast to `ty`, but that isn't necessarily an allowed coercion.
if (sema.resolveValue(uncasted_ptr)) |uncasted_ptr_val| {
const casted_ptr_val = try pt.getCoerced(uncasted_ptr_val, ty);
const casted_ptr_val = try pt.getCoerced(uncasted_ptr_val, result_ptr_ty);
return Air.internedToRef(casted_ptr_val.toIntern());
} else {
return block.addBitCast(ty, uncasted_ptr);
return block.addBitCast(result_ptr_ty, uncasted_ptr);
}
}
@ -25258,6 +25247,7 @@ pub fn explainWhyTypeIsUnpackable(
try sema.errNote(src, msg, "non-packed unions do not have a bit-packed representation", .{});
try sema.addDeclaredHereNote(msg, union_ty);
},
.slice => try sema.errNote(src, msg, "slices do not have a bit-packed representation", .{}),
.other => try sema.errNote(src, msg, "type does not have a bit-packed representation", .{}),
}
}
@ -25501,7 +25491,10 @@ fn addSafetyCheckSentinelMismatch(
};
assert(sema.typeOf(actual_sentinel).toIntern() == sentinel_ty.toIntern());
assert(sentinel_ty.isSelfComparable(zcu, true));
const ok = try parent_block.addBinOp(.cmp_eq, expected_sentinel, actual_sentinel);
const ok: Air.Inst.Ref = if (sentinel_ty.zigTypeTag(zcu) == .vector) ok: {
const elementwise = try parent_block.addCmpVector(expected_sentinel, actual_sentinel, .eq);
break :ok try parent_block.addReduce(elementwise, .And);
} else try parent_block.addBinOp(.cmp_eq, expected_sentinel, actual_sentinel);
return addSafetyCheckCall(sema, parent_block, src, ok, .@"panic.sentinelMismatch", &.{
expected_sentinel, actual_sentinel,
@ -26574,8 +26567,13 @@ fn unionFieldVal(
const active_tag_val = union_val.unionTag(zcu).?;
const active_index = enum_tag_ty.enumTagFieldIndex(active_tag_val, zcu).?;
if (active_index == field_index) return .fromValue(union_val.unionPayload(zcu));
return sema.fail(block, src, "access of union field '{f}' while field '{f}' is active", .{
field_name.fmt(ip), enum_tag_ty.enumFieldName(active_index, zcu).fmt(ip),
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(src, "access of union field '{f}' while field '{f}' is active", .{
field_name.fmt(ip), enum_tag_ty.enumFieldName(active_index, zcu).fmt(ip),
});
errdefer msg.destroy(zcu.comp.gpa);
try sema.addDeclaredHereNote(msg, union_ty);
break :msg msg;
});
},
.@"extern" => if (try sema.bitCastVal(union_val, field_ty, 0, 0, 0)) |field_val| {
@ -26745,9 +26743,17 @@ fn elemVal(
return sema.analyzeLoad(block, src, .fromValue(elem_ptr_val), indexable_src);
}
if (try child_ty.onePossibleValue(pt)) |opv| return .fromValue(opv);
try sema.validateRuntimeElemAccess(block, elem_index_src, child_ty, indexable_ty, src);
switch (child_ty.classify(zcu)) {
.runtime => {},
.one_possible_value => return .fromValue((try child_ty.onePossibleValue(pt)).?),
.no_possible_value => switch (child_ty.zigTypeTag(zcu)) {
.@"opaque" => return sema.fail(block, src, "cannot load opaque type '{f}'", .{child_ty.fmt(pt)}),
else => return sema.fail(block, src, "cannot load uninstantiable type '{f}'", .{child_ty.fmt(pt)}),
},
.partially_comptime, .fully_comptime => unreachable, // caught by `validateRuntimeElemAccess`
}
try sema.checkLogicalPtrOperation(block, src, indexable_ty);
return block.addBinOp(.ptr_elem_val, indexable, elem_index);
},
.one => {
@ -29082,31 +29088,6 @@ fn storePtr2(
const elem_ty = ptr_ty.childType(zcu);
// To generate better code for tuples, we detect a tuple operand here, and
// analyze field loads and stores directly. This avoids an extra allocation + memcpy
// which would occur if we used `coerce`.
// However, we avoid this mechanism if the destination element type is a tuple,
// because the regular store will be better for this case.
// If the destination type is a struct we don't want this mechanism to trigger, because
// this code does not handle tuple-to-struct coercion which requires dealing with missing
// fields.
const operand_ty = sema.typeOf(uncasted_operand);
if (operand_ty.isTuple(zcu) and elem_ty.zigTypeTag(zcu) == .array) {
const field_count = operand_ty.structFieldCount(zcu);
var i: u32 = 0;
while (i < field_count) : (i += 1) {
const elem_src = operand_src; // TODO better source location
const elem = try sema.tupleField(block, operand_src, uncasted_operand, elem_src, i);
const elem_index = try pt.intRef(.usize, i);
const elem_ptr = try sema.elemPtr(block, ptr_src, ptr, elem_index, elem_src, false, true);
try sema.storePtr2(block, src, elem_ptr, elem_src, elem, elem_src, .store);
}
return;
}
// TODO do the same thing for anon structs as for tuples above.
// However, beware of the need to handle missing/extra fields.
const is_ret = air_tag == .ret_ptr;
const operand = sema.coerceExtra(block, elem_ty, uncasted_operand, operand_src, .{ .is_ret = is_ret }) catch |err| switch (err) {
@ -29129,16 +29110,13 @@ fn storePtr2(
return sema.storePtrVal(block, src, ptr_val, operand_val, elem_ty);
};
// We're performing the store at runtime; as such, we need to make sure the pointee type
// is not comptime-only. We can hit this case with a `@ptrFromInt` pointer.
if (comptime_only) {
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(src, "cannot store comptime-only type '{f}' at runtime", .{elem_ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
try sema.errNote(ptr_src, msg, "operation is runtime due to this pointer", .{});
break :msg msg;
});
}
// We're performing the store at runtime, so the pointee type must not be comptime-only.
if (comptime_only) return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(src, "cannot store comptime-only type '{f}' at runtime", .{elem_ty.fmt(pt)});
errdefer msg.destroy(sema.gpa);
try sema.errNote(ptr_src, msg, "operation is runtime due to this pointer", .{});
break :msg msg;
});
try sema.requireRuntimeBlock(block, src, runtime_src);
@ -29556,7 +29534,10 @@ fn coerceEnumToUnion(
return sema.failWithOwnedErrorMsg(block, msg);
}
if (union_ty.unionHasAllZeroBitFieldTypes(zcu)) {
for (union_obj.field_types.get(ip)) |field_ty_ip| {
if (Type.fromInterned(field_ty_ip).classify(zcu) != .one_possible_value) break;
} else {
// All fields are OPV, so the coercion is okay.
if (try union_ty.onePossibleValue(pt)) |opv| {
// The tag had redundant bits, but we've omitted the tag from the union's runtime layout, so the union is OPV and hence runtime-known.
return .fromValue(opv);
@ -29566,6 +29547,8 @@ fn coerceEnumToUnion(
}
}
// The coercion is invalid because one or more fields is not OPV.
const msg = msg: {
const msg = try sema.errMsg(
inst_src,
@ -30186,12 +30169,18 @@ fn analyzeLoad(
.pointer => ptr_ty.childType(zcu),
else => return sema.fail(block, ptr_src, "expected pointer, found '{f}'", .{ptr_ty.fmt(pt)}),
};
if (elem_ty.zigTypeTag(zcu) == .@"opaque") {
return sema.fail(block, ptr_src, "cannot load opaque type '{f}'", .{elem_ty.fmt(pt)});
}
try sema.ensureLayoutResolved(elem_ty, src, .ptr_access);
if (try elem_ty.onePossibleValue(pt)) |opv| return .fromValue(opv);
const comptime_only = switch (elem_ty.classify(zcu)) {
.no_possible_value => switch (elem_ty.zigTypeTag(zcu)) {
.@"opaque" => return sema.fail(block, src, "cannot load opaque type '{f}'", .{elem_ty.fmt(pt)}),
else => return sema.fail(block, src, "cannot load uninstantiable type '{f}'", .{elem_ty.fmt(pt)}),
},
.one_possible_value => return .fromValue((try elem_ty.onePossibleValue(pt)).?),
.runtime => false,
.partially_comptime, .fully_comptime => true,
};
if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| {
if (try sema.pointerDeref(block, src, ptr_val, ptr_ty)) |elem_val| {
@ -30199,7 +30188,7 @@ fn analyzeLoad(
}
}
if (elem_ty.comptimeOnly(zcu)) return sema.failWithOwnedErrorMsg(block, msg: {
if (comptime_only) return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(src, "cannot load comptime-only type '{f}'", .{elem_ty.fmt(pt)});
errdefer msg.destroy(zcu.gpa);
try sema.errNote(ptr_src, msg, "pointer of type '{f}' is runtime-known", .{ptr_ty.fmt(pt)});

View file

@ -20,6 +20,9 @@ pub fn incrementDefinedInt(
const zcu = pt.zcu;
assert(prev_val.typeOf(zcu).toIntern() == ty.toIntern());
assert(!prev_val.isUndef(zcu));
if (ty.intInfo(zcu).bits == 0) {
return .{ .overflow = true, .val = try comptimeIntAdd(sema, prev_val, .one_comptime_int) };
}
const res = try intAdd(sema, prev_val, try pt.intValue(ty, 1), ty);
return .{ .overflow = res.overflow, .val = res.val };
}

View file

@ -33,6 +33,7 @@ pub const LayoutResolveReason = enum {
align_check,
bit_ptr_child,
@"export",
@"extern",
builtin_type,
/// Written after string: "while resolving type 'T' "
@ -58,6 +59,7 @@ pub const LayoutResolveReason = enum {
.align_check => "for alignment check here",
.bit_ptr_child => "for bit size check here",
.@"export" => "for export here",
.@"extern" => "for extern declaration here",
.builtin_type => "from 'std.builtin'",
// zig fmt: on
};
@ -198,7 +200,7 @@ pub fn resolveStructLayout(sema: *Sema, struct_ty: Type) CompileError!void {
const name = struct_obj.field_names.get(ip)[field_index];
if (ip.addFieldName(struct_obj.field_names, struct_obj.field_name_map, name)) |prev_field_index| {
return sema.failWithOwnedErrorMsg(&block, msg: {
const src = block.nodeOffset(.zero);
const src = block.builtinCallArgSrc(.zero, 2);
const msg = try sema.errMsg(src, "duplicate struct field '{f}' at index '{d}", .{ name.fmt(ip), field_index });
errdefer msg.destroy(gpa);
try sema.errNote(src, msg, "previous field at index '{d}'", .{prev_field_index});
@ -494,20 +496,22 @@ fn resolvePackedStructLayout(
// Finally, either validate or infer the backing int type.
const backing_int_ty: Type = if (explicit_backing_int_ty) |backing_ty| ty: {
// We only need to validate the type.
if (backing_ty.zigTypeTag(zcu) != .int) return sema.failWithOwnedErrorMsg(block, msg: {
const src = struct_ty.srcLoc(zcu);
const msg = try sema.errMsg(src, "expected backing integer type, found '{f}'", .{backing_ty.fmt(pt)});
errdefer msg.destroy(gpa);
try sema.errNote(src, msg, "backing integer '{f}' has bit width '{d}'", .{ backing_ty.fmt(pt), backing_ty.bitSize(zcu) });
try sema.errNote(src, msg, "struct fields have total bit width '{d}'", .{field_bits});
break :msg msg;
});
if (backing_ty.zigTypeTag(zcu) != .int) return sema.fail(
block,
block.src(.container_arg),
"expected backing integer type, found '{f}'",
.{backing_ty.fmt(pt)},
);
if (field_bits != backing_ty.intInfo(zcu).bits) return sema.failWithOwnedErrorMsg(block, msg: {
const src = struct_ty.srcLoc(zcu);
const msg = try sema.errMsg(src, "backing integer bit width does not match total bit width of fields", .{});
errdefer msg.destroy(gpa);
try sema.errNote(src, msg, "backing integer '{f}' has bit width '{d}'", .{ backing_ty.fmt(pt), backing_ty.bitSize(zcu) });
try sema.errNote(
block.src(.container_arg),
msg,
"backing integer '{f}' has bit width '{d}'",
.{ backing_ty.fmt(pt), backing_ty.bitSize(zcu) },
);
try sema.errNote(src, msg, "struct fields have total bit width '{d}'", .{field_bits});
break :msg msg;
});
@ -1033,7 +1037,6 @@ fn resolvePackedUnionLayout(
const msg = try sema.errMsg(field_ty_src, "packed unions cannot contain fields of type '{f}'", .{field_ty.fmt(pt)});
errdefer msg.destroy(gpa);
try sema.explainWhyTypeIsUnpackable(msg, field_ty_src, reason);
try sema.addDeclaredHereNote(msg, field_ty);
break :msg msg;
});
assert(!field_ty.comptimeOnly(zcu)); // packable types are not comptime-only
@ -1062,6 +1065,12 @@ fn resolvePackedUnionLayout(
// Finally, either validate or infer the backing int type.
const backing_int_ty: Type = if (explicit_backing_int_ty) |backing_ty| ty: {
if (backing_ty.zigTypeTag(zcu) != .int) return sema.fail(
block,
block.src(.container_arg),
"expected backing integer type, found '{f}'",
.{backing_ty.fmt(pt)},
);
const backing_int_bits = backing_ty.intInfo(zcu).bits;
for (union_obj.field_types.get(ip), 0..) |field_type_ip, field_idx| {
const field_type: Type = .fromInterned(field_type_ip);
@ -1071,7 +1080,12 @@ fn resolvePackedUnionLayout(
const msg = try sema.errMsg(field_ty_src, "field bit width does not match backing integer", .{});
errdefer msg.destroy(gpa);
try sema.errNote(field_ty_src, msg, "field type '{f}' has bit width '{d}'", .{ field_type.fmt(pt), field_bits });
try sema.errNote(field_ty_src, msg, "backing integer '{f}' has bit width '{d}'", .{ backing_ty.fmt(pt), backing_int_bits });
try sema.errNote(
block.src(.container_arg),
msg,
"backing integer '{f}' has bit width '{d}'",
.{ backing_ty.fmt(pt), backing_int_bits },
);
try sema.errNote(field_ty_src, msg, "all fields in a packed union must have the same bit width", .{});
break :msg msg;
});
@ -1157,7 +1171,7 @@ pub fn resolveEnumLayout(sema: *Sema, enum_ty: Type) CompileError!void {
const name = enum_obj.field_names.get(ip)[field_index];
if (ip.addFieldName(enum_obj.field_names, enum_obj.field_name_map, name)) |prev_field_index| {
return sema.failWithOwnedErrorMsg(&block, msg: {
const src = block.nodeOffset(.zero);
const src = block.builtinCallArgSrc(.zero, 2);
const msg = try sema.errMsg(src, "duplicate union field '{f}' at index '{d}", .{ name.fmt(ip), field_index });
errdefer msg.destroy(gpa);
try sema.errNote(src, msg, "previous field at index '{d}'", .{prev_field_index});
@ -1183,8 +1197,8 @@ pub fn resolveEnumLayout(sema: *Sema, enum_ty: Type) CompileError!void {
const name = enum_obj.field_names.get(ip)[field_index];
if (ip.addFieldName(enum_obj.field_names, enum_obj.field_name_map, name)) |prev_field_index| {
return sema.failWithOwnedErrorMsg(&block, msg: {
const src = block.nodeOffset(.zero);
const msg = try sema.errMsg(src, "duplicate enum field '{f}' at index '{d}", .{ name.fmt(ip), field_index });
const src = block.builtinCallArgSrc(.zero, 2);
const msg = try sema.errMsg(src, "duplicate enum field '{f}' at index '{d}'", .{ name.fmt(ip), field_index });
errdefer msg.destroy(gpa);
try sema.errNote(src, msg, "previous field at index '{d}'", .{prev_field_index});
break :msg msg;

View file

@ -2985,12 +2985,21 @@ pub fn containerTypeName(ty: Type, ip: *const InternPool) InternPool.NullTermina
};
}
pub fn destructurable(ty: Type, zcu: *const Zcu) bool {
return switch (ty.zigTypeTag(zcu)) {
.array, .vector => true,
.@"struct" => ty.isTuple(zcu),
else => false,
};
}
pub const UnpackableReason = union(enum) {
comptime_only,
pointer,
enum_inferred_int_tag: Type,
non_packed_struct: Type,
non_packed_union: Type,
slice,
other,
};
@ -3027,7 +3036,10 @@ pub fn unpackable(ty: Type, zcu: *const Zcu) ?UnpackableReason {
else
.other,
.pointer => .pointer,
.pointer => switch (ty.ptrSize(zcu)) {
.slice => .slice,
.one, .many, .c => .pointer,
},
.@"enum" => switch (zcu.intern_pool.loadEnumType(ty.toIntern()).int_tag_mode) {
.explicit => null,

View file

@ -2014,7 +2014,11 @@ pub fn pointerDerivation(ptr_val: Value, arena: Allocator, pt: Zcu.PerThread, op
},
.field => |field| base: {
const base_ptr = Value.fromInterned(field.base);
const base_ptr_ty = base_ptr.typeOf(zcu);
const base_ptr_ty = try pt.ptrType(info: {
var info = base_ptr.typeOf(zcu).ptrInfo(zcu);
info.flags.size = .one;
break :info info;
});
const parent_step = try arena.create(PointerDeriveStep);
parent_step.* = try pointerDerivation(base_ptr, arena, pt, opt_sema);
break :base .{ .field_ptr = .{
@ -2155,13 +2159,17 @@ pub fn pointerDerivation(ptr_val: Value, arena: Allocator, pt: Zcu.PerThread, op
const start_off = cur_ty.structFieldOffset(field_idx, zcu);
const end_off = start_off + field_ty.abiSize(zcu);
if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) {
const old_ptr_ty = try cur_derive.ptrType(pt);
const base_ptr_ty = try pt.ptrType(info: {
var info = (try cur_derive.ptrType(pt)).ptrInfo(zcu);
info.flags.size = .one;
break :info info;
});
const parent = try arena.create(PointerDeriveStep);
parent.* = cur_derive;
cur_derive = .{ .field_ptr = .{
.parent = parent,
.field_idx = @intCast(field_idx),
.result_ptr_ty = try old_ptr_ty.fieldPtrType(@intCast(field_idx), pt),
.result_ptr_ty = try base_ptr_ty.fieldPtrType(@intCast(field_idx), pt),
} };
cur_offset -= start_off;
break;

View file

@ -2028,9 +2028,15 @@ pub const SrcLoc = struct {
const tree = try src_loc.file_scope.getTree(zcu);
const node = src_loc.base_node;
var buf: [2]Ast.Node.Index = undefined;
const container_decl = tree.fullContainerDecl(&buf, node) orelse return tree.nodeToSpan(node);
const arg_node = container_decl.ast.arg.unwrap() orelse return tree.nodeToSpan(node);
return tree.nodeToSpan(arg_node);
if (tree.fullContainerDecl(&buf, node)) |container_decl| {
const arg_node = container_decl.ast.arg.unwrap() orelse return tree.nodeToSpan(node);
return tree.nodeToSpan(arg_node);
} else if (tree.builtinCallParams(&buf, node)) |args| {
// Builtin calls (`@Enum` etc) should use the first argument.
return tree.nodeToSpan(if (args.len > 0) args[0] else node);
} else {
return tree.nodeToSpan(node);
}
},
.container_field_name,
.container_field_value,
@ -2040,8 +2046,38 @@ pub const SrcLoc = struct {
const tree = try src_loc.file_scope.getTree(zcu);
const node = src_loc.base_node;
var buf: [2]Ast.Node.Index = undefined;
const container_decl = tree.fullContainerDecl(&buf, node) orelse
const container_decl = tree.fullContainerDecl(&buf, node) orelse {
// This could be a reification builtin. These are the args we care about:
// * `@Enum(_, _, names, values)`
// * `@Struct(_, _, names, types, values_and_aligns)`
// * `@Union(_, _, names, types, aligns)`
if (tree.builtinCallParams(&buf, node)) |args| {
const builtin_name = tree.tokenSlice(tree.firstToken(node));
const arg_index: ?u3 = if (std.mem.eql(u8, builtin_name, "@Enum")) switch (src_loc.lazy) {
.container_field_name => 2,
.container_field_value => 3,
.container_field_type => null,
.container_field_align => null,
else => unreachable,
} else if (std.mem.eql(u8, builtin_name, "@Struct")) switch (src_loc.lazy) {
.container_field_name => 2,
.container_field_value => 4,
.container_field_type => 3,
.container_field_align => 4,
else => unreachable,
} else if (std.mem.eql(u8, builtin_name, "@Union")) switch (src_loc.lazy) {
.container_field_name => 2,
.container_field_value => 4,
.container_field_type => 3,
.container_field_align => null,
else => unreachable,
} else null;
if (arg_index) |i| {
if (args.len >= i) return tree.nodeToSpan(args[i]);
}
}
return tree.nodeToSpan(node);
};
var cur_field_idx: usize = 0;
for (container_decl.ast.members) |member_node| {

View file

@ -2176,6 +2176,9 @@ fn analyzeNavType(
return .{ .type_changed = true };
}
/// If `func_index` is not a runtime function (e.g. it has a comptime-only parameter type) then it
/// is still valid to call this function and use its `func_body` unit in general---analysis of the
/// runtime function body will simply fail.
pub fn ensureFuncBodyUpToDate(
pt: Zcu.PerThread,
func_index: InternPool.Index,
@ -2278,29 +2281,6 @@ fn analyzeFuncBody(
const func = zcu.funcInfo(func_index);
const anal_unit = AnalUnit.wrap(.{ .func = func_index });
// Make sure that this function is still owned by the same `Nav`. Otherwise, analyzing
// it would be a waste of time in the best case, and could cause codegen to give bogus
// results in the worst case.
if (func.generic_owner == .none) {
// Among other things, this ensures that the function's `zir_body_inst` is correct.
try pt.ensureNavValUpToDate(func.owner_nav, reason);
if (ip.getNav(func.owner_nav).status.fully_resolved.val != func_index) {
// This function is no longer referenced! There's no point in re-analyzing it.
// Just mark a transitive failure and move on.
return error.AnalysisFail;
}
} else {
const go_nav = zcu.funcInfo(func.generic_owner).owner_nav;
// Among other things, this ensures that the function's `zir_body_inst` is correct.
try pt.ensureNavValUpToDate(go_nav, reason);
if (ip.getNav(go_nav).status.fully_resolved.val != func.generic_owner) {
// The generic owner is no longer referenced, so this function is also unreferenced.
// There's no point in re-analyzing it. Just mark a transitive failure and move on.
return error.AnalysisFail;
}
}
// We'll want to remember what the IES used to be before the update for
// dependency invalidation purposes.
const old_resolved_ies = if (func.analysisUnordered(ip).inferred_error_set)
@ -3263,29 +3243,25 @@ fn analyzeFuncBodyInner(
const anal_unit = AnalUnit.wrap(.{ .func = func_index });
const func = zcu.funcInfo(func_index);
const inst_info = func.zir_body_inst.resolveFull(ip) orelse return error.AnalysisFail;
const file = zcu.fileByIndex(inst_info.file);
// This is the `Nav` corresponding to the `declaration` instruction which the function or its generic owner originates from.
const decl_analysis = if (func.generic_owner == .none)
ip.getNav(func.owner_nav).analysis.?
else
ip.getNav(zcu.funcInfo(func.generic_owner).owner_nav).analysis.?;
const file = zcu.fileByIndex(decl_analysis.zir_index.resolveFile(ip));
const zir = file.zir.?;
try zcu.analysis_in_progress.putNoClobber(gpa, anal_unit, reason);
defer assert(zcu.analysis_in_progress.swapRemove(anal_unit));
if (func.analysisUnordered(ip).inferred_error_set) {
func.setResolvedErrorSet(ip, io, .none);
}
if (zcu.comp.time_report) |*tr| {
if (func.generic_owner != .none) {
tr.stats.n_generic_instances += 1;
}
}
// This is the `Nav` corresponding to the `declaration` instruction which the function or its generic owner originates from.
const decl_nav = ip.getNav(if (func.generic_owner == .none)
func.owner_nav
else
zcu.funcInfo(func.generic_owner).owner_nav);
const func_nav = ip.getNav(func.owner_nav);
var analysis_arena = std.heap.ArenaAllocator.init(gpa);
@ -3319,9 +3295,30 @@ fn analyzeFuncBodyInner(
// Every runtime function has a dependency on the source of the Decl it originates from.
// It also depends on the value of its owner Decl.
try sema.declareDependency(.{ .src_hash = decl_nav.analysis.?.zir_index });
try sema.declareDependency(.{ .src_hash = decl_analysis.zir_index });
try sema.declareDependency(.{ .nav_val = func.owner_nav });
// Make sure that the declaration `Nav` still refers to this function (or its generic owner).
// This will not be the case if the incremental update has changed a function type or turned a
// `fn` decl into some other declaration. In that case, we must not run analysis: this function
// will not be referenced this update, and trying to generate it could be problematic since we
// assume the owner NAV actually, um, owns us.
//
// If we *are* still owned by the right NAV, this analysis updates `zir_body_inst` if necessary.
if (func.generic_owner == .none) {
try pt.ensureNavValUpToDate(func.owner_nav, reason);
if (ip.getNav(func.owner_nav).status.fully_resolved.val != func_index) {
return error.AnalysisFail;
}
} else {
const go_nav = zcu.funcInfo(func.generic_owner).owner_nav;
try pt.ensureNavValUpToDate(go_nav, reason);
if (ip.getNav(go_nav).status.fully_resolved.val != func.generic_owner) {
return error.AnalysisFail;
}
}
if (func.analysisUnordered(ip).inferred_error_set) {
const ies = try analysis_arena.allocator().create(Sema.InferredErrorSet);
ies.* = .{ .func = func_index };
@ -3339,11 +3336,11 @@ fn analyzeFuncBodyInner(
var inner_block: Sema.Block = .{
.parent = null,
.sema = &sema,
.namespace = decl_nav.analysis.?.namespace,
.namespace = decl_analysis.namespace,
.instructions = .empty,
.inlining = null,
.comptime_reason = null,
.src_base_inst = decl_nav.analysis.?.zir_index,
.src_base_inst = decl_analysis.zir_index,
.type_name_ctx = func_nav.fqn,
};
defer inner_block.instructions.deinit(gpa);
@ -3385,6 +3382,13 @@ fn analyzeFuncBodyInner(
const param_ty: Type = .fromInterned(fn_ty_info.param_types.get(ip)[runtime_param_index]);
runtime_param_index += 1;
if (param_ty.isGenericPoison()) {
// We're guaranteed to get a compile error on the `fnHasRuntimeBits` check after this
// loop (the generic poison means this is a generic function). But `continue` here to
// avoid an illegal call to `onePossibleValue` below.
continue;
}
const param_ty_src = inner_block.src(.{ .func_decl_param_ty = @intCast(zir_param_index) });
try sema.ensureLayoutResolved(param_ty, param_ty_src, .parameter);
@ -3406,6 +3410,23 @@ fn analyzeFuncBodyInner(
try sema.ensureLayoutResolved(sema.fn_ret_ty, inner_block.src(.{ .node_offset_fn_type_ret_ty = .zero }), .return_type);
// The function type is now resolved, so we're ready to check whether it even makes sense to ask
// for it to be analyzed at runtime.
if (!fn_ty.fnHasRuntimeBits(zcu)) {
const description: []const u8 = switch (fn_ty_info.cc) {
.@"inline" => "inline",
else => "generic",
};
// This error makes sense because the only reason this analysis would ever be requested is
// for IES resolution.
return sema.fail(
&inner_block,
inner_block.nodeOffset(.zero),
"cannot resolve inferred error set of {s} function type '{f}'",
.{ description, fn_ty.fmt(pt) },
);
}
const last_arg_index = inner_block.instructions.items.len;
// Save the error trace as our first action in the function.

View file

@ -2112,7 +2112,7 @@ pub fn genTagNameFn(
const loaded_enum = ip.loadEnumType(enum_ty.toIntern());
assert(loaded_enum.field_names.len > 0);
if (Type.fromInterned(loaded_enum.int_tag_type).bitSize(zcu) > 64) {
@panic("TODO CBE: tagName for enum over 128 bits");
@panic("TODO CBE: tagName for enum over 64 bits");
}
try w.print("static {s} zig_tagName_{f}__{d}({s} tag) {{\n", .{
@ -2130,10 +2130,10 @@ pub fn genTagNameFn(
try w.writeAll(" switch (tag) {\n");
const field_values = loaded_enum.field_values.get(ip);
for (loaded_enum.field_names.get(ip), 0..) |field_name, field_index| {
const field_int: u64 = int: {
const field_int: i65 = int: {
if (field_values.len == 0) break :int field_index;
const field_val: Value = .fromInterned(field_values[field_index]);
break :int field_val.toUnsignedInt(zcu);
break :int field_val.getUnsignedInt(zcu) orelse field_val.toSignedInt(zcu);
};
try w.print(" case {d}: return ({s}){{name{d},{d}}};\n", .{
field_int,
@ -3278,7 +3278,10 @@ fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue {
const operand_ty = f.typeOf(ty_op.operand);
const scalar_ty = operand_ty.scalarType(zcu);
if (f.dg.intCastIsNoop(inst_scalar_ty, scalar_ty)) return f.moveCValue(inst, inst_ty, operand);
// `intCastIsNoop` doesn't apply to vectors because every vector lowers to a different C struct.
if (inst_ty.zigTypeTag(zcu) != .vector and f.dg.intCastIsNoop(inst_scalar_ty, scalar_ty)) {
return f.moveCValue(inst, inst_ty, operand);
}
const w = &f.code.writer;
const local = try f.allocLocal(inst, inst_ty);
@ -3491,6 +3494,8 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info:
const operand_ty = f.typeOf(bin_op.lhs);
const scalar_ty = operand_ty.scalarType(zcu);
const ref_arg = lowersToBigInt(scalar_ty, zcu);
const w = &f.code.writer;
const local = try f.allocLocal(inst, inst_ty);
const v = try Vectorize.start(f, inst, w, operand_ty);
@ -3504,9 +3509,11 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info:
try f.writeCValueMember(w, local, .{ .field = 0 });
try v.elem(f, w);
try w.writeAll(", ");
if (ref_arg) try w.writeByte('&');
try f.writeCValue(w, lhs, .other);
try v.elem(f, w);
try w.writeAll(", ");
if (ref_arg) try w.writeByte('&');
try f.writeCValue(w, rhs, .other);
if (f.typeOf(bin_op.rhs).isVector(zcu)) try v.elem(f, w);
try f.dg.renderBuiltinInfo(w, scalar_ty, info);

View file

@ -898,13 +898,23 @@ pub const CType = union(enum) {
try w.writeAll("fn_"); // intentional double underscore to start
for (func_type.param_types.get(ip)) |param_ty_ip| {
const param_ty: Type = .fromInterned(param_ty_ip);
try w.print("_P{f}", .{fmtZigType(param_ty, zcu)});
if (param_ty.isGenericPoison()) {
try w.writeAll("_Pgeneric");
} else {
try w.print("_P{f}", .{fmtZigType(param_ty, zcu)});
}
}
if (func_type.is_var_args) {
try w.writeAll("_VA");
}
const ret_ty: Type = .fromInterned(func_type.return_type);
try w.print("_R{f}", .{fmtZigType(ret_ty, zcu)});
if (ret_ty.isGenericPoison()) {
try w.writeAll("_Rgeneric");
} else if (ret_ty.zigTypeTag(zcu) == .error_union and ret_ty.errorUnionPayload(zcu).isGenericPoison()) {
try w.writeAll("_Rgeneric_ies");
} else {
try w.print("_R{f}", .{fmtZigType(ret_ty, zcu)});
}
},
.vector => try w.print("vec_{d}_{f}", .{

View file

@ -3436,9 +3436,9 @@ pub const Object = struct {
}
if (fn_info.cc == .auto and zcu.comp.config.any_error_tracing) {
const stack_trace_ty = zcu.builtin_decl_values.get(.StackTrace);
const ptr_ty = try pt.ptrType(.{ .child = stack_trace_ty });
try llvm_params.append(o.gpa, try o.lowerType(pt, ptr_ty));
// First parameter is a pointer to `std.builtin.StackTrace`.
const llvm_ptr_ty = try o.builder.ptrType(toLlvmAddressSpace(.generic, target));
try llvm_params.append(o.gpa, llvm_ptr_ty);
}
var it = iterateParamTypes(o, pt, fn_info);
@ -6719,16 +6719,11 @@ pub const FuncGen = struct {
const zcu = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ptr_ty = self.typeOf(bin_op.lhs);
const elem_ty = ptr_ty.childType(zcu);
const elem_ty = ptr_ty.indexableElem(zcu);
const llvm_elem_ty = try o.lowerType(pt, elem_ty);
const base_ptr = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
// TODO: when we go fully opaque pointers in LLVM 16 we can remove this branch
const ptr = try self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, if (ptr_ty.isSinglePointer(zcu))
// If this is a single-item pointer to an array, we need another index in the GEP.
&.{ try o.builder.intValue(try o.lowerType(pt, Type.usize), 0), rhs }
else
&.{rhs}, "");
const ptr = try self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, &.{rhs}, "");
if (isByRef(elem_ty, zcu)) {
self.maybeMarkAllowZeroAccess(ptr_ty.ptrInfo(zcu));
const ptr_align = (ptr_ty.ptrAlignment(zcu).min(elem_ty.abiAlignment(zcu))).toLlvm();
@ -6808,11 +6803,6 @@ pub const FuncGen = struct {
const truncated_int =
try self.wip.cast(.trunc, shifted_value, same_size_int, "");
return self.wip.cast(.bitcast, truncated_int, elem_llvm_ty, "");
} else if (field_ty.isPtrAtRuntime(zcu)) {
const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(zcu)));
const truncated_int =
try self.wip.cast(.trunc, shifted_value, same_size_int, "");
return self.wip.cast(.inttoptr, truncated_int, elem_llvm_ty, "");
}
return self.wip.cast(.trunc, shifted_value, elem_llvm_ty, "");
},
@ -6830,11 +6820,6 @@ pub const FuncGen = struct {
const truncated_int =
try self.wip.cast(.trunc, containing_int, same_size_int, "");
return self.wip.cast(.bitcast, truncated_int, elem_llvm_ty, "");
} else if (field_ty.isPtrAtRuntime(zcu)) {
const same_size_int = try o.builder.intType(@intCast(field_ty.bitSize(zcu)));
const truncated_int =
try self.wip.cast(.trunc, containing_int, same_size_int, "");
return self.wip.cast(.inttoptr, truncated_int, elem_llvm_ty, "");
}
return self.wip.cast(.trunc, containing_int, elem_llvm_ty, "");
},
@ -10110,7 +10095,7 @@ pub const FuncGen = struct {
const ip = &zcu.intern_pool;
const enum_type = ip.loadEnumType(enum_ty.toIntern());
// TODO: detect when the type changes and re-emit this function.
// TODO: detect when the type changes (`updateContainerType` will be called) and re-emit this function
const gop = try o.named_enum_map.getOrPut(o.gpa, enum_ty.toIntern());
if (gop.found_existing) return gop.value_ptr.*;
errdefer assert(o.named_enum_map.remove(enum_ty.toIntern()));
@ -10728,10 +10713,7 @@ pub const FuncGen = struct {
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]);
const non_int_val = try self.resolveInst(extra.init);
const small_int_ty = try o.builder.intType(@intCast(field_ty.bitSize(zcu)));
const small_int_val = if (field_ty.isPtrAtRuntime(zcu))
try self.wip.cast(.ptrtoint, non_int_val, small_int_ty, "")
else
try self.wip.cast(.bitcast, non_int_val, small_int_ty, "");
const small_int_val = try self.wip.cast(.bitcast, non_int_val, small_int_ty, "");
return self.wip.conv(.unsigned, small_int_val, int_llvm_ty, "");
}

View file

@ -2252,6 +2252,12 @@ pub const WipNav = struct {
.generic_decl_const,
.generic_decl_func,
=> true,
// This comes from a decl which was previously generated as an incomplete value
// (I think that must mean either a function or an extern which previously had
// incomplete types).
.undefined_comptime_value => false,
else => |t| std.debug.panic("bad decl abbrev code: {t}", .{t}),
};
if (parent_type.getCaptures(zcu).len == 0) {

View file

@ -113,7 +113,7 @@ pub fn print(
if (slice.len == .zero_usize) {
return writer.writeAll("&.{}");
}
try print(.fromInterned(slice.ptr), writer, level - 1, pt, opt_sema);
try print(.fromInterned(slice.ptr), writer, level, pt, opt_sema);
} else {
const print_contents = switch (ip.getBackingAddrTag(slice.ptr).?) {
.field, .arr_elem, .eu_payload, .opt_payload => unreachable,
@ -170,6 +170,9 @@ pub fn print(
}
},
.bitpack => |bitpack| {
if (level == 0) {
return writer.writeAll(".{ ... }");
}
const ty: Type = .fromInterned(bitpack.ty);
switch (ty.zigTypeTag(zcu)) {
.@"struct" => {
@ -464,18 +467,30 @@ pub fn printPtrDerivation(
.uav_ptr => |uav| {
const ty = Value.fromInterned(uav.val).typeOf(zcu);
try writer.print("@as({f}, ", .{ty.fmt(pt)});
try print(Value.fromInterned(uav.val), writer, x.level - 1, pt, x.opt_sema);
if (x.level == 0) {
try writer.writeAll("...");
} else {
try print(Value.fromInterned(uav.val), writer, x.level - 1, pt, x.opt_sema);
}
try writer.writeByte(')');
},
.comptime_alloc_ptr => |info| {
try writer.print("@as({f}, ", .{info.val.typeOf(zcu).fmt(pt)});
try print(info.val, writer, x.level - 1, pt, x.opt_sema);
if (x.level == 0) {
try writer.writeAll("...");
} else {
try print(info.val, writer, x.level - 1, pt, x.opt_sema);
}
try writer.writeByte(')');
},
.comptime_field_ptr => |val| {
const ty = val.typeOf(zcu);
try writer.print("@as({f}, ", .{ty.fmt(pt)});
try print(val, writer, x.level - 1, pt, x.opt_sema);
if (x.level == 0) {
try writer.writeAll("...");
} else {
try print(val, writer, x.level - 1, pt, x.opt_sema);
}
try writer.writeByte(')');
},
else => unreachable,