get the compiler building

The change in codegen/x86_64/CodeGen.zig was not strictly necessary (the
Sema change I did solves the error I was getting there); I just think
it's better style anyway.
This commit is contained in:
Matthew Lugg 2026-02-27 14:40:01 +00:00
parent f366564bd5
commit 111286565b
No known key found for this signature in database
GPG key ID: 3F5B7DCCBF4AF02E
6 changed files with 59 additions and 31 deletions

View file

@ -4160,7 +4160,7 @@ pub const RUNTIME_FUNCTION = switch (native_arch) {
BeginAddress: DWORD,
DUMMYUNIONNAME: extern union {
UnwindData: DWORD,
DUMMYSTRUCTNAME: packed struct {
DUMMYSTRUCTNAME: packed struct(u32) {
Flag: u2,
FunctionLength: u11,
Ret: u2,
@ -4177,7 +4177,7 @@ pub const RUNTIME_FUNCTION = switch (native_arch) {
BeginAddress: DWORD,
DUMMYUNIONNAME: extern union {
UnwindData: DWORD,
DUMMYSTRUCTNAME: packed struct {
DUMMYSTRUCTNAME: packed struct(u32) {
Flag: u2,
FunctionLength: u11,
RegF: u3,
@ -5013,7 +5013,7 @@ pub const KUSER_SHARED_DATA = extern struct {
KdDebuggerEnabled: BOOLEAN,
DummyUnion1: extern union {
MitigationPolicies: UCHAR,
Alt: packed struct {
Alt: packed struct(u8) {
NXSupportPolicy: u2,
SEHValidationPolicy: u2,
CurDirDevicesSkippedForDlls: u2,
@ -5029,7 +5029,7 @@ pub const KUSER_SHARED_DATA = extern struct {
SafeBootMode: BOOLEAN,
DummyUnion2: extern union {
VirtualizationFlags: UCHAR,
Alt: packed struct {
Alt: packed struct(u8) {
ArchStartedInEl2: u1,
QcSlIsSupported: u1,
SpareBits: u6,
@ -5038,7 +5038,7 @@ pub const KUSER_SHARED_DATA = extern struct {
Reserved12: [2]UCHAR,
DummyUnion3: extern union {
SharedDataFlags: ULONG,
Alt: packed struct {
Alt: packed struct(u32) {
DbgErrorPortPresent: u1,
DbgElevationEnabled: u1,
DbgVirtEnabled: u1,

View file

@ -3081,7 +3081,7 @@ fn zirRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_tok;
const operand = sema.resolveInst(inst_data.operand);
return sema.analyzeRef(block, block.tokenOffset(inst_data.src_tok), operand);
return sema.analyzeRef(block, block.tokenOffset(inst_data.src_tok), operand, .none);
}
fn zirEnsureResultUsed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
@ -18690,7 +18690,7 @@ fn zirStructInit(
const union_val = try sema.bitCast(block, resolved_ty, init_inst, src, field_src);
const result_val = try sema.coerce(block, result_ty, union_val, src);
if (is_ref) {
return sema.analyzeRef(block, src, result_val);
return sema.analyzeRef(block, src, result_val, .none);
} else {
return result_val;
}
@ -24192,7 +24192,7 @@ fn zirMemcpy(
}
} else if (dest_len == .none and len_val == null) {
// Change the dest to a slice, since its type must have the length.
const dest_ptr_ptr = try sema.analyzeRef(block, dest_src, new_dest_ptr);
const dest_ptr_ptr = try sema.analyzeRef(block, dest_src, new_dest_ptr, .none);
new_dest_ptr = try sema.analyzeSlice(block, dest_src, dest_ptr_ptr, .zero, src_len, .none, LazySrcLoc.unneeded, dest_src, dest_src, dest_src, false);
const new_src_ptr_ty = sema.typeOf(new_src_ptr);
if (new_src_ptr_ty.isSlice(zcu)) {
@ -26301,7 +26301,7 @@ fn structFieldPtr(
const field_index: u32 = if (struct_ty.isTuple(zcu)) field_index: {
if (field_name.eqlSlice("len", ip)) {
const len_inst = try pt.intRef(.usize, struct_ty.structFieldCount(zcu));
return sema.analyzeRef(block, src, len_inst);
return sema.analyzeRef(block, src, len_inst, .none);
}
break :field_index try sema.tupleFieldIndex(block, struct_ty, field_name, field_name_src);
} else field_index: {
@ -29787,10 +29787,7 @@ fn coerceTupleToSlicePtrs(
.child = slice_info.child,
});
const array_inst = try sema.coerceTupleToArray(block, array_ty, slice_ty_src, tuple, tuple_src);
if (slice_info.flags.alignment != .none) {
return sema.fail(block, slice_ty_src, "TODO: override the alignment of the array decl we create here", .{});
}
const ptr_array = try sema.analyzeRef(block, slice_ty_src, array_inst);
const ptr_array = try sema.analyzeRef(block, slice_ty_src, array_inst, slice_info.flags.alignment);
return sema.coerceArrayPtrToSlice(block, slice_ty, ptr_array, slice_ty_src);
}
@ -29809,10 +29806,7 @@ fn coerceTupleToArrayPtrs(
const ptr_info = ptr_array_ty.ptrInfo(zcu);
const array_ty: Type = .fromInterned(ptr_info.child);
const array_inst = try sema.coerceTupleToArray(block, array_ty, array_ty_src, tuple, tuple_src);
if (ptr_info.flags.alignment != .none) {
return sema.fail(block, array_ty_src, "TODO: override the alignment of the array decl we create here", .{});
}
const ptr_array = try sema.analyzeRef(block, array_ty_src, array_inst);
const ptr_array = try sema.analyzeRef(block, array_ty_src, array_inst, ptr_info.flags.alignment);
return ptr_array;
}
@ -30137,33 +30131,47 @@ fn analyzeRef(
block: *Block,
src: LazySrcLoc,
operand: Air.Inst.Ref,
alignment: Alignment,
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
const operand_ty = sema.typeOf(operand);
const address_space = target_util.defaultAddressSpace(zcu.getTarget(), .local);
const ptr_type = try pt.ptrType(.{
.child = operand_ty.toIntern(),
.flags = .{
.alignment = alignment,
.is_const = true,
.address_space = address_space,
},
});
if (sema.resolveValue(operand)) |val| {
switch (zcu.intern_pool.indexToKey(val.toIntern())) {
.@"extern" => |e| return sema.analyzeNavRef(block, src, e.owner_nav),
.func => |f| return sema.analyzeNavRef(block, src, f.owner_nav),
else => return uavRef(sema, val),
else => return .fromIntern(try pt.intern(.{ .ptr = .{
.ty = ptr_type.toIntern(),
.base_addr = .{ .uav = .{
.val = val.toIntern(),
.orig_ty = ptr_type.toIntern(),
} },
.byte_offset = 0,
} })),
}
}
// No `requireRuntimeBlock`; it's okay to `ref` to a runtime value in a comptime context,
// it's just that we can only use the *type* of the result, since the value is runtime-known.
const address_space = target_util.defaultAddressSpace(zcu.getTarget(), .local);
const ptr_type = try pt.ptrType(.{
.child = operand_ty.toIntern(),
.flags = .{
.is_const = true,
.address_space = address_space,
},
});
const mut_ptr_type = try pt.ptrType(.{
.child = operand_ty.toIntern(),
.flags = .{ .address_space = address_space },
.flags = .{
.alignment = alignment,
.is_const = false,
.address_space = address_space,
},
});
const alloc = try block.addTy(.alloc, mut_ptr_type);

View file

@ -147,7 +147,7 @@ pub fn ensureStructDefaultsResolved(sema: *Sema, ty: Type, src: LazySrcLoc) Sema
const ip = &zcu.intern_pool;
assert(ip.indexToKey(ty.toIntern()) == .struct_type);
if (zcu.comp.config.incremental) assert(sema.dependencies.contains(.{ .type_layout = ty.toIntern() }));
ty.assertHasLayout(zcu);
try sema.declareDependency(.{ .struct_defaults = ty.toIntern() });
try sema.addReferenceEntry(null, src, .wrap(.{ .struct_defaults = ty.toIntern() }));

View file

@ -4112,7 +4112,27 @@ pub fn enumValueFieldIndex(pt: Zcu.PerThread, ty: Type, field_index: u32) Alloca
pub fn undefValue(pt: Zcu.PerThread, ty: Type) Allocator.Error!Value {
if (std.debug.runtime_safety) {
assert(ty.classify(pt.zcu) != .one_possible_value);
// TODO: values of type `struct { comptime x: u8 = undefined }` are currently represented as
// undef. This is wrong: they should really be represented as empty aggregates instead,
// because `comptime` fields shouldn't factor into that decision! This is implemented
// through logic in `aggregateValue` and requires this weird workaround in what ought to be
// a straightforward assertion:
//assert(ty.classify(pt.zcu) != .one_possible_value);
if (ty.classify(pt.zcu) == .one_possible_value) {
const ip = &pt.zcu.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
else => unreachable, // assertion failure
.struct_type => {
const comptime_bits = ip.loadStructType(ty.toIntern()).field_is_comptime_bits.getAll(ip);
for (comptime_bits) |bag| {
if (@popCount(bag) > 0) break;
} else unreachable; // assertion failure
},
.tuple_type => |tuple| for (tuple.values.get(ip)) |val| {
if (val != .none) break;
} else unreachable, // assertion failure
}
}
}
return .fromInterned(try pt.intern(.{ .undef = ty.toIntern() }));
}

View file

@ -689,7 +689,7 @@ fn constInt(cg: *CodeGen, ty: Type, value: anytype) !Id {
.comptime_int => if (value < 0) .signed else .unsigned,
else => unreachable,
};
if (@sizeOf(@TypeOf(value)) >= 4 and big_int) {
if (@TypeOf(value) != comptime_int and @sizeOf(@TypeOf(value)) >= 4 and big_int) {
const value64: u64 = switch (signedness) {
.signed => @bitCast(@as(i64, @intCast(value))),
.unsigned => @as(u64, @intCast(value)),

View file

@ -181111,7 +181111,7 @@ fn resolveCallingConventionValues(
var ret_sse = abi.getCAbiSseReturnRegs(cc);
var ret_x87 = abi.getCAbiX87ReturnRegs(cc);
const classes = switch (cc) {
const classes: []const abi.Class = switch (cc) {
.x86_64_sysv => std.mem.sliceTo(&abi.classifySystemV(ret_ty, zcu, cg.target, .ret), .none),
.x86_64_win => &.{abi.classifyWindows(ret_ty, zcu, cg.target, .ret)},
else => unreachable,