type resolution progress

This commit is contained in:
Matthew Lugg 2026-01-15 14:01:15 +00:00
parent 3502ddb1c7
commit eae122b3aa
No known key found for this signature in database
GPG key ID: 3F5B7DCCBF4AF02E
29 changed files with 7359 additions and 11371 deletions

View file

@ -924,7 +924,12 @@ pub const Mutable = struct {
/// Asserts the result fits in `r`. Upper bound on the number of limbs needed by
/// r is `calcTwosCompLimbCount(bit_count)`.
pub fn bitReverse(r: *Mutable, a: Const, signedness: Signedness, bit_count: usize) void {
if (bit_count == 0) return;
if (bit_count == 0) {
r.limbs[0] = 0;
r.len = 1;
r.positive = true;
return;
}
r.copy(a);
@ -986,7 +991,12 @@ pub const Mutable = struct {
/// Asserts the result fits in `r`. Upper bound on the number of limbs needed by
/// r is `calcTwosCompLimbCount(8*byte_count)`.
pub fn byteSwap(r: *Mutable, a: Const, signedness: Signedness, byte_count: usize) void {
if (byte_count == 0) return;
if (byte_count == 0) {
r.limbs[0] = 0;
r.len = 1;
r.positive = true;
return;
}
r.copy(a);
const limbs_required = calcTwosCompLimbCount(8 * byte_count);

View file

@ -3710,7 +3710,7 @@ pub const Inst = struct {
};
}
pub fn layout(k: Kind) std.builtin.ContainerLayout {
pub fn layout(k: Kind) std.builtin.Type.ContainerLayout {
return switch (k) {
.auto, .tagged_explicit, .tagged_enum, .tagged_enum_explicit => .auto,
.@"extern" => .@"extern",
@ -4008,20 +4008,6 @@ pub const Inst = struct {
};
};
/// MLUGG TODO: delete this!
pub const DeclIterator = struct {
decls: []const Inst.Index,
index: usize,
pub fn next(it: *DeclIterator) ?Inst.Index {
if (it.index == it.decls.len) return null;
defer it.index += 1;
return it.decls[it.index];
}
};
pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator {
return .{ .decls = zir.typeDecls(decl_inst), .index = 0 };
}
/// `DeclContents` contains all "interesting" instructions found within a declaration by `findTrackable`.
/// These instructions are partitioned into a few different sets, since this makes ZIR instruction mapping
/// more effective.

View file

@ -502,8 +502,7 @@ pub fn intByteSize(target: *const std.Target, bits: u16) u16 {
pub fn intAlignment(target: *const std.Target, bits: u16) u16 {
return switch (target.cpu.arch) {
.x86 => switch (bits) {
0 => 0,
1...8 => 1,
0...8 => 1,
9...16 => 2,
17...32 => 4,
33...64 => switch (target.os.tag) {
@ -513,8 +512,7 @@ pub fn intAlignment(target: *const std.Target, bits: u16) u16 {
else => 16,
},
.x86_64 => switch (bits) {
0 => 0,
1...8 => 1,
0...8 => 1,
9...16 => 2,
17...32 => 4,
33...64 => 8,

View file

@ -14,7 +14,6 @@ const Type = @import("Type.zig");
const Value = @import("Value.zig");
const Zcu = @import("Zcu.zig");
const print = @import("Air/print.zig");
const types_resolved = @import("Air/types_resolved.zig");
pub const Legalize = @import("Air/Legalize.zig");
pub const Liveness = @import("Air/Liveness.zig");
@ -173,8 +172,8 @@ pub const Inst = struct {
/// outside the provenance of the operand, the result is undefined.
///
/// Uses the `ty_pl` field. Payload is `Bin`. The lhs is the pointer,
/// rhs is the offset. Result type is the same as lhs. The operand may
/// be a slice.
/// rhs is the offset. Result type is the same as lhs. The operand type's
/// pointer size may be `.slice`, `.many`, or `.c`.
ptr_add,
/// Subtract an offset, in element type units, from a pointer,
/// returning a new pointer. Element type may not be zero bits.
@ -183,8 +182,8 @@ pub const Inst = struct {
/// outside the provenance of the operand, the result is undefined.
///
/// Uses the `ty_pl` field. Payload is `Bin`. The lhs is the pointer,
/// rhs is the offset. Result type is the same as lhs. The operand may
/// be a slice.
/// rhs is the offset. Result type is the same as lhs. The operand type's
/// pointer size may be `.slice`, `.many`, or `.c`.
ptr_sub,
/// Given two operands which can be floats, integers, or vectors, returns the
/// greater of the operands. For vectors it operates element-wise.
@ -693,6 +692,7 @@ pub const Inst = struct {
/// Uses the `ty_pl` field with payload `Bin`.
slice_elem_ptr,
/// Given a pointer value, and element index, return the element value at that index.
/// The pointer size is either `.c` or `.many`.
/// Result type is the element type of the pointer operand.
/// Uses the `bin_op` field.
ptr_elem_val,
@ -2440,9 +2440,6 @@ pub fn unwrapShuffleTwo(air: *const Air, zcu: *const Zcu, inst_index: Inst.Index
};
}
pub const typesFullyResolved = types_resolved.typesFullyResolved;
pub const typeFullyResolved = types_resolved.checkType;
pub const valFullyResolved = types_resolved.checkVal;
pub const legalize = Legalize.legalize;
pub const write = print.write;
pub const writeInst = print.writeInst;

View file

@ -1,536 +0,0 @@
const Air = @import("../Air.zig");
const Zcu = @import("../Zcu.zig");
const Type = @import("../Type.zig");
const Value = @import("../Value.zig");
const InternPool = @import("../InternPool.zig");
/// Reports whether every type needed for codegen of this AIR has completed
/// resolution. A `false` result means type resolution failed somewhere the
/// code depends on, so codegen must not proceed.
pub fn typesFullyResolved(air: Air, zcu: *Zcu) bool {
    const main_body = air.getMainBody();
    return checkBody(air, main_body, zcu);
}
/// Walks every instruction in `body`, validating (via `checkType`/`checkRef`/
/// `checkVal`) each type and interned value the instruction references, and
/// recursing into any nested bodies (blocks, loops, branches, switch cases,
/// try else-bodies). Returns `false` as soon as any referenced type is found
/// to be unresolved. The switch is exhaustive over `Air.Inst.Tag`, grouped by
/// which `data` field each tag uses.
fn checkBody(air: Air, body: []const Air.Inst.Index, zcu: *Zcu) bool {
    const tags = air.instructions.items(.tag);
    const datas = air.instructions.items(.data);
    for (body) |inst| {
        const data = datas[@intFromEnum(inst)];
        switch (tags[@intFromEnum(inst)]) {
            // These never survive Sema, so must not appear in a finished body.
            .inferred_alloc, .inferred_alloc_comptime => unreachable,
            .arg => {
                if (!checkType(data.arg.ty.toType(), zcu)) return false;
            },
            // Tags using the `bin_op` field: check both operand refs.
            .add,
            .add_safe,
            .add_optimized,
            .add_wrap,
            .add_sat,
            .sub,
            .sub_safe,
            .sub_optimized,
            .sub_wrap,
            .sub_sat,
            .mul,
            .mul_safe,
            .mul_optimized,
            .mul_wrap,
            .mul_sat,
            .div_float,
            .div_float_optimized,
            .div_trunc,
            .div_trunc_optimized,
            .div_floor,
            .div_floor_optimized,
            .div_exact,
            .div_exact_optimized,
            .rem,
            .rem_optimized,
            .mod,
            .mod_optimized,
            .max,
            .min,
            .bit_and,
            .bit_or,
            .shr,
            .shr_exact,
            .shl,
            .shl_exact,
            .shl_sat,
            .xor,
            .cmp_lt,
            .cmp_lt_optimized,
            .cmp_lte,
            .cmp_lte_optimized,
            .cmp_eq,
            .cmp_eq_optimized,
            .cmp_gte,
            .cmp_gte_optimized,
            .cmp_gt,
            .cmp_gt_optimized,
            .cmp_neq,
            .cmp_neq_optimized,
            .bool_and,
            .bool_or,
            .store,
            .store_safe,
            .set_union_tag,
            .array_elem_val,
            .slice_elem_val,
            .ptr_elem_val,
            .memset,
            .memset_safe,
            .memcpy,
            .memmove,
            .atomic_store_unordered,
            .atomic_store_monotonic,
            .atomic_store_release,
            .atomic_store_seq_cst,
            .legalize_vec_elem_val,
            => {
                if (!checkRef(data.bin_op.lhs, zcu)) return false;
                if (!checkRef(data.bin_op.rhs, zcu)) return false;
            },
            // Tags using the `ty_op` field: check the result type and the operand.
            .not,
            .bitcast,
            .clz,
            .ctz,
            .popcount,
            .byte_swap,
            .bit_reverse,
            .abs,
            .load,
            .fptrunc,
            .fpext,
            .intcast,
            .intcast_safe,
            .trunc,
            .optional_payload,
            .optional_payload_ptr,
            .optional_payload_ptr_set,
            .wrap_optional,
            .unwrap_errunion_payload,
            .unwrap_errunion_err,
            .unwrap_errunion_payload_ptr,
            .unwrap_errunion_err_ptr,
            .errunion_payload_ptr_set,
            .wrap_errunion_payload,
            .wrap_errunion_err,
            .struct_field_ptr_index_0,
            .struct_field_ptr_index_1,
            .struct_field_ptr_index_2,
            .struct_field_ptr_index_3,
            .get_union_tag,
            .slice_len,
            .slice_ptr,
            .ptr_slice_len_ptr,
            .ptr_slice_ptr_ptr,
            .array_to_slice,
            .int_from_float,
            .int_from_float_optimized,
            .int_from_float_safe,
            .int_from_float_optimized_safe,
            .float_from_int,
            .splat,
            .error_set_has_value,
            .addrspace_cast,
            .c_va_arg,
            .c_va_copy,
            => {
                if (!checkType(data.ty_op.ty.toType(), zcu)) return false;
                if (!checkRef(data.ty_op.operand, zcu)) return false;
            },
            // Tags carrying only a type in `data.ty`.
            .alloc,
            .ret_ptr,
            .c_va_start,
            => {
                if (!checkType(data.ty, zcu)) return false;
            },
            // `ty_pl` with an `Air.Bin` payload: result type plus two operand refs.
            .ptr_add,
            .ptr_sub,
            .add_with_overflow,
            .sub_with_overflow,
            .mul_with_overflow,
            .shl_with_overflow,
            .slice,
            .slice_elem_ptr,
            .ptr_elem_ptr,
            => {
                const bin = air.extraData(Air.Bin, data.ty_pl.payload).data;
                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
                if (!checkRef(bin.lhs, zcu)) return false;
                if (!checkRef(bin.rhs, zcu)) return false;
            },
            // Nested bodies: check the result type, then recurse.
            .block,
            .loop,
            => {
                const block = air.unwrapBlock(inst);
                if (!checkType(block.ty, zcu)) return false;
                if (!checkBody(
                    air,
                    block.body,
                    zcu,
                )) return false;
            },
            .dbg_inline_block => {
                const block = air.unwrapDbgBlock(inst);
                if (!checkType(block.ty, zcu)) return false;
                if (!checkBody(
                    air,
                    block.body,
                    zcu,
                )) return false;
            },
            // Tags using the `un_op` field: single operand ref.
            .sqrt,
            .sin,
            .cos,
            .tan,
            .exp,
            .exp2,
            .log,
            .log2,
            .log10,
            .floor,
            .ceil,
            .round,
            .trunc_float,
            .neg,
            .neg_optimized,
            .is_null,
            .is_non_null,
            .is_null_ptr,
            .is_non_null_ptr,
            .is_err,
            .is_non_err,
            .is_err_ptr,
            .is_non_err_ptr,
            .ret,
            .ret_safe,
            .ret_load,
            .is_named_enum_value,
            .tag_name,
            .error_name,
            .cmp_lt_errors_len,
            .c_va_end,
            .set_err_return_trace,
            => {
                if (!checkRef(data.un_op, zcu)) return false;
            },
            .br, .switch_dispatch => {
                if (!checkRef(data.br.operand, zcu)) return false;
            },
            .cmp_vector,
            .cmp_vector_optimized,
            => {
                const extra = air.extraData(Air.VectorCmp, data.ty_pl.payload).data;
                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
                if (!checkRef(extra.lhs, zcu)) return false;
                if (!checkRef(extra.rhs, zcu)) return false;
            },
            .reduce,
            .reduce_optimized,
            => {
                if (!checkRef(data.reduce.operand, zcu)) return false;
            },
            .struct_field_ptr,
            .struct_field_val,
            => {
                const extra = air.extraData(Air.StructField, data.ty_pl.payload).data;
                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
                if (!checkRef(extra.struct_operand, zcu)) return false;
            },
            .shuffle_one => {
                const unwrapped = air.unwrapShuffleOne(zcu, inst);
                if (!checkType(unwrapped.result_ty, zcu)) return false;
                if (!checkRef(unwrapped.operand, zcu)) return false;
                // Mask entries may embed comptime-known values; those need checking too.
                for (unwrapped.mask) |m| switch (m.unwrap()) {
                    .elem => {},
                    .value => |val| if (!checkVal(.fromInterned(val), zcu)) return false,
                };
            },
            .shuffle_two => {
                const unwrapped = air.unwrapShuffleTwo(zcu, inst);
                if (!checkType(unwrapped.result_ty, zcu)) return false;
                if (!checkRef(unwrapped.operand_a, zcu)) return false;
                if (!checkRef(unwrapped.operand_b, zcu)) return false;
                // No values to check because there are no comptime-known values other than undef
            },
            .cmpxchg_weak,
            .cmpxchg_strong,
            => {
                const extra = air.extraData(Air.Cmpxchg, data.ty_pl.payload).data;
                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
                if (!checkRef(extra.ptr, zcu)) return false;
                if (!checkRef(extra.expected_value, zcu)) return false;
                if (!checkRef(extra.new_value, zcu)) return false;
            },
            .aggregate_init => {
                const ty = data.ty_pl.ty.toType();
                const elems_len: usize = @intCast(ty.arrayLen(zcu));
                const elems: []const Air.Inst.Ref = @ptrCast(air.extra.items[data.ty_pl.payload..][0..elems_len]);
                if (!checkType(ty, zcu)) return false;
                if (ty.zigTypeTag(zcu) == .@"struct") {
                    // Comptime fields have no runtime element to check.
                    for (elems, 0..) |elem, elem_idx| {
                        if (ty.structFieldIsComptime(elem_idx, zcu)) continue;
                        if (!checkRef(elem, zcu)) return false;
                    }
                } else {
                    for (elems) |elem| {
                        if (!checkRef(elem, zcu)) return false;
                    }
                }
            },
            .union_init => {
                const extra = air.extraData(Air.UnionInit, data.ty_pl.payload).data;
                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
                if (!checkRef(extra.init, zcu)) return false;
            },
            .field_parent_ptr => {
                const extra = air.extraData(Air.FieldParentPtr, data.ty_pl.payload).data;
                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
                if (!checkRef(extra.field_ptr, zcu)) return false;
            },
            .atomic_load => {
                if (!checkRef(data.atomic_load.ptr, zcu)) return false;
            },
            .prefetch => {
                if (!checkRef(data.prefetch.ptr, zcu)) return false;
            },
            .runtime_nav_ptr => {
                if (!checkType(.fromInterned(data.ty_nav.ty), zcu)) return false;
            },
            // `pl_op` with an `Air.Bin` payload: three operand refs in total.
            .select,
            .mul_add,
            .legalize_vec_store_elem,
            => {
                const bin = air.extraData(Air.Bin, data.pl_op.payload).data;
                if (!checkRef(data.pl_op.operand, zcu)) return false;
                if (!checkRef(bin.lhs, zcu)) return false;
                if (!checkRef(bin.rhs, zcu)) return false;
            },
            .atomic_rmw => {
                const extra = air.extraData(Air.AtomicRmw, data.pl_op.payload).data;
                if (!checkRef(data.pl_op.operand, zcu)) return false;
                if (!checkRef(extra.operand, zcu)) return false;
            },
            .call,
            .call_always_tail,
            .call_never_tail,
            .call_never_inline,
            => {
                const call = air.unwrapCall(inst);
                const args = call.args;
                if (!checkRef(call.callee, zcu)) return false;
                for (args) |arg| if (!checkRef(arg, zcu)) return false;
            },
            .dbg_var_ptr,
            .dbg_var_val,
            .dbg_arg_inline,
            => {
                if (!checkRef(data.pl_op.operand, zcu)) return false;
            },
            .@"try", .try_cold => {
                const unwrapped_try = air.unwrapTry(inst);
                if (!checkRef(unwrapped_try.error_union, zcu)) return false;
                if (!checkBody(
                    air,
                    unwrapped_try.else_body,
                    zcu,
                )) return false;
            },
            .try_ptr, .try_ptr_cold => {
                const unwrapped_try = air.unwrapTryPtr(inst);
                if (!checkType(unwrapped_try.error_union_payload_ptr_ty.toType(), zcu)) return false;
                if (!checkRef(unwrapped_try.error_union_ptr, zcu)) return false;
                if (!checkBody(
                    air,
                    unwrapped_try.else_body,
                    zcu,
                )) return false;
            },
            .cond_br => {
                const cond_br = air.unwrapCondBr(inst);
                if (!checkRef(cond_br.condition, zcu)) return false;
                if (!checkBody(
                    air,
                    cond_br.then_body,
                    zcu,
                )) return false;
                if (!checkBody(
                    air,
                    cond_br.else_body,
                    zcu,
                )) return false;
            },
            .switch_br, .loop_switch_br => {
                const switch_br = air.unwrapSwitch(inst);
                if (!checkRef(switch_br.operand, zcu)) return false;
                // Check every case item, every range bound, and every case body,
                // then the else body.
                var it = switch_br.iterateCases();
                while (it.next()) |case| {
                    for (case.items) |item| if (!checkRef(item, zcu)) return false;
                    for (case.ranges) |range| {
                        if (!checkRef(range[0], zcu)) return false;
                        if (!checkRef(range[1], zcu)) return false;
                    }
                    if (!checkBody(air, case.body, zcu)) return false;
                }
                if (!checkBody(air, it.elseBody(), zcu)) return false;
            },
            .assembly => {
                const unwrapped_asm = air.unwrapAsm(inst);
                if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
                // Luckily, we only care about the inputs and outputs, so we don't have to do
                // the whole null-terminated string dance.
                const outputs = unwrapped_asm.outputs;
                const inputs = unwrapped_asm.inputs;
                for (outputs) |output| if (output != .none and !checkRef(output, zcu)) return false;
                for (inputs) |input| if (input != .none and !checkRef(input, zcu)) return false;
            },
            .legalize_compiler_rt_call => {
                const rt_call = air.unwrapCompilerRtCall(inst);
                const args = rt_call.args;
                for (args) |arg| if (!checkRef(arg, zcu)) return false;
            },
            // Tags which reference no types or interned values: nothing to check.
            .trap,
            .breakpoint,
            .ret_addr,
            .frame_addr,
            .unreach,
            .wasm_memory_size,
            .wasm_memory_grow,
            .work_item_id,
            .work_group_size,
            .work_group_id,
            .dbg_stmt,
            .dbg_empty_stmt,
            .err_return_trace,
            .save_err_return_trace_index,
            .repeat,
            => {},
        }
    }
    return true;
}
/// Checks the value behind `ref` when it is an interned constant. A ref that
/// points back at a previous instruction is vacuously fine: that instruction's
/// type was already validated when it was visited, so no further work is needed.
fn checkRef(ref: Air.Inst.Ref, zcu: *Zcu) bool {
    const interned = ref.toInterned() orelse return true;
    return checkVal(Value.fromInterned(interned), zcu);
}
/// Checks that `val`'s type is resolved, and — when the value is itself a
/// type, or a lazy integer (`lazy_align`/`lazy_size`) referencing a type —
/// that the referenced type is resolved too. Undefined values need no
/// further checking beyond their type.
pub fn checkVal(val: Value, zcu: *Zcu) bool {
    const val_ty = val.typeOf(zcu);
    if (!checkType(val_ty, zcu)) return false;
    if (val.isUndef(zcu)) return true;
    if (val_ty.toIntern() == .type_type) {
        if (!checkType(val.toType(), zcu)) return false;
    }
    // Lazy integer values carry a type whose resolution must also be complete.
    return switch (zcu.intern_pool.indexToKey(val.toIntern())) {
        .int => |int| switch (int.storage) {
            .u64, .i64, .big_int => true,
            .lazy_align, .lazy_size => |lazy_ty| checkType(Type.fromInterned(lazy_ty), zcu),
        },
        else => true,
    };
}
/// Returns whether `ty` is sufficiently resolved for codegen. Trivially-sized
/// and scalar types are always considered resolved; container types
/// (struct/union/tuple) consult their resolution flags; aggregate and wrapper
/// types recurse into their components.
pub fn checkType(ty: Type, zcu: *Zcu) bool {
    const ip = &zcu.intern_pool;
    // Generic poison never needs (and cannot undergo) resolution.
    if (ty.isGenericPoison()) return true;
    return switch (ty.zigTypeTag(zcu)) {
        .type,
        .void,
        .bool,
        .noreturn,
        .int,
        .float,
        .error_set,
        .@"enum",
        .@"opaque",
        .vector,
        // These types can appear due to some dummy instructions Sema introduces and expects to be omitted by Liveness.
        // It's a little silly -- but fine, we'll return `true`.
        .comptime_float,
        .comptime_int,
        .undefined,
        .null,
        .enum_literal,
        => true,
        .frame,
        .@"anyframe",
        => @panic("TODO Air.types_resolved.checkType async frames"),
        // Wrapper types are resolved iff their child/payload type is.
        .optional => checkType(ty.childType(zcu), zcu),
        .error_union => checkType(ty.errorUnionPayload(zcu), zcu),
        .pointer => checkType(ty.childType(zcu), zcu),
        .array => checkType(ty.childType(zcu), zcu),
        .@"fn" => {
            // Every parameter type and the return type must be resolved.
            const info = zcu.typeToFunc(ty).?;
            for (0..info.param_types.len) |i| {
                const param_ty = info.param_types.get(ip)[i];
                if (!checkType(Type.fromInterned(param_ty), zcu)) return false;
            }
            return checkType(Type.fromInterned(info.return_type), zcu);
        },
        .@"struct" => switch (ip.indexToKey(ty.toIntern())) {
            .struct_type => {
                const struct_obj = zcu.typeToStruct(ty).?;
                return switch (struct_obj.layout) {
                    // A packed struct is resolved once its backing integer type exists.
                    .@"packed" => struct_obj.backingIntTypeUnordered(ip) != .none,
                    .auto, .@"extern" => struct_obj.flagsUnordered(ip).fully_resolved,
                };
            },
            .tuple_type => |tuple| {
                // Comptime fields (those with a value) need no runtime resolution.
                for (0..tuple.types.len) |i| {
                    const field_is_comptime = tuple.values.get(ip)[i] != .none;
                    if (field_is_comptime) continue;
                    const field_ty = tuple.types.get(ip)[i];
                    if (!checkType(Type.fromInterned(field_ty), zcu)) return false;
                }
                return true;
            },
            else => unreachable,
        },
        .@"union" => return zcu.typeToUnion(ty).?.flagsUnordered(ip).status == .fully_resolved,
    };
}

View file

@ -126,15 +126,7 @@ oneshot_prelink_tasks: std.ArrayList(link.PrelinkTask),
/// work is queued or not.
queued_jobs: QueuedJobs,
work_queues: [
len: {
var len: usize = 0;
for (std.enums.values(Job.Tag)) |tag| {
len = @max(Job.stage(tag) + 1, len);
}
break :len len;
}
]std.Deque(Job),
work_queues: [2]std.Deque(Job),
/// These jobs are to invoke the Clang compiler to create an object file, which
/// gets linked with the Compilation.
@ -990,35 +982,27 @@ const Job = union(enum) {
update_line_number: InternPool.TrackedInst.Index,
/// The `AnalUnit`, which is *not* a `func`, must be semantically analyzed.
/// This may be its first time being analyzed, or it may be outdated.
/// If the unit is a test function, an `analyze_func` job will then be queued.
analyze_comptime_unit: InternPool.AnalUnit,
/// This function must be semantically analyzed.
/// This may be its first time being analyzed, or it may be outdated.
/// After analysis, a `codegen_func` job will be queued.
/// These must be separate jobs to ensure any needed type resolution occurs *before* codegen.
/// This job is separate from `analyze_comptime_unit` because it has a different priority.
analyze_func: InternPool.Index,
/// If the unit is a function, a `codegen_func` job will be queued after analysis completes.
/// If the unit is a *test* function, an `analyze_func` job will also be queued.
analyze_unit: InternPool.AnalUnit,
/// The main source file for the module needs to be analyzed.
analyze_mod: *Package.Module,
/// Fully resolve the given `struct` or `union` type.
resolve_type_fully: InternPool.Index,
/// The value is the index into `windows_libs`.
windows_import_lib: usize,
const Tag = @typeInfo(Job).@"union".tag_type.?;
fn stage(tag: Tag) usize {
return switch (tag) {
// Prioritize functions so that codegen can get to work on them on a
// separate thread, while Sema goes back to its own work.
.resolve_type_fully, .analyze_func, .codegen_func => 0,
fn stage(job: *const Job) usize {
// Prioritize functions so that codegen can get to work on them on a
// separate thread, while Sema goes back to its own work.
return switch (job.*) {
.codegen_func => 0,
.analyze_unit => |unit| switch (unit.unwrap()) {
.func => 0,
else => 1,
},
else => 1,
};
}
comptime {
// Job dependencies
assert(stage(.resolve_type_fully) <= stage(.codegen_func));
}
};
pub const CObject = struct {
@ -3728,7 +3712,9 @@ const Header = extern struct {
src_hash_deps_len: u32,
nav_val_deps_len: u32,
nav_ty_deps_len: u32,
interned_deps_len: u32,
type_layout_deps_len: u32,
type_inits_deps_len: u32,
func_ies_deps_len: u32,
zon_file_deps_len: u32,
embed_file_deps_len: u32,
namespace_deps_len: u32,
@ -3776,7 +3762,9 @@ pub fn saveState(comp: *Compilation) !void {
.src_hash_deps_len = @intCast(ip.src_hash_deps.count()),
.nav_val_deps_len = @intCast(ip.nav_val_deps.count()),
.nav_ty_deps_len = @intCast(ip.nav_ty_deps.count()),
.interned_deps_len = @intCast(ip.interned_deps.count()),
.type_layout_deps_len = @intCast(ip.type_layout_deps.count()),
.type_inits_deps_len = @intCast(ip.type_inits_deps.count()),
.func_ies_deps_len = @intCast(ip.func_ies_deps.count()),
.zon_file_deps_len = @intCast(ip.zon_file_deps.count()),
.embed_file_deps_len = @intCast(ip.embed_file_deps.count()),
.namespace_deps_len = @intCast(ip.namespace_deps.count()),
@ -3800,7 +3788,7 @@ pub fn saveState(comp: *Compilation) !void {
},
});
try bufs.ensureTotalCapacityPrecise(22 + 9 * pt_headers.items.len);
try bufs.ensureTotalCapacityPrecise(26 + 9 * pt_headers.items.len);
addBuf(&bufs, mem.asBytes(&header));
addBuf(&bufs, @ptrCast(pt_headers.items));
@ -3810,8 +3798,12 @@ pub fn saveState(comp: *Compilation) !void {
addBuf(&bufs, @ptrCast(ip.nav_val_deps.values()));
addBuf(&bufs, @ptrCast(ip.nav_ty_deps.keys()));
addBuf(&bufs, @ptrCast(ip.nav_ty_deps.values()));
addBuf(&bufs, @ptrCast(ip.interned_deps.keys()));
addBuf(&bufs, @ptrCast(ip.interned_deps.values()));
addBuf(&bufs, @ptrCast(ip.type_layout_deps.keys()));
addBuf(&bufs, @ptrCast(ip.type_layout_deps.values()));
addBuf(&bufs, @ptrCast(ip.type_inits_deps.keys()));
addBuf(&bufs, @ptrCast(ip.type_inits_deps.values()));
addBuf(&bufs, @ptrCast(ip.func_ies_deps.keys()));
addBuf(&bufs, @ptrCast(ip.func_ies_deps.values()));
addBuf(&bufs, @ptrCast(ip.zon_file_deps.keys()));
addBuf(&bufs, @ptrCast(ip.zon_file_deps.values()));
addBuf(&bufs, @ptrCast(ip.embed_file_deps.keys()));
@ -4489,7 +4481,7 @@ pub fn addModuleErrorMsg(
const root_name: ?[]const u8 = switch (ref.referencer.unwrap()) {
.@"comptime" => "comptime",
.nav_val, .nav_ty => |nav| ip.getNav(nav).name.toSlice(ip),
.type => |ty| Type.fromInterned(ty).containerTypeName(ip).toSlice(ip),
.type_layout, .type_inits => |ty| Type.fromInterned(ty).containerTypeName(ip).toSlice(ip),
.func => |f| ip.getNav(zcu.funcInfo(f).owner_nav).name.toSlice(ip),
.memoized_state => null,
};
@ -4900,15 +4892,7 @@ fn performAllTheWork(
// If there's no work queued, check if there's anything outdated
// which we need to work on, and queue it if so.
if (try zcu.findOutdatedToAnalyze()) |outdated| {
try comp.queueJob(switch (outdated.unwrap()) {
.func => |f| .{ .analyze_func = f },
.memoized_state,
.@"comptime",
.nav_ty,
.nav_val,
.type,
=> .{ .analyze_comptime_unit = outdated },
});
try comp.queueJob(.{ .analyze_unit = outdated });
continue;
}
zcu.sema_prog_node.end();
@ -5151,7 +5135,7 @@ fn dispatchPrelinkWork(comp: *Compilation, main_progress_node: std.Progress.Node
const JobError = Allocator.Error || Io.Cancelable;
pub fn queueJob(comp: *Compilation, job: Job) !void {
try comp.work_queues[Job.stage(job)].pushBack(comp.gpa, job);
try comp.work_queues[job.stage()].pushBack(comp.gpa, job);
}
pub fn queueJobs(comp: *Compilation, jobs: []const Job) !void {
@ -5166,13 +5150,24 @@ fn processOneJob(tid: Zcu.PerThread.Id, comp: *Compilation, job: Job) JobError!v
var owned_air: ?Air = func.air;
defer if (owned_air) |*air| air.deinit(gpa);
if (!owned_air.?.typesFullyResolved(zcu)) {
// Type resolution failed in a way which affects this function. This is a transitive
// failure, but it doesn't need recording, because this function semantically depends
// on the failed type, so when it is changed the function is updated.
zcu.codegen_prog_node.completeOne();
comp.link_prog_node.completeOne();
return;
{
const pt: Zcu.PerThread = .activate(zcu, @enumFromInt(tid));
defer pt.deactivate();
pt.resolveAirTypesForCodegen(&owned_air.?) catch |err| switch (err) {
error.OutOfMemory,
error.Canceled,
=> |e| return e,
error.AnalysisFail => {
// Type resolution failed, making codegen of this function impossible. This
// is a transitive failure, but it doesn't need recording, because this
// function semantically depends on the failed type, so when it is changed
// the function will be updated.
zcu.codegen_prog_node.completeOne();
comp.link_prog_node.completeOne();
return;
},
};
}
// Some linkers need to refer to the AIR. In that case, the linker is not running
@ -5198,45 +5193,54 @@ fn processOneJob(tid: Zcu.PerThread.Id, comp: *Compilation, job: Job) JobError!v
}
}
assert(nav.status == .fully_resolved);
if (!Air.valFullyResolved(zcu.navValue(nav_index), zcu)) {
// Type resolution failed in a way which affects this `Nav`. This is a transitive
// failure, but it doesn't need recording, because this `Nav` semantically depends
// on the failed type, so when it is changed the `Nav` will be updated.
comp.link_prog_node.completeOne();
return;
{
const pt: Zcu.PerThread = .activate(zcu, @enumFromInt(tid));
defer pt.deactivate();
pt.resolveValueTypesForCodegen(zcu.navValue(nav_index)) catch |err| switch (err) {
error.OutOfMemory,
error.Canceled,
=> |e| return e,
error.AnalysisFail => {
// Type resolution failed, making codegen of this `Nav` impossible. This is
// a transitive failure, but it doesn't need recording, because this `Nav`
// semantically depends on the failed type, so when it is changed the value
// of the `Nav` will be updated.
comp.link_prog_node.completeOne();
return;
},
};
}
try comp.link_queue.enqueueZcu(comp, tid, .{ .link_nav = nav_index });
},
.link_type => |ty| {
const zcu = comp.zcu.?;
if (zcu.failed_types.fetchSwapRemove(ty)) |*entry| entry.value.deinit(zcu.gpa);
if (!Air.typeFullyResolved(.fromInterned(ty), zcu)) {
// Type resolution failed in a way which affects this type. This is a transitive
// failure, but it doesn't need recording, because this type semantically depends
// on the failed type, so when that is changed, this type will be updated.
comp.link_prog_node.completeOne();
return;
{
const pt: Zcu.PerThread = .activate(zcu, @enumFromInt(tid));
defer pt.deactivate();
pt.resolveTypeForCodegen(.fromInterned(ty)) catch |err| switch (err) {
error.OutOfMemory,
error.Canceled,
=> |e| return e,
error.AnalysisFail => {
// Type resolution failed, making codegen of this type impossible. This is
// a transitive failure, but it doesn't need recording, because this type
// semantically depends on the failed type, so when it is changed the type
// will be updated appropriately.
comp.link_prog_node.completeOne();
return;
},
};
}
try comp.link_queue.enqueueZcu(comp, tid, .{ .link_type = ty });
},
.update_line_number => |tracked_inst| {
try comp.link_queue.enqueueZcu(comp, tid, .{ .update_line_number = tracked_inst });
},
.analyze_func => |func| {
const tracy_trace = traceNamed(@src(), "analyze_func");
defer tracy_trace.end();
const pt: Zcu.PerThread = .activate(comp.zcu.?, tid);
defer pt.deactivate();
pt.ensureFuncBodyUpToDate(func) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
error.Canceled => |e| return e,
error.AnalysisFail => return,
};
},
.analyze_comptime_unit => |unit| {
const tracy_trace = traceNamed(@src(), "analyze_comptime_unit");
.analyze_unit => |unit| {
const tracy_trace = traceNamed(@src(), "analyze_unit");
defer tracy_trace.end();
const pt: Zcu.PerThread = .activate(comp.zcu.?, tid);
@ -5246,9 +5250,10 @@ fn processOneJob(tid: Zcu.PerThread.Id, comp: *Compilation, job: Job) JobError!v
.@"comptime" => |cu| pt.ensureComptimeUnitUpToDate(cu),
.nav_ty => |nav| pt.ensureNavTypeUpToDate(nav),
.nav_val => |nav| pt.ensureNavValUpToDate(nav),
.type => |ty| if (pt.ensureTypeUpToDate(ty)) |_| {} else |err| err,
.type_layout => |ty| pt.ensureTypeLayoutUpToDate(.fromInterned(ty)),
.type_inits => |ty| pt.ensureTypeInitsUpToDate(.fromInterned(ty)),
.memoized_state => |stage| pt.ensureMemoizedStateUpToDate(stage),
.func => unreachable,
.func => |func| pt.ensureFuncBodyUpToDate(func),
};
maybe_err catch |err| switch (err) {
error.OutOfMemory => |e| return e,
@ -5275,27 +5280,15 @@ fn processOneJob(tid: Zcu.PerThread.Id, comp: *Compilation, job: Job) JobError!v
try pt.zcu.ensureFuncBodyAnalysisQueued(ip.getNav(nav).status.fully_resolved.val);
}
},
.resolve_type_fully => |ty| {
const tracy_trace = traceNamed(@src(), "resolve_type_fully");
defer tracy_trace.end();
const pt: Zcu.PerThread = .activate(comp.zcu.?, tid);
defer pt.deactivate();
Type.fromInterned(ty).resolveFully(pt) catch |err| switch (err) {
error.OutOfMemory, error.Canceled => |e| return e,
error.AnalysisFail => return,
};
},
.analyze_mod => |mod| {
const tracy_trace = traceNamed(@src(), "analyze_mod");
defer tracy_trace.end();
const pt: Zcu.PerThread = .activate(comp.zcu.?, tid);
defer pt.deactivate();
pt.semaMod(mod) catch |err| switch (err) {
error.OutOfMemory, error.Canceled => |e| return e,
error.AnalysisFail => return,
};
const mod_root_file = pt.zcu.module_roots.get(mod).?.unwrap().?;
try pt.ensureFileAnalyzed(mod_root_file);
},
.windows_import_lib => |index| {
const tracy_trace = traceNamed(@src(), "windows_import_lib");

View file

@ -306,12 +306,8 @@ fn handleCommand(zcu: *Zcu, w: *Io.Writer, cmd_str: []const u8, arg_str: []const
try w.print("[{d}] ", .{i});
switch (dependee) {
.src_hash, .namespace, .namespace_name, .zon_file, .embed_file => try w.print("{f}", .{zcu.fmtDependee(dependee)}),
.nav_val, .nav_ty => |nav| try w.print("{s} {d}", .{ @tagName(dependee), @intFromEnum(nav) }),
.interned => |ip_index| switch (ip.indexToKey(ip_index)) {
.struct_type, .union_type, .enum_type => try w.print("type {d}", .{@intFromEnum(ip_index)}),
.func => try w.print("func {d}", .{@intFromEnum(ip_index)}),
else => unreachable,
},
.nav_val, .nav_ty => |nav| try w.print("{t} {d}", .{ dependee, @intFromEnum(nav) }),
.type_layout, .type_inits, .func_ies => |ip_index| try w.print("{t} {d}", .{ dependee, @intFromEnum(ip_index) }),
.memoized_state => |stage| try w.print("memoized_state {s}", .{@tagName(stage)}),
}
try w.writeByte('\n');
@ -376,8 +372,10 @@ fn parseAnalUnit(str: []const u8) ?AnalUnit {
return .wrap(.{ .nav_val = @enumFromInt(parseIndex(idx_str) orelse return null) });
} else if (std.mem.eql(u8, kind, "nav_ty")) {
return .wrap(.{ .nav_ty = @enumFromInt(parseIndex(idx_str) orelse return null) });
} else if (std.mem.eql(u8, kind, "type")) {
return .wrap(.{ .type = @enumFromInt(parseIndex(idx_str) orelse return null) });
} else if (std.mem.eql(u8, kind, "type_layout")) {
return .wrap(.{ .type_layout = @enumFromInt(parseIndex(idx_str) orelse return null) });
} else if (std.mem.eql(u8, kind, "type_inits")) {
return .wrap(.{ .type_inits = @enumFromInt(parseIndex(idx_str) orelse return null) });
} else if (std.mem.eql(u8, kind, "func")) {
return .wrap(.{ .func = @enumFromInt(parseIndex(idx_str) orelse return null) });
} else if (std.mem.eql(u8, kind, "memoized_state")) {

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -125,6 +125,7 @@ fn lowerExprAnonResTy(self: *LowerZon, node: Zoir.Node.Index) CompileError!Inter
return (try pt.aggregateValue(.fromInterned(ty), values)).toIntern();
},
.struct_literal => |init| {
if (true) @panic("MLUGG TODO");
const elems = try self.sema.arena.alloc(InternPool.Index, init.names.len);
for (0..init.names.len) |i| {
elems[i] = try self.lowerExprAnonResTy(init.vals.at(@intCast(i)));
@ -299,7 +300,7 @@ fn checkTypeInner(
} else {
const gop = try visited.getOrPut(sema.arena, ty.toIntern());
if (gop.found_existing) return;
try ty.resolveFields(pt);
try sema.ensureLayoutResolved(ty);
const struct_info = zcu.typeToStruct(ty).?;
for (struct_info.field_types.get(ip)) |field_type| {
try self.checkTypeInner(.fromInterned(field_type), null, visited);
@ -308,7 +309,7 @@ fn checkTypeInner(
.@"union" => {
const gop = try visited.getOrPut(sema.arena, ty.toIntern());
if (gop.found_existing) return;
try ty.resolveFields(pt);
try sema.ensureLayoutResolved(ty);
const union_info = zcu.typeToUnion(ty).?;
for (union_info.field_types.get(ip)) |field_type| {
if (field_type != .void_type) {
@ -767,8 +768,8 @@ fn lowerStruct(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool
const io = comp.io;
const ip = &pt.zcu.intern_pool;
try res_ty.resolveFields(self.sema.pt);
try res_ty.resolveStructFieldInits(self.sema.pt);
try self.sema.ensureLayoutResolved(res_ty);
try self.sema.ensureFieldInitsResolved(res_ty);
const struct_info = self.sema.pt.zcu.typeToStruct(res_ty).?;
const fields: @FieldType(Zoir.Node, "struct_literal") = switch (node.get(self.file.zoir.?)) {
@ -779,7 +780,7 @@ fn lowerStruct(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool
const field_values = try self.sema.arena.alloc(InternPool.Index, struct_info.field_names.len);
const field_defaults = struct_info.field_inits.get(ip);
const field_defaults = struct_info.field_defaults.get(ip);
if (field_defaults.len > 0) {
@memcpy(field_values, field_defaults);
} else {
@ -803,7 +804,7 @@ fn lowerStruct(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool
const field_type: Type = .fromInterned(struct_info.field_types.get(ip)[name_index]);
field_values[name_index] = try self.lowerExprKnownResTy(field_node, field_type);
if (struct_info.comptime_bits.getBit(ip, name_index)) {
if (struct_info.field_is_comptime_bits.get(ip, name_index)) {
const val = ip.indexToKey(field_values[name_index]);
const default = ip.indexToKey(field_defaults[name_index]);
if (!val.eql(default, ip)) {
@ -918,9 +919,9 @@ fn lowerUnion(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool.
const gpa = comp.gpa;
const io = comp.io;
const ip = &pt.zcu.intern_pool;
try res_ty.resolveFields(self.sema.pt);
const union_info = self.sema.pt.zcu.typeToUnion(res_ty).?;
const enum_tag_info = union_info.loadTagType(ip);
try self.sema.ensureLayoutResolved(res_ty);
const union_info = pt.zcu.typeToUnion(res_ty).?;
const enum_tag_info = ip.loadEnumType(union_info.enum_tag_type);
const field_name, const maybe_field_node = switch (node.get(self.file.zoir.?)) {
.enum_literal => |name| b: {
@ -956,7 +957,7 @@ fn lowerUnion(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool.
const name_index = enum_tag_info.nameIndex(ip, field_name) orelse {
return error.WrongType;
};
const tag = try self.sema.pt.enumValueFieldIndex(.fromInterned(union_info.enum_tag_ty), name_index);
const tag = try self.sema.pt.enumValueFieldIndex(.fromInterned(union_info.enum_tag_type), name_index);
const field_type: Type = .fromInterned(union_info.field_types.get(ip)[name_index]);
const val = if (maybe_field_node) |field_node| b: {
if (field_type.toIntern() == .void_type) {

View file

@ -1053,7 +1053,7 @@ fn shlScalar(
if (rhs_val.isUndef(zcu)) return rhs_val;
},
}
switch (try rhs_val.orderAgainstZeroSema(pt)) {
switch (Value.order(rhs_val, .zero_comptime_int, zcu)) {
.gt => {},
.eq => return lhs_val,
.lt => return sema.failWithNegativeShiftAmount(block, rhs_src, rhs_val, vec_idx),
@ -1090,7 +1090,7 @@ fn shlWithOverflowScalar(
if (lhs_val.isUndef(zcu)) return sema.failWithUseOfUndef(block, lhs_src, vec_idx);
if (rhs_val.isUndef(zcu)) return sema.failWithUseOfUndef(block, rhs_src, vec_idx);
switch (try rhs_val.orderAgainstZeroSema(pt)) {
switch (Value.order(rhs_val, .zero_comptime_int, zcu)) {
.gt => {},
.eq => return .{ .overflow_bit = .zero_u1, .wrapped_result = lhs_val },
.lt => return sema.failWithNegativeShiftAmount(block, rhs_src, rhs_val, vec_idx),
@ -1169,7 +1169,7 @@ fn shrScalar(
if (lhs_val.isUndef(zcu)) return sema.failWithUseOfUndef(block, lhs_src, vec_idx);
if (rhs_val.isUndef(zcu)) return sema.failWithUseOfUndef(block, rhs_src, vec_idx);
switch (try rhs_val.orderAgainstZeroSema(pt)) {
switch (Value.order(rhs_val, .zero_comptime_int, zcu)) {
.gt => {},
.eq => return lhs_val,
.lt => return sema.failWithNegativeShiftAmount(block, rhs_src, rhs_val, vec_idx),
@ -1430,8 +1430,8 @@ fn intAddWithOverflowInner(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value
const info = ty.intInfo(zcu);
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = try lhs.toBigIntSema(&lhs_space, pt);
const rhs_bigint = try rhs.toBigIntSema(&rhs_space, pt);
const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
const limbs = try sema.arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits),
@ -1512,8 +1512,8 @@ fn intSubWithOverflowInner(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value
const info = ty.intInfo(zcu);
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = try lhs.toBigIntSema(&lhs_space, pt);
const rhs_bigint = try rhs.toBigIntSema(&rhs_space, pt);
const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
const limbs = try sema.arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits),
@ -1597,8 +1597,8 @@ fn intMulWithOverflowInner(sema: *Sema, lhs: Value, rhs: Value, ty: Type) !Value
const info = ty.intInfo(zcu);
var lhs_space: Value.BigIntSpace = undefined;
var rhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = try lhs.toBigIntSema(&lhs_space, pt);
const rhs_bigint = try rhs.toBigIntSema(&rhs_space, pt);
const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
const rhs_bigint = rhs.toBigInt(&rhs_space, zcu);
const limbs = try sema.arena.alloc(
std.math.big.Limb,
lhs_bigint.limbs.len + rhs_bigint.limbs.len,
@ -1840,7 +1840,7 @@ fn intShl(
var lhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
const shift_amt: usize = @intCast(try rhs.toUnsignedIntSema(pt));
const shift_amt: usize = @intCast(rhs.toUnsignedInt(zcu));
if (shift_amt >= info.bits) {
return sema.failWithTooLargeShiftAmount(block, lhs_ty, rhs, rhs_src, vec_idx);
}
@ -1862,7 +1862,7 @@ fn intShlSat(
const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
const shift_amt: usize = amt: {
if (try rhs.getUnsignedIntSema(pt)) |shift_amt_u64| {
if (rhs.getUnsignedInt(zcu)) |shift_amt_u64| {
if (std.math.cast(usize, shift_amt_u64)) |shift_amt| break :amt shift_amt;
}
// We only support ints with up to 2^16 - 1 bits, so this
@ -1895,9 +1895,9 @@ fn intShlWithOverflow(
const info = lhs_ty.intInfo(zcu);
var lhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = try lhs.toBigIntSema(&lhs_space, pt);
const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
const shift_amt: usize = @intCast(try rhs.toUnsignedIntSema(pt));
const shift_amt: usize = @intCast(rhs.toUnsignedInt(zcu));
if (shift_amt >= info.bits) {
return sema.failWithTooLargeShiftAmount(block, lhs_ty, rhs, rhs_src, vec_idx);
}
@ -1924,9 +1924,10 @@ fn comptimeIntShl(
vec_idx: ?usize,
) !Value {
const pt = sema.pt;
const zcu = pt.zcu;
var lhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = try lhs.toBigIntSema(&lhs_space, pt);
if (try rhs.getUnsignedIntSema(pt)) |shift_amt_u64| {
const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
if (rhs.getUnsignedInt(zcu)) |shift_amt_u64| {
if (std.math.cast(usize, shift_amt_u64)) |shift_amt| {
const result_bigint = try intShlInner(sema, lhs_bigint, shift_amt);
return pt.intValue_big(.comptime_int, result_bigint.toConst());
@ -1963,15 +1964,15 @@ fn intShr(
const lhs_bigint = lhs.toBigInt(&lhs_space, zcu);
const shift_amt: usize = if (rhs_ty.toIntern() == .comptime_int_type) amt: {
if (try rhs.getUnsignedIntSema(pt)) |shift_amt_u64| {
if (rhs.getUnsignedInt(zcu)) |shift_amt_u64| {
if (std.math.cast(usize, shift_amt_u64)) |shift_amt| break :amt shift_amt;
}
if (try rhs.compareAllWithZeroSema(.lt, pt)) {
if (rhs.compareAllWithZero(.lt, zcu)) {
return sema.failWithNegativeShiftAmount(block, rhs_src, rhs, vec_idx);
} else {
return sema.failWithUnsupportedComptimeShiftAmount(block, rhs_src, vec_idx);
}
} else @intCast(try rhs.toUnsignedIntSema(pt));
} else @intCast(rhs.toUnsignedInt(zcu));
if (lhs_ty.toIntern() != .comptime_int_type and shift_amt >= lhs_ty.intInfo(zcu).bits) {
return sema.failWithTooLargeShiftAmount(block, lhs_ty, rhs, rhs_src, vec_idx);
@ -2006,7 +2007,7 @@ fn intBitReverse(sema: *Sema, val: Value, ty: Type) !Value {
const info = ty.intInfo(zcu);
var val_space: Value.BigIntSpace = undefined;
const val_bigint = try val.toBigIntSema(&val_space, pt);
const val_bigint = val.toBigInt(&val_space, zcu);
const limbs = try sema.arena.alloc(
std.math.big.Limb,

View file

@ -79,8 +79,8 @@ fn bitCastInner(
const val_ty = val.typeOf(zcu);
try val_ty.resolveLayout(pt);
try dest_ty.resolveLayout(pt);
val_ty.assertHasLayout(zcu);
try sema.ensureLayoutResolved(dest_ty);
assert(val_ty.hasWellDefinedLayout(zcu));
@ -138,8 +138,8 @@ fn bitCastSpliceInner(
const val_ty = val.typeOf(zcu);
const splice_val_ty = splice_val.typeOf(zcu);
try val_ty.resolveLayout(pt);
try splice_val_ty.resolveLayout(pt);
try sema.ensureLayoutResolved(val_ty);
try sema.ensureLayoutResolved(splice_val_ty);
const splice_bits = splice_val_ty.bitSize(zcu);
@ -673,6 +673,9 @@ const PackValueBits = struct {
fn primitive(pack: *PackValueBits, want_ty: Type) BitCastError!Value {
const pt = pack.pt;
const zcu = pt.zcu;
if (try want_ty.onePossibleValue(pt)) |opv| return opv;
const vals, const bit_offset = pack.prepareBits(want_ty.bitSize(zcu));
for (vals) |val| {

View file

@ -67,7 +67,7 @@ pub fn storeComptimePtr(
{
const store_ty: Type = .fromInterned(ptr_info.child);
if (!try store_ty.comptimeOnlySema(pt) and !try store_ty.hasRuntimeBitsIgnoreComptimeSema(pt)) {
if (!store_ty.comptimeOnly(zcu) and !store_ty.hasRuntimeBits(zcu)) {
// zero-bit store; nothing to do
return .success;
}
@ -354,8 +354,8 @@ fn loadComptimePtrInner(
const load_one_ty, const load_count = load_ty.arrayBase(zcu);
const extra_base_index: u64 = if (ptr.byte_offset == 0) 0 else idx: {
if (try load_one_ty.comptimeOnlySema(pt)) break :restructure_array;
const elem_len = try load_one_ty.abiSizeSema(pt);
if (load_one_ty.comptimeOnly(zcu)) break :restructure_array;
const elem_len = load_one_ty.abiSize(zcu);
if (ptr.byte_offset % elem_len != 0) break :restructure_array;
break :idx @divExact(ptr.byte_offset, elem_len);
};
@ -401,12 +401,12 @@ fn loadComptimePtrInner(
var cur_offset = ptr.byte_offset;
if (load_ty.zigTypeTag(zcu) == .array and array_offset > 0) {
cur_offset += try load_ty.childType(zcu).abiSizeSema(pt) * array_offset;
cur_offset += load_ty.childType(zcu).abiSize(zcu) * array_offset;
}
const need_bytes = if (host_bits > 0) (host_bits + 7) / 8 else try load_ty.abiSizeSema(pt);
const need_bytes = if (host_bits > 0) (host_bits + 7) / 8 else load_ty.abiSize(zcu);
if (cur_offset + need_bytes > try cur_val.typeOf(zcu).abiSizeSema(pt)) {
if (cur_offset + need_bytes > cur_val.typeOf(zcu).abiSize(zcu)) {
return .{ .out_of_bounds = cur_val.typeOf(zcu) };
}
@ -441,7 +441,7 @@ fn loadComptimePtrInner(
.optional => break, // this can only be a pointer-like optional so is terminal
.array => {
const elem_ty = cur_ty.childType(zcu);
const elem_size = try elem_ty.abiSizeSema(pt);
const elem_size = elem_ty.abiSize(zcu);
const elem_idx = cur_offset / elem_size;
const next_elem_off = elem_size * (elem_idx + 1);
if (cur_offset + need_bytes <= next_elem_off) {
@ -457,7 +457,7 @@ fn loadComptimePtrInner(
.@"packed" => break, // let the bitcast logic handle this
.@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| {
const start_off = cur_ty.structFieldOffset(field_idx, zcu);
const end_off = start_off + try cur_ty.fieldType(field_idx, zcu).abiSizeSema(pt);
const end_off = start_off + cur_ty.fieldType(field_idx, zcu).abiSize(zcu);
if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) {
cur_val = try cur_val.getElem(sema.pt, field_idx);
cur_offset -= start_off;
@ -484,7 +484,7 @@ fn loadComptimePtrInner(
};
// The payload always has offset 0. If it's big enough
// to represent the whole load type, we can use it.
if (try payload.typeOf(zcu).abiSizeSema(pt) >= need_bytes) {
if (payload.typeOf(zcu).abiSize(zcu) >= need_bytes) {
cur_val = payload;
} else {
break;
@ -753,8 +753,8 @@ fn prepareComptimePtrStore(
const store_one_ty, const store_count = store_ty.arrayBase(zcu);
const extra_base_index: u64 = if (ptr.byte_offset == 0) 0 else idx: {
if (try store_one_ty.comptimeOnlySema(pt)) break :restructure_array;
const elem_len = try store_one_ty.abiSizeSema(pt);
if (store_one_ty.comptimeOnly(zcu)) break :restructure_array;
const elem_len = store_one_ty.abiSize(zcu);
if (ptr.byte_offset % elem_len != 0) break :restructure_array;
break :idx @divExact(ptr.byte_offset, elem_len);
};
@ -807,11 +807,11 @@ fn prepareComptimePtrStore(
var cur_val: *MutableValue, var cur_offset: u64 = switch (base_strat) {
.direct => |direct| .{ direct.val, 0 },
// It's okay to do `abiSize` - the comptime-only case will be caught below.
.index => |index| .{ index.val, index.elem_index * try index.val.typeOf(zcu).childType(zcu).abiSizeSema(pt) },
.index => |index| .{ index.val, index.elem_index * index.val.typeOf(zcu).childType(zcu).abiSize(zcu) },
.flat_index => |flat_index| .{
flat_index.val,
// It's okay to do `abiSize` - the comptime-only case will be caught below.
flat_index.flat_elem_index * try flat_index.val.typeOf(zcu).arrayBase(zcu)[0].abiSizeSema(pt),
flat_index.flat_elem_index * flat_index.val.typeOf(zcu).arrayBase(zcu)[0].abiSize(zcu),
},
.reinterpret => |r| .{ r.val, r.byte_offset },
else => unreachable,
@ -823,12 +823,12 @@ fn prepareComptimePtrStore(
}
if (store_ty.zigTypeTag(zcu) == .array and array_offset > 0) {
cur_offset += try store_ty.childType(zcu).abiSizeSema(pt) * array_offset;
cur_offset += store_ty.childType(zcu).abiSize(zcu) * array_offset;
}
const need_bytes = try store_ty.abiSizeSema(pt);
const need_bytes = store_ty.abiSize(zcu);
if (cur_offset + need_bytes > try cur_val.typeOf(zcu).abiSizeSema(pt)) {
if (cur_offset + need_bytes > cur_val.typeOf(zcu).abiSize(zcu)) {
return .{ .out_of_bounds = cur_val.typeOf(zcu) };
}
@ -863,7 +863,7 @@ fn prepareComptimePtrStore(
.optional => break, // this can only be a pointer-like optional so is terminal
.array => {
const elem_ty = cur_ty.childType(zcu);
const elem_size = try elem_ty.abiSizeSema(pt);
const elem_size = elem_ty.abiSize(zcu);
const elem_idx = cur_offset / elem_size;
const next_elem_off = elem_size * (elem_idx + 1);
if (cur_offset + need_bytes <= next_elem_off) {
@ -879,7 +879,7 @@ fn prepareComptimePtrStore(
.@"packed" => break, // let the bitcast logic handle this
.@"extern" => for (0..cur_ty.structFieldCount(zcu)) |field_idx| {
const start_off = cur_ty.structFieldOffset(field_idx, zcu);
const end_off = start_off + try cur_ty.fieldType(field_idx, zcu).abiSizeSema(pt);
const end_off = start_off + cur_ty.fieldType(field_idx, zcu).abiSize(zcu);
if (cur_offset >= start_off and cur_offset + need_bytes <= end_off) {
cur_val = try cur_val.elem(pt, sema.arena, field_idx);
cur_offset -= start_off;
@ -902,7 +902,7 @@ fn prepareComptimePtrStore(
};
// The payload always has offset 0. If it's big enough
// to represent the whole load type, we can use it.
if (try payload.typeOf(zcu).abiSizeSema(pt) >= need_bytes) {
if (payload.typeOf(zcu).abiSize(zcu) >= need_bytes) {
cur_val = payload;
} else {
break;

View file

@ -0,0 +1,993 @@
const std = @import("std");
const assert = std.debug.assert;
const mem = std.mem;
const Sema = @import("../Sema.zig");
const Block = Sema.Block;
const Type = @import("../Type.zig");
const Value = @import("../Value.zig");
const Zcu = @import("../Zcu.zig");
const CompileError = Zcu.CompileError;
const SemaError = Zcu.SemaError;
const LazySrcLoc = Zcu.LazySrcLoc;
const InternPool = @import("../InternPool.zig");
const Alignment = InternPool.Alignment;
const arith = @import("arith.zig");
/// Ensures that `ty` has known layout, including alignment, size, and (where relevant) field offsets.
/// `ty` may be any type; its layout is resolved *recursively* if necessary.
/// Adds incremental dependencies tracking any required type resolution.
/// MLUGG TODO: to make the langspec non-stupid, we need to call this from WAY fewer places (the conditions need to be less specific).
/// e.g. I think creating the type `fn (A, B) C` should force layout resolution of `A`,`B`,`C`, which will simplify some `analyzeCall` logic.
/// wait i just realised that's probably a terrible idea, fns are a common cause of dep loops rn... so maybe not lol idk...
/// perhaps "layout resolution" for a function should resolve layout of ret ty and stuff, idk. justification: the "layout" of a function is whether
/// fnHasRuntimeBits, which depends whether the ret ty is comptime-only, i.e. the ret ty layout
/// MLUGG TODO: to be clear, i should audit EVERY use of this before PRing
pub fn ensureLayoutResolved(sema: *Sema, ty: Type) SemaError!void {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    switch (ip.indexToKey(ty.toIntern())) {
        // These types never require lazy resolution; their layout is always known.
        .int_type,
        .ptr_type,
        .anyframe_type,
        .simple_type,
        .opaque_type,
        .enum_type,
        .error_set_type,
        .inferred_error_set_type,
        => {},
        // A function type's "layout" is taken to include the layouts of its
        // parameter types and return type (see the doc comment above).
        .func_type => |func_type| {
            for (func_type.param_types.get(ip)) |param_ty| {
                try ensureLayoutResolved(sema, .fromInterned(param_ty));
            }
            try ensureLayoutResolved(sema, .fromInterned(func_type.return_type));
        },
        // For these types, layout is fully determined by the layout of the child
        // (or payload / field) type(s), so just recurse.
        .array_type => |arr| return ensureLayoutResolved(sema, .fromInterned(arr.child)),
        .vector_type => |vec| return ensureLayoutResolved(sema, .fromInterned(vec.child)),
        .opt_type => |child| return ensureLayoutResolved(sema, .fromInterned(child)),
        .error_union_type => |eu| return ensureLayoutResolved(sema, .fromInterned(eu.payload_type)),
        .tuple_type => |tuple| for (tuple.types.get(ip)) |field_ty| {
            try ensureLayoutResolved(sema, .fromInterned(field_ty));
        },
        // Structs and unions are the interesting case: their layout is resolved by a
        // dedicated `type_layout` analysis unit, which we depend on here.
        .struct_type, .union_type => {
            try sema.declareDependency(.{ .type_layout = ty.toIntern() });
            if (zcu.analysis_in_progress.contains(.wrap(.{ .type_layout = ty.toIntern() }))) {
                // This type's layout is already being resolved further up the stack, so
                // resolving it again here would recurse forever: this is a dependency loop.
                // TODO: better error message
                return sema.failWithOwnedErrorMsg(null, try sema.errMsg(
                    ty.srcLoc(zcu),
                    "{s} '{f}' depends on itself",
                    .{ @tagName(ty.zigTypeTag(zcu)), ty.fmt(pt) },
                ));
            }
            try pt.ensureTypeLayoutUpToDate(ty);
        },
        // values, not types
        .undef,
        .simple_value,
        .variable,
        .@"extern",
        .func,
        .int,
        .err,
        .error_union,
        .enum_literal,
        .enum_tag,
        .empty_enum_value,
        .float,
        .ptr,
        .slice,
        .opt,
        .aggregate,
        .un,
        // memoization, not types
        .memoized_call,
        => unreachable,
    }
}
/// Asserts that `ty` is either a `struct` type, or an `enum` type.
/// If `ty` is a struct, ensures that fields' default values are resolved.
/// If `ty` is an enum, ensures that fields' integer tag values are resolved.
/// Adds incremental dependencies tracking the required type resolution.
pub fn ensureFieldInitsResolved(sema: *Sema, ty: Type) SemaError!void {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;
    switch (ip.indexToKey(ty.toIntern())) {
        .struct_type, .enum_type => {},
        else => unreachable, // assertion failure
    }
    // Field inits are resolved by a dedicated `type_inits` analysis unit, which we depend on here.
    try sema.declareDependency(.{ .type_inits = ty.toIntern() });
    if (zcu.analysis_in_progress.contains(.wrap(.{ .type_inits = ty.toIntern() }))) {
        // This type's field inits are already being resolved further up the stack, so
        // resolving them again here would recurse forever: this is a dependency loop.
        // TODO: better error message
        return sema.failWithOwnedErrorMsg(null, try sema.errMsg(
            ty.srcLoc(zcu),
            "{s} '{f}' depends on itself",
            .{ @tagName(ty.zigTypeTag(zcu)), ty.fmt(pt) },
        ));
    }
    try pt.ensureTypeInitsUpToDate(ty);
}
/// Asserts that `struct_ty` is a non-packed non-tuple struct, and that `sema.owner` is that type.
/// Semantically analyzes each field's type and explicit alignment from ZIR, writing the results
/// into the struct's `InternPool` data, then delegates to `finishStructLayout` to compute offsets
/// and overall size/alignment.
/// This function *does* register the `src_hash` dependency on the struct.
pub fn resolveStructLayout(sema: *Sema, struct_ty: Type) CompileError!void {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const comp = zcu.comp;
    const gpa = comp.gpa;
    const ip = &zcu.intern_pool;
    assert(sema.owner.unwrap().type_layout == struct_ty.toIntern());
    const struct_obj = ip.loadStructType(struct_ty.toIntern());
    const zir_index = struct_obj.zir_index.resolve(ip).?;
    assert(struct_obj.layout != .@"packed"); // packed structs go through `resolvePackedStructLayout`
    // The field type/align expressions live in the struct's ZIR, so re-run this unit when it changes.
    try sema.declareDependency(.{ .src_hash = struct_obj.zir_index });
    var block: Block = .{
        .parent = null,
        .sema = sema,
        .namespace = struct_obj.namespace,
        .instructions = .{},
        .inlining = null,
        .comptime_reason = undefined, // always set before using `block`
        .src_base_inst = struct_obj.zir_index,
        .type_name_ctx = struct_obj.name,
    };
    // All field bodies are comptime-evaluated, so no runtime instructions may be left behind.
    defer assert(block.instructions.items.len == 0);
    const zir_struct = sema.code.getStructDecl(zir_index);
    var field_it = zir_struct.iterateFields();
    while (field_it.next()) |zir_field| {
        const field_ty_src: LazySrcLoc = .{
            .base_node_inst = struct_obj.zir_index,
            .offset = .{ .container_field_type = zir_field.idx },
        };
        const field_align_src: LazySrcLoc = .{
            .base_node_inst = struct_obj.zir_index,
            .offset = .{ .container_field_align = zir_field.idx },
        };
        // Evaluate the field's type expression.
        const field_ty: Type = field_ty: {
            block.comptime_reason = .{ .reason = .{
                .src = field_ty_src,
                .r = .{ .simple = .struct_field_types },
            } };
            const type_ref = try sema.resolveInlineBody(&block, zir_field.type_body, zir_index);
            break :field_ty try sema.analyzeAsType(&block, field_ty_src, type_ref);
        };
        assert(!field_ty.isGenericPoison());
        // Field layouts are needed below (in `finishStructLayout`) to compute offsets.
        try sema.ensureLayoutResolved(field_ty);
        // Evaluate the field's explicit `align(...)` annotation, if any.
        const explicit_field_align: Alignment = a: {
            block.comptime_reason = .{ .reason = .{
                .src = field_align_src,
                .r = .{ .simple = .struct_field_attrs },
            } };
            const align_body = zir_field.align_body orelse break :a .none;
            const align_ref = try sema.resolveInlineBody(&block, align_body, zir_index);
            break :a try sema.analyzeAsAlign(&block, field_align_src, align_ref);
        };
        if (field_ty.zigTypeTag(zcu) == .@"opaque") {
            return sema.failWithOwnedErrorMsg(&block, msg: {
                const msg = try sema.errMsg(field_ty_src, "cannot directly embed opaque type '{f}' in struct", .{field_ty.fmt(pt)});
                errdefer msg.destroy(gpa);
                try sema.errNote(field_ty_src, msg, "opaque types have unknown size", .{});
                try sema.addDeclaredHereNote(msg, field_ty);
                break :msg msg;
            });
        }
        if (struct_obj.layout == .@"extern" and !try sema.validateExternType(field_ty, .struct_field)) {
            return sema.failWithOwnedErrorMsg(&block, msg: {
                const msg = try sema.errMsg(field_ty_src, "extern structs cannot contain fields of type '{f}'", .{field_ty.fmt(pt)});
                errdefer msg.destroy(gpa);
                try sema.explainWhyTypeIsNotExtern(msg, field_ty_src, field_ty, .struct_field);
                try sema.addDeclaredHereNote(msg, field_ty);
                break :msg msg;
            });
        }
        struct_obj.field_types.get(ip)[zir_field.idx] = field_ty.toIntern();
        // `field_aligns` is empty when no field has an explicit alignment, in which case
        // there is nowhere (and no need) to store `.none`.
        if (struct_obj.field_aligns.len != 0) {
            struct_obj.field_aligns.get(ip)[zir_field.idx] = explicit_field_align;
        } else {
            assert(explicit_field_align == .none);
        }
    }
    try finishStructLayout(sema, &block, struct_ty.srcLoc(zcu), struct_ty.toIntern(), &struct_obj);
}
/// Called after populating field types and alignments; populates field offsets, runtime order, and
/// overall struct layout information (size, alignment, comptime-only state, etc).
pub fn finishStructLayout(
    sema: *Sema,
    /// Only used to report compile errors.
    block: *Block,
    struct_src: LazySrcLoc,
    struct_ty: InternPool.Index,
    struct_obj: *const InternPool.LoadedStructType,
) SemaError!void {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const comp = zcu.comp;
    const io = comp.io;
    const ip = &zcu.intern_pool;
    // Aggregate properties of the struct, refined per-field below.
    var comptime_only = false;
    var one_possible_value = true;
    var struct_align: Alignment = .@"1";
    // Unlike `struct_obj.field_aligns`, these are not `.none`.
    const resolved_field_aligns = try sema.arena.alloc(Alignment, struct_obj.field_names.len);
    for (resolved_field_aligns, 0..) |*align_out, field_idx| {
        const field_ty: Type = .fromInterned(struct_obj.field_types.get(ip)[field_idx]);
        // Explicit `align(...)` wins; otherwise use the layout-dependent default.
        const field_align: Alignment = a: {
            if (struct_obj.field_aligns.len != 0) {
                const a = struct_obj.field_aligns.get(ip)[field_idx];
                if (a != .none) break :a a;
            }
            break :a field_ty.defaultStructFieldAlignment(struct_obj.layout, zcu);
        };
        if (!struct_obj.field_is_comptime_bits.get(ip, field_idx)) {
            // Non-`comptime` fields contribute to the struct's layout.
            struct_align = struct_align.maxStrict(field_align);
            if (field_ty.comptimeOnly(zcu)) comptime_only = true;
            if (try field_ty.onePossibleValue(pt) == null) one_possible_value = false;
            if (struct_obj.layout == .auto) {
                struct_obj.field_runtime_order.get(ip)[field_idx] = @enumFromInt(field_idx);
            }
        } else if (struct_obj.layout == .auto) {
            struct_obj.field_runtime_order.get(ip)[field_idx] = .omitted; // comptime fields are not in the runtime order
        }
        align_out.* = field_align;
    }
    // Only `auto`-layout structs have a runtime field order distinct from declaration order;
    // `extern` structs use declaration order by definition.
    if (struct_obj.layout == .auto) {
        const runtime_order = struct_obj.field_runtime_order.get(ip);
        // This logic does not reorder fields; it only moves the omitted ones to the end so that logic
        // elsewhere does not need to special-case. TODO: support field reordering in all the backends!
        if (!zcu.backendSupportsFeature(.field_reordering)) {
            // Stable compaction: shift non-omitted entries left over the omitted ones.
            var i: usize = 0;
            var off: usize = 0;
            while (i + off < runtime_order.len) {
                if (runtime_order[i + off] == .omitted) {
                    off += 1;
                } else {
                    runtime_order[i] = runtime_order[i + off];
                    i += 1;
                }
            }
            // NOTE(review): when `off > 0`, `runtime_order[i..]` is left holding stale duplicates
            // rather than `.omitted` — presumably `iterateRuntimeOrder` stops before them; verify,
            // or explicitly fill the tail with `.omitted`.
        } else {
            // Sort by descending alignment to minimize padding.
            const RuntimeOrder = InternPool.LoadedStructType.RuntimeOrder;
            const AlignSortCtx = struct {
                aligns: []const Alignment,
                fn lessThan(ctx: @This(), a: RuntimeOrder, b: RuntimeOrder) bool {
                    assert(a != .unresolved);
                    assert(b != .unresolved);
                    // `.omitted` entries sort to the end.
                    if (a == .omitted) return false;
                    if (b == .omitted) return true;
                    const a_align = ctx.aligns[@intFromEnum(a)];
                    const b_align = ctx.aligns[@intFromEnum(b)];
                    return a_align.compare(.gt, b_align);
                }
            };
            mem.sortUnstable(
                RuntimeOrder,
                runtime_order,
                @as(AlignSortCtx, .{ .aligns = resolved_field_aligns }),
                AlignSortCtx.lessThan,
            );
        }
    }
    // Lay fields out in runtime order: align each field's offset up, then advance by its size.
    var runtime_order_it = struct_obj.iterateRuntimeOrder(ip);
    var cur_offset: u64 = 0;
    while (runtime_order_it.next()) |field_idx| {
        const field_ty: Type = .fromInterned(struct_obj.field_types.get(ip)[field_idx]);
        const offset = resolved_field_aligns[field_idx].forward(cur_offset);
        struct_obj.field_offsets.get(ip)[field_idx] = @truncate(offset); // truncate because the overflow is handled below
        cur_offset = offset + field_ty.abiSize(zcu);
    }
    // Total size is the final offset rounded up to the struct's alignment; it must fit in a `u32`.
    const struct_size = std.math.cast(u32, struct_align.forward(cur_offset)) orelse return sema.fail(
        block,
        struct_src,
        "struct layout requires size {d}, this compiler implementation supports up to {d}",
        .{ struct_align.forward(cur_offset), std.math.maxInt(u32) },
    );
    // Commit the resolved layout into the intern pool.
    ip.resolveStructLayout(
        io,
        struct_ty,
        struct_size,
        struct_align,
        false, // MLUGG TODO XXX NPV
        one_possible_value,
        comptime_only,
    );
}
/// Asserts that `struct_ty` is a packed struct, and that `sema.owner` is that type.
/// Semantically analyzes each field's type from ZIR, validates that it is packable, accumulates
/// the total bit width, and delegates to `resolvePackedStructBackingInt` to resolve the backing
/// integer type.
/// This function *does* register the `src_hash` dependency on the struct.
pub fn resolvePackedStructLayout(sema: *Sema, struct_ty: Type) CompileError!void {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const comp = zcu.comp;
    const gpa = comp.gpa;
    const ip = &zcu.intern_pool;
    assert(sema.owner.unwrap().type_layout == struct_ty.toIntern());
    const struct_obj = ip.loadStructType(struct_ty.toIntern());
    const zir_index = struct_obj.zir_index.resolve(ip).?;
    assert(struct_obj.layout == .@"packed"); // non-packed structs go through `resolveStructLayout`
    // The field type expressions live in the struct's ZIR, so re-run this unit when it changes.
    try sema.declareDependency(.{ .src_hash = struct_obj.zir_index });
    var block: Block = .{
        .parent = null,
        .sema = sema,
        .namespace = struct_obj.namespace,
        .instructions = .{},
        .inlining = null,
        .comptime_reason = undefined, // always set before using `block`
        .src_base_inst = struct_obj.zir_index,
        .type_name_ctx = struct_obj.name,
    };
    // All field bodies are comptime-evaluated, so no runtime instructions may be left behind.
    defer assert(block.instructions.items.len == 0);
    // Running total of all fields' bit widths; determines the backing integer width.
    var field_bits: u64 = 0;
    const zir_struct = sema.code.getStructDecl(zir_index);
    var field_it = zir_struct.iterateFields();
    while (field_it.next()) |zir_field| {
        const field_ty_src: LazySrcLoc = .{
            .base_node_inst = struct_obj.zir_index,
            .offset = .{ .container_field_type = zir_field.idx },
        };
        // Evaluate the field's type expression.
        const field_ty: Type = field_ty: {
            block.comptime_reason = .{ .reason = .{
                .src = field_ty_src,
                .r = .{ .simple = .struct_field_types },
            } };
            const type_ref = try sema.resolveInlineBody(&block, zir_field.type_body, zir_index);
            break :field_ty try sema.analyzeAsType(&block, field_ty_src, type_ref);
        };
        assert(!field_ty.isGenericPoison());
        struct_obj.field_types.get(ip)[zir_field.idx] = field_ty.toIntern();
        // The field's bit size is needed below, which requires its layout.
        try sema.ensureLayoutResolved(field_ty);
        if (field_ty.zigTypeTag(zcu) == .@"opaque") {
            return sema.failWithOwnedErrorMsg(&block, msg: {
                const msg = try sema.errMsg(field_ty_src, "cannot directly embed opaque type '{f}' in struct", .{field_ty.fmt(pt)});
                errdefer msg.destroy(gpa);
                try sema.errNote(field_ty_src, msg, "opaque types have unknown size", .{});
                try sema.addDeclaredHereNote(msg, field_ty);
                break :msg msg;
            });
        }
        if (!field_ty.packable(zcu)) {
            return sema.failWithOwnedErrorMsg(&block, msg: {
                const msg = try sema.errMsg(field_ty_src, "packed structs cannot contain fields of type '{f}'", .{field_ty.fmt(pt)});
                errdefer msg.destroy(gpa);
                try sema.explainWhyTypeIsNotPackable(msg, field_ty_src, field_ty);
                try sema.addDeclaredHereNote(msg, field_ty);
                break :msg msg;
            });
        }
        assert(!field_ty.comptimeOnly(zcu)); // packable types are not comptime-only
        field_bits += field_ty.bitSize(zcu);
    }
    try resolvePackedStructBackingInt(sema, &block, field_bits, struct_ty, &struct_obj);
}
/// Resolves the backing integer type of the packed struct `struct_ty`, whose fields total
/// `field_bits` bits. An explicitly-declared backing integer is validated against `field_bits`;
/// otherwise an unsigned integer type of exactly `field_bits` bits is inferred and committed to
/// the intern pool.
/// `block` is only used to report compile errors.
pub fn resolvePackedStructBackingInt(
    sema: *Sema,
    block: *Block,
    field_bits: u64,
    struct_ty: Type,
    struct_obj: *const InternPool.LoadedStructType,
) SemaError!void {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const comp = zcu.comp;
    const gpa = comp.gpa;
    const io = comp.io;
    const ip = &zcu.intern_pool;
    switch (struct_obj.packed_backing_mode) {
        .explicit => {
            // We only need to validate the type.
            const backing_ty: Type = .fromInterned(struct_obj.packed_backing_int_type);
            assert(backing_ty.zigTypeTag(zcu) == .int);
            // An explicit backing integer must match the fields' total bit width exactly.
            if (field_bits != backing_ty.intInfo(zcu).bits) return sema.failWithOwnedErrorMsg(block, msg: {
                const src = struct_ty.srcLoc(zcu);
                const msg = try sema.errMsg(src, "backing integer bit width does not match total bit width of fields", .{});
                errdefer msg.destroy(gpa);
                try sema.errNote(src, msg, "backing integer '{f}' has bit width '{d}'", .{ backing_ty.fmt(pt), backing_ty.bitSize(zcu) });
                try sema.errNote(src, msg, "struct fields have total bit width '{d}'", .{field_bits});
                break :msg msg;
            });
        },
        .auto => {
            // We need to generate the inferred tag.
            // Integer types are limited to u16 bit widths, so `field_bits` must fit in a `u16`.
            const want_bits = std.math.cast(u16, field_bits) orelse return sema.fail(
                block,
                struct_ty.srcLoc(zcu),
                "packed struct bit width '{d}' exceeds maximum bit width of 65535",
                .{field_bits},
            );
            const backing_int = try pt.intType(.unsigned, want_bits);
            ip.resolvePackedStructBackingInt(io, struct_ty.toIntern(), backing_int.toIntern());
        },
    }
}
/// Asserts that `struct_ty` is a non-tuple struct, and that `sema.owner` is that type.
/// Evaluates each field's default-value expression from ZIR, coerces it to the field type, and
/// writes the resolved value (or `.none` for fields without a default) into the struct's
/// `field_defaults` in the intern pool.
/// This function *does* register the `src_hash` dependency on the struct.
pub fn resolveStructDefaults(sema: *Sema, struct_ty: Type) CompileError!void {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const comp = zcu.comp;
    const gpa = comp.gpa;
    const ip = &zcu.intern_pool;
    assert(sema.owner.unwrap().type_inits == struct_ty.toIntern());
    // Field types must be known before defaults can be coerced to them.
    try sema.ensureLayoutResolved(struct_ty);
    const struct_obj = ip.loadStructType(struct_ty.toIntern());
    const zir_index = struct_obj.zir_index.resolve(ip).?;
    // The default-value expressions live in the struct's ZIR, so re-run this unit when it changes.
    try sema.declareDependency(.{ .src_hash = struct_obj.zir_index });
    if (struct_obj.field_defaults.len == 0) {
        // The struct has no default field values, so the slice has been omitted.
        return;
    }
    const field_types = struct_obj.field_types.get(ip);
    var block: Block = .{
        .parent = null,
        .sema = sema,
        .namespace = struct_obj.namespace,
        .instructions = .{},
        .inlining = null,
        .comptime_reason = undefined, // always set before using `block`
        .src_base_inst = struct_obj.zir_index,
        .type_name_ctx = struct_obj.name,
    };
    // All default values are comptime-evaluated, so no runtime instructions may be left behind.
    defer assert(block.instructions.items.len == 0);
    // We'll need to map the struct decl instruction to provide result types
    try sema.inst_map.ensureSpaceForInstructions(gpa, &.{zir_index});
    const zir_struct = sema.code.getStructDecl(zir_index);
    var field_it = zir_struct.iterateFields();
    while (field_it.next()) |zir_field| {
        const default_val_src: LazySrcLoc = .{
            .base_node_inst = struct_obj.zir_index,
            .offset = .{ .container_field_value = zir_field.idx },
        };
        block.comptime_reason = .{ .reason = .{
            .src = default_val_src,
            .r = .{ .simple = .struct_field_default_value },
        } };
        const default_body = zir_field.default_body orelse {
            // No `= ...` on this field; record the absence of a default.
            struct_obj.field_defaults.get(ip)[zir_field.idx] = .none;
            continue;
        };
        const field_ty: Type = .fromInterned(field_types[zir_field.idx]);
        const uncoerced = ref: {
            // Provide the result type
            sema.inst_map.putAssumeCapacity(zir_index, .fromIntern(field_ty.toIntern()));
            defer assert(sema.inst_map.remove(zir_index));
            break :ref try sema.resolveInlineBody(&block, default_body, zir_index);
        };
        const coerced = try sema.coerce(&block, field_ty, uncoerced, default_val_src);
        const default_val = try sema.resolveConstValue(&block, default_val_src, coerced, null);
        // A default value that references mutable comptime memory cannot be interned soundly.
        if (default_val.canMutateComptimeVarState(zcu)) {
            const field_name = struct_obj.field_names.get(ip)[zir_field.idx];
            return sema.failWithContainsReferenceToComptimeVar(&block, default_val_src, field_name, "field default value", default_val);
        }
        struct_obj.field_defaults.get(ip)[zir_field.idx] = default_val.toIntern();
    }
}
/// This logic must be kept in sync with `Type.getUnionLayout`.
/// Resolves each field's type and explicit alignment for a non-`packed` union from ZIR,
/// emitting errors for opaque field types and (in `extern` unions) non-extern-compatible
/// field types, then delegates to `finishUnionLayout` to compute the overall layout.
/// Asserts that `sema.owner` is the `type_layout` unit for `union_ty`.
pub fn resolveUnionLayout(sema: *Sema, union_ty: Type) CompileError!void {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const comp = zcu.comp;
    const gpa = comp.gpa;
    const ip = &zcu.intern_pool;
    // Layout resolution of this union must be the currently-analyzed unit.
    assert(sema.owner.unwrap().type_layout == union_ty.toIntern());
    const union_obj = ip.loadUnionType(union_ty.toIntern());
    const zir_index = union_obj.zir_index.resolve(ip).?;
    assert(union_obj.layout != .@"packed"); // packed unions go through `resolvePackedUnionLayout`
    // Field types/alignments come from the declaration's ZIR, so re-resolution is
    // needed whenever the declaration's source hash changes.
    try sema.declareDependency(.{ .src_hash = union_obj.zir_index });
    var block: Block = .{
        .parent = null,
        .sema = sema,
        .namespace = union_obj.namespace,
        .instructions = .{},
        .inlining = null,
        .comptime_reason = undefined, // always set before using `block`
        .src_base_inst = union_obj.zir_index,
        .type_name_ctx = union_obj.name,
    };
    defer assert(block.instructions.items.len == 0);
    const zir_union = sema.code.getUnionDecl(zir_index);
    var field_it = zir_union.iterateFields();
    while (field_it.next()) |zir_field| {
        const field_ty_src: LazySrcLoc = .{
            .base_node_inst = union_obj.zir_index,
            .offset = .{ .container_field_type = zir_field.idx },
        };
        const field_align_src: LazySrcLoc = .{
            .base_node_inst = union_obj.zir_index,
            .offset = .{ .container_field_align = zir_field.idx },
        };
        const field_ty: Type = field_ty: {
            block.comptime_reason = .{ .reason = .{
                .src = field_ty_src,
                .r = .{ .simple = .union_field_types },
            } };
            // A field with no type expression (e.g. in a tagged union) is `void`.
            const type_body = zir_field.type_body orelse break :field_ty .void;
            const type_ref = try sema.resolveInlineBody(&block, type_body, zir_index);
            break :field_ty try sema.analyzeAsType(&block, field_ty_src, type_ref);
        };
        assert(!field_ty.isGenericPoison());
        // Record the field type before resolving its layout, since layout
        // resolution of `field_ty` may need to observe this union's fields.
        union_obj.field_types.get(ip)[zir_field.idx] = field_ty.toIntern();
        try sema.ensureLayoutResolved(field_ty);
        const explicit_field_align: Alignment = a: {
            block.comptime_reason = .{ .reason = .{
                .src = field_align_src,
                .r = .{ .simple = .union_field_attrs },
            } };
            const align_body = zir_field.align_body orelse break :a .none;
            const align_ref = try sema.resolveInlineBody(&block, align_body, zir_index);
            break :a try sema.analyzeAsAlign(&block, field_align_src, align_ref);
        };
        // `field_aligns` has zero length when no field in the union has an
        // explicit alignment, in which case there is nothing to store.
        if (union_obj.field_aligns.len != 0) {
            union_obj.field_aligns.get(ip)[zir_field.idx] = explicit_field_align;
        } else {
            assert(explicit_field_align == .none);
        }
        if (field_ty.zigTypeTag(zcu) == .@"opaque") {
            return sema.failWithOwnedErrorMsg(&block, msg: {
                const msg = try sema.errMsg(field_ty_src, "cannot directly embed opaque type '{f}' in union", .{field_ty.fmt(pt)});
                errdefer msg.destroy(gpa);
                try sema.errNote(field_ty_src, msg, "opaque types have unknown size", .{});
                try sema.addDeclaredHereNote(msg, field_ty);
                break :msg msg;
            });
        }
        if (union_obj.layout == .@"extern" and !try sema.validateExternType(field_ty, .union_field)) {
            return sema.failWithOwnedErrorMsg(&block, msg: {
                const msg = try sema.errMsg(field_ty_src, "extern unions cannot contain fields of type '{f}'", .{field_ty.fmt(pt)});
                errdefer msg.destroy(gpa);
                try sema.explainWhyTypeIsNotExtern(msg, field_ty_src, field_ty, .union_field);
                try sema.addDeclaredHereNote(msg, field_ty);
                break :msg msg;
            });
        }
    }
    // All field types and alignments are populated; compute size/alignment/etc.
    try finishUnionLayout(
        sema,
        &block,
        union_ty.srcLoc(zcu),
        union_ty.toIntern(),
        &union_obj,
        .fromInterned(union_obj.enum_tag_type),
    );
}
/// Called after populating field types and alignments; populates overall union layout
/// information (size, alignment, comptime-only state, etc).
///
/// The payload is laid out as the maximum field size, aligned to the maximum field
/// alignment; if there is a runtime tag, the tag and payload are placed adjacently
/// (whichever has greater alignment first) and padded to the combined alignment.
/// Fails with a compile error if the resulting size exceeds `u32` range.
pub fn finishUnionLayout(
    sema: *Sema,
    /// Only used to report compile errors.
    block: *Block,
    union_src: LazySrcLoc,
    union_ty: InternPool.Index,
    union_obj: *const InternPool.LoadedUnionType,
    enum_tag_ty: Type,
) SemaError!void {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const comp = zcu.comp;
    const io = comp.io;
    const ip = &zcu.intern_pool;
    var payload_align: Alignment = .@"1";
    var payload_size: u64 = 0;
    var comptime_only = false;
    // How many distinct values this union can hold, counting only non-`noreturn` fields.
    var possible_values: enum { none, one, many } = .none;
    for (0..union_obj.field_types.len) |field_idx| {
        const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_idx]);
        const field_align: Alignment = a: {
            if (union_obj.field_aligns.len != 0) {
                const a = union_obj.field_aligns.get(ip)[field_idx];
                if (a != .none) break :a a;
            }
            // No explicit alignment: use the type's natural ABI alignment.
            break :a field_ty.abiAlignment(zcu);
        };
        payload_align = payload_align.maxStrict(field_align);
        payload_size = @max(payload_size, field_ty.abiSize(zcu));
        if (field_ty.comptimeOnly(zcu)) comptime_only = true;
        if (!field_ty.isNoReturn(zcu)) {
            // `onePossibleValue` returns non-null iff the type has exactly one
            // possible value (an OPV); null means it has many possible values.
            if (try field_ty.onePossibleValue(pt) == null) {
                possible_values = .many; // this field alone has many possible values
            } else switch (possible_values) {
                .none => possible_values = .one, // there were none, now there is this field's OPV
                .one => possible_values = .many, // there was one, now there are two
                .many => {},
            }
        }
    }
    const size: u64, const padding: u64, const alignment: Alignment = layout: {
        if (union_obj.runtime_tag == .none) {
            // No runtime tag: the layout is just the payload padded to its own alignment.
            break :layout .{ payload_align.forward(payload_size), 0, payload_align };
        }
        const tag_align = enum_tag_ty.abiAlignment(zcu);
        const tag_size = enum_tag_ty.abiSize(zcu);
        // The layout will either be (tag, payload, padding) or (payload, tag, padding) depending on
        // which has larger alignment. So the overall size is just the tag and payload sizes, added,
        // and padded to the larger alignment.
        const alignment = tag_align.maxStrict(payload_align);
        const unpadded_size = tag_size + payload_size;
        const size = alignment.forward(unpadded_size);
        break :layout .{ size, size - unpadded_size, alignment };
    };
    const casted_size = std.math.cast(u32, size) orelse return sema.fail(
        block,
        union_src,
        "union layout requires size {d}, this compiler implementation supports up to {d}",
        .{ size, std.math.maxInt(u32) },
    );
    ip.resolveUnionLayout(
        io,
        union_ty,
        casted_size,
        @intCast(padding), // okay because padding is no greater than size
        alignment,
        possible_values == .none, // MLUGG TODO: make sure queries use `LoadedUnionType.has_no_possible_value`!
        possible_values == .one,
        comptime_only,
    );
}
/// Resolves the layout of a `packed` union: analyzes every field's type from the
/// declaration's ZIR, rejects opaque and non-packable field types, and then
/// computes the union's backing integer via `resolvePackedUnionBackingInt`.
/// Asserts that `sema.owner` is the `type_layout` unit for `union_ty`.
pub fn resolvePackedUnionLayout(sema: *Sema, union_ty: Type) CompileError!void {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const comp = zcu.comp;
    const gpa = comp.gpa;
    const ip = &zcu.intern_pool;
    assert(sema.owner.unwrap().type_layout == union_ty.toIntern());
    const union_obj = ip.loadUnionType(union_ty.toIntern());
    const zir_index = union_obj.zir_index.resolve(ip).?;
    assert(union_obj.layout == .@"packed");
    // Re-resolve whenever the declaration's source hash changes.
    try sema.declareDependency(.{ .src_hash = union_obj.zir_index });
    var block: Block = .{
        .parent = null,
        .sema = sema,
        .namespace = union_obj.namespace,
        .instructions = .{},
        .inlining = null,
        .comptime_reason = undefined, // always set before using `block`
        .src_base_inst = union_obj.zir_index,
        .type_name_ctx = union_obj.name,
    };
    defer assert(block.instructions.items.len == 0);
    const union_decl = sema.code.getUnionDecl(zir_index);
    var fields = union_decl.iterateFields();
    while (fields.next()) |field| {
        const ty_src: LazySrcLoc = .{
            .base_node_inst = union_obj.zir_index,
            .offset = .{ .container_field_type = field.idx },
        };
        block.comptime_reason = .{ .reason = .{
            .src = ty_src,
            .r = .{ .simple = .union_field_types },
        } };
        // MLUGG TODO: i think this should probably be a compile error? (if so, it's an astgen one, right?)
        const field_ty: Type = if (field.type_body) |body| ty: {
            const ty_ref = try sema.resolveInlineBody(&block, body, zir_index);
            break :ty try sema.analyzeAsType(&block, ty_src, ty_ref);
        } else .void;
        assert(!field_ty.isGenericPoison());
        union_obj.field_types.get(ip)[field.idx] = field_ty.toIntern();
        assert(field.align_body == null); // packed union fields cannot be aligned
        assert(field.value_body == null); // packed union fields cannot have tag values
        try sema.ensureLayoutResolved(field_ty);
        if (field_ty.zigTypeTag(zcu) == .@"opaque") {
            return sema.failWithOwnedErrorMsg(&block, msg: {
                const msg = try sema.errMsg(ty_src, "cannot directly embed opaque type '{f}' in union", .{field_ty.fmt(pt)});
                errdefer msg.destroy(gpa);
                try sema.errNote(ty_src, msg, "opaque types have unknown size", .{});
                try sema.addDeclaredHereNote(msg, field_ty);
                break :msg msg;
            });
        }
        if (!field_ty.packable(zcu)) {
            return sema.failWithOwnedErrorMsg(&block, msg: {
                const msg = try sema.errMsg(ty_src, "packed unions cannot contain fields of type '{f}'", .{field_ty.fmt(pt)});
                errdefer msg.destroy(gpa);
                try sema.explainWhyTypeIsNotPackable(msg, ty_src, field_ty);
                try sema.addDeclaredHereNote(msg, field_ty);
                break :msg msg;
            });
        }
        assert(!field_ty.comptimeOnly(zcu)); // packable types are not comptime-only
    }
    try resolvePackedUnionBackingInt(sema, &block, union_ty, &union_obj, false);
}
/// Determines and interns the backing integer type of a packed union.
/// Assumes all field types of `union_obj` have already been resolved (and are packable).
/// With an explicit backing integer, verifies every field's bit width matches it;
/// in auto mode, verifies all fields share one bit width and derives an unsigned
/// integer of that width. `is_reified` selects error source locations appropriate
/// for `@Type`-reified unions (which have no per-field source nodes).
pub fn resolvePackedUnionBackingInt(
    sema: *Sema,
    block: *Block,
    union_ty: Type,
    union_obj: *const InternPool.LoadedUnionType,
    is_reified: bool,
) SemaError!void {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const comp = zcu.comp;
    const gpa = comp.gpa;
    const io = comp.io;
    const ip = &zcu.intern_pool;
    switch (union_obj.packed_backing_mode) {
        .explicit => {
            // An explicit backing integer was declared; every field must have
            // exactly its bit width.
            const backing_int_type: Type = .fromInterned(union_obj.packed_backing_int_type);
            const backing_int_bits = backing_int_type.intInfo(zcu).bits;
            for (union_obj.field_types.get(ip), 0..) |field_type_ip, field_idx| {
                const field_type: Type = .fromInterned(field_type_ip);
                const field_bits = field_type.bitSize(zcu);
                if (field_bits != backing_int_bits) return sema.failWithOwnedErrorMsg(block, msg: {
                    const field_ty_src: LazySrcLoc = .{
                        .base_node_inst = union_obj.zir_index,
                        // Reified unions have no field source nodes; point at the type itself.
                        .offset = if (is_reified)
                            .nodeOffset(.zero)
                        else
                            .{ .container_field_type = @intCast(field_idx) },
                    };
                    const msg = try sema.errMsg(field_ty_src, "field bit width does not match backing integer", .{});
                    errdefer msg.destroy(gpa);
                    try sema.errNote(field_ty_src, msg, "field type '{f}' has bit width '{d}'", .{ field_type.fmt(pt), field_bits });
                    try sema.errNote(field_ty_src, msg, "backing integer '{f}' has bit width '{d}'", .{ backing_int_type.fmt(pt), backing_int_bits });
                    try sema.errNote(field_ty_src, msg, "all fields in a packed union must have the same bit width", .{});
                    break :msg msg;
                });
            }
        },
        .auto => switch (union_obj.field_types.len) {
            // A fieldless packed union is backed by `u0`.
            0 => ip.resolvePackedUnionBackingInt(io, union_ty.toIntern(), .u0_type),
            else => {
                // The first field's bit width is the reference; all later fields
                // must match it exactly.
                const field_types = union_obj.field_types.get(ip);
                const first_field_type: Type = .fromInterned(field_types[0]);
                const first_field_bits = first_field_type.bitSize(zcu);
                for (field_types[1..], 1..) |field_type_ip, field_idx| {
                    const field_type: Type = .fromInterned(field_type_ip);
                    const field_bits = field_type.bitSize(zcu);
                    if (field_bits != first_field_bits) return sema.failWithOwnedErrorMsg(block, msg: {
                        const first_field_ty_src: LazySrcLoc = .{
                            .base_node_inst = union_obj.zir_index,
                            .offset = if (is_reified)
                                .nodeOffset(.zero)
                            else
                                .{ .container_field_type = 0 },
                        };
                        const field_ty_src: LazySrcLoc = .{
                            .base_node_inst = union_obj.zir_index,
                            .offset = if (is_reified)
                                .nodeOffset(.zero)
                            else
                                .{ .container_field_type = @intCast(field_idx) },
                        };
                        const msg = try sema.errMsg(field_ty_src, "field bit width does not match earlier field", .{});
                        errdefer msg.destroy(gpa);
                        try sema.errNote(field_ty_src, msg, "field type '{f}' has bit width '{d}'", .{ field_type.fmt(pt), field_bits });
                        try sema.errNote(first_field_ty_src, msg, "other field type '{f}' has bit width '{d}'", .{ first_field_type.fmt(pt), first_field_bits });
                        try sema.errNote(field_ty_src, msg, "all fields in a packed union must have the same bit width", .{});
                        break :msg msg;
                    });
                }
                // Integer types are limited to 65535 bits, so the backing width must fit in u16.
                const backing_int_bits = std.math.cast(u16, first_field_bits) orelse return sema.fail(
                    block,
                    block.nodeOffset(.zero),
                    "packed union bit width '{d}' exceeds maximum bit width of 65535",
                    .{first_field_bits},
                );
                const backing_int_type = try pt.intType(.unsigned, backing_int_bits);
                ip.resolvePackedUnionBackingInt(io, union_ty.toIntern(), backing_int_type.toIntern());
            },
        },
    }
}
/// Asserts that `enum_ty` is an enum and that `sema.owner` is that type.
/// This function *does* register the `src_hash` dependency on the enum.
///
/// Populates the enum's field values: explicit values are evaluated from ZIR
/// (from the owning union's declaration for generated union tags), implicit
/// values are auto-numbered from the previous field, and duplicates are rejected.
pub fn resolveEnumValues(sema: *Sema, enum_ty: Type) CompileError!void {
    const pt = sema.pt;
    const zcu = pt.zcu;
    const comp = zcu.comp;
    const gpa = comp.gpa;
    const ip = &zcu.intern_pool;
    assert(sema.owner.unwrap().type_inits == enum_ty.toIntern());
    const enum_obj = ip.loadEnumType(enum_ty.toIntern());
    // We'll populate this map.
    const field_value_map = enum_obj.field_value_map.unwrap() orelse {
        // The enum has an automatically generated tag and is auto-numbered. We know that we have
        // generated a suitably large type in `analyzeEnumDecl`, so we have no work to do.
        return;
    };
    // For the generated tag type of a union, explicit values live in the union's
    // ZIR declaration rather than an enum declaration.
    const maybe_parent_union_obj: ?InternPool.LoadedUnionType = un: {
        if (enum_obj.owner_union == .none) break :un null;
        break :un ip.loadUnionType(enum_obj.owner_union);
    };
    const tracked_inst = enum_obj.zir_index.unwrap() orelse maybe_parent_union_obj.?.zir_index;
    const zir_index = tracked_inst.resolve(ip).?;
    try sema.declareDependency(.{ .src_hash = tracked_inst });
    var block: Block = .{
        .parent = null,
        .sema = sema,
        .namespace = enum_obj.namespace,
        .instructions = .{},
        .inlining = null,
        .comptime_reason = undefined, // always set before using `block`
        .src_base_inst = tracked_inst,
        .type_name_ctx = enum_obj.name,
    };
    defer assert(block.instructions.items.len == 0);
    const int_tag_ty: Type = .fromInterned(enum_obj.int_tag_type);
    // Map the enum (or union) decl instruction to provide the tag type as the result type
    try sema.inst_map.ensureSpaceForInstructions(gpa, &.{zir_index});
    sema.inst_map.putAssumeCapacity(zir_index, .fromIntern(int_tag_ty.toIntern()));
    defer assert(sema.inst_map.remove(zir_index));
    // First, populate any explicitly provided values. This is the part that actually depends on
    // the ZIR, and hence depends on whether this is a declared or generated enum. If any explicit
    // value is invalid, we'll emit an error here.
    if (maybe_parent_union_obj) |union_obj| {
        const zir_union = sema.code.getUnionDecl(zir_index);
        var field_it = zir_union.iterateFields();
        while (field_it.next()) |zir_field| {
            const field_val_src: LazySrcLoc = .{
                .base_node_inst = union_obj.zir_index,
                .offset = .{ .container_field_value = zir_field.idx },
            };
            block.comptime_reason = .{ .reason = .{
                .src = field_val_src,
                .r = .{ .simple = .enum_field_values },
            } };
            const value_body = zir_field.value_body orelse {
                // No explicit value; leave `.none` for the implicit-numbering pass below.
                enum_obj.field_values.get(ip)[zir_field.idx] = .none;
                continue;
            };
            const uncoerced = try sema.resolveInlineBody(&block, value_body, zir_index);
            const coerced = try sema.coerce(&block, int_tag_ty, uncoerced, field_val_src);
            // Tag values must be fully defined (`undefined` is not a valid tag value),
            // matching the declared-enum path below.
            const val = try sema.resolveConstDefinedValue(&block, field_val_src, coerced, null);
            enum_obj.field_values.get(ip)[zir_field.idx] = val.toIntern();
        }
    } else {
        const zir_enum = sema.code.getEnumDecl(zir_index);
        var field_it = zir_enum.iterateFields();
        while (field_it.next()) |zir_field| {
            const field_val_src: LazySrcLoc = .{
                .base_node_inst = enum_obj.zir_index.unwrap().?,
                .offset = .{ .container_field_value = zir_field.idx },
            };
            block.comptime_reason = .{ .reason = .{
                .src = field_val_src,
                .r = .{ .simple = .enum_field_values },
            } };
            const value_body = zir_field.value_body orelse {
                enum_obj.field_values.get(ip)[zir_field.idx] = .none;
                continue;
            };
            const uncoerced = try sema.resolveInlineBody(&block, value_body, zir_index);
            const coerced = try sema.coerce(&block, int_tag_ty, uncoerced, field_val_src);
            const val = try sema.resolveConstDefinedValue(&block, field_val_src, coerced, null);
            enum_obj.field_values.get(ip)[zir_field.idx] = val.toIntern();
        }
    }
    // Explicit values are set. Now we'll go through the whole array and figure out the final
    // field values. This is also where we'll detect duplicates.
    for (0..enum_obj.field_names.len) |field_idx| {
        const field_val_src: LazySrcLoc = .{
            .base_node_inst = tracked_inst,
            .offset = .{ .container_field_value = @intCast(field_idx) },
        };
        // If the field value was not specified, compute the implicit value.
        const field_val = val: {
            const explicit_val = enum_obj.field_values.get(ip)[field_idx];
            if (explicit_val != .none) break :val explicit_val;
            if (field_idx == 0) {
                // Implicit value is 0, which is valid for every integer type.
                const val = (try pt.intValue(int_tag_ty, 0)).toIntern();
                enum_obj.field_values.get(ip)[field_idx] = val;
                break :val val;
            }
            // Implicit non-initial value: take the previous field value and add one.
            const prev_field_val: Value = .fromInterned(enum_obj.field_values.get(ip)[field_idx - 1]);
            const result = try arith.incrementDefinedInt(sema, int_tag_ty, prev_field_val);
            if (result.overflow) return sema.fail(
                &block,
                field_val_src,
                "enum tag value '{f}' too large for type '{f}'",
                .{ result.val.fmtValueSema(pt, sema), int_tag_ty.fmt(pt) },
            );
            const val = result.val.toIntern();
            enum_obj.field_values.get(ip)[field_idx] = val;
            break :val val;
        };
        // Detect duplicates against all previously finalized values.
        const adapter: InternPool.Index.Adapter = .{ .indexes = enum_obj.field_values.get(ip)[0..field_idx] };
        const gop = field_value_map.get(ip).getOrPutAssumeCapacityAdapted(field_val, adapter);
        if (!gop.found_existing) continue;
        const prev_field_val_src: LazySrcLoc = .{
            .base_node_inst = tracked_inst,
            .offset = .{ .container_field_value = @intCast(gop.index) },
        };
        return sema.failWithOwnedErrorMsg(&block, msg: {
            const msg = try sema.errMsg(field_val_src, "enum tag value '{f}' already taken", .{
                Value.fromInterned(field_val).fmtValueSema(pt, sema),
            });
            errdefer msg.destroy(gpa);
            try sema.errNote(prev_field_val_src, msg, "previous occurrence here", .{});
            break :msg msg;
        });
    }
    // A non-exhaustive enum must leave at least one value unspecified (unless the
    // tag type is `comptime_int`, which has unbounded range).
    if (enum_obj.nonexhaustive and int_tag_ty.toIntern() != .comptime_int_type) {
        const fields_len = enum_obj.field_names.len;
        if (fields_len >= 1 and std.math.log2_int(u64, fields_len) == int_tag_ty.bitSize(zcu)) {
            return sema.fail(&block, block.nodeOffset(.zero), "non-exhaustive enum specifies every value", .{});
        }
    }
}

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -14,6 +14,8 @@ const mem = std.mem;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const log = std.log.scoped(.zcu);
const deps_log = std.log.scoped(.zcu_deps);
const refs_log = std.log.scoped(.zcu_refs);
const BigIntConst = std.math.big.int.Const;
const BigIntMutable = std.math.big.int.Mutable;
const Target = std.Target;
@ -2685,10 +2687,10 @@ pub const LazySrcLoc = struct {
.struct_init, .struct_init_ref => zir.extraData(Zir.Inst.StructInit, inst.data.pl_node.payload_index).data.abs_node,
.struct_init_anon => zir.extraData(Zir.Inst.StructInitAnon, inst.data.pl_node.payload_index).data.abs_node,
.extended => switch (inst.data.extended.opcode) {
.struct_decl => zir.extraData(Zir.Inst.StructDecl, inst.data.extended.operand).data.src_node,
.union_decl => zir.extraData(Zir.Inst.UnionDecl, inst.data.extended.operand).data.src_node,
.enum_decl => zir.extraData(Zir.Inst.EnumDecl, inst.data.extended.operand).data.src_node,
.opaque_decl => zir.extraData(Zir.Inst.OpaqueDecl, inst.data.extended.operand).data.src_node,
.struct_decl => zir.getStructDecl(zir_inst).src_node,
.union_decl => zir.getUnionDecl(zir_inst).src_node,
.enum_decl => zir.getEnumDecl(zir_inst).src_node,
.opaque_decl => zir.getOpaqueDecl(zir_inst).src_node,
.reify_enum => zir.extraData(Zir.Inst.ReifyEnum, inst.data.extended.operand).data.node,
.reify_struct => zir.extraData(Zir.Inst.ReifyStruct, inst.data.extended.operand).data.node,
.reify_union => zir.extraData(Zir.Inst.ReifyUnion, inst.data.extended.operand).data.node,
@ -3063,7 +3065,7 @@ pub fn markDependeeOutdated(
marked_po: enum { not_marked_po, marked_po },
dependee: InternPool.Dependee,
) !void {
log.debug("outdated dependee: {f}", .{zcu.fmtDependee(dependee)});
deps_log.debug("outdated dependee: {f}", .{zcu.fmtDependee(dependee)});
var it = zcu.intern_pool.dependencyIterator(dependee);
while (it.next()) |depender| {
if (zcu.outdated.getPtr(depender)) |po_dep_count| {
@ -3071,9 +3073,9 @@ pub fn markDependeeOutdated(
.not_marked_po => {},
.marked_po => {
po_dep_count.* -= 1;
log.debug("outdated {f} => already outdated {f} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), po_dep_count.* });
deps_log.debug("outdated {f} => already outdated {f} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), po_dep_count.* });
if (po_dep_count.* == 0) {
log.debug("outdated ready: {f}", .{zcu.fmtAnalUnit(depender)});
deps_log.debug("outdated ready: {f}", .{zcu.fmtAnalUnit(depender)});
try zcu.outdated_ready.put(zcu.gpa, depender, {});
}
},
@ -3094,9 +3096,9 @@ pub fn markDependeeOutdated(
depender,
new_po_dep_count,
);
log.debug("outdated {f} => new outdated {f} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), new_po_dep_count });
deps_log.debug("outdated {f} => new outdated {f} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), new_po_dep_count });
if (new_po_dep_count == 0) {
log.debug("outdated ready: {f}", .{zcu.fmtAnalUnit(depender)});
deps_log.debug("outdated ready: {f}", .{zcu.fmtAnalUnit(depender)});
try zcu.outdated_ready.put(zcu.gpa, depender, {});
}
// If this is a Decl and was not previously PO, we must recursively
@ -3109,16 +3111,16 @@ pub fn markDependeeOutdated(
}
pub fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void {
log.debug("up-to-date dependee: {f}", .{zcu.fmtDependee(dependee)});
deps_log.debug("up-to-date dependee: {f}", .{zcu.fmtDependee(dependee)});
var it = zcu.intern_pool.dependencyIterator(dependee);
while (it.next()) |depender| {
if (zcu.outdated.getPtr(depender)) |po_dep_count| {
// This depender is already outdated, but it now has one
// less PO dependency!
po_dep_count.* -= 1;
log.debug("up-to-date {f} => {f} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), po_dep_count.* });
deps_log.debug("up-to-date {f} => {f} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), po_dep_count.* });
if (po_dep_count.* == 0) {
log.debug("outdated ready: {f}", .{zcu.fmtAnalUnit(depender)});
deps_log.debug("outdated ready: {f}", .{zcu.fmtAnalUnit(depender)});
try zcu.outdated_ready.put(zcu.gpa, depender, {});
}
continue;
@ -3132,11 +3134,11 @@ pub fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void {
};
if (ptr.* > 1) {
ptr.* -= 1;
log.debug("up-to-date {f} => {f} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), ptr.* });
deps_log.debug("up-to-date {f} => {f} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), ptr.* });
continue;
}
log.debug("up-to-date {f} => {f} po_deps=0 (up-to-date)", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender) });
deps_log.debug("up-to-date {f} => {f} po_deps=0 (up-to-date)", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender) });
// This dependency is no longer PO, i.e. is known to be up-to-date.
assert(zcu.potentially_outdated.swapRemove(depender));
@ -3146,8 +3148,9 @@ pub fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void {
.@"comptime" => {},
.nav_val => |nav| try zcu.markPoDependeeUpToDate(.{ .nav_val = nav }),
.nav_ty => |nav| try zcu.markPoDependeeUpToDate(.{ .nav_ty = nav }),
.type => |ty| try zcu.markPoDependeeUpToDate(.{ .interned = ty }),
.func => |func| try zcu.markPoDependeeUpToDate(.{ .interned = func }),
.type_layout => |ty| try zcu.markPoDependeeUpToDate(.{ .type_layout = ty }),
.type_inits => |ty| try zcu.markPoDependeeUpToDate(.{ .type_inits = ty }),
.func => |func| try zcu.markPoDependeeUpToDate(.{ .func_ies = func }),
.memoized_state => |stage| try zcu.markPoDependeeUpToDate(.{ .memoized_state = stage }),
}
}
@ -3161,11 +3164,12 @@ fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUni
.@"comptime" => return, // analysis of a comptime decl can't outdate any dependencies
.nav_val => |nav| .{ .nav_val = nav },
.nav_ty => |nav| .{ .nav_ty = nav },
.type => |ty| .{ .interned = ty },
.func => |func_index| .{ .interned = func_index }, // IES
.type_layout => |ty| .{ .type_layout = ty },
.type_inits => |ty| .{ .type_inits = ty },
.func => |func_index| .{ .func_ies = func_index },
.memoized_state => |stage| .{ .memoized_state = stage },
};
log.debug("potentially outdated dependee: {f}", .{zcu.fmtDependee(dependee)});
deps_log.debug("potentially outdated dependee: {f}", .{zcu.fmtDependee(dependee)});
var it = ip.dependencyIterator(dependee);
while (it.next()) |po| {
if (zcu.outdated.getPtr(po)) |po_dep_count| {
@ -3175,17 +3179,17 @@ fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUni
_ = zcu.outdated_ready.swapRemove(po);
}
po_dep_count.* += 1;
log.debug("po {f} => {f} [outdated] po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po), po_dep_count.* });
deps_log.debug("po {f} => {f} [outdated] po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po), po_dep_count.* });
continue;
}
if (zcu.potentially_outdated.getPtr(po)) |n| {
// There is now one more PO dependency.
n.* += 1;
log.debug("po {f} => {f} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po), n.* });
deps_log.debug("po {f} => {f} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po), n.* });
continue;
}
try zcu.potentially_outdated.putNoClobber(zcu.gpa, po, 1);
log.debug("po {f} => {f} po_deps=1", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po) });
deps_log.debug("po {f} => {f} po_deps=1", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po) });
// This AnalUnit was not already PO, so we must recursively mark its dependers as also PO.
try zcu.markTransitiveDependersPotentiallyOutdated(po);
}
@ -3240,13 +3244,15 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit {
var chosen_unit: ?AnalUnit = null;
var chosen_unit_dependers: u32 = undefined;
// MLUGG TODO: i'm 99% sure this is now impossible. check!!!
inline for (.{ zcu.outdated.keys(), zcu.potentially_outdated.keys() }) |outdated_units| {
for (outdated_units) |unit| {
var n: u32 = 0;
var it = ip.dependencyIterator(switch (unit.unwrap()) {
.func => continue, // a `func` definitely can't be causing the loop so it is a bad choice
.@"comptime" => continue, // a `comptime` block can't even be depended on so it is a terrible choice
.type => |ty| .{ .interned = ty },
.type_layout => |ty| .{ .type_layout = ty },
.type_inits => |ty| .{ .type_inits = ty },
.nav_val => |nav| .{ .nav_val = nav },
.nav_ty => |nav| .{ .nav_ty = nav },
.memoized_state => {
@ -3377,25 +3383,21 @@ pub fn mapOldZirToNew(
var comptime_decls: std.ArrayList(Zir.Inst.Index) = .empty;
defer comptime_decls.deinit(gpa);
{
var old_decl_it = old_zir.declIterator(match_item.old_inst);
while (old_decl_it.next()) |old_decl_inst| {
const old_decl = old_zir.getDeclaration(old_decl_inst);
switch (old_decl.kind) {
.@"comptime" => try comptime_decls.append(gpa, old_decl_inst),
.unnamed_test => try unnamed_tests.append(gpa, old_decl_inst),
.@"test" => try named_tests.put(gpa, old_zir.nullTerminatedString(old_decl.name), old_decl_inst),
.decltest => try named_decltests.put(gpa, old_zir.nullTerminatedString(old_decl.name), old_decl_inst),
.@"const", .@"var" => try named_decls.put(gpa, old_zir.nullTerminatedString(old_decl.name), old_decl_inst),
}
for (old_zir.typeDecls(match_item.old_inst)) |old_decl_inst| {
const old_decl = old_zir.getDeclaration(old_decl_inst);
switch (old_decl.kind) {
.@"comptime" => try comptime_decls.append(gpa, old_decl_inst),
.unnamed_test => try unnamed_tests.append(gpa, old_decl_inst),
.@"test" => try named_tests.put(gpa, old_zir.nullTerminatedString(old_decl.name), old_decl_inst),
.decltest => try named_decltests.put(gpa, old_zir.nullTerminatedString(old_decl.name), old_decl_inst),
.@"const", .@"var" => try named_decls.put(gpa, old_zir.nullTerminatedString(old_decl.name), old_decl_inst),
}
}
var unnamed_test_idx: u32 = 0;
var comptime_decl_idx: u32 = 0;
var new_decl_it = new_zir.declIterator(match_item.new_inst);
while (new_decl_it.next()) |new_decl_inst| {
for (new_zir.typeDecls(match_item.new_inst)) |new_decl_inst| {
const new_decl = new_zir.getDeclaration(new_decl_inst);
// Attempt to match this to a declaration in the old ZIR:
// * For named declarations (`const`/`var`/`fn`), we match based on name.
@ -3494,7 +3496,7 @@ pub fn ensureFuncBodyAnalysisQueued(zcu: *Zcu, func_index: InternPool.Index) !vo
}
try zcu.func_body_analysis_queued.ensureUnusedCapacity(zcu.gpa, 1);
try zcu.comp.queueJob(.{ .analyze_func = func_index });
try zcu.comp.queueJob(.{ .analyze_unit = .wrap(.{ .func = func_index }) });
zcu.func_body_analysis_queued.putAssumeCapacityNoClobber(func_index, {});
}
@ -3513,7 +3515,7 @@ pub fn ensureNavValAnalysisQueued(zcu: *Zcu, nav_id: InternPool.Nav.Index) !void
}
try zcu.nav_val_analysis_queued.ensureUnusedCapacity(zcu.gpa, 1);
try zcu.comp.queueJob(.{ .analyze_comptime_unit = .wrap(.{ .nav_val = nav_id }) });
try zcu.comp.queueJob(.{ .analyze_unit = .wrap(.{ .nav_val = nav_id }) });
zcu.nav_val_analysis_queued.putAssumeCapacityNoClobber(nav_id, {});
}
@ -3908,8 +3910,7 @@ pub fn atomicPtrAlignment(
return error.BadType;
}
/// Returns null in the following cases:
/// * Not a struct.
/// Returns null if `ty` is not a struct.
pub fn typeToStruct(zcu: *const Zcu, ty: Type) ?InternPool.LoadedStructType {
if (ty.ip_index == .none) return null;
const ip = &zcu.intern_pool;
@ -3936,7 +3937,6 @@ pub fn structPackedFieldBitOffset(
) u16 {
const ip = &zcu.intern_pool;
assert(struct_type.layout == .@"packed");
assert(struct_type.haveLayout(ip));
var bit_sum: u64 = 0;
for (0..struct_type.field_types.len) |i| {
if (i == field_index) {
@ -3995,8 +3995,10 @@ pub const UnionLayout = struct {
pub fn unionTagFieldIndex(zcu: *const Zcu, loaded_union: InternPool.LoadedUnionType, enum_tag: Value) ?u32 {
const ip = &zcu.intern_pool;
if (enum_tag.toIntern() == .none) return null;
assert(ip.typeOf(enum_tag.toIntern()) == loaded_union.enum_tag_ty);
return loaded_union.loadTagType(ip).tagValueIndex(ip, enum_tag.toIntern());
const enum_tag_key = ip.indexToKey(enum_tag.toIntern()).enum_tag;
assert(enum_tag_key.ty == loaded_union.enum_tag_type);
const loaded_enum = ip.loadEnumType(loaded_union.enum_tag_type);
return loaded_enum.tagValueIndex(ip, enum_tag_key.int);
}
pub const ResolvedReference = struct {
@ -4049,31 +4051,36 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoArrayHashMapUnmanaged(AnalUnit, ?R
const referencer = types.values()[type_idx];
type_idx += 1;
log.debug("handle type '{f}'", .{Type.fromInterned(ty).containerTypeName(ip).fmt(ip)});
refs_log.debug("handle type '{f}'", .{Type.fromInterned(ty).containerTypeName(ip).fmt(ip)});
// If this type undergoes type resolution, the corresponding `AnalUnit` is automatically referenced.
const has_resolution: bool = switch (ip.indexToKey(ty)) {
.struct_type, .union_type => true,
.enum_type => |k| k != .generated_tag,
.opaque_type => false,
// If this type undergoes type resolution, the corresponding `AnalUnit`s are automatically referenced.
const has_layout: bool, const has_inits: bool = switch (ip.indexToKey(ty)) {
.struct_type => .{ true, true },
.union_type => .{ true, false },
.enum_type => .{ false, true },
.opaque_type => .{ false, false },
else => unreachable,
};
if (has_resolution) {
if (has_layout) {
// this should only be referenced by the type
const unit: AnalUnit = .wrap(.{ .type = ty });
const unit: AnalUnit = .wrap(.{ .type_layout = ty });
try units.putNoClobber(gpa, unit, referencer);
}
if (has_inits) {
// this should only be referenced by the type
const unit: AnalUnit = .wrap(.{ .type_inits = ty });
try units.putNoClobber(gpa, unit, referencer);
}
// If this is a union with a generated tag, its tag type is automatically referenced.
// We don't add this reference for non-generated tags, as those will already be referenced via the union's type resolution, with a better source location.
if (zcu.typeToUnion(Type.fromInterned(ty))) |union_obj| {
const tag_ty = union_obj.enum_tag_ty;
if (tag_ty != .none) {
if (ip.indexToKey(tag_ty).enum_type == .generated_tag) {
const gop = try types.getOrPut(gpa, tag_ty);
if (!gop.found_existing) gop.value_ptr.* = referencer;
}
}
implicit_tag: {
const loaded_union = zcu.typeToUnion(.fromInterned(ty)) orelse break :implicit_tag;
const tag_ty = loaded_union.enum_tag_type;
if (ip.indexToKey(tag_ty).enum_type != .generated_union_tag) break :implicit_tag;
const gop = try types.getOrPut(gpa, tag_ty);
if (gop.found_existing) break :implicit_tag;
gop.value_ptr.* = referencer;
}
// Queue any decls within this type which would be automatically analyzed.
@ -4084,7 +4091,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoArrayHashMapUnmanaged(AnalUnit, ?R
const unit: AnalUnit = .wrap(.{ .@"comptime" = cu });
const gop = try units.getOrPut(gpa, unit);
if (!gop.found_existing) {
log.debug("type '{f}': ref comptime %{}", .{
refs_log.debug("type '{f}': ref comptime %{}", .{
Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
@intFromEnum(ip.getComptimeUnit(cu).zir_index.resolve(ip) orelse continue),
});
@ -4118,7 +4125,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoArrayHashMapUnmanaged(AnalUnit, ?R
{
const gop = try units.getOrPut(gpa, .wrap(.{ .nav_val = nav_id }));
if (!gop.found_existing) {
log.debug("type '{f}': ref test %{}", .{
refs_log.debug("type '{f}': ref test %{}", .{
Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
@intFromEnum(inst_info.inst),
});
@ -4141,7 +4148,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoArrayHashMapUnmanaged(AnalUnit, ?R
const unit: AnalUnit = .wrap(.{ .nav_val = nav });
const gop = try units.getOrPut(gpa, unit);
if (!gop.found_existing) {
log.debug("type '{f}': ref named %{}", .{
refs_log.debug("type '{f}': ref named %{}", .{
Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
@intFromEnum(inst_info.inst),
});
@ -4158,7 +4165,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoArrayHashMapUnmanaged(AnalUnit, ?R
const unit: AnalUnit = .wrap(.{ .nav_val = nav });
const gop = try units.getOrPut(gpa, unit);
if (!gop.found_existing) {
log.debug("type '{f}': ref named %{}", .{
refs_log.debug("type '{f}': ref named %{}", .{
Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
@intFromEnum(inst_info.inst),
});
@ -4177,14 +4184,14 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoArrayHashMapUnmanaged(AnalUnit, ?R
const other: AnalUnit = .wrap(switch (unit.unwrap()) {
.nav_val => |n| .{ .nav_ty = n },
.nav_ty => |n| .{ .nav_val = n },
.@"comptime", .type, .func, .memoized_state => break :queue_paired,
.@"comptime", .type_layout, .type_inits, .func, .memoized_state => break :queue_paired,
});
const gop = try units.getOrPut(gpa, other);
if (gop.found_existing) break :queue_paired;
gop.value_ptr.* = units.values()[unit_idx]; // same reference location
}
log.debug("handle unit '{f}'", .{zcu.fmtAnalUnit(unit)});
refs_log.debug("handle unit '{f}'", .{zcu.fmtAnalUnit(unit)});
if (zcu.reference_table.get(unit)) |first_ref_idx| {
assert(first_ref_idx != std.math.maxInt(u32));
@ -4193,7 +4200,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoArrayHashMapUnmanaged(AnalUnit, ?R
const ref = zcu.all_references.items[ref_idx];
const gop = try units.getOrPut(gpa, ref.referenced);
if (!gop.found_existing) {
log.debug("unit '{f}': ref unit '{f}'", .{
refs_log.debug("unit '{f}': ref unit '{f}'", .{
zcu.fmtAnalUnit(unit),
zcu.fmtAnalUnit(ref.referenced),
});
@ -4213,7 +4220,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoArrayHashMapUnmanaged(AnalUnit, ?R
const ref = zcu.all_type_references.items[ref_idx];
const gop = try types.getOrPut(gpa, ref.referenced);
if (!gop.found_existing) {
log.debug("unit '{f}': ref type '{f}'", .{
refs_log.debug("unit '{f}': ref type '{f}'", .{
zcu.fmtAnalUnit(unit),
Type.fromInterned(ref.referenced).containerTypeName(ip).fmt(ip),
});
@ -4323,9 +4330,8 @@ fn formatAnalUnit(data: FormatAnalUnit, writer: *Io.Writer) Io.Writer.Error!void
return writer.print("comptime(inst=<lost> [{}])", .{@intFromEnum(cu_id)});
}
},
.nav_val => |nav| return writer.print("nav_val('{f}' [{}])", .{ ip.getNav(nav).fqn.fmt(ip), @intFromEnum(nav) }),
.nav_ty => |nav| return writer.print("nav_ty('{f}' [{}])", .{ ip.getNav(nav).fqn.fmt(ip), @intFromEnum(nav) }),
.type => |ty| return writer.print("ty('{f}' [{}])", .{ Type.fromInterned(ty).containerTypeName(ip).fmt(ip), @intFromEnum(ty) }),
.nav_val, .nav_ty => |nav, tag| return writer.print("{t}('{f}' [{}])", .{ tag, ip.getNav(nav).fqn.fmt(ip), @intFromEnum(nav) }),
.type_layout, .type_inits => |ty, tag| return writer.print("{t}('{f}' [{}])", .{ tag, Type.fromInterned(ty).containerTypeName(ip).fmt(ip), @intFromEnum(ty) }),
.func => |func| {
const nav = zcu.funcInfo(func).owner_nav;
return writer.print("func('{f}' [{}])", .{ ip.getNav(nav).fqn.fmt(ip), @intFromEnum(func) });
@ -4347,18 +4353,17 @@ fn formatDependee(data: FormatDependee, writer: *Io.Writer) Io.Writer.Error!void
const file_path = zcu.fileByIndex(info.file).path;
return writer.print("inst('{f}', %{d})", .{ file_path.fmt(zcu.comp), @intFromEnum(info.inst) });
},
.nav_val => |nav| {
.nav_val, .nav_ty => |nav, tag| {
const fqn = ip.getNav(nav).fqn;
return writer.print("nav_val('{f}')", .{fqn.fmt(ip)});
return writer.print("{t}('{f}')", .{ tag, fqn.fmt(ip) });
},
.nav_ty => |nav| {
const fqn = ip.getNav(nav).fqn;
return writer.print("nav_ty('{f}')", .{fqn.fmt(ip)});
.type_layout, .type_inits => |ip_index, tag| {
const name = Type.fromInterned(ip_index).containerTypeName(ip);
return writer.print("{t}('{f}')", .{ tag, name.fmt(ip) });
},
.interned => |ip_index| switch (ip.indexToKey(ip_index)) {
.struct_type, .union_type, .enum_type => return writer.print("type('{f}')", .{Type.fromInterned(ip_index).containerTypeName(ip).fmt(ip)}),
.func => |f| return writer.print("ies('{f}')", .{ip.getNav(f.owner_nav).fqn.fmt(ip)}),
else => unreachable,
.func_ies => |ip_index| {
const fqn = ip.getNav(ip.indexToKey(ip_index).func.owner_nav).fqn;
return writer.print("func_ies('{f}')", .{fqn.fmt(ip)});
},
.zon_file => |file| {
const file_path = zcu.fileByIndex(file).path;

File diff suppressed because it is too large Load diff

View file

@ -1088,7 +1088,7 @@ pub fn lowerValue(pt: Zcu.PerThread, val: Value, target: *const std.Target) Allo
return .{ .immediate = fn_ty.abiAlignment(zcu).toByteUnits().? };
}
} else if (ty.zigTypeTag(zcu) == .pointer) {
const elem_ty = ty.elemType2(zcu);
const elem_ty = ty.childType(zcu);
if (!elem_ty.hasRuntimeBits(zcu)) {
return .{ .immediate = elem_ty.abiAlignment(zcu).toByteUnits().? };
}

View file

@ -2464,7 +2464,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
const ty_pl = air.data(air.inst_index).ty_pl;
const bin_op = isel.air.extraData(Air.Bin, ty_pl.payload).data;
const elem_size = ty_pl.ty.toType().elemType2(zcu).abiSize(zcu);
const elem_size = ty_pl.ty.toType().childType(zcu).abiSize(zcu);
const base_vi = try isel.use(bin_op.lhs);
var base_part_it = base_vi.field(ty_pl.ty.toType(), 0, 8);
@ -6145,7 +6145,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
} else {
const elem_ptr_ra = try isel.allocIntReg();
defer isel.freeReg(elem_ptr_ra);
if (!try elem_vi.value.load(isel, slice_ty.elemType2(zcu), elem_ptr_ra, .{
if (!try elem_vi.value.load(isel, slice_ty.childType(zcu), elem_ptr_ra, .{
.@"volatile" = ptr_info.flags.is_volatile,
})) break :unused;
const slice_vi = try isel.use(bin_op.lhs);
@ -6253,7 +6253,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
} else {
const elem_ptr_ra = try isel.allocIntReg();
defer isel.freeReg(elem_ptr_ra);
if (!try elem_vi.value.load(isel, ptr_ty.elemType2(zcu), elem_ptr_ra, .{
if (!try elem_vi.value.load(isel, ptr_ty.childType(zcu), elem_ptr_ra, .{
.@"volatile" = ptr_info.flags.is_volatile,
})) break :unused;
const base_vi = try isel.use(bin_op.lhs);
@ -6594,7 +6594,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
if (try isel.hasRepeatedByteRepr(.fromInterned(fill_val))) |fill_byte|
break :fill_byte .{ .constant = fill_byte };
}
switch (dst_ty.elemType2(zcu).abiSize(zcu)) {
switch (dst_ty.indexablePtrElem(zcu).abiSize(zcu)) {
0 => unreachable,
1 => break :fill_byte .{ .value = bin_op.rhs },
2, 4, 8 => |size| {

View file

@ -3676,7 +3676,7 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
const inst_ty = f.typeOfIndex(inst);
const ptr_ty = f.typeOf(bin_op.lhs);
const elem_has_bits = ptr_ty.elemType2(zcu).hasRuntimeBitsIgnoreComptime(zcu);
const elem_has_bits = ptr_ty.indexablePtrElem(zcu).hasRuntimeBitsIgnoreComptime(zcu);
const ptr = try f.resolveInst(bin_op.lhs);
const index = try f.resolveInst(bin_op.rhs);
@ -3738,7 +3738,7 @@ fn airSliceElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
const inst_ty = f.typeOfIndex(inst);
const slice_ty = f.typeOf(bin_op.lhs);
const elem_ty = slice_ty.elemType2(zcu);
const elem_ty = slice_ty.childType(zcu);
const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(zcu);
const slice = try f.resolveInst(bin_op.lhs);
@ -4502,7 +4502,7 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue {
const inst_ty = f.typeOfIndex(inst);
const inst_scalar_ty = inst_ty.scalarType(zcu);
const elem_ty = inst_scalar_ty.elemType2(zcu);
const elem_ty = inst_scalar_ty.indexablePtrElem(zcu);
if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) return f.moveCValue(inst, inst_ty, lhs);
const inst_scalar_ctype = try f.ctypeFromType(inst_scalar_ty, .complete);
@ -7037,7 +7037,7 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index, function_paren: []const u8) !CV
try w.writeAll(", ");
try writeArrayLen(f, dest_ptr, dest_ty);
try w.writeAll(" * sizeof(");
try f.renderType(w, dest_ty.elemType2(zcu));
try f.renderType(w, dest_ty.indexablePtrElem(zcu));
try w.writeAll("));");
try f.object.newline();

View file

@ -2112,7 +2112,7 @@ pub const Object = struct {
return debug_array_type;
},
.vector => {
const elem_ty = ty.elemType2(zcu);
const elem_ty = ty.childType(zcu);
// Vector elements cannot be padded since that would make
// @bitSizOf(elem) * len > @bitSizOf(vec).
// Neither gdb nor lldb seem to be able to display non-byte sized

View file

@ -44,7 +44,7 @@ pub fn classifyType(ty: Type, zcu: *Zcu, ctx: Context) Class {
return .byval;
},
.vector => {
const elem_type = ty.elemType2(zcu);
const elem_type = ty.childType(zcu);
switch (elem_type.zigTypeTag(zcu)) {
.bool, .int => {
const bit_size = ty.bitSize(zcu);

View file

@ -2673,7 +2673,7 @@ fn genBinOp(
defer func.register_manager.unlockReg(tmp_lock);
// RISC-V has no immediate mul, so we copy the size to a temporary register
const elem_size = lhs_ty.elemType2(zcu).abiSize(zcu);
const elem_size = lhs_ty.indexablePtrElem(zcu).abiSize(zcu);
const elem_size_reg = try func.copyToTmpRegister(Type.u64, .{ .immediate = elem_size });
try func.genBinOp(
@ -3913,9 +3913,8 @@ fn airPtrElemVal(func: *Func, inst: Air.Inst.Index) !void {
const base_ptr_ty = func.typeOf(bin_op.lhs);
const result: MCValue = if (!is_volatile and func.liveness.isUnused(inst)) .unreach else result: {
const elem_ty = base_ptr_ty.elemType2(zcu);
const elem_ty = base_ptr_ty.indexablePtrElem(zcu);
if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none;
const base_ptr_mcv = try func.resolveInst(bin_op.lhs);
const base_ptr_lock: ?RegisterLock = switch (base_ptr_mcv) {
.register => |reg| func.register_manager.lockRegAssumeUnused(reg),

View file

@ -4381,7 +4381,7 @@ fn airSliceElemVal(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
fn ptrElemPtr(cg: *CodeGen, ptr_ty: Type, ptr_id: Id, index_id: Id) !Id {
const zcu = cg.module.zcu;
// Construct new pointer type for the resulting pointer
const elem_ty = ptr_ty.elemType2(zcu); // use elemType() so that we get T for *[N]T.
const elem_ty = ptr_ty.indexablePtrElem(zcu);
const elem_ty_id = try cg.resolveType(elem_ty, .indirect);
const elem_ptr_ty_id = try cg.module.ptrType(elem_ty_id, cg.module.storageClass(ptr_ty.ptrAddressSpace(zcu)));
if (ptr_ty.isSinglePointer(zcu)) {

View file

@ -43261,7 +43261,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
var ops = try cg.tempsFromOperands(inst, .{ bin_op.lhs, bin_op.rhs });
try ops[0].toSlicePtr(cg);
var res: [1]Temp = undefined;
if (!hack_around_sema_opv_bugs or ty_pl.ty.toType().elemType2(zcu).hasRuntimeBitsIgnoreComptime(zcu)) cg.select(&res, &.{ty_pl.ty.toType()}, &ops, comptime &.{ .{
if (!hack_around_sema_opv_bugs or ty_pl.ty.toType().childType(zcu).hasRuntimeBitsIgnoreComptime(zcu)) cg.select(&res, &.{ty_pl.ty.toType()}, &ops, comptime &.{ .{
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
@ -43375,7 +43375,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
var ops = try cg.tempsFromOperands(inst, .{ bin_op.lhs, bin_op.rhs });
try ops[0].toSlicePtr(cg);
var res: [1]Temp = undefined;
if (!hack_around_sema_opv_bugs or ty_pl.ty.toType().elemType2(zcu).hasRuntimeBitsIgnoreComptime(zcu)) cg.select(&res, &.{ty_pl.ty.toType()}, &ops, comptime &.{ .{
if (!hack_around_sema_opv_bugs or ty_pl.ty.toType().childType(zcu).hasRuntimeBitsIgnoreComptime(zcu)) cg.select(&res, &.{ty_pl.ty.toType()}, &ops, comptime &.{ .{
.patterns = &.{
.{ .src = .{ .to_gpr, .simm32, .none } },
},
@ -103926,7 +103926,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.array_elem_val, .legalize_vec_elem_val => {
const bin_op = air_datas[@intFromEnum(inst)].bin_op;
const array_ty = cg.typeOf(bin_op.lhs);
const res_ty = array_ty.elemType2(zcu);
const res_ty = array_ty.childType(zcu);
var ops = try cg.tempsFromOperands(inst, .{ bin_op.lhs, bin_op.rhs });
var res: [1]Temp = undefined;
cg.select(&res, &.{res_ty}, &ops, comptime &.{ .{
@ -104121,7 +104121,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
},
.slice_elem_val, .ptr_elem_val => {
const bin_op = air_datas[@intFromEnum(inst)].bin_op;
const res_ty = cg.typeOf(bin_op.lhs).elemType2(zcu);
const res_ty = cg.typeOf(bin_op.lhs).indexablePtrElem(zcu);
var ops = try cg.tempsFromOperands(inst, .{ bin_op.lhs, bin_op.rhs });
try ops[0].toSlicePtr(cg);
var res: [1]Temp = undefined;
@ -187919,7 +187919,6 @@ const Select = struct {
unsigned_int: Memory.Size,
elem_size_is: u8,
po2_elem_size,
elem_int: Memory.Size,
const OfIsSizes = struct { of: Memory.Size, is: Memory.Size };
@ -188178,12 +188177,8 @@ const Select = struct {
.signed => false,
.unsigned => size.bitSize(cg.target) >= int_info.bits,
} else false,
.elem_size_is => |size| size == ty.elemType2(zcu).abiSize(zcu),
.po2_elem_size => std.math.isPowerOfTwo(ty.elemType2(zcu).abiSize(zcu)),
.elem_int => |size| if (cg.intInfo(ty.elemType2(zcu))) |elem_int_info|
size.bitSize(cg.target) >= elem_int_info.bits
else
false,
.elem_size_is => |size| size == ty.indexablePtrElem(zcu).abiSize(zcu),
.po2_elem_size => std.math.isPowerOfTwo(ty.indexablePtrElem(zcu).abiSize(zcu)),
};
}
};
@ -189918,20 +189913,20 @@ const Select = struct {
.dst0_size => @intCast(Select.Operand.Ref.dst0.typeOf(s).abiSize(s.cg.pt.zcu)),
.delta_size => @intCast(@as(SignedImm, @intCast(op.flags.base.ref.typeOf(s).abiSize(s.cg.pt.zcu))) -
@as(SignedImm, @intCast(op.flags.index.ref.typeOf(s).abiSize(s.cg.pt.zcu)))),
.delta_elem_size => @intCast(@as(SignedImm, @intCast(op.flags.base.ref.typeOf(s).elemType2(s.cg.pt.zcu).abiSize(s.cg.pt.zcu))) -
@as(SignedImm, @intCast(op.flags.index.ref.typeOf(s).elemType2(s.cg.pt.zcu).abiSize(s.cg.pt.zcu)))),
.delta_elem_size => @intCast(@as(SignedImm, @intCast(op.flags.base.ref.typeOf(s).scalarType(s.cg.pt.zcu).abiSize(s.cg.pt.zcu))) -
@as(SignedImm, @intCast(op.flags.index.ref.typeOf(s).scalarType(s.cg.pt.zcu).abiSize(s.cg.pt.zcu)))),
.unaligned_size => @intCast(s.cg.unalignedSize(op.flags.base.ref.typeOf(s))),
.unaligned_size_add_elem_size => {
const ty = op.flags.base.ref.typeOf(s);
break :lhs @intCast(s.cg.unalignedSize(ty) + ty.elemType2(s.cg.pt.zcu).abiSize(s.cg.pt.zcu));
break :lhs @intCast(s.cg.unalignedSize(ty) + ty.scalarType(s.cg.pt.zcu).abiSize(s.cg.pt.zcu));
},
.unaligned_size_sub_elem_size => {
const ty = op.flags.base.ref.typeOf(s);
break :lhs @intCast(s.cg.unalignedSize(ty) - ty.elemType2(s.cg.pt.zcu).abiSize(s.cg.pt.zcu));
break :lhs @intCast(s.cg.unalignedSize(ty) - ty.scalarType(s.cg.pt.zcu).abiSize(s.cg.pt.zcu));
},
.unaligned_size_sub_2_elem_size => {
const ty = op.flags.base.ref.typeOf(s);
break :lhs @intCast(s.cg.unalignedSize(ty) - ty.elemType2(s.cg.pt.zcu).abiSize(s.cg.pt.zcu) * 2);
break :lhs @intCast(s.cg.unalignedSize(ty) - ty.scalarType(s.cg.pt.zcu).abiSize(s.cg.pt.zcu) * 2);
},
.bit_size => @intCast(s.cg.nonBoolScalarBitSize(op.flags.base.ref.typeOf(s))),
.src0_bit_size => @intCast(s.cg.nonBoolScalarBitSize(Select.Operand.Ref.src0.typeOf(s))),
@ -189944,10 +189939,10 @@ const Select = struct {
op.flags.base.ref.typeOf(s).scalarType(s.cg.pt.zcu).abiSize(s.cg.pt.zcu),
@divExact(op.flags.base.size.bitSize(s.cg.target), 8),
)),
.elem_size => @intCast(op.flags.base.ref.typeOf(s).elemType2(s.cg.pt.zcu).abiSize(s.cg.pt.zcu)),
.src0_elem_size => @intCast(Select.Operand.Ref.src0.typeOf(s).elemType2(s.cg.pt.zcu).abiSize(s.cg.pt.zcu)),
.dst0_elem_size => @intCast(Select.Operand.Ref.dst0.typeOf(s).elemType2(s.cg.pt.zcu).abiSize(s.cg.pt.zcu)),
.src0_elem_size_mul_src1 => @intCast(Select.Operand.Ref.src0.typeOf(s).elemType2(s.cg.pt.zcu).abiSize(s.cg.pt.zcu) *
.elem_size => @intCast(op.flags.base.ref.typeOf(s).scalarType(s.cg.pt.zcu).abiSize(s.cg.pt.zcu)),
.src0_elem_size => @intCast(Select.Operand.Ref.src0.typeOf(s).scalarType(s.cg.pt.zcu).abiSize(s.cg.pt.zcu)),
.dst0_elem_size => @intCast(Select.Operand.Ref.dst0.typeOf(s).scalarType(s.cg.pt.zcu).abiSize(s.cg.pt.zcu)),
.src0_elem_size_mul_src1 => @intCast(Select.Operand.Ref.src0.typeOf(s).indexableElem(s.cg.pt.zcu).abiSize(s.cg.pt.zcu) *
Select.Operand.Ref.src1.valueOf(s).immediate),
.vector_index => switch (op.flags.base.ref.typeOf(s).ptrInfo(s.cg.pt.zcu).flags.vector_index) {
.none => unreachable,
@ -189956,7 +189951,7 @@ const Select = struct {
.src1 => @intCast(Select.Operand.Ref.src1.valueOf(s).immediate),
.src1_sub_bit_size => @as(SignedImm, @intCast(Select.Operand.Ref.src1.valueOf(s).immediate)) -
@as(SignedImm, @intCast(s.cg.nonBoolScalarBitSize(op.flags.base.ref.typeOf(s)))),
.log2_src0_elem_size => @intCast(std.math.log2(Select.Operand.Ref.src0.typeOf(s).elemType2(s.cg.pt.zcu).abiSize(s.cg.pt.zcu))),
.log2_src0_elem_size => @intCast(std.math.log2(Select.Operand.Ref.src0.typeOf(s).scalarType(s.cg.pt.zcu).abiSize(s.cg.pt.zcu))),
.elem_mask => @as(u8, std.math.maxInt(u8)) >> @intCast(
8 - ((s.cg.unalignedSize(op.flags.base.ref.typeOf(s)) - 1) %
@divExact(op.flags.base.size.bitSize(s.cg.target), 8) + 1 >>

View file

@ -4575,10 +4575,10 @@ fn updateContainerTypeWriterError(
const name_strat: Zir.Inst.NameStrategy = switch (decl_inst.tag) {
.struct_init, .struct_init_ref, .struct_init_anon => .anon,
.extended => switch (decl_inst.data.extended.opcode) {
.struct_decl => @as(Zir.Inst.StructDecl.Small, @bitCast(decl_inst.data.extended.small)).name_strategy,
.enum_decl => @as(Zir.Inst.EnumDecl.Small, @bitCast(decl_inst.data.extended.small)).name_strategy,
.union_decl => @as(Zir.Inst.UnionDecl.Small, @bitCast(decl_inst.data.extended.small)).name_strategy,
.opaque_decl => @as(Zir.Inst.OpaqueDecl.Small, @bitCast(decl_inst.data.extended.small)).name_strategy,
.struct_decl => file.zir.?.getStructDecl(inst_info.inst).name_strategy,
.union_decl => file.zir.?.getUnionDecl(inst_info.inst).name_strategy,
.enum_decl => file.zir.?.getEnumDecl(inst_info.inst).name_strategy,
.opaque_decl => file.zir.?.getOpaqueDecl(inst_info.inst).name_strategy,
.reify_enum,
.reify_struct,

View file

@ -18,7 +18,7 @@ pub const MutableValue = union(enum) {
opt_payload: SubValue,
/// An aggregate consisting of a single repeated value.
repeated: SubValue,
/// An aggregate of `u8` consisting of "plain" bytes (no lazy or undefined elements).
/// An aggregate of `u8` consisting of "plain" bytes (no undefined elements).
bytes: Bytes,
/// An aggregate with arbitrary sub-values.
aggregate: Aggregate,
@ -415,16 +415,7 @@ pub const MutableValue = union(enum) {
} else if (!is_struct and is_trivial_int and Type.fromInterned(a.ty).childType(zcu).toIntern() == .u8_type) {
// See if we can switch to `bytes` repr
for (a.elems) |e| {
switch (e) {
else => break,
.interned => |ip_index| switch (ip.indexToKey(ip_index)) {
else => break,
.int => |int| switch (int.storage) {
.u64, .i64, .big_int => {},
.lazy_align, .lazy_size => break,
},
},
}
if (!e.isTrivialInt(zcu)) break;
} else {
const bytes = try arena.alloc(u8, a.elems.len);
for (a.elems, bytes) |elem_val, *b| {
@ -494,10 +485,7 @@ pub const MutableValue = union(enum) {
else => false,
.interned => |ip_index| switch (zcu.intern_pool.indexToKey(ip_index)) {
else => false,
.int => |int| switch (int.storage) {
.u64, .i64, .big_int => true,
.lazy_align, .lazy_size => false,
},
.int => true,
},
};
}

View file

@ -81,14 +81,6 @@ pub fn print(
.int => |int| switch (int.storage) {
inline .u64, .i64 => |x| try writer.print("{d}", .{x}),
.big_int => |x| try writer.print("{d}", .{x}),
.lazy_align => |ty| if (opt_sema != null) {
const a = try Type.fromInterned(ty).abiAlignmentSema(pt);
try writer.print("{d}", .{a.toByteUnits() orelse 0});
} else try writer.print("@alignOf({f})", .{Type.fromInterned(ty).fmt(pt)}),
.lazy_size => |ty| if (opt_sema != null) {
const s = try Type.fromInterned(ty).abiSizeSema(pt);
try writer.print("{d}", .{s});
} else try writer.print("@sizeOf({f})", .{Type.fromInterned(ty).fmt(pt)}),
},
.err => |err| try writer.print("error.{f}", .{
err.name.fmt(ip),
@ -104,8 +96,8 @@ pub fn print(
}),
.enum_tag => |enum_tag| {
const enum_type = ip.loadEnumType(val.typeOf(zcu).toIntern());
if (enum_type.tagValueIndex(ip, val.toIntern())) |tag_index| {
return writer.print(".{f}", .{enum_type.names.get(ip)[tag_index].fmt(ip)});
if (enum_type.tagValueIndex(ip, enum_tag.int)) |tag_index| {
return writer.print(".{f}", .{enum_type.field_names.get(ip)[tag_index].fmt(ip)});
}
if (level == 0) {
return writer.writeAll("@enumFromInt(...)");