compiler: represent bitpacks as their backing integer

Now that https://github.com/ziglang/zig/issues/24657 has been
implemented, the compiler can simplify its internal representation of
comptime-known `packed struct` and `packed union` values. Instead of
storing them field-wise, we can simply store their backing integer
value. This simplifies many operations and improves efficiency in some
cases.
This commit is contained in:
Matthew Lugg 2026-01-28 13:10:07 +00:00
parent d6f61e7d3e
commit ffcf654907
No known key found for this signature in database
GPG key ID: 3F5B7DCCBF4AF02E
19 changed files with 528 additions and 499 deletions

View file

@ -692,33 +692,23 @@ const Writer = struct {
const zcu = w.pt.zcu;
const ip = &zcu.intern_pool;
const aggregate = ip.indexToKey(unwrapped_asm.clobbers).aggregate;
const struct_type: Type = .fromInterned(aggregate.ty);
switch (aggregate.storage) {
.elems => |elems| for (elems, 0..) |elem, i| {
switch (elem) {
.bool_true => {
const clobber = struct_type.structFieldName(i, zcu).toSlice(ip).?;
assert(clobber.len != 0);
try s.writeAll(", ~{");
try s.writeAll(clobber);
try s.writeAll("}");
},
.bool_false => continue,
else => unreachable,
}
},
.repeated_elem => |elem| {
try s.writeAll(", ");
try s.writeAll(switch (elem) {
.bool_true => "<all clobbers>",
.bool_false => "<no clobbers>",
else => unreachable,
});
},
.bytes => |bytes| {
try s.print(", {x}", .{bytes});
},
const clobbers_val: Value = .fromInterned(unwrapped_asm.clobbers);
const clobbers_ty = clobbers_val.typeOf(zcu);
var clobbers_bigint_buf: Value.BigIntSpace = undefined;
const clobbers_bigint = clobbers_val.toBigInt(&clobbers_bigint_buf, zcu);
for (0..clobbers_ty.structFieldCount(zcu)) |field_index| {
assert(clobbers_ty.fieldType(field_index, zcu).toIntern() == .bool_type);
const limb_bits = @bitSizeOf(std.math.big.Limb);
if (field_index / limb_bits >= clobbers_bigint.limbs.len) continue; // field is false
switch (@as(u1, @truncate(clobbers_bigint.limbs[field_index / limb_bits] >> @intCast(field_index % limb_bits)))) {
0 => continue, // field is false
1 => {}, // field is true
}
const clobber = clobbers_ty.structFieldName(field_index, zcu).toSlice(ip).?;
assert(clobber.len != 0);
try s.writeAll(", ~{");
try s.writeAll(clobber);
try s.writeAll("}");
}
const asm_source = unwrapped_asm.source;
try s.print(", \"{f}\"", .{std.zig.fmtString(asm_source)});

View file

@ -2130,6 +2130,8 @@ pub const Key = union(enum) {
aggregate: Aggregate,
/// An instance of a union.
un: Union,
/// An instance of a `packed struct` or `packed union`.
bitpack: Bitpack,
/// A comptime function call with a memoized result.
memoized_call: Key.MemoizedCall,
@ -2681,6 +2683,15 @@ pub const Key = union(enum) {
};
};
/// As well as a key, this type doubles as the payload in `extra` for `Tag.bitpack`.
/// Represents a comptime-known `packed struct` or `packed union` value by its bits
/// rather than field-wise, since a bitpack's layout is fully defined by its backing integer.
pub const Bitpack = struct {
/// The `packed struct` or `packed union` type.
ty: Index,
/// The contents of the bitpack, represented as the backing integer value. The type of this
/// value is the same as the backing integer type of `ty`.
/// Must not be `undef` (asserted at interning time in `get`).
backing_int_val: Index,
};
pub const MemoizedCall = struct {
func: Index,
arg_values: []const Index,
@ -2919,6 +2930,8 @@ pub const Key = union(enum) {
asBytes(&e.relocation) ++
asBytes(&e.is_const) ++ asBytes(&e.alignment) ++ asBytes(&e.@"addrspace") ++
asBytes(&e.zir_index) ++ &[1]u8{@intFromEnum(e.source)}),
.bitpack => |bitpack| Hash.hash(seed, asBytes(&bitpack.ty) ++ asBytes(&bitpack.backing_int_val)),
};
}
@ -2996,6 +3009,10 @@ pub const Key = union(enum) {
const b_info = b.empty_enum_value;
return a_info == b_info;
},
.bitpack => |a_info| {
const b_info = b.bitpack;
return a_info.ty == b_info.ty and a_info.backing_int_val == b_info.backing_int_val;
},
.variable => |a_info| {
const b_info = b.variable;
@ -3271,6 +3288,7 @@ pub const Key = union(enum) {
.enum_tag,
.aggregate,
.un,
.bitpack,
=> |x| x.ty,
.enum_literal => .enum_literal_type,
@ -4417,6 +4435,7 @@ pub const Index = enum(u32) {
trailing: struct { element_values: []Index },
},
repeated: struct { data: *Repeated },
bitpack: struct { data: *Key.Bitpack },
memoized_call: struct {
const @"data.args_len" = opaque {};
@ -5152,6 +5171,9 @@ pub const Tag = enum(u8) {
/// An instance of an array or vector with every element being the same value.
/// data is extra index to `Repeated`.
repeated,
/// An instance of a `packed struct` or `packed union`.
/// data is extra index to `Key.Bitpack`.
bitpack,
/// A memoized comptime function call result.
/// data is extra index to `MemoizedCall`
@ -5485,6 +5507,7 @@ pub const Tag = enum(u8) {
.config = .{ .@"trailing.elements.len" = .@"payload.ty.payload.fields_len" },
},
.repeated = .{ .summary = .@"@as({.payload.ty%summary}, @splat({.payload.elem_val%summary}))", .payload = Repeated },
.bitpack = .{ .summary = .@"@as({.payload.ty%summary}, {})", .payload = Key.Bitpack },
.memoized_call = .{
.summary = .@"@memoize({.payload.func%summary})",
@ -7043,6 +7066,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
},
.enum_literal => .{ .enum_literal = @enumFromInt(data) },
.enum_tag => .{ .enum_tag = extraData(unwrapped_index.getExtra(ip), Tag.EnumTag, data) },
.bitpack => .{ .bitpack = extraData(unwrapped_index.getExtra(ip), Key.Bitpack, data) },
.memoized_call => {
const extra_list = unwrapped_index.getExtra(ip);
@ -7938,15 +7962,14 @@ pub fn get(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread.Id, key:
.aggregate => |aggregate| {
const ty_key = ip.indexToKey(aggregate.ty);
const len = ip.aggregateTypeLen(aggregate.ty);
const child = switch (ty_key) {
.array_type => |array_type| array_type.child,
.vector_type => |vector_type| vector_type.child,
.tuple_type, .struct_type => .none,
else => unreachable,
};
const sentinel = switch (ty_key) {
.array_type => |array_type| array_type.sentinel,
.vector_type, .tuple_type, .struct_type => .none,
const child: Index, const sentinel: Index = switch (ty_key) {
.array_type => |array_type| .{ array_type.child, array_type.sentinel },
.vector_type => |vector_type| .{ vector_type.child, .none },
.tuple_type => .{ .none, .none },
.struct_type => child: {
assert(ip.loadStructType(aggregate.ty).layout != .@"packed");
break :child .{ .none, .none };
},
else => unreachable,
};
const len_including_sentinel = len + @intFromBool(sentinel != .none);
@ -8128,6 +8151,18 @@ pub fn get(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread.Id, key:
extra.appendSliceAssumeCapacity(.{@ptrCast(aggregate.storage.elems)});
if (sentinel != .none) extra.appendAssumeCapacity(.{@intFromEnum(sentinel)});
},
.bitpack => |bitpack| {
switch (ip.zigTypeTag(bitpack.ty)) {
.@"struct" => assert(ip.typeOf(bitpack.backing_int_val) == ip.loadStructType(bitpack.ty).packed_backing_int_type),
.@"union" => assert(ip.typeOf(bitpack.backing_int_val) == ip.loadUnionType(bitpack.ty).packed_backing_int_type),
else => unreachable,
}
assert(!ip.isUndef(bitpack.backing_int_val));
items.appendAssumeCapacity(.{
.tag = .bitpack,
.data = try addExtra(extra, bitpack),
});
},
.memoized_call => |memoized_call| {
for (memoized_call.arg_values) |arg| assert(arg != .none);
@ -9095,6 +9130,10 @@ pub fn getUnion(
tid: Zcu.PerThread.Id,
un: Key.Union,
) Allocator.Error!Index {
assert(un.ty != .none);
assert(un.val != .none);
assert(ip.loadUnionType(un.ty).layout != .@"packed");
var gop = try ip.getOrPutKey(gpa, io, tid, .{ .un = un });
defer gop.deinit();
if (gop == .existing) return gop.existing;
@ -9103,8 +9142,6 @@ pub fn getUnion(
const extra = local.getMutableExtra(gpa, io);
try items.ensureUnusedCapacity(1);
assert(un.ty != .none);
assert(un.val != .none);
items.appendAssumeCapacity(.{
.tag = .union_value,
.data = try addExtra(extra, un),
@ -11003,6 +11040,7 @@ fn dumpStatsFallible(ip: *const InternPool, w: *Io.Writer, arena: Allocator) !vo
.func_coerced => @sizeOf(Tag.FuncCoerced),
.only_possible_value => 0,
.union_value => @sizeOf(Key.Union),
.bitpack => 2 * @sizeOf(u32),
.memoized_call => b: {
const info = extraData(extra_list, MemoizedCall, data);
@ -11117,6 +11155,7 @@ fn dumpAllFallible(ip: *const InternPool, w: *Io.Writer) anyerror!void {
.func_instance,
.func_coerced,
.union_value,
.bitpack,
.memoized_call,
=> try w.print("{d}", .{data}),
@ -11871,6 +11910,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index {
.bytes,
.aggregate,
.repeated,
.bitpack,
=> |t| {
const extra_list = unwrapped_index.getExtra(ip);
return @enumFromInt(extra_list.view().items(.@"0")[item.data + std.meta.fieldIndex(t.Payload(), "ty").?]);
@ -12264,6 +12304,7 @@ pub fn zigTypeTag(ip: *const InternPool, index: Index) std.builtin.TypeId {
.bytes,
.aggregate,
.repeated,
.bitpack,
// memoization, not types
.memoized_call,
=> unreachable,

View file

@ -3583,7 +3583,7 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref,
},
.field => |idx| ptr: {
const maybe_union_ty = Value.fromInterned(decl_parent_ptr).typeOf(zcu).childType(zcu);
if (zcu.typeToUnion(maybe_union_ty)) |union_obj| {
if (zcu.typeToUnion(maybe_union_ty)) |union_obj| if (union_obj.layout == .auto) {
// As this is a union field, we must store to the pointer now to set the tag.
// The payload value will be stored later, so undef is a sufficient payload for now.
const payload_ty: Type = .fromInterned(union_obj.field_types.get(&zcu.intern_pool)[idx]);
@ -3591,7 +3591,7 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref,
const tag_val = try pt.enumValueFieldIndex(.fromInterned(union_obj.enum_tag_type), idx);
const store_val = try pt.unionValue(maybe_union_ty, tag_val, payload_val);
try sema.storePtrVal(block, .unneeded, .fromInterned(decl_parent_ptr), store_val, maybe_union_ty);
}
};
break :ptr (try Value.fromInterned(decl_parent_ptr).ptrField(idx, pt)).toIntern();
},
.elem => |idx| (try Value.fromInterned(decl_parent_ptr).ptrElem(idx, pt)).toIntern(),
@ -18510,6 +18510,10 @@ fn zirUnionInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
const payload = try sema.coerce(block, field_ty, sema.resolveInst(extra.init), payload_src);
if (union_ty.containerLayout(zcu) == .@"packed") {
return sema.bitCast(block, union_ty, payload, block.nodeOffset(inst_data.src_node), payload_src);
}
if (sema.resolveValue(payload)) |payload_val| {
const tag_ty = union_ty.unionTagTypeHypothetical(zcu);
const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index);
@ -18643,6 +18647,10 @@ fn zirStructInit(
const uncoerced_init_inst = sema.resolveInst(item.data.init);
const init_inst = try sema.coerce(block, field_ty, uncoerced_init_inst, field_src);
if (resolved_ty.containerLayout(zcu) == .@"packed") {
return sema.bitCast(block, resolved_ty, init_inst, src, field_src);
}
if (sema.resolveValue(init_inst)) |val| {
const struct_val = Value.fromInterned(try pt.internUnion(.{
.ty = resolved_ty.toIntern(),
@ -18789,15 +18797,35 @@ fn finishStructInit(
}
} else null;
const runtime_index = opt_runtime_index orelse {
const elems = try sema.arena.alloc(InternPool.Index, field_inits.len);
for (elems, field_inits) |*elem, field_init| {
elem.* = sema.resolveValue(field_init).?.toIntern();
}
const struct_val = try pt.aggregateValue(struct_ty, elems);
const final_val_inst = try sema.coerce(block, result_ty, Air.internedToRef(struct_val.toIntern()), init_src);
const final_val = sema.resolveValue(final_val_inst).?;
return sema.addConstantMaybeRef(final_val.toIntern(), is_ref);
const runtime_index = opt_runtime_index orelse switch (struct_ty.containerLayout(zcu)) {
.auto, .@"extern" => {
const elems = try sema.arena.alloc(InternPool.Index, field_inits.len);
for (elems, field_inits) |*elem, field_init| {
elem.* = sema.resolveValue(field_init).?.toIntern();
}
const struct_val = try pt.aggregateValue(struct_ty, elems);
const final_val_ref = try sema.coerce(block, result_ty, .fromValue(struct_val), init_src);
return sema.addConstantMaybeRef(final_val_ref.toInterned().?, is_ref);
},
.@"packed" => {
const buf = try sema.arena.alloc(u8, (struct_ty.bitSize(zcu) + 7) / 8);
var bit_offset: u16 = 0;
for (field_inits) |field_init| {
const field_val = sema.resolveValue(field_init).?;
field_val.writeToPackedMemory(pt, buf, bit_offset) catch |err| switch (err) {
error.ReinterpretDeclRef => unreachable, // bitpack fields cannot be pointers
error.OutOfMemory => |e| return e,
};
bit_offset += @intCast(field_val.typeOf(zcu).bitSize(zcu));
}
assert(bit_offset == struct_ty.bitSize(zcu));
const struct_val = Value.readFromPackedMemory(struct_ty, pt, buf, 0, sema.arena) catch |err| switch (err) {
error.IllDefinedMemoryLayout => unreachable, // bitpacks have well-defined layout
error.OutOfMemory => |e| return e,
};
const final_val_ref = try sema.coerce(block, result_ty, .fromValue(struct_val), init_src);
return sema.addConstantMaybeRef(final_val_ref.toInterned().?, is_ref);
},
};
if (struct_ty.comptimeOnly(zcu)) {

View file

@ -273,6 +273,8 @@ const UnpackValueBits = struct {
.opt,
=> try unpack.primitive(val),
.bitpack => |bitpack| try unpack.primitive(.fromInterned(bitpack.backing_int_val)),
.aggregate => switch (ty.zigTypeTag(zcu)) {
.vector => {
const len: usize = @intCast(ty.arrayLen(zcu));
@ -443,7 +445,7 @@ const UnpackValueBits = struct {
// This @intCast is okay because no primitive can exceed the size of a u16.
const int_ty = try unpack.pt.intType(.unsigned, @intCast(bit_count));
const buf = try unpack.arena.alloc(u8, @intCast((val_bits + 7) / 8));
try val.writeToPackedMemory(ty, unpack.pt, buf, 0);
try val.writeToPackedMemory(unpack.pt, buf, 0);
const sub_val = try Value.readFromPackedMemory(int_ty, unpack.pt, buf, @intCast(bit_offset), unpack.arena);
try unpack.primitive(sub_val);
},
@ -565,102 +567,103 @@ const PackValueBits = struct {
return pt.aggregateValue(ty, elems);
},
.@"packed" => {
// All fields are in order with no padding.
// This is identical between LE and BE targets.
const elems = try arena.alloc(InternPool.Index, ty.structFieldCount(zcu));
for (elems, 0..) |*elem, i| {
const field_ty = ty.fieldType(i, zcu);
elem.* = (try pack.get(field_ty)).toIntern();
}
return pt.aggregateValue(ty, elems);
const backing_int_val = try pack.primitive(ty.bitpackBackingInt(zcu));
return pt.bitpackValue(ty, backing_int_val);
},
},
.@"union" => {
// We will attempt to read as the backing representation. If this emits
// `error.ReinterpretDeclRef`, we will try each union field, preferring larger ones.
// We will also attempt smaller fields when we get `undefined`, as if some bits are
// defined we want to include them.
// TODO: this is very very bad. We need a more sophisticated union representation.
.@"union" => switch (ty.containerLayout(zcu)) {
.auto => unreachable, // ill-defined layout
.@"extern" => {
// We will attempt to read as the backing representation. If this emits
// `error.ReinterpretDeclRef`, we will try each union field, preferring larger ones.
// We will also attempt smaller fields when we get `undefined`, as if some bits are
// defined we want to include them.
// TODO: this is very very bad. We need a more sophisticated union representation.
const prev_unpacked = pack.unpacked;
const prev_bit_offset = pack.bit_offset;
const prev_unpacked = pack.unpacked;
const prev_bit_offset = pack.bit_offset;
const backing_ty = try ty.unionBackingType(pt);
const backing_ty = try ty.externUnionBackingType(pt);
backing: {
const backing_val = pack.get(backing_ty) catch |err| switch (err) {
error.ReinterpretDeclRef => {
backing: {
const backing_val = pack.get(backing_ty) catch |err| switch (err) {
error.ReinterpretDeclRef => {
pack.unpacked = prev_unpacked;
pack.bit_offset = prev_bit_offset;
break :backing;
},
else => |e| return e,
};
if (backing_val.isUndef(zcu)) {
pack.unpacked = prev_unpacked;
pack.bit_offset = prev_bit_offset;
break :backing;
},
else => |e| return e,
};
if (backing_val.isUndef(zcu)) {
pack.unpacked = prev_unpacked;
pack.bit_offset = prev_bit_offset;
break :backing;
}
return Value.fromInterned(try pt.internUnion(.{
.ty = ty.toIntern(),
.tag = .none,
.val = backing_val.toIntern(),
}));
}
const field_order = try pack.arena.alloc(u32, ty.unionTagTypeHypothetical(zcu).enumFieldCount(zcu));
for (field_order, 0..) |*f, i| f.* = @intCast(i);
// Sort `field_order` to put the fields with the largest bit sizes first.
const SizeSortCtx = struct {
zcu: *Zcu,
field_types: []const InternPool.Index,
fn lessThan(ctx: @This(), a_idx: u32, b_idx: u32) bool {
const a_ty = Type.fromInterned(ctx.field_types[a_idx]);
const b_ty = Type.fromInterned(ctx.field_types[b_idx]);
return a_ty.bitSize(ctx.zcu) > b_ty.bitSize(ctx.zcu);
}
};
std.mem.sortUnstable(u32, field_order, SizeSortCtx{
.zcu = zcu,
.field_types = zcu.typeToUnion(ty).?.field_types.get(ip),
}, SizeSortCtx.lessThan);
const padding_after = endian == .little or ty.containerLayout(zcu) == .@"packed";
for (field_order) |field_idx| {
const field_ty = Type.fromInterned(zcu.typeToUnion(ty).?.field_types.get(ip)[field_idx]);
const pad_bits = ty.bitSize(zcu) - field_ty.bitSize(zcu);
if (!padding_after) try pack.padding(pad_bits);
const field_val = pack.get(field_ty) catch |err| switch (err) {
error.ReinterpretDeclRef => {
pack.unpacked = prev_unpacked;
pack.bit_offset = prev_bit_offset;
continue;
},
else => |e| return e,
};
if (padding_after) try pack.padding(pad_bits);
if (field_val.isUndef(zcu)) {
pack.unpacked = prev_unpacked;
pack.bit_offset = prev_bit_offset;
continue;
}
const tag_val = try pt.enumValueFieldIndex(ty.unionTagTypeHypothetical(zcu), field_idx);
return Value.fromInterned(try pt.internUnion(.{
.ty = ty.toIntern(),
.tag = tag_val.toIntern(),
.val = field_val.toIntern(),
}));
}
// No field could represent the value. Just do whatever happens when we try to read
// the backing type - either `undefined` or `error.ReinterpretDeclRef`.
const backing_val = try pack.get(backing_ty);
return Value.fromInterned(try pt.internUnion(.{
.ty = ty.toIntern(),
.tag = .none,
.val = backing_val.toIntern(),
}));
}
const field_order = try pack.arena.alloc(u32, ty.unionTagTypeHypothetical(zcu).enumFieldCount(zcu));
for (field_order, 0..) |*f, i| f.* = @intCast(i);
// Sort `field_order` to put the fields with the largest bit sizes first.
const SizeSortCtx = struct {
zcu: *Zcu,
field_types: []const InternPool.Index,
fn lessThan(ctx: @This(), a_idx: u32, b_idx: u32) bool {
const a_ty = Type.fromInterned(ctx.field_types[a_idx]);
const b_ty = Type.fromInterned(ctx.field_types[b_idx]);
return a_ty.bitSize(ctx.zcu) > b_ty.bitSize(ctx.zcu);
}
};
std.mem.sortUnstable(u32, field_order, SizeSortCtx{
.zcu = zcu,
.field_types = zcu.typeToUnion(ty).?.field_types.get(ip),
}, SizeSortCtx.lessThan);
const padding_after = endian == .little or ty.containerLayout(zcu) == .@"packed";
for (field_order) |field_idx| {
const field_ty = Type.fromInterned(zcu.typeToUnion(ty).?.field_types.get(ip)[field_idx]);
const pad_bits = ty.bitSize(zcu) - field_ty.bitSize(zcu);
if (!padding_after) try pack.padding(pad_bits);
const field_val = pack.get(field_ty) catch |err| switch (err) {
error.ReinterpretDeclRef => {
pack.unpacked = prev_unpacked;
pack.bit_offset = prev_bit_offset;
continue;
},
else => |e| return e,
};
if (padding_after) try pack.padding(pad_bits);
if (field_val.isUndef(zcu)) {
pack.unpacked = prev_unpacked;
pack.bit_offset = prev_bit_offset;
continue;
}
const tag_val = try pt.enumValueFieldIndex(ty.unionTagTypeHypothetical(zcu), field_idx);
return Value.fromInterned(try pt.internUnion(.{
.ty = ty.toIntern(),
.tag = tag_val.toIntern(),
.val = field_val.toIntern(),
}));
}
// No field could represent the value. Just do whatever happens when we try to read
// the backing type - either `undefined` or `error.ReinterpretDeclRef`.
const backing_val = try pack.get(backing_ty);
return Value.fromInterned(try pt.internUnion(.{
.ty = ty.toIntern(),
.tag = .none,
.val = backing_val.toIntern(),
}));
},
.@"packed" => {
const backing_int_val = try pack.primitive(ty.bitpackBackingInt(zcu));
return pt.bitpackValue(ty, backing_int_val);
},
},
else => return pack.primitive(ty),
}
@ -722,7 +725,7 @@ const PackValueBits = struct {
const val = Value.fromInterned(ip_val);
const ty = val.typeOf(zcu);
if (!val.isUndef(zcu)) {
try val.writeToPackedMemory(ty, pt, buf, cur_bit_off);
try val.writeToPackedMemory(pt, buf, cur_bit_off);
}
cur_bit_off += @intCast(ty.bitSize(zcu));
}

View file

@ -79,6 +79,7 @@ pub fn ensureLayoutResolved(sema: *Sema, ty: Type, src: LazySrcLoc) SemaError!vo
.opt,
.aggregate,
.un,
.bitpack,
// memoization, not types
.memoized_call,
=> unreachable,

View file

@ -407,6 +407,7 @@ pub fn print(ty: Type, writer: *std.Io.Writer, pt: Zcu.PerThread, ctx: ?*Compari
.opt,
.aggregate,
.un,
.bitpack,
// memoization, not types
.memoized_call,
=> unreachable,
@ -543,6 +544,7 @@ pub fn hasRuntimeBits(ty: Type, zcu: *const Zcu) bool {
.opt,
.aggregate,
.un,
.bitpack,
// memoization, not types
.memoized_call,
=> unreachable,
@ -639,6 +641,7 @@ pub fn hasWellDefinedLayout(ty: Type, zcu: *const Zcu) bool {
.opt,
.aggregate,
.un,
.bitpack,
// memoization, not types
.memoized_call,
=> unreachable,
@ -846,6 +849,7 @@ pub fn abiAlignment(ty: Type, zcu: *const Zcu) Alignment {
.opt,
.aggregate,
.un,
.bitpack,
// memoization, not types
.memoized_call,
=> unreachable,
@ -978,6 +982,7 @@ pub fn abiSize(ty: Type, zcu: *const Zcu) u64 {
.opt,
.aggregate,
.un,
.bitpack,
// memoization, not types
.memoized_call,
=> unreachable,
@ -1102,6 +1107,7 @@ pub fn bitSize(ty: Type, zcu: *const Zcu) u64 {
.opt,
.aggregate,
.un,
.bitpack,
// memoization, not types
.memoized_call,
=> unreachable,
@ -1393,16 +1399,16 @@ pub fn unionHasAllZeroBitFieldTypes(ty: Type, zcu: *Zcu) bool {
}
/// Returns the type used for backing storage of this union during comptime operations.
/// Asserts the type is either an extern or packed union.
pub fn unionBackingType(ty: Type, pt: Zcu.PerThread) !Type {
/// Asserts the type is an extern union.
pub fn externUnionBackingType(ty: Type, pt: Zcu.PerThread) !Type {
const zcu = pt.zcu;
assertHasLayout(ty, zcu);
const loaded_union = zcu.intern_pool.loadUnionType(ty.toIntern());
return switch (loaded_union.layout) {
.@"extern" => try pt.arrayType(.{ .len = ty.abiSize(zcu), .child = .u8_type }),
.@"packed" => .fromInterned(loaded_union.packed_backing_int_type),
switch (loaded_union.layout) {
.@"extern" => return pt.arrayType(.{ .len = ty.abiSize(zcu), .child = .u8_type }),
.@"packed" => unreachable,
.auto => unreachable,
};
}
}
pub fn unionGetLayout(ty: Type, zcu: *const Zcu) Zcu.UnionLayout {
@ -1421,6 +1427,15 @@ pub fn containerLayout(ty: Type, zcu: *const Zcu) std.builtin.Type.ContainerLayo
};
}
/// Returns the backing integer type of a `packed struct` or `packed union`.
/// Asserts that `ty` is a struct or union type; assumes its layout is packed
/// (`packed_backing_int_type` is presumably only meaningful for packed
/// containers — NOTE(review): confirm callers only pass packed types).
pub fn bitpackBackingInt(ty: Type, zcu: *const Zcu) Type {
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => .fromInterned(ip.loadStructType(ty.toIntern()).packed_backing_int_type),
.union_type => .fromInterned(ip.loadUnionType(ty.toIntern()).packed_backing_int_type),
else => unreachable,
};
}
/// Asserts that the type is an error union.
pub fn errorUnionPayload(ty: Type, zcu: *const Zcu) Type {
return Type.fromInterned(zcu.intern_pool.indexToKey(ty.toIntern()).error_union_type.payload_type);
@ -1635,6 +1650,7 @@ pub fn intInfo(starting_ty: Type, zcu: *const Zcu) InternPool.Key.IntType {
.opt,
.aggregate,
.un,
.bitpack,
// memoization, not types
.memoized_call,
=> unreachable,
@ -1842,7 +1858,7 @@ pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value {
if (struct_obj.layout == .@"packed") {
const backing_ty: Type = .fromInterned(struct_obj.packed_backing_int_type);
const backing_val = try backing_ty.onePossibleValue(pt) orelse return null;
_ = backing_val; // MLUGG TODO: represent unions as their bits!
return try pt.bitpackValue(ty, backing_val);
} else {
if (!struct_obj.has_one_possible_value) return null;
}
@ -1893,8 +1909,13 @@ pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value {
},
.union_type => {
// MLUGG TODO: is this nonsensical or what!!!!!!
const union_obj = ip.loadUnionType(ty.toIntern());
if (union_obj.layout == .@"packed") {
const backing_ty: Type = .fromInterned(union_obj.packed_backing_int_type);
const backing_val = try backing_ty.onePossibleValue(pt) orelse return null;
return try pt.bitpackValue(ty, backing_val);
}
// MLUGG TODO: is this nonsensical or what!!!!!!
const tag_val = (try Type.fromInterned(union_obj.enum_tag_type).onePossibleValue(pt)) orelse
return null;
if (union_obj.field_types.len == 0) {
@ -1957,6 +1978,7 @@ pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value {
.opt,
.aggregate,
.un,
.bitpack,
// memoization, not types
.memoized_call,
=> unreachable,
@ -2061,6 +2083,7 @@ pub fn comptimeOnly(ty: Type, zcu: *const Zcu) bool {
.opt,
.aggregate,
.un,
.bitpack,
// memoization, not types
.memoized_call,
=> unreachable,
@ -3080,6 +3103,7 @@ pub fn assertHasLayout(ty: Type, zcu: *const Zcu) void {
.opt,
.aggregate,
.un,
.bitpack,
.undef,
// memoization, not types
.memoized_call,
@ -3158,6 +3182,7 @@ fn collectSubtypes(ty: Type, pt: Zcu.PerThread, visited: *std.AutoArrayHashMapUn
.opt,
.aggregate,
.un,
.bitpack,
// memoization, not types
.memoized_call,
=> unreachable,

View file

@ -158,6 +158,7 @@ pub fn toBigInt(val: Value, space: *BigIntSpace, zcu: *Zcu) BigIntConst {
const ip = &zcu.intern_pool;
const int_key = switch (ip.indexToKey(val.toIntern())) {
.enum_tag => |enum_tag| ip.indexToKey(enum_tag.int).int,
.bitpack => |bitpack| ip.indexToKey(bitpack.backing_int_val).int,
.int => |int| int,
else => unreachable,
};
@ -216,6 +217,7 @@ pub fn getUnsignedInt(val: Value, zcu: *const Zcu) ?u64 {
else => |payload| Value.fromInterned(payload).getUnsignedInt(zcu),
},
.enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).getUnsignedInt(zcu),
.bitpack => |bitpack| Value.fromInterned(bitpack.backing_int_val).getUnsignedInt(zcu),
.err => |err| zcu.intern_pool.getErrorValueIfExists(err.name).?,
else => null,
},
@ -309,7 +311,7 @@ pub fn writeToMemory(val: Value, pt: Zcu.PerThread, buffer: []u8) error{
// We use byte_count instead of abi_size here, so that any padding bytes
// follow the data bytes, on both big- and little-endian systems.
const byte_count = (@as(usize, @intCast(ty.bitSize(zcu))) + 7) / 8;
return writeToPackedMemory(val, ty, pt, buffer[0..byte_count], 0);
return writeToPackedMemory(val, pt, buffer[0..byte_count], 0);
},
.@"struct" => {
const struct_type = zcu.typeToStruct(ty) orelse return error.IllDefinedMemoryLayout;
@ -328,8 +330,8 @@ pub fn writeToMemory(val: Value, pt: Zcu.PerThread, buffer: []u8) error{
try writeToMemory(field_val, pt, buffer[off..]);
},
.@"packed" => {
const byte_count = (@as(usize, @intCast(ty.bitSize(zcu))) + 7) / 8;
return writeToPackedMemory(val, ty, pt, buffer[0..byte_count], 0);
const int_index = ip.indexToKey(val.toIntern()).bitpack.backing_int_val;
return Value.fromInterned(int_index).writeToMemory(pt, buffer);
},
}
},
@ -344,15 +346,14 @@ pub fn writeToMemory(val: Value, pt: Zcu.PerThread, buffer: []u8) error{
const byte_count: usize = @intCast(field_type.abiSize(zcu));
return writeToMemory(field_val, pt, buffer[0..byte_count]);
} else {
const backing_ty = try ty.unionBackingType(pt);
const backing_ty = try ty.externUnionBackingType(pt);
const byte_count: usize = @intCast(backing_ty.abiSize(zcu));
return writeToMemory(val.unionValue(zcu), pt, buffer[0..byte_count]);
}
},
.@"packed" => {
const backing_ty = try ty.unionBackingType(pt);
const byte_count: usize = @intCast(backing_ty.abiSize(zcu));
return writeToPackedMemory(val, ty, pt, buffer[0..byte_count], 0);
const int_val: Value = .fromInterned(ip.indexToKey(val.toIntern()).bitpack.backing_int_val);
return writeToMemory(int_val, pt, buffer);
},
},
.optional => {
@ -374,7 +375,6 @@ pub fn writeToMemory(val: Value, pt: Zcu.PerThread, buffer: []u8) error{
/// big-endian packed memory layouts start at the end of the buffer.
pub fn writeToPackedMemory(
val: Value,
ty: Type,
pt: Zcu.PerThread,
buffer: []u8,
bit_offset: usize,
@ -383,6 +383,7 @@ pub fn writeToPackedMemory(
const ip = &zcu.intern_pool;
const target = zcu.getTarget();
const endian = target.cpu.arch.endian();
const ty = val.typeOf(zcu);
if (val.isUndef(zcu)) {
const bit_size: usize = @intCast(ty.bitSize(zcu));
if (bit_size != 0) {
@ -405,7 +406,13 @@ pub fn writeToPackedMemory(
},
.@"enum" => {
const int_val = val.intFromEnum(zcu);
return int_val.writeToPackedMemory(int_val.typeOf(zcu), pt, buffer, bit_offset);
return int_val.writeToPackedMemory(pt, buffer, bit_offset);
},
.pointer => {
assert(!ty.isSlice(zcu)); // No well defined layout.
if (ip.getBackingAddrTag(val.toIntern()).? != .int) return error.ReinterpretDeclRef;
const addr = val.toUnsignedInt(zcu);
std.mem.writeVarPackedInt(buffer, bit_offset, zcu.getTarget().ptrBitWidth(), addr, endian);
},
.int => {
const bits = ty.intInfo(zcu).bits;
@ -434,54 +441,21 @@ pub fn writeToPackedMemory(
// On big-endian systems, LLVM reverses the element order of vectors by default
const tgt_elem_i = if (endian == .big) len - elem_i - 1 else elem_i;
const elem_val = try val.elemValue(pt, tgt_elem_i);
try elem_val.writeToPackedMemory(elem_ty, pt, buffer, bit_offset + bits);
try elem_val.writeToPackedMemory(pt, buffer, bit_offset + bits);
bits += elem_bit_size;
}
},
.@"struct" => {
const struct_type = ip.loadStructType(ty.toIntern());
// Sema is supposed to have emitted a compile error already in the case of Auto,
// and Extern is handled in non-packed writeToMemory.
assert(struct_type.layout == .@"packed");
var bits: u16 = 0;
for (0..struct_type.field_types.len) |i| {
const field_val = Value.fromInterned(switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
.bytes => unreachable,
.elems => |elems| elems[i],
.repeated_elem => |elem| elem,
});
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
const field_bits: u16 = @intCast(field_ty.bitSize(zcu));
try field_val.writeToPackedMemory(field_ty, pt, buffer, bit_offset + bits);
bits += field_bits;
}
},
.@"union" => {
const union_obj = zcu.typeToUnion(ty).?;
assert(union_obj.layout == .@"packed");
if (val.unionTag(zcu)) |union_tag| {
const field_index = zcu.unionTagFieldIndex(union_obj, union_tag).?;
const field_type = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
const field_val = try val.fieldValue(pt, field_index);
return field_val.writeToPackedMemory(field_type, pt, buffer, bit_offset);
} else {
const backing_ty = try ty.unionBackingType(pt);
return val.unionValue(zcu).writeToPackedMemory(backing_ty, pt, buffer, bit_offset);
}
},
.pointer => {
assert(!ty.isSlice(zcu)); // No well defined layout.
if (ip.getBackingAddrTag(val.toIntern()).? != .int) return error.ReinterpretDeclRef;
return val.writeToPackedMemory(Type.usize, pt, buffer, bit_offset);
.@"struct", .@"union" => {
assert(ty.containerLayout(zcu) == .@"packed");
const int_val: Value = .fromInterned(ip.indexToKey(val.toIntern()).bitpack.backing_int_val);
return int_val.writeToPackedMemory(pt, buffer, bit_offset);
},
.optional => {
assert(ty.isPtrLikeOptional(zcu));
const child = ty.optionalChild(zcu);
const opt_val = val.optionalValue(zcu);
if (opt_val) |some| {
return some.writeToPackedMemory(child, pt, buffer, bit_offset);
if (val.optionalValue(zcu)) |ptr_val| {
return ptr_val.writeToPackedMemory(pt, buffer, bit_offset);
} else {
return writeToPackedMemory(try pt.intValue(Type.usize, 0), Type.usize, pt, buffer, bit_offset);
return Value.zero_usize.writeToPackedMemory(pt, buffer, bit_offset);
}
},
else => @panic("TODO implement writeToPackedMemory for more types"),
@ -531,13 +505,12 @@ pub fn readFromPackedMemory(
pt: Zcu.PerThread,
buffer: []const u8,
bit_offset: usize,
arena: Allocator,
gpa: Allocator,
) error{
IllDefinedMemoryLayout,
OutOfMemory,
}!Value {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const target = zcu.getTarget();
const endian = target.cpu.arch.endian();
switch (ty.zigTypeTag(zcu)) {
@ -571,7 +544,8 @@ pub fn readFromPackedMemory(
const abi_size: usize = @intCast(ty.abiSize(zcu));
const Limb = std.math.big.Limb;
const limb_count = (abi_size + @sizeOf(Limb) - 1) / @sizeOf(Limb);
const limbs_buffer = try arena.alloc(Limb, limb_count);
const limbs_buffer = try gpa.alloc(Limb, limb_count);
defer gpa.free(limbs_buffer);
var bigint = BigIntMutable.init(limbs_buffer, 0);
bigint.readPackedTwosComplement(buffer, bit_offset, bits, endian, int_info.signedness);
@ -579,7 +553,7 @@ pub fn readFromPackedMemory(
},
.@"enum" => {
const int_ty = ty.intTagType(zcu);
const int_val = try Value.readFromPackedMemory(int_ty, pt, buffer, bit_offset, arena);
const int_val = try Value.readFromPackedMemory(int_ty, pt, buffer, bit_offset, gpa);
return pt.getCoerced(int_val, ty);
},
.float => return Value.fromInterned(try pt.intern(.{ .float = .{
@ -595,52 +569,32 @@ pub fn readFromPackedMemory(
} })),
.vector => {
const elem_ty = ty.childType(zcu);
const elems = try arena.alloc(InternPool.Index, @intCast(ty.arrayLen(zcu)));
const elems = try gpa.alloc(InternPool.Index, @intCast(ty.arrayLen(zcu)));
defer gpa.free(elems);
var bits: u16 = 0;
const elem_bit_size: u16 = @intCast(elem_ty.bitSize(zcu));
for (elems, 0..) |_, i| {
// On big-endian systems, LLVM reverses the element order of vectors by default
const tgt_elem_i = if (endian == .big) elems.len - i - 1 else i;
elems[tgt_elem_i] = (try readFromPackedMemory(elem_ty, pt, buffer, bit_offset + bits, arena)).toIntern();
elems[tgt_elem_i] = (try readFromPackedMemory(elem_ty, pt, buffer, bit_offset + bits, gpa)).toIntern();
bits += elem_bit_size;
}
return pt.aggregateValue(ty, elems);
},
.@"struct" => {
// Sema is supposed to have emitted a compile error already for Auto layout structs,
// and Extern is handled by non-packed readFromMemory.
const struct_type = zcu.typeToPackedStruct(ty).?;
var bits: u16 = 0;
const field_vals = try arena.alloc(InternPool.Index, struct_type.field_types.len);
for (field_vals, 0..) |*field_val, i| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
const field_bits: u16 = @intCast(field_ty.bitSize(zcu));
field_val.* = (try readFromPackedMemory(field_ty, pt, buffer, bit_offset + bits, arena)).toIntern();
bits += field_bits;
}
return pt.aggregateValue(ty, field_vals);
},
.@"union" => switch (ty.containerLayout(zcu)) {
.auto, .@"extern" => unreachable, // Handled by non-packed readFromMemory
.@"packed" => {
const backing_ty = try ty.unionBackingType(pt);
const val = (try readFromPackedMemory(backing_ty, pt, buffer, bit_offset, arena)).toIntern();
return Value.fromInterned(try pt.internUnion(.{
.ty = ty.toIntern(),
.tag = .none,
.val = val,
}));
},
.@"struct", .@"union" => {
assert(ty.containerLayout(zcu) == .@"packed");
const int_val: Value = try .readFromPackedMemory(ty.bitpackBackingInt(zcu), pt, buffer, bit_offset, gpa);
return pt.bitpackValue(ty, int_val);
},
.pointer => {
assert(!ty.isSlice(zcu)); // No well defined layout.
const addr = (try readFromPackedMemory(Type.usize, pt, buffer, bit_offset, arena)).toUnsignedInt(zcu);
const addr = (try readFromPackedMemory(Type.usize, pt, buffer, bit_offset, gpa)).toUnsignedInt(zcu);
return pt.ptrIntValue(ty, addr);
},
.optional => {
assert(ty.isPtrLikeOptional(zcu));
const addr = (try readFromPackedMemory(Type.usize, pt, buffer, bit_offset, arena)).toUnsignedInt(zcu);
const addr = (try readFromPackedMemory(Type.usize, pt, buffer, bit_offset, gpa)).toUnsignedInt(zcu);
return .fromInterned(try pt.intern(.{ .opt = .{
.ty = ty.toIntern(),
.val = if (addr == 0) .none else (try pt.ptrIntValue(ty.childType(zcu), addr)).toIntern(),
@ -915,8 +869,44 @@ pub fn fieldValue(val: Value, pt: Zcu.PerThread, index: usize) !Value {
.elems => |elems| elems[index],
.repeated_elem => |elem| elem,
}),
// TODO assert the tag is correct
.un => |un| Value.fromInterned(un.val),
.un => |un| {
switch (Type.fromInterned(un.ty).containerLayout(zcu)) {
.auto, .@"extern" => {}, // TODO assert the tag is correct
.@"packed" => unreachable,
}
return .fromInterned(un.val);
},
.bitpack => |bitpack| {
const ty: Type = .fromInterned(bitpack.ty);
assert(ty.containerLayout(zcu) == .@"packed");
const int_val: Value = .fromInterned(bitpack.backing_int_val);
assert(!int_val.isUndef(zcu));
const field_ty = ty.fieldType(index, zcu);
const field_bit_offset: u16 = switch (ty.zigTypeTag(zcu)) {
.@"union" => 0,
.@"struct" => off: {
var off: u16 = 0;
for (0..index) |preceding_field_index| {
off += @intCast(ty.fieldType(preceding_field_index, zcu).bitSize(zcu));
}
break :off off;
},
else => unreachable,
};
// Avoid hitting gpa for accesses to small packed structs
var sfba_state = std.heap.stackFallback(128, zcu.comp.gpa);
const sfba = sfba_state.get();
const buf = try sfba.alloc(u8, (ty.bitSize(zcu) + 7) / 8);
defer sfba.free(buf);
int_val.writeToPackedMemory(pt, buf, 0) catch |err| switch (err) {
error.ReinterpretDeclRef => unreachable, // it's an integer
error.OutOfMemory => |e| return e,
};
return Value.readFromPackedMemory(field_ty, pt, buf, field_bit_offset, sfba) catch |err| switch (err) {
error.IllDefinedMemoryLayout => unreachable, // it's a bitpack
error.OutOfMemory => |e| return e,
};
},
else => unreachable,
};
}

View file

@ -3950,6 +3950,15 @@ pub fn floatValue(pt: Zcu.PerThread, ty: Type, x: anytype) Allocator.Error!Value
} }));
}
/// Create a value whose type is a `packed struct` or `packed union`, from the backing integer value.
pub fn bitpackValue(pt: Zcu.PerThread, ty: Type, backing_int_val: Value) Allocator.Error!Value {
assert(backing_int_val.typeOf(pt.zcu).toIntern() == ty.bitpackBackingInt(pt.zcu).toIntern());
return .fromInterned(try pt.intern(.{ .bitpack = .{
.ty = ty.toIntern(),
.backing_int_val = backing_int_val.toIntern(),
} }));
}
pub fn nullValue(pt: Zcu.PerThread, opt_ty: Type) Allocator.Error!Value {
assert(pt.zcu.intern_pool.isOptionalType(opt_ty.toIntern()));
return Value.fromInterned(try pt.intern(.{ .opt = .{

View file

@ -570,42 +570,7 @@ pub fn generateSymbol(
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
switch (struct_type.layout) {
.@"packed" => {
const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
const start = w.end;
const buffer = try w.writableSlice(abi_size);
@memset(buffer, 0);
var bits: u16 = 0;
for (struct_type.field_types.get(ip), 0..) |field_ty, index| {
const field_val = switch (aggregate.storage) {
.bytes => |bytes| try pt.intern(.{ .int = .{
.ty = field_ty,
.storage = .{ .u64 = bytes.at(index, ip) },
} }),
.elems => |elems| elems[index],
.repeated_elem => |elem| elem,
};
// pointer may point to a decl which must be marked used
// but can also result in a relocation. Therefore we handle those separately.
if (Type.fromInterned(field_ty).zigTypeTag(zcu) == .pointer) {
const field_offset = std.math.divExact(u16, bits, 8) catch |err| switch (err) {
error.DivisionByZero => unreachable,
error.UnexpectedRemainder => return error.RelocationNotByteAligned,
};
w.end = start + field_offset;
defer {
assert(w.end == start + field_offset + @divExact(target.ptrBitWidth(), 8));
w.end = start + abi_size;
}
try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), w, reloc_parent);
} else {
Value.fromInterned(field_val).writeToPackedMemory(.fromInterned(field_ty), pt, buffer, bits) catch unreachable;
}
bits += @intCast(Type.fromInterned(field_ty).bitSize(zcu));
}
},
.@"packed" => unreachable,
.auto, .@"extern" => {
const struct_begin = w.end;
const field_types = struct_type.field_types.get(ip);
@ -683,6 +648,7 @@ pub fn generateSymbol(
}
}
},
.bitpack => |bitpack| try generateSymbol(bin_file, pt, src_loc, .fromInterned(bitpack.backing_int_val), w, reloc_parent),
.memoized_call => unreachable,
}
}
@ -1120,6 +1086,10 @@ pub fn lowerValue(pt: Zcu.PerThread, val: Value, target: *const std.Target) Allo
target,
);
},
.@"struct", .@"union" => if (ty.containerLayout(zcu) == .@"packed") {
const bitpack = ip.indexToKey(val.toIntern()).bitpack;
return lowerValue(pt, .fromInterned(bitpack.backing_int_val), target);
},
.error_set => {
const err_name = ip.indexToKey(val.toIntern()).err.name;
const error_index = ip.getErrorValueIfExists(err_name).?;

View file

@ -2791,17 +2791,17 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
} else return isel.fail("invalid constraint: '{s}'", .{constraint});
}
const clobbers = ip.indexToKey(unwrapped_asm.clobbers).aggregate;
const clobbers_ty: ZigType = .fromInterned(clobbers.ty);
const clobbers_val: Value = .fromInterned(unwrapped_asm.clobbers);
const clobbers_ty = clobbers_val.typeOf(zcu);
var clobbers_bigint_buf: Value.BigIntSpace = undefined;
const clobbers_bigint = clobbers_val.toBigInt(&clobbers_bigint_buf, zcu);
for (0..clobbers_ty.structFieldCount(zcu)) |field_index| {
switch (switch (clobbers.storage) {
.bytes => unreachable,
.elems => |elems| elems[field_index],
.repeated_elem => |repeated_elem| repeated_elem,
}) {
else => unreachable,
.bool_false => continue,
.bool_true => {},
assert(clobbers_ty.fieldType(field_index, zcu).toIntern() == .bool_type);
const limb_bits = @bitSizeOf(std.math.big.Limb);
if (field_index / limb_bits >= clobbers_bigint.limbs.len) continue; // field is false
switch (@as(u1, @truncate(clobbers_bigint.limbs[field_index / limb_bits] >> @intCast(field_index % limb_bits)))) {
0 => continue, // field is false
1 => {}, // field is true
}
const clobber_name = clobbers_ty.structFieldName(field_index, zcu).toSlice(ip).?;
if (std.mem.eql(u8, clobber_name, "memory")) continue;
@ -2816,14 +2816,11 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
}
}
for (0..clobbers_ty.structFieldCount(zcu)) |field_index| {
switch (switch (clobbers.storage) {
.bytes => unreachable,
.elems => |elems| elems[field_index],
.repeated_elem => |repeated_elem| repeated_elem,
}) {
else => unreachable,
.bool_false => continue,
.bool_true => {},
const limb_bits = @bitSizeOf(std.math.big.Limb);
if (field_index / limb_bits >= clobbers_bigint.limbs.len) continue; // field is false
switch (@as(u1, @truncate(clobbers_bigint.limbs[field_index / limb_bits] >> field_index % limb_bits))) {
0 => continue, // field is false
1 => {}, // field is true
}
const clobber_name = clobbers_ty.structFieldName(field_index, zcu).toSlice(ip).?;
if (std.mem.eql(u8, clobber_name, "memory")) continue;
@ -2872,14 +2869,11 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
}
for (0..clobbers_ty.structFieldCount(zcu)) |field_index| {
switch (switch (clobbers.storage) {
.bytes => unreachable,
.elems => |elems| elems[field_index],
.repeated_elem => |repeated_elem| repeated_elem,
}) {
else => unreachable,
.bool_false => continue,
.bool_true => {},
const limb_bits = @bitSizeOf(std.math.big.Limb);
if (field_index / limb_bits >= clobbers_bigint.limbs.len) continue; // field is false
switch (@as(u1, @truncate(clobbers_bigint.limbs[field_index / limb_bits] >> field_index % limb_bits))) {
0 => continue, // field is false
1 => {}, // field is true
}
const clobber_name = clobbers_ty.structFieldName(field_index, zcu).toSlice(ip).?;
if (std.mem.eql(u8, clobber_name, "memory")) continue;

View file

@ -1362,77 +1362,42 @@ pub const DeclGen = struct {
},
.struct_type => {
const loaded_struct = ip.loadStructType(ty.toIntern());
switch (loaded_struct.layout) {
.auto, .@"extern" => {
if (!location.isInitializer()) {
try w.writeByte('(');
try dg.renderCType(w, ctype);
try w.writeByte(')');
}
assert(loaded_struct.layout != .@"packed");
try w.writeByte('{');
var field_it = loaded_struct.iterateRuntimeOrder(ip);
var need_comma = false;
while (field_it.next()) |field_index| {
const field_ty: Type = .fromInterned(loaded_struct.field_types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
if (need_comma) try w.writeByte(',');
need_comma = true;
const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
.bytes => |bytes| try pt.intern(.{ .int = .{
.ty = field_ty.toIntern(),
.storage = .{ .u64 = bytes.at(field_index, ip) },
} }),
.elems => |elems| elems[field_index],
.repeated_elem => |elem| elem,
};
try dg.renderValue(w, Value.fromInterned(field_val), initializer_type);
}
try w.writeByte('}');
},
.@"packed" => {
// https://github.com/ziglang/zig/issues/24657 will eliminate most of the
// following logic, leaving only the recursive `renderValue` call. Once
// that proposal is implemented, a `packed struct` will literally be
// represented in the InternPool by its comptime-known backing integer.
var arena: std.heap.ArenaAllocator = .init(zcu.gpa);
defer arena.deinit();
const backing_ty: Type = .fromInterned(loaded_struct.backingIntTypeUnordered(ip));
const buf = try arena.allocator().alloc(u8, @intCast(ty.abiSize(zcu)));
val.writeToMemory(pt, buf) catch |err| switch (err) {
error.IllDefinedMemoryLayout => unreachable,
error.OutOfMemory => |e| return e,
error.ReinterpretDeclRef, error.Unimplemented => return dg.fail("TODO: C backend: lower packed struct value", .{}),
};
const backing_val: Value = try .readUintFromMemory(backing_ty, pt, buf, arena.allocator());
return dg.renderValue(w, backing_val, location);
},
if (!location.isInitializer()) {
try w.writeByte('(');
try dg.renderCType(w, ctype);
try w.writeByte(')');
}
try w.writeByte('{');
var field_it = loaded_struct.iterateRuntimeOrder(ip);
var need_comma = false;
while (field_it.next()) |field_index| {
const field_ty: Type = .fromInterned(loaded_struct.field_types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
if (need_comma) try w.writeByte(',');
need_comma = true;
const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
.bytes => |bytes| try pt.intern(.{ .int = .{
.ty = field_ty.toIntern(),
.storage = .{ .u64 = bytes.at(field_index, ip) },
} }),
.elems => |elems| elems[field_index],
.repeated_elem => |elem| elem,
};
try dg.renderValue(w, Value.fromInterned(field_val), initializer_type);
}
try w.writeByte('}');
},
else => unreachable,
},
.bitpack => |bitpack| return dg.renderValue(w, .fromInterned(bitpack.backing_int_val), location),
.un => |un| {
const loaded_union = ip.loadUnionType(ty.toIntern());
if (loaded_union.flagsUnordered(ip).layout == .@"packed") {
// https://github.com/ziglang/zig/issues/24657 will eliminate most of the
// following logic, leaving only the recursive `renderValue` call. Once
// that proposal is implemented, a `packed union` will literally be
// represented in the InternPool by its comptime-known backing integer.
var arena: std.heap.ArenaAllocator = .init(zcu.gpa);
defer arena.deinit();
const backing_ty = try ty.unionBackingType(pt);
const buf = try arena.allocator().alloc(u8, @intCast(ty.abiSize(zcu)));
val.writeToMemory(pt, buf) catch |err| switch (err) {
error.IllDefinedMemoryLayout => unreachable,
error.OutOfMemory => |e| return e,
error.ReinterpretDeclRef, error.Unimplemented => return dg.fail("TODO: C backend: lower packed union value", .{}),
};
const backing_val: Value = try .readUintFromMemory(backing_ty, pt, buf, arena.allocator());
return dg.renderValue(w, backing_val, location);
}
if (un.tag == .none) {
const backing_ty = try ty.unionBackingType(pt);
const backing_ty = try ty.externUnionBackingType(pt);
assert(loaded_union.flagsUnordered(ip).layout == .@"extern");
if (location == .StaticInitializer) {
return dg.fail("TODO: C backend: implement extern union backing type rendering in static initializers", .{});
@ -1642,11 +1607,7 @@ pub const DeclGen = struct {
}
return w.writeByte('}');
},
.@"packed" => return dg.renderUndefValue(
w,
.fromInterned(loaded_struct.backingIntTypeUnordered(ip)),
location,
),
.@"packed" => return dg.renderUndefValue(w, ty.bitpackBackingInt(zcu), location),
}
},
.tuple_type => |tuple_info| {
@ -1714,11 +1675,7 @@ pub const DeclGen = struct {
}
if (has_tag) try w.writeByte('}');
},
.@"packed" => return dg.renderUndefValue(
w,
try ty.unionBackingType(pt),
location,
),
.@"packed" => return dg.renderUndefValue(w, ty.bitpackBackingInt(zcu), location),
}
},
.error_union_type => |error_union_type| switch (ctype.info(ctype_pool)) {
@ -5623,48 +5580,45 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
}
try w.writeByte(':');
const ip = &zcu.intern_pool;
const aggregate = ip.indexToKey(unwrapped_asm.clobbers).aggregate;
const struct_type: Type = .fromInterned(aggregate.ty);
switch (aggregate.storage) {
.elems => |elems| for (elems, 0..) |elem, i| switch (elem) {
.bool_true => {
const field_name = struct_type.structFieldName(i, zcu).toSlice(ip).?;
assert(field_name.len != 0);
const clobbers_val: Value = .fromInterned(unwrapped_asm.clobbers);
const clobbers_ty = clobbers_val.typeOf(zcu);
var clobbers_bigint_buf: Value.BigIntSpace = undefined;
const clobbers_bigint = clobbers_val.toBigInt(&clobbers_bigint_buf, zcu);
for (0..clobbers_ty.structFieldCount(zcu)) |field_index| {
assert(clobbers_ty.fieldType(field_index, zcu).toIntern() == .bool_type);
const limb_bits = @bitSizeOf(std.math.big.Limb);
if (field_index / limb_bits >= clobbers_bigint.limbs.len) continue; // field is false
switch (@as(u1, @truncate(clobbers_bigint.limbs[field_index / limb_bits] >> @intCast(field_index % limb_bits)))) {
0 => continue, // field is false
1 => {}, // field is true
}
const field_name = clobbers_ty.structFieldName(field_index, zcu).toSlice(ip).?;
assert(field_name.len != 0);
const target = &f.object.dg.mod.resolved_target.result;
var c_name_buf: [16]u8 = undefined;
const name =
if ((target.cpu.arch.isMIPS() or target.cpu.arch == .alpha) and field_name[0] == 'r') name: {
// Convert "rN" to "$N"
const c_name = (&c_name_buf)[0..field_name.len];
@memcpy(c_name, field_name);
c_name_buf[0] = '$';
break :name c_name;
} else if ((target.cpu.arch.isMIPS() and (mem.startsWith(u8, field_name, "fcc") or field_name[0] == 'w')) or
((target.cpu.arch.isMIPS() or target.cpu.arch == .alpha) and field_name[0] == 'f') or
(target.cpu.arch == .kvx and !mem.eql(u8, field_name, "memory"))) name: {
// "$" prefix for these registers
c_name_buf[0] = '$';
@memcpy((&c_name_buf)[1..][0..field_name.len], field_name);
break :name (&c_name_buf)[0 .. 1 + field_name.len];
} else if (target.cpu.arch.isSPARC() and
(mem.eql(u8, field_name, "ccr") or mem.eql(u8, field_name, "icc") or mem.eql(u8, field_name, "xcc"))) name: {
// C compilers just use `icc` to encompass all of these.
break :name "icc";
} else field_name;
const target = &f.object.dg.mod.resolved_target.result;
var c_name_buf: [16]u8 = undefined;
const name =
if ((target.cpu.arch.isMIPS() or target.cpu.arch == .alpha) and field_name[0] == 'r') name: {
// Convert "rN" to "$N"
const c_name = (&c_name_buf)[0..field_name.len];
@memcpy(c_name, field_name);
c_name_buf[0] = '$';
break :name c_name;
} else if ((target.cpu.arch.isMIPS() and (mem.startsWith(u8, field_name, "fcc") or field_name[0] == 'w')) or
((target.cpu.arch.isMIPS() or target.cpu.arch == .alpha) and field_name[0] == 'f') or
(target.cpu.arch == .kvx and !mem.eql(u8, field_name, "memory"))) name: {
// "$" prefix for these registers
c_name_buf[0] = '$';
@memcpy((&c_name_buf)[1..][0..field_name.len], field_name);
break :name (&c_name_buf)[0 .. 1 + field_name.len];
} else if (target.cpu.arch.isSPARC() and
(mem.eql(u8, field_name, "ccr") or mem.eql(u8, field_name, "icc") or mem.eql(u8, field_name, "xcc"))) name: {
// C compilers just use `icc` to encompass all of these.
break :name "icc";
} else field_name;
try w.print(" {f}", .{fmtStringLiteral(name, null)});
(try w.writableArray(1))[0] = ',';
},
.bool_false => continue,
else => unreachable,
},
.repeated_elem => |elem| switch (elem) {
.bool_true => @panic("TODO"),
.bool_false => {},
else => unreachable,
},
.bytes => @panic("TODO"),
try w.print(" {f}", .{fmtStringLiteral(name, null)});
(try w.writableArray(1))[0] = ',';
}
w.undo(1); // erase the last comma
try w.writeAll(");");

View file

@ -3680,7 +3680,7 @@ pub const Object = struct {
const limbs = try allocator.alloc(std.math.big.Limb, std.math.big.int.calcTwosCompLimbCount(bits));
defer allocator.free(limbs);
val.writeToPackedMemory(ty, pt, buffer, 0) catch unreachable;
val.writeToPackedMemory(pt, buffer, 0) catch unreachable;
var big: std.math.big.int.Mutable = .init(limbs, 0);
big.readTwosComplement(buffer, bits, target.cpu.arch.endian(), .unsigned);
@ -7467,29 +7467,20 @@ pub const FuncGen = struct {
}
const ip = &zcu.intern_pool;
const aggregate = ip.indexToKey(unwrapped_asm.clobbers).aggregate;
const struct_type: Type = .fromInterned(aggregate.ty);
if (total_i != 0) try llvm_constraints.append(gpa, ',');
switch (aggregate.storage) {
.elems => |elems| for (elems, 0..) |elem, i| {
switch (elem) {
.bool_true => {
const name = struct_type.structFieldName(i, zcu).toSlice(ip).?;
total_i += try appendConstraints(gpa, &llvm_constraints, name, target);
},
.bool_false => continue,
else => unreachable,
}
},
.repeated_elem => |elem| switch (elem) {
.bool_true => for (0..struct_type.structFieldCount(zcu)) |i| {
const name = struct_type.structFieldName(i, zcu).toSlice(ip).?;
total_i += try appendConstraints(gpa, &llvm_constraints, name, target);
},
.bool_false => {},
else => unreachable,
},
.bytes => @panic("TODO"),
const clobbers_val: Value = .fromInterned(unwrapped_asm.clobbers);
const clobbers_ty = clobbers_val.typeOf(zcu);
var clobbers_bigint_buf: Value.BigIntSpace = undefined;
const clobbers_bigint = clobbers_val.toBigInt(&clobbers_bigint_buf, zcu);
for (0..clobbers_ty.structFieldCount(zcu)) |field_index| {
assert(clobbers_ty.fieldType(field_index, zcu).toIntern() == .bool_type);
const limb_bits = @bitSizeOf(std.math.big.Limb);
if (field_index / limb_bits >= clobbers_bigint.limbs.len) continue; // field is false
switch (@as(u1, @truncate(clobbers_bigint.limbs[field_index / limb_bits] >> @intCast(field_index % limb_bits)))) {
0 => continue, // field is false
1 => {}, // field is true
}
const name = clobbers_ty.structFieldName(field_index, zcu).toSlice(ip).?;
total_i += try appendConstraints(gpa, &llvm_constraints, name, target);
}
// We have finished scanning through all inputs/outputs, so the number of

View file

@ -6149,31 +6149,26 @@ fn airAsm(func: *Func, inst: Air.Inst.Index) !void {
const zcu = func.pt.zcu;
const ip = &zcu.intern_pool;
const aggregate = ip.indexToKey(unwrapped_asm.clobbers).aggregate;
const struct_type: Type = .fromInterned(aggregate.ty);
switch (aggregate.storage) {
.elems => |elems| for (elems, 0..) |elem, i| {
switch (elem) {
.bool_true => {
const clobber = struct_type.structFieldName(i, zcu).toSlice(ip).?;
assert(clobber.len != 0);
if (std.mem.eql(u8, clobber, "memory")) {
// nothing really to do
} else {
try func.register_manager.getReg(parseRegName(clobber) orelse
return func.fail("invalid clobber: '{s}'", .{clobber}), null);
}
},
.bool_false => continue,
else => unreachable,
}
},
.repeated_elem => |elem| switch (elem) {
.bool_true => @panic("TODO"),
.bool_false => {},
else => unreachable,
},
.bytes => @panic("TODO"),
const clobbers_val: Value = .fromInterned(unwrapped_asm.clobbers);
const clobbers_ty = clobbers_val.typeOf(zcu);
var clobbers_bigint_buf: Value.BigIntSpace = undefined;
const clobbers_bigint = clobbers_val.toBigInt(&clobbers_bigint_buf, zcu);
for (0..clobbers_ty.structFieldCount(zcu)) |field_index| {
assert(clobbers_ty.fieldType(field_index, zcu).toIntern() == .bool_type);
const limb_bits = @bitSizeOf(std.math.big.Limb);
if (field_index / limb_bits >= clobbers_bigint.limbs.len) continue; // field is false
switch (@as(u1, @truncate(clobbers_bigint.limbs[field_index / limb_bits] >> @intCast(field_index % limb_bits)))) {
0 => continue, // field is false
1 => {}, // field is true
}
const clobber = clobbers_ty.structFieldName(field_index, zcu).toSlice(ip).?;
assert(clobber.len != 0);
if (std.mem.eql(u8, clobber, "memory")) {
// nothing really to do
} else {
try func.register_manager.getReg(parseRegName(clobber) orelse
return func.fail("invalid clobber: '{s}'", .{clobber}), null);
}
}
const Label = struct {

View file

@ -969,7 +969,7 @@ fn constant(cg: *CodeGen, ty: Type, val: Value, repr: Repr) Error!Id {
const bytes = std.mem.alignForward(u16, cg.module.backingIntBits(bits).@"0", 8) / 8;
var limbs: [8]u8 = undefined;
@memset(&limbs, 0);
val.writeToPackedMemory(ty, pt, limbs[0..bytes], 0) catch unreachable;
val.writeToPackedMemory(pt, limbs[0..bytes], 0) catch unreachable;
const backing_ty: Type = .fromInterned(struct_type.backingIntTypeUnordered(ip));
return try cg.constInt(backing_ty, @as(u64, @bitCast(limbs)));
}

View file

@ -3253,7 +3253,7 @@ fn lowerConstant(cg: *CodeGen, val: Value, ty: Type) InnerError!WValue {
// are by-ref types.
assert(struct_type.layout == .@"packed");
var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer
val.writeToPackedMemory(ty, pt, &buf, 0) catch unreachable;
val.writeToPackedMemory(pt, &buf, 0) catch unreachable;
const backing_int_ty = Type.fromInterned(struct_type.backingIntTypeUnordered(ip));
const int_val = try pt.intValue(
backing_int_ty,
@ -3267,7 +3267,7 @@ fn lowerConstant(cg: *CodeGen, val: Value, ty: Type) InnerError!WValue {
const int_type = try pt.intType(.unsigned, @intCast(ty.bitSize(zcu)));
var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer
val.writeToPackedMemory(ty, pt, &buf, 0) catch unreachable;
val.writeToPackedMemory(pt, &buf, 0) catch unreachable;
const int_val = try pt.intValue(
int_type,
mem.readInt(u64, &buf, .little),

View file

@ -177294,41 +177294,38 @@ fn airAsm(self: *CodeGen, inst: Air.Inst.Index) !void {
}
const ip = &zcu.intern_pool;
const aggregate = ip.indexToKey(unwrapped_asm.clobbers).aggregate;
const struct_type: Type = .fromInterned(aggregate.ty);
switch (aggregate.storage) {
.elems => |elems| for (elems, 0..) |elem, i| switch (elem) {
.bool_true => {
const clobber = struct_type.structFieldName(i, zcu).toSlice(ip).?;
assert(clobber.len != 0);
const clobbers_val: Value = .fromInterned(unwrapped_asm.clobbers);
const clobbers_ty = clobbers_val.typeOf(zcu);
var clobbers_bigint_buf: Value.BigIntSpace = undefined;
const clobbers_bigint = clobbers_val.toBigInt(&clobbers_bigint_buf, zcu);
for (0..clobbers_ty.structFieldCount(zcu)) |field_index| {
assert(clobbers_ty.fieldType(field_index, zcu).toIntern() == .bool_type);
const limb_bits = @bitSizeOf(std.math.big.Limb);
if (field_index / limb_bits >= clobbers_bigint.limbs.len) continue; // field is false
switch (@as(u1, @truncate(clobbers_bigint.limbs[field_index / limb_bits] >> @intCast(field_index % limb_bits)))) {
0 => continue, // field is false
1 => {}, // field is true
}
const clobber = clobbers_ty.structFieldName(field_index, zcu).toSlice(ip).?;
assert(clobber.len != 0);
if (std.mem.eql(u8, clobber, "memory") or
std.mem.eql(u8, clobber, "fpsr") or
std.mem.eql(u8, clobber, "fpcr") or
std.mem.eql(u8, clobber, "mxcsr") or
std.mem.eql(u8, clobber, "dirflag"))
{
// ok, sure
} else if (std.mem.eql(u8, clobber, "cc") or
std.mem.eql(u8, clobber, "flags") or
std.mem.eql(u8, clobber, "eflags") or
std.mem.eql(u8, clobber, "rflags"))
{
try self.spillEflagsIfOccupied();
} else {
try self.register_manager.getReg(parseRegName(clobber) orelse
return self.fail("invalid clobber: '{s}'", .{clobber}), null);
}
},
.bool_false => continue,
else => unreachable,
},
.repeated_elem => |elem| switch (elem) {
.bool_true => @panic("TODO"),
.bool_false => {},
else => unreachable,
},
.bytes => @panic("TODO"),
if (std.mem.eql(u8, clobber, "memory") or
std.mem.eql(u8, clobber, "fpsr") or
std.mem.eql(u8, clobber, "fpcr") or
std.mem.eql(u8, clobber, "mxcsr") or
std.mem.eql(u8, clobber, "dirflag"))
{
// ok, sure
} else if (std.mem.eql(u8, clobber, "cc") or
std.mem.eql(u8, clobber, "flags") or
std.mem.eql(u8, clobber, "eflags") or
std.mem.eql(u8, clobber, "rflags"))
{
try self.spillEflagsIfOccupied();
} else {
try self.register_manager.getReg(parseRegName(clobber) orelse
return self.fail("invalid clobber: '{s}'", .{clobber}), null);
}
}
const Label = struct {

View file

@ -3378,6 +3378,7 @@ fn updateComptimeNavInner(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPoo
.opt,
.aggregate,
.un,
.bitpack,
=> .decl_const,
.variable => .decl_var,
.@"extern" => unreachable,
@ -4014,6 +4015,7 @@ fn updateLazyType(
.opt,
.aggregate,
.un,
.bitpack,
// memoization, not types
.memoized_call,
=> unreachable,
@ -4092,6 +4094,15 @@ fn updateLazyValue(
}, .fromInterned(int.ty), Value.fromInterned(value_index).toBigInt(&big_int_space, zcu));
try wip_nav.refType(.fromInterned(int.ty));
},
.bitpack => |bitpack| {
const backing_int_val: Value = .fromInterned(bitpack.backing_int_val);
try wip_nav.bigIntConstValue(.{
.sdata = .sdata_comptime_value,
.udata = .udata_comptime_value,
.block = .block_comptime_value,
}, backing_int_val.typeOf(zcu), backing_int_val.toBigInt(&big_int_space, zcu));
try wip_nav.refType(.fromInterned(bitpack.ty));
},
.err => |err| {
try wip_nav.abbrevCode(.udata_comptime_value);
try wip_nav.refType(.fromInterned(err.ty));

View file

@ -97,8 +97,8 @@ pub const MutableValue = union(enum) {
/// * Non-error error unions use `eu_payload`
/// * Non-null optionals use `eu_payload
/// * Slices use `slice`
/// * Unions use `un`
/// * Aggregates use `repeated` or `bytes` or `aggregate`
/// * Unions use `un` (excluding packed unions)
/// * Aggregates use `repeated` or `bytes` or `aggregate` (excluding packed structs)
/// If `!allow_bytes`, the `bytes` representation will not be used.
/// If `!allow_repeated`, the `repeated` representation will not be used.
pub fn unintern(
@ -209,6 +209,7 @@ pub const MutableValue = union(enum) {
.undef => |ty_ip| switch (Type.fromInterned(ty_ip).zigTypeTag(zcu)) {
.@"struct", .array, .vector => |type_tag| {
const ty = Type.fromInterned(ty_ip);
if (type_tag == .@"struct" and ty.containerLayout(zcu) == .@"packed") return;
const opt_sent = ty.sentinel(zcu);
if (type_tag == .@"struct" or opt_sent != null or !allow_repeated) {
const len_no_sent = ip.aggregateTypeLen(ty_ip);
@ -241,15 +242,18 @@ pub const MutableValue = union(enum) {
} };
}
},
.@"union" => {
const payload = try arena.create(MutableValue);
const backing_ty = try Type.fromInterned(ty_ip).unionBackingType(pt);
payload.* = .{ .interned = try pt.intern(.{ .undef = backing_ty.toIntern() }) };
mv.* = .{ .un = .{
.ty = ty_ip,
.tag = .none,
.payload = payload,
} };
.@"union" => switch (Type.fromInterned(ty_ip).containerLayout(zcu)) {
.auto, .@"packed" => {},
.@"extern" => {
const payload = try arena.create(MutableValue);
const backing_ty = try Type.fromInterned(ty_ip).externUnionBackingType(pt);
payload.* = .{ .interned = try pt.intern(.{ .undef = backing_ty.toIntern() }) };
mv.* = .{ .un = .{
.ty = ty_ip,
.tag = .none,
.payload = payload,
} };
},
},
.pointer => {
const ptr_ty = ip.indexToKey(ty_ip).ptr_type;

View file

@ -164,7 +164,7 @@ pub fn print(
return;
}
if (un.tag == .none) {
const backing_ty = try val.typeOf(zcu).unionBackingType(pt);
const backing_ty = try val.typeOf(zcu).externUnionBackingType(pt);
try writer.print("@bitCast(@as({f}, ", .{backing_ty.fmt(pt)});
try print(Value.fromInterned(un.val), writer, level - 1, pt, opt_sema);
try writer.writeAll("))");
@ -176,6 +176,32 @@ pub fn print(
try writer.writeAll(" }");
}
},
.bitpack => |bitpack| {
const ty: Type = .fromInterned(bitpack.ty);
switch (ty.zigTypeTag(zcu)) {
.@"struct" => {
if (ty.structFieldCount(zcu) == 0) {
return writer.writeAll(".{}");
}
try writer.writeAll(".{ ");
const max_len = @min(ty.structFieldCount(zcu), max_aggregate_items);
for (0..max_len) |i| {
if (i != 0) try writer.writeAll(", ");
const field_name = ty.structFieldName(@intCast(i), zcu).unwrap().?;
try writer.print(".{f} = ", .{field_name.fmt(ip)});
try print(try val.fieldValue(pt, i), writer, level - 1, pt, opt_sema);
}
try writer.writeAll(" }");
return;
},
.@"union" => {
try writer.print("@bitCast(@as({f}, ", .{ty.bitpackBackingInt(zcu).fmt(pt)});
try print(.fromInterned(bitpack.backing_int_val), writer, level - 1, pt, opt_sema);
try writer.writeAll("))");
},
else => unreachable,
}
},
.memoized_call => unreachable,
}
}