type resolution progress

This commit is contained in:
Matthew Lugg 2026-01-23 09:59:32 +00:00
parent f0a9ed38af
commit 3e21e6777e
No known key found for this signature in database
GPG key ID: 3F5B7DCCBF4AF02E
27 changed files with 3047 additions and 2971 deletions

View file

@ -837,6 +837,10 @@ pub const SimpleComptimeReason = enum(u32) {
tuple_field_types,
enum_field_names,
enum_field_values,
union_enum_tag_type,
enum_int_tag_type,
packed_struct_backing_int_type,
packed_union_backing_int_type,
// Evaluating at comptime because decl/field name must be comptime-known.
decl_name,
@ -925,6 +929,11 @@ pub const SimpleComptimeReason = enum(u32) {
.enum_field_names => "enum field names must be comptime-known",
.enum_field_values => "enum field values must be comptime-known",
.union_enum_tag_type => "enum tag type of union must be comptime-known",
.enum_int_tag_type => "integer tag type of enum must be comptime-known",
.packed_struct_backing_int_type => "packed struct backing integer type must be comptime-known",
.packed_union_backing_int_type => "packed union backing integer type must be comptime-known",
.decl_name => "declaration name must be comptime-known",
.field_name => "field name must be comptime-known",
.tuple_field_index => "tuple field index must be comptime-known",

View file

@ -4922,24 +4922,14 @@ fn structDeclInner(
astgen.advanceSourceCursorToNode(node);
const backing_int_type_ref: Zir.Inst.Ref = ty: {
const backing_int_node = maybe_backing_int_node.unwrap() orelse break :ty .none;
if (layout != .@"packed") return astgen.failNode(
backing_int_node,
"non-packed struct does not support backing integer type",
.{},
);
break :ty try typeExpr(gz, scope, backing_int_node);
};
const decl_inst = try gz.reserveInstructionIndex();
if (container_decl.ast.members.len == 0 and backing_int_type_ref == .none) {
if (container_decl.ast.members.len == 0 and maybe_backing_int_node == .none) {
try gz.setStruct(decl_inst, .{
.src_node = node,
.name_strat = name_strat,
.layout = layout,
.backing_int_type = .none,
.backing_int_type_body_len = null,
.decls_len = 0,
.fields_len = 0,
.any_field_aligns = false,
@ -4993,6 +4983,22 @@ fn structDeclInner(
);
if (field_comptime_bits) |bits| @memset(bits.get(astgen), 0);
// Before any field bodies comes the backing int type, if specified.
const backing_int_type_body_len: ?u32 = if (maybe_backing_int_node.unwrap()) |backing_int_node| len: {
if (layout != .@"packed") return astgen.failNode(
backing_int_node,
"non-packed struct does not support backing integer type",
.{},
);
const type_ref = try typeExpr(&block_scope, &namespace.base, backing_int_node);
if (!block_scope.endsWithNoReturn()) {
_ = try block_scope.addBreak(.break_inline, decl_inst, type_ref);
}
const body_len = try scratch.appendBodyWithFixups(block_scope.instructionsSlice());
block_scope.instructions.items.len = block_scope.instructions_top;
break :len body_len;
} else null;
const old_hasher = astgen.src_hasher;
defer astgen.src_hasher = old_hasher;
astgen.src_hasher = .init(.{});
@ -5076,7 +5082,7 @@ fn structDeclInner(
.src_node = node,
.name_strat = name_strat,
.layout = layout,
.backing_int_type = backing_int_type_ref,
.backing_int_type_body_len = backing_int_type_body_len,
.decls_len = scan_result.decls_len,
.fields_len = scan_result.fields_len,
.any_field_aligns = scan_result.any_field_aligns,
@ -5220,11 +5226,6 @@ fn unionDeclInner(
astgen.advanceSourceCursorToNode(node);
const arg_type_ref: Zir.Inst.Ref = ref: {
const arg_node = opt_arg_node.unwrap() orelse break :ref .none;
break :ref try typeExpr(gz, scope, arg_node);
};
const decl_inst = try gz.reserveInstructionIndex();
var namespace: Scope.Namespace = .{
@ -5262,6 +5263,17 @@ fn unionDeclInner(
const field_align_body_lens = try scratch.addOptionalSlice(scan_result.any_field_aligns, scan_result.fields_len);
const field_value_body_lens = try scratch.addOptionalSlice(scan_result.any_field_values, scan_result.fields_len);
// Before any field bodies comes the tag/backing type, if specified.
const arg_type_body_len: ?u32 = if (opt_arg_node.unwrap()) |arg_node| len: {
const type_ref = try typeExpr(&block_scope, &namespace.base, arg_node);
if (!block_scope.endsWithNoReturn()) {
_ = try block_scope.addBreak(.break_inline, decl_inst, type_ref);
}
const body_len = try scratch.appendBodyWithFixups(block_scope.instructionsSlice());
block_scope.instructions.items.len = block_scope.instructions_top;
break :len body_len;
} else null;
const old_hasher = astgen.src_hasher;
defer astgen.src_hasher = old_hasher;
astgen.src_hasher = .init(.{});
@ -5358,7 +5370,7 @@ fn unionDeclInner(
.@"extern" => .@"extern",
.@"packed" => if (opt_arg_node != .none) .packed_explicit else .@"packed",
},
.arg_type = arg_type_ref,
.arg_type_body_len = arg_type_body_len,
.decls_len = scan_result.decls_len,
.fields_len = scan_result.fields_len,
.any_field_aligns = scan_result.any_field_aligns,
@ -5420,11 +5432,6 @@ fn containerDecl(
astgen.advanceSourceCursorToNode(node);
const tag_type_ref: Zir.Inst.Ref = ref: {
const arg_node = container_decl.ast.arg.unwrap() orelse break :ref .none;
break :ref try typeExpr(gz, scope, arg_node);
};
const decl_inst = try gz.reserveInstructionIndex();
var namespace: Scope.Namespace = .{
@ -5461,6 +5468,17 @@ fn containerDecl(
const field_names = try scratch.addSlice(fields_len);
const field_value_body_lens = try scratch.addOptionalSlice(scan_result.any_field_values, fields_len);
// Before any field bodies comes the tag type, if specified.
const tag_type_body_len: ?u32 = if (container_decl.ast.arg.unwrap()) |tag_type_node| len: {
const type_ref = try typeExpr(&block_scope, &namespace.base, tag_type_node);
if (!block_scope.endsWithNoReturn()) {
_ = try block_scope.addBreak(.break_inline, decl_inst, type_ref);
}
const body_len = try scratch.appendBodyWithFixups(block_scope.instructionsSlice());
block_scope.instructions.items.len = block_scope.instructions_top;
break :len body_len;
} else null;
const old_hasher = astgen.src_hasher;
defer astgen.src_hasher = old_hasher;
astgen.src_hasher = .init(.{});
@ -5508,7 +5526,7 @@ fn containerDecl(
field_names.get(astgen)[field_idx] = @intFromEnum(try astgen.identAsString(member.ast.main_token));
if (member.ast.value_expr.unwrap()) |value_node| {
if (tag_type_ref == .none) {
if (tag_type_body_len == null) {
return astgen.failNodeNotes(node, "explicitly valued enum missing integer tag type", .{}, &.{
try astgen.errNoteNode(value_node, "tag value specified here", .{}),
});
@ -5535,7 +5553,7 @@ fn containerDecl(
try gz.setEnum(decl_inst, .{
.src_node = node,
.name_strat = name_strat,
.tag_type = tag_type_ref,
.tag_type_body_len = tag_type_body_len,
.nonexhaustive = scan_result.has_underscore_field,
.decls_len = scan_result.decls_len,
.fields_len = fields_len,
@ -12406,7 +12424,7 @@ const GenZir = struct {
src_node: Ast.Node.Index,
name_strat: Zir.Inst.NameStrategy,
layout: std.builtin.Type.ContainerLayout,
backing_int_type: Zir.Inst.Ref,
backing_int_type_body_len: ?u32,
decls_len: u32,
fields_len: u32,
any_field_aligns: bool,
@ -12430,7 +12448,7 @@ const GenZir = struct {
const fields_hash_arr: [4]u32 = @bitCast(args.fields_hash);
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.StructDecl).@"struct".fields.len +
4 + // `captures_len`, `decls_len`, `fields_len`, `backing_int_type`
4 + // `captures_len`, `decls_len`, `fields_len`, `backing_int_type_body_len`
captures_len * 2 + // `capture`, `capture_name`
args.remaining.len);
@ -12446,7 +12464,7 @@ const GenZir = struct {
if (captures_len != 0) astgen.extra.appendAssumeCapacity(captures_len);
if (args.decls_len != 0) astgen.extra.appendAssumeCapacity(args.decls_len);
if (args.fields_len != 0) astgen.extra.appendAssumeCapacity(args.fields_len);
if (args.backing_int_type != .none) astgen.extra.appendAssumeCapacity(@intFromEnum(args.backing_int_type));
if (args.backing_int_type_body_len) |n| astgen.extra.appendAssumeCapacity(n);
astgen.extra.appendSliceAssumeCapacity(@ptrCast(args.captures));
astgen.extra.appendSliceAssumeCapacity(@ptrCast(args.capture_names));
astgen.extra.appendSliceAssumeCapacity(args.remaining);
@ -12461,7 +12479,7 @@ const GenZir = struct {
.has_fields_len = args.fields_len != 0,
.name_strategy = args.name_strat,
.layout = args.layout,
.has_backing_int_type = args.backing_int_type != .none,
.has_backing_int_type = args.backing_int_type_body_len != null,
.any_field_aligns = args.any_field_aligns,
.any_field_defaults = args.any_field_defaults,
.any_comptime_fields = args.any_comptime_fields,
@ -12475,7 +12493,7 @@ const GenZir = struct {
src_node: Ast.Node.Index,
name_strat: Zir.Inst.NameStrategy,
kind: Zir.Inst.UnionDecl.Kind,
arg_type: Zir.Inst.Ref,
arg_type_body_len: ?u32,
decls_len: u32,
fields_len: u32,
any_field_aligns: bool,
@ -12497,7 +12515,7 @@ const GenZir = struct {
const fields_hash_arr: [4]u32 = @bitCast(args.fields_hash);
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.UnionDecl).@"struct".fields.len +
4 + // `captures_len`, `decls_len`, `fields_len`, `backing_int_type`
4 + // `captures_len`, `decls_len`, `fields_len`, `arg_type_body_len`
captures_len * 2 + // `capture`, `capture_name`
args.remaining.len);
@ -12514,10 +12532,9 @@ const GenZir = struct {
if (args.decls_len != 0) astgen.extra.appendAssumeCapacity(args.decls_len);
if (args.fields_len != 0) astgen.extra.appendAssumeCapacity(args.fields_len);
if (args.kind.hasArgType()) {
assert(args.arg_type != .none);
astgen.extra.appendAssumeCapacity(@intFromEnum(args.arg_type));
astgen.extra.appendAssumeCapacity(args.arg_type_body_len.?);
} else {
assert(args.arg_type == .none);
assert(args.arg_type_body_len == null);
}
astgen.extra.appendSliceAssumeCapacity(@ptrCast(args.captures));
astgen.extra.appendSliceAssumeCapacity(@ptrCast(args.capture_names));
@ -12525,28 +12542,26 @@ const GenZir = struct {
astgen.instructions.set(@intFromEnum(inst), .{
.tag = .extended,
.data = .{
.extended = .{
.opcode = .union_decl,
.small = @bitCast(Zir.Inst.UnionDecl.Small{
.has_captures_len = captures_len != 0,
.has_decls_len = args.decls_len != 0,
.has_fields_len = args.fields_len != 0,
.name_strategy = args.name_strat,
.kind = args.kind,
.any_field_aligns = args.any_field_aligns,
.any_field_values = args.any_field_values,
}),
.operand = payload_index,
},
},
.data = .{ .extended = .{
.opcode = .union_decl,
.small = @bitCast(Zir.Inst.UnionDecl.Small{
.has_captures_len = captures_len != 0,
.has_decls_len = args.decls_len != 0,
.has_fields_len = args.fields_len != 0,
.name_strategy = args.name_strat,
.kind = args.kind,
.any_field_aligns = args.any_field_aligns,
.any_field_values = args.any_field_values,
}),
.operand = payload_index,
} },
});
}
fn setEnum(gz: *GenZir, inst: Zir.Inst.Index, args: struct {
src_node: Ast.Node.Index,
name_strat: Zir.Inst.NameStrategy,
tag_type: Zir.Inst.Ref,
tag_type_body_len: ?u32,
nonexhaustive: bool,
decls_len: u32,
fields_len: u32,
@ -12568,7 +12583,7 @@ const GenZir = struct {
const fields_hash_arr: [4]u32 = @bitCast(args.fields_hash);
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.EnumDecl).@"struct".fields.len +
4 + // `captures_len`, `decls_len`, `fields_len`, `tag_type`
4 + // `captures_len`, `decls_len`, `fields_len`, `tag_type_body_len`
captures_len * 2 + // `capture`, `capture_name`
args.remaining.len);
@ -12584,7 +12599,7 @@ const GenZir = struct {
if (captures_len != 0) astgen.extra.appendAssumeCapacity(captures_len);
if (args.decls_len != 0) astgen.extra.appendAssumeCapacity(args.decls_len);
if (args.fields_len != 0) astgen.extra.appendAssumeCapacity(args.fields_len);
if (args.tag_type != .none) astgen.extra.appendAssumeCapacity(@intFromEnum(args.tag_type));
if (args.tag_type_body_len) |n| astgen.extra.appendAssumeCapacity(n);
astgen.extra.appendSliceAssumeCapacity(@ptrCast(args.captures));
astgen.extra.appendSliceAssumeCapacity(@ptrCast(args.capture_names));
astgen.extra.appendSliceAssumeCapacity(args.remaining);
@ -12598,7 +12613,7 @@ const GenZir = struct {
.has_decls_len = args.decls_len != 0,
.has_fields_len = args.fields_len != 0,
.name_strategy = args.name_strat,
.has_tag_type = args.tag_type != .none,
.has_tag_type = args.tag_type_body_len != null,
.nonexhaustive = args.nonexhaustive,
.any_field_values = args.any_field_values,
}),

View file

@ -3465,7 +3465,7 @@ pub const Inst = struct {
/// 0. captures_len: u32 // if `has_captures_len`
/// 1. decls_len: u32, // if `has_decls_len`
/// 2. fields_len: u32, // if `has_fields_len`
/// 3. backing_int_type: Ref // if `has_backing_int`
/// 3. backing_int_body_len: u32 // if `has_backing_int_type`
/// 4. capture: Capture // for every `captures_len`
/// 5. capture_name: NullTerminatedString // for every `captures_len`
/// 6. decl: Index, // for every `decls_len`; points to a `declaration` instruction
@ -3475,7 +3475,8 @@ pub const Inst = struct {
/// 10. field_default_body_len: u32 // for every `fields_len` if `any_field_defaults`
/// 11. field_comptime_bits: u32 // one bit per `fields_len` if `any_comptime_fields`
/// // LSB is first field, minimum number of `u32` needed
/// 12. body_inst: Inst.Index // type body, then align body, then default body, for each field
/// 12. backing_int_body_inst: Inst.Index // for each `backing_int_body_len`
/// 13. body_inst: Inst.Index // type body, then align body, then default body, for each field
pub const StructDecl = struct {
// These fields should be concatenated and reinterpreted as a `std.zig.SrcHash`.
// This hash contains the source of all fields, and any specified attributes (`extern`, backing type, etc).
@ -3622,13 +3623,14 @@ pub const Inst = struct {
/// 0. captures_len: u32, // if has_captures_len
/// 1. decls_len: u32, // if has_decls_len
/// 2. fields_len: u32, // if has_fields_len
/// 3. tag_type: Ref, // if has_tag_type
/// 3. tag_type_body_len: u32, // if has_tag_type
/// 4. capture: Capture // for every `captures_len`
/// 5. capture_name: NullTerminatedString // for every `captures_len`
/// 6. decl: Index, // for every `decls_len`; points to a `declaration` instruction
/// 7. field_name: NullTerminatedString // for every `fields_len`
/// 8. field_value_body_len: u32 // for every `fields_len` if `any_field_values`
/// 9. body_inst: Inst.Index // value body for each field
/// 9. tag_type_body_inst: Inst.Index // for each `tag_type_body_len`
/// 10. body_inst: Inst.Index // value body for each field
pub const EnumDecl = struct {
// These fields should be concatenated and reinterpreted as a `std.zig.SrcHash`.
// This hash contains the source of all fields, and the backing type if specified.
@ -3656,7 +3658,7 @@ pub const Inst = struct {
/// 0. captures_len: u32 // if `has_captures_len`
/// 1. decls_len: u32, // if `has_decls_len`
/// 2. fields_len: u32, // if `has_fields_len`
/// 3. arg_type: Ref, // if `kind.hasArgType()`
/// 3. arg_type_body_len: u32, // if `kind.hasArgType()`
/// 4. capture: Capture // for every `captures_len`
/// 5. capture_name: NullTerminatedString // for every `captures_len`
/// 6. decl: Index, // for every `decls_len`; points to a `declaration` instruction
@ -3664,7 +3666,8 @@ pub const Inst = struct {
/// 8. field_type_body_len: u32 // for every `fields_len`
/// 9 . field_align_body_len: u32 // for every `fields_len` if `any_field_aligns`
/// 10. field_value_body_len: u32 // for every `fields_len` if `any_field_values`
/// 11. body_inst: Inst.Index // type body, then align body, then value body, for each field
/// 11. arg_type_body_inst: Inst.Index // for each `arg_type_body_len`
/// 12. body_inst: Inst.Index // type body, then align body, then value body, for each field
pub const UnionDecl = struct {
// These fields should be concatenated and reinterpreted as a `std.zig.SrcHash`.
// This hash contains the source of all fields, and any specified attributes (`extern` etc).
@ -5235,18 +5238,6 @@ pub fn assertTrackable(zir: Zir, inst_idx: Zir.Inst.Index) void {
}
}
/// MLUGG TODO: maybe delete these two?
pub fn typeCapturesLen(zir: Zir, type_decl: Inst.Index) u32 {
const inst = zir.instructions.get(@intFromEnum(type_decl));
assert(inst.tag == .extended);
return switch (inst.data.extended.opcode) {
.struct_decl => @intCast(zir.getStructDecl(type_decl).captures.len),
.union_decl => @intCast(zir.getUnionDecl(type_decl).captures.len),
.enum_decl => @intCast(zir.getEnumDecl(type_decl).captures.len),
.opaque_decl => @intCast(zir.getOpaqueDecl(type_decl).captures.len),
else => unreachable,
};
}
pub fn typeDecls(zir: Zir, type_decl: Inst.Index) []const Zir.Inst.Index {
const inst = zir.instructions.get(@intFromEnum(type_decl));
assert(inst.tag == .extended);
@ -5281,11 +5272,11 @@ pub fn getStructDecl(zir: *const Zir, struct_decl: Inst.Index) UnwrappedStructDe
extra_index += 1;
break :blk fields_len;
} else 0;
const backing_int_type: Inst.Ref = if (small.has_backing_int_type) ty: {
const ty = zir.extra[extra_index];
const backing_int_type_body_len: u32 = if (small.has_backing_int_type) len: {
const body_len = zir.extra[extra_index];
extra_index += 1;
break :ty @enumFromInt(ty);
} else .none;
break :len body_len;
} else 0;
const captures: []const Inst.Capture = @ptrCast(zir.extra[extra_index..][0..captures_len]);
extra_index += captures_len;
const capture_names: []const NullTerminatedString = @ptrCast(zir.extra[extra_index..][0..captures_len]);
@ -5312,6 +5303,11 @@ pub fn getStructDecl(zir: *const Zir, struct_decl: Inst.Index) UnwrappedStructDe
extra_index += bits_len;
break :bits bits;
} else null;
const backing_int_type_body: ?[]const Zir.Inst.Index = switch (backing_int_type_body_len) {
0 => null,
else => |n| zir.bodySlice(extra_index, n),
};
extra_index += backing_int_type_body_len;
const field_bodies_overlong: []const Inst.Index = @ptrCast(zir.extra[extra_index..]);
return .{
.src_line = extra.data.src_line,
@ -5321,7 +5317,7 @@ pub fn getStructDecl(zir: *const Zir, struct_decl: Inst.Index) UnwrappedStructDe
.capture_names = capture_names,
.decls = decls,
.layout = small.layout,
.backing_int_type = backing_int_type,
.backing_int_type_body = backing_int_type_body,
.field_names = field_names,
.field_type_body_lens = field_type_body_lens,
.field_align_body_lens = field_align_body_lens,
@ -5341,7 +5337,7 @@ pub const UnwrappedStructDecl = struct {
decls: []const Inst.Index,
layout: std.builtin.Type.ContainerLayout,
backing_int_type: Inst.Ref,
backing_int_type_body: ?[]const Inst.Index,
field_names: []const NullTerminatedString,
field_type_body_lens: []const u32,
@ -5427,11 +5423,11 @@ pub fn getUnionDecl(zir: *const Zir, union_decl: Inst.Index) UnwrappedUnionDecl
extra_index += 1;
break :blk fields_len;
} else 0;
const arg_type: Inst.Ref = if (small.kind.hasArgType()) ty: {
const ty = zir.extra[extra_index];
const arg_type_body_len: u32 = if (small.kind.hasArgType()) len: {
const body_len = zir.extra[extra_index];
extra_index += 1;
break :ty @enumFromInt(ty);
} else .none;
break :len body_len;
} else 0;
const captures: []const Inst.Capture = @ptrCast(zir.extra[extra_index..][0..captures_len]);
extra_index += captures_len;
const capture_names: []const NullTerminatedString = @ptrCast(zir.extra[extra_index..][0..captures_len]);
@ -5452,6 +5448,11 @@ pub fn getUnionDecl(zir: *const Zir, union_decl: Inst.Index) UnwrappedUnionDecl
extra_index += fields_len;
break :lens @ptrCast(lens);
} else null;
const arg_type_body: ?[]const Zir.Inst.Index = switch (arg_type_body_len) {
0 => null,
else => |n| zir.bodySlice(extra_index, n),
};
extra_index += arg_type_body_len;
const field_bodies_overlong: []const Inst.Index = @ptrCast(zir.extra[extra_index..]);
return .{
.src_line = extra.data.src_line,
@ -5461,7 +5462,7 @@ pub fn getUnionDecl(zir: *const Zir, union_decl: Inst.Index) UnwrappedUnionDecl
.capture_names = capture_names,
.decls = decls,
.kind = small.kind,
.arg_type = arg_type,
.arg_type_body = arg_type_body,
.field_names = field_names,
.field_type_body_lens = field_type_body_lens,
.field_align_body_lens = field_align_body_lens,
@ -5480,7 +5481,7 @@ pub const UnwrappedUnionDecl = struct {
decls: []const Inst.Index,
kind: Inst.UnionDecl.Kind,
arg_type: Inst.Ref,
arg_type_body: ?[]const Inst.Index,
field_names: []const NullTerminatedString,
field_type_body_lens: []const u32,
@ -5556,11 +5557,11 @@ pub fn getEnumDecl(zir: *const Zir, enum_decl: Inst.Index) UnwrappedEnumDecl {
extra_index += 1;
break :blk fields_len;
} else 0;
const tag_type: Inst.Ref = if (small.has_tag_type) ty: {
const ty = zir.extra[extra_index];
const tag_type_body_len: u32 = if (small.has_tag_type) len: {
const body_len = zir.extra[extra_index];
extra_index += 1;
break :ty @enumFromInt(ty);
} else .none;
break :len body_len;
} else 0;
const captures: []const Inst.Capture = @ptrCast(zir.extra[extra_index..][0..captures_len]);
extra_index += captures_len;
const capture_names: []const NullTerminatedString = @ptrCast(zir.extra[extra_index..][0..captures_len]);
@ -5574,6 +5575,11 @@ pub fn getEnumDecl(zir: *const Zir, enum_decl: Inst.Index) UnwrappedEnumDecl {
extra_index += fields_len;
break :lens @ptrCast(lens);
} else null;
const tag_type_body: ?[]const Zir.Inst.Index = switch (tag_type_body_len) {
0 => null,
else => |n| zir.bodySlice(extra_index, n),
};
extra_index += tag_type_body_len;
const field_bodies_overlong: []const Inst.Index = @ptrCast(zir.extra[extra_index..]);
return .{
.src_line = extra.data.src_line,
@ -5582,7 +5588,7 @@ pub fn getEnumDecl(zir: *const Zir, enum_decl: Inst.Index) UnwrappedEnumDecl {
.captures = captures,
.capture_names = capture_names,
.decls = decls,
.tag_type = tag_type,
.tag_type_body = tag_type_body,
.nonexhaustive = small.nonexhaustive,
.field_names = field_names,
.field_value_body_lens = field_value_body_lens,
@ -5599,7 +5605,7 @@ pub const UnwrappedEnumDecl = struct {
decls: []const Inst.Index,
tag_type: Inst.Ref,
tag_type_body: ?[]const Inst.Index,
nonexhaustive: bool,
field_names: []const NullTerminatedString,

View file

@ -3713,7 +3713,7 @@ const Header = extern struct {
nav_val_deps_len: u32,
nav_ty_deps_len: u32,
type_layout_deps_len: u32,
type_inits_deps_len: u32,
struct_defaults_deps_len: u32,
func_ies_deps_len: u32,
zon_file_deps_len: u32,
embed_file_deps_len: u32,
@ -3763,7 +3763,7 @@ pub fn saveState(comp: *Compilation) !void {
.nav_val_deps_len = @intCast(ip.nav_val_deps.count()),
.nav_ty_deps_len = @intCast(ip.nav_ty_deps.count()),
.type_layout_deps_len = @intCast(ip.type_layout_deps.count()),
.type_inits_deps_len = @intCast(ip.type_inits_deps.count()),
.struct_defaults_deps_len = @intCast(ip.struct_defaults_deps.count()),
.func_ies_deps_len = @intCast(ip.func_ies_deps.count()),
.zon_file_deps_len = @intCast(ip.zon_file_deps.count()),
.embed_file_deps_len = @intCast(ip.embed_file_deps.count()),
@ -3800,8 +3800,8 @@ pub fn saveState(comp: *Compilation) !void {
addBuf(&bufs, @ptrCast(ip.nav_ty_deps.values()));
addBuf(&bufs, @ptrCast(ip.type_layout_deps.keys()));
addBuf(&bufs, @ptrCast(ip.type_layout_deps.values()));
addBuf(&bufs, @ptrCast(ip.type_inits_deps.keys()));
addBuf(&bufs, @ptrCast(ip.type_inits_deps.values()));
addBuf(&bufs, @ptrCast(ip.struct_defaults_deps.keys()));
addBuf(&bufs, @ptrCast(ip.struct_defaults_deps.values()));
addBuf(&bufs, @ptrCast(ip.func_ies_deps.keys()));
addBuf(&bufs, @ptrCast(ip.func_ies_deps.values()));
addBuf(&bufs, @ptrCast(ip.zon_file_deps.keys()));
@ -4481,7 +4481,7 @@ pub fn addModuleErrorMsg(
const root_name: ?[]const u8 = switch (ref.referencer.unwrap()) {
.@"comptime" => "comptime",
.nav_val, .nav_ty => |nav| ip.getNav(nav).name.toSlice(ip),
.type_layout, .type_inits => |ty| Type.fromInterned(ty).containerTypeName(ip).toSlice(ip),
.type_layout, .struct_defaults => |ty| Type.fromInterned(ty).containerTypeName(ip).toSlice(ip),
.func => |f| ip.getNav(zcu.funcInfo(f).owner_nav).name.toSlice(ip),
.memoized_state => null,
};
@ -5251,7 +5251,7 @@ fn processOneJob(tid: Zcu.PerThread.Id, comp: *Compilation, job: Job) JobError!v
.nav_ty => |nav| pt.ensureNavTypeUpToDate(nav),
.nav_val => |nav| pt.ensureNavValUpToDate(nav),
.type_layout => |ty| pt.ensureTypeLayoutUpToDate(.fromInterned(ty)),
.type_inits => |ty| pt.ensureTypeInitsUpToDate(.fromInterned(ty)),
.struct_defaults => |ty| pt.ensureStructDefaultsUpToDate(.fromInterned(ty)),
.memoized_state => |stage| pt.ensureMemoizedStateUpToDate(stage),
.func => |func| pt.ensureFuncBodyUpToDate(func),
};

View file

@ -307,7 +307,7 @@ fn handleCommand(zcu: *Zcu, w: *Io.Writer, cmd_str: []const u8, arg_str: []const
switch (dependee) {
.src_hash, .namespace, .namespace_name, .zon_file, .embed_file => try w.print("{f}", .{zcu.fmtDependee(dependee)}),
.nav_val, .nav_ty => |nav| try w.print("{t} {d}", .{ dependee, @intFromEnum(nav) }),
.type_layout, .type_inits, .func_ies => |ip_index| try w.print("{t} {d}", .{ dependee, @intFromEnum(ip_index) }),
.type_layout, .struct_defaults, .func_ies => |ip_index| try w.print("{t} {d}", .{ dependee, @intFromEnum(ip_index) }),
.memoized_state => |stage| try w.print("memoized_state {s}", .{@tagName(stage)}),
}
try w.writeByte('\n');
@ -374,8 +374,8 @@ fn parseAnalUnit(str: []const u8) ?AnalUnit {
return .wrap(.{ .nav_ty = @enumFromInt(parseIndex(idx_str) orelse return null) });
} else if (std.mem.eql(u8, kind, "type_layout")) {
return .wrap(.{ .type_layout = @enumFromInt(parseIndex(idx_str) orelse return null) });
} else if (std.mem.eql(u8, kind, "type_inits")) {
return .wrap(.{ .type_inits = @enumFromInt(parseIndex(idx_str) orelse return null) });
} else if (std.mem.eql(u8, kind, "struct_defaults")) {
return .wrap(.{ .struct_defaults = @enumFromInt(parseIndex(idx_str) orelse return null) });
} else if (std.mem.eql(u8, kind, "func")) {
return .wrap(.{ .func = @enumFromInt(parseIndex(idx_str) orelse return null) });
} else if (std.mem.eql(u8, kind, "memoized_state")) {

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -769,7 +769,7 @@ fn lowerStruct(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool
const ip = &pt.zcu.intern_pool;
try self.sema.ensureLayoutResolved(res_ty);
try self.sema.ensureFieldInitsResolved(res_ty);
try self.sema.ensureStructDefaultsResolved(res_ty);
const struct_info = self.sema.pt.zcu.typeToStruct(res_ty).?;
const fields: @FieldType(Zoir.Node, "struct_literal") = switch (node.get(self.file.zoir.?)) {

File diff suppressed because it is too large Load diff

View file

@ -437,6 +437,7 @@ pub fn toValue(self: Type) Value {
/// - an enum with an explicit tag type has the ABI size of the integer tag type,
/// making it one-possible-value only if the integer tag type has 0 bits.
pub fn hasRuntimeBits(ty: Type, zcu: *const Zcu) bool {
ty.assertHasLayout(zcu);
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.int_type => |int_type| int_type.bits != 0,
@ -499,14 +500,18 @@ pub fn hasRuntimeBits(ty: Type, zcu: *const Zcu) bool {
.generic_poison => unreachable,
},
.struct_type => {
// TODO MLUGG: memoize this state when resolving struct?
const struct_obj = ip.loadStructType(ty.toIntern());
for (struct_obj.field_types.get(ip), 0..) |field_ty_ip, field_idx| {
if (struct_obj.field_is_comptime_bits.get(ip, field_idx)) continue;
const field_ty: Type = .fromInterned(field_ty_ip);
if (field_ty.hasRuntimeBits(zcu)) return true;
switch (struct_obj.layout) {
.auto, .@"extern" => return struct_obj.has_runtime_bits,
.@"packed" => return Type.fromInterned(struct_obj.packed_backing_int_type).hasRuntimeBits(zcu),
}
},
.union_type => {
const union_obj = ip.loadUnionType(ty.toIntern());
switch (union_obj.layout) {
.auto, .@"extern" => return union_obj.has_runtime_bits,
.@"packed" => return Type.fromInterned(union_obj.packed_backing_int_type).hasRuntimeBits(zcu),
}
return false;
},
.tuple_type => |tuple| {
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
@ -515,23 +520,8 @@ pub fn hasRuntimeBits(ty: Type, zcu: *const Zcu) bool {
}
return false;
},
.union_type => {
// TODO MLUGG: memoize this state when resolving union?
const union_obj = ip.loadUnionType(ty.toIntern());
switch (union_obj.runtime_tag) {
.none => {},
.safety, .tagged => {
if (Type.fromInterned(union_obj.enum_tag_type).hasRuntimeBits(zcu)) return true;
},
}
for (union_obj.field_types.get(ip)) |field_ty_ip| {
const field_ty: Type = .fromInterned(field_ty_ip);
if (field_ty.hasRuntimeBits(zcu)) return true;
}
return false;
},
// MLUGG TODO: i think this can go away and the assert move to the defer?
// MLUGG TODO: this answer was already here but... does it actually make sense?
.opaque_type => true,
.enum_type => Type.fromInterned(ip.loadEnumType(ty.toIntern()).int_tag_type).hasRuntimeBits(zcu),
@ -618,17 +608,18 @@ pub fn hasWellDefinedLayout(ty: Type, zcu: *const Zcu) bool {
.generic_poison,
=> false,
},
.struct_type => ip.loadStructType(ty.toIntern()).layout != .auto,
.union_type => {
const union_obj = ip.loadUnionType(ty.toIntern());
if (union_obj.layout == .auto) return false;
return switch (union_obj.runtime_tag) {
.none => true,
.tagged => false,
.safety => unreachable, // well-defined layout can't have a safety tag
};
.struct_type => switch (ip.loadStructType(ty.toIntern()).layout) {
.auto => false,
.@"extern", .@"packed" => true,
},
.union_type => switch (ip.loadUnionType(ty.toIntern()).layout) {
.auto => false,
.@"extern", .@"packed" => true,
},
.enum_type => switch (ip.loadEnumType(ty.toIntern()).int_tag_mode) {
.explicit => true,
.auto => false,
},
.enum_type => ip.loadEnumType(ty.toIntern()).int_tag_is_explicit,
// values, not types
.undef,
@ -664,28 +655,29 @@ pub fn fnHasRuntimeBits(fn_ty: Type, zcu: *Zcu) bool {
if (param_ty == .generic_poison_type) return false;
if (Type.fromInterned(param_ty).comptimeOnly(zcu)) return false;
}
const ret_ty: Type = .fromInterned(fn_info.return_type);
if (ret_ty.toIntern() == .generic_poison_type) {
return false;
}
if (ret_ty.zigTypeTag(zcu) == .error_union and
ret_ty.errorUnionPayload(zcu).toIntern() == .generic_poison_type)
{
return false;
}
if (fn_info.return_type == .generic_poison_type) return false;
if (Type.fromInterned(fn_info.return_type).comptimeOnly(zcu)) return false;
if (fn_info.cc == .@"inline") return false;
return true;
}
pub fn isFnOrHasRuntimeBits(ty: Type, zcu: *Zcu) bool {
/// Like `hasRuntimeBits`, but also returns `true` for runtime functions.
pub fn isRuntimeFnOrHasRuntimeBits(ty: Type, zcu: *Zcu) bool {
switch (ty.zigTypeTag(zcu)) {
.@"fn" => return ty.fnHasRuntimeBits(zcu),
else => return ty.hasRuntimeBits(zcu),
}
}
/// Same as `isFnOrHasRuntimeBits` but comptime-only types may return a false positive.
/// MLUGG TODO: this function is a bit silly now...
pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type, zcu: *Zcu) bool {
return switch (ty.zigTypeTag(zcu)) {
.@"fn" => true,
else => return ty.hasRuntimeBits(zcu),
};
}
pub fn isNoReturn(ty: Type, zcu: *const Zcu) bool {
return zcu.intern_pool.isNoReturn(ty.toIntern());
}
@ -711,7 +703,6 @@ pub fn ptrAddressSpace(ty: Type, zcu: *const Zcu) std.builtin.AddressSpace {
}
/// Never returns `none`. Asserts that all necessary type resolution is already done.
/// MLUGG TODO: check that it really does never return `.none`
pub fn abiAlignment(ty: Type, zcu: *const Zcu) Alignment {
const ip = &zcu.intern_pool;
const target = zcu.getTarget();
@ -810,7 +801,7 @@ pub fn abiAlignment(ty: Type, zcu: *const Zcu) Alignment {
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
if (val != .none) continue; // comptime field
const field_align = Type.fromInterned(field_ty).abiAlignment(zcu);
big_align = big_align.max(field_align);
big_align = big_align.maxStrict(field_align);
}
return big_align;
},
@ -818,14 +809,20 @@ pub fn abiAlignment(ty: Type, zcu: *const Zcu) Alignment {
const struct_obj = ip.loadStructType(ty.toIntern());
switch (struct_obj.layout) {
.@"packed" => return Type.fromInterned(struct_obj.packed_backing_int_type).abiAlignment(zcu),
.auto, .@"extern" => return struct_obj.alignment,
.auto, .@"extern" => {
assert(struct_obj.alignment != .none);
return struct_obj.alignment;
},
}
},
.union_type => {
const union_obj = ip.loadUnionType(ty.toIntern());
switch (union_obj.layout) {
.@"packed" => return Type.fromInterned(union_obj.packed_backing_int_type).abiAlignment(zcu),
.auto, .@"extern" => return getUnionLayout(union_obj, zcu).abi_align,
.auto, .@"extern" => {
assert(union_obj.alignment != .none);
return union_obj.alignment;
},
}
},
.enum_type => Type.fromInterned(ip.loadEnumType(ty.toIntern()).int_tag_type).abiAlignment(zcu),
@ -1277,38 +1274,17 @@ pub fn nullablePtrElem(ty: Type, zcu: *const Zcu) Type {
}
}
/// Given that `ty` is an indexable pointer, returns its element type. Specifically:
/// * for `*[n]T`, returns `T`
/// * for `*@Vector(n, T)`, returns `T`
/// * for `[]T`, returns `T`
/// * for `[*]T`, returns `T`
/// * for `[*c]T`, returns `T`
/// Asserts that `ty` is an indexable type, and returns its element type. Tuples (and pointers to
/// tuples) are not supported because they do not have a single element type.
///
/// Tuples are not supported because they do not have a single element type.
///
/// MLUGG TODO: should i even have this one? it's a subset of indexableElem
pub fn indexablePtrElem(ty: Type, zcu: *const Zcu) Type {
    const ip = &zcu.intern_pool;
    // Asserts via the key lookup that `ty` is a pointer type.
    const ptr_type = ip.indexToKey(ty.toIntern()).ptr_type;
    switch (ptr_type.flags.size) {
        // Many-item, slice, and C pointers index their child type directly.
        .many, .slice, .c => return .fromInterned(ptr_type.child),
        // A single-item pointer is indexable only when it points at an array
        // or vector; its element type is that aggregate's child.
        .one => switch (ip.indexToKey(ptr_type.child)) {
            inline .array_type, .vector_type => |arr| return .fromInterned(arr.child),
            else => unreachable, // asserted: `ty` is an indexable pointer
        },
    }
}
/// Given that `ty` is an indexable type, returns its element type. Specifically:
/// * for `[n]T`, returns `T`
/// * for `@Vector(n, T)`, returns `T`
/// * for `*[n]T`, returns `T`
/// * for `*@Vector(n, T)`, returns `T`
/// * for `[]T`, returns `T`
/// * for `[*]T`, returns `T`
/// * for `[*c]T`, returns `T`
///
/// Tuples are not supported because they do not have a single element type.
/// Returns `T` for each of the following types:
/// * `[n]T`
/// * `@Vector(n, T)`
/// * `*[n]T`
/// * `*@Vector(n, T)`
/// * `[]T`
/// * `[*]T`
/// * `[*c]T`
pub fn indexableElem(ty: Type, zcu: *const Zcu) Type {
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
@ -1348,6 +1324,7 @@ pub fn optionalChild(ty: Type, zcu: *const Zcu) Type {
/// Returns the tag type of a union, if the type is a union and it has a tag type.
/// Otherwise, returns `null`.
pub fn unionTagType(ty: Type, zcu: *const Zcu) ?Type {
assertHasLayout(ty, zcu);
const ip = &zcu.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
.union_type => {},
@ -1363,6 +1340,7 @@ pub fn unionTagType(ty: Type, zcu: *const Zcu) ?Type {
/// Same as `unionTagType` but includes safety tag.
/// Codegen should use this version.
pub fn unionTagTypeSafety(ty: Type, zcu: *const Zcu) ?Type {
assertHasLayout(ty, zcu);
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.union_type => {
@ -1377,11 +1355,13 @@ pub fn unionTagTypeSafety(ty: Type, zcu: *const Zcu) ?Type {
/// Asserts the type is a union; returns the tag type, even if the tag will
/// not be stored at runtime.
pub fn unionTagTypeHypothetical(ty: Type, zcu: *const Zcu) Type {
assertHasLayout(ty, zcu);
const union_obj = zcu.typeToUnion(ty).?;
return Type.fromInterned(union_obj.enum_tag_type);
}
pub fn unionFieldType(ty: Type, enum_tag: Value, zcu: *const Zcu) ?Type {
assertHasLayout(ty, zcu);
const ip = &zcu.intern_pool;
const union_obj = zcu.typeToUnion(ty).?;
const union_fields = union_obj.field_types.get(ip);
@ -1390,17 +1370,20 @@ pub fn unionFieldType(ty: Type, enum_tag: Value, zcu: *const Zcu) ?Type {
}
pub fn unionFieldTypeByIndex(ty: Type, index: usize, zcu: *const Zcu) Type {
assertHasLayout(ty, zcu);
const ip = &zcu.intern_pool;
const union_obj = zcu.typeToUnion(ty).?;
return Type.fromInterned(union_obj.field_types.get(ip)[index]);
}
pub fn unionTagFieldIndex(ty: Type, enum_tag: Value, zcu: *const Zcu) ?u32 {
assertHasLayout(ty, zcu);
const union_obj = zcu.typeToUnion(ty).?;
return zcu.unionTagFieldIndex(union_obj, enum_tag);
}
pub fn unionHasAllZeroBitFieldTypes(ty: Type, zcu: *Zcu) bool {
assertHasLayout(ty, zcu);
const ip = &zcu.intern_pool;
const union_obj = zcu.typeToUnion(ty).?;
for (union_obj.field_types.get(ip)) |field_ty| {
@ -1413,14 +1396,17 @@ pub fn unionHasAllZeroBitFieldTypes(ty: Type, zcu: *Zcu) bool {
/// Asserts the type is either an extern or packed union.
pub fn unionBackingType(ty: Type, pt: Zcu.PerThread) !Type {
const zcu = pt.zcu;
return switch (ty.containerLayout(zcu)) {
assertHasLayout(ty, zcu);
const loaded_union = zcu.intern_pool.loadUnionType(ty.toIntern());
return switch (loaded_union.layout) {
.@"extern" => try pt.arrayType(.{ .len = ty.abiSize(zcu), .child = .u8_type }),
.@"packed" => try pt.intType(.unsigned, @intCast(ty.bitSize(zcu))),
.@"packed" => .fromInterned(loaded_union.packed_backing_int_type),
.auto => unreachable,
};
}
/// Asserts that `ty` is a union whose layout is resolved and up-to-date,
/// then computes its layout via `getUnionLayout`.
pub fn unionGetLayout(ty: Type, zcu: *const Zcu) Zcu.UnionLayout {
    // Layout resolution must already have happened for this type.
    assertHasLayout(ty, zcu);
    const union_obj = zcu.intern_pool.loadUnionType(ty.toIntern());
    return Type.getUnionLayout(union_obj, zcu);
}
@ -1865,11 +1851,8 @@ pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value {
for (field_vals, 0..) |*field_val, i_usize| {
const i: u32 = @intCast(i_usize);
if (struct_obj.field_is_comptime_bits.get(ip, i)) {
// MLUGG TODO: this is kinda a problem... we don't necessarily know the opv field vals!
// for now i'm just not letting structs with comptime fields be opv :)
if (true) return null;
assertHasInits(ty, zcu);
field_val.* = struct_obj.field_defaults.get(ip)[i];
assert(field_val.* != .none);
continue;
}
const field_ty = Type.fromInterned(struct_obj.field_types.get(ip)[i]);
@ -2257,19 +2240,23 @@ pub fn errorSetNames(ty: Type, zcu: *const Zcu) InternPool.NullTerminatedString.
}
pub fn enumFields(ty: Type, zcu: *const Zcu) InternPool.NullTerminatedString.Slice {
assertHasLayout(ty, zcu);
return zcu.intern_pool.loadEnumType(ty.toIntern()).field_names;
}
pub fn enumFieldCount(ty: Type, zcu: *const Zcu) usize {
assertHasLayout(ty, zcu);
return zcu.intern_pool.loadEnumType(ty.toIntern()).field_names.len;
}
pub fn enumFieldName(ty: Type, field_index: usize, zcu: *const Zcu) InternPool.NullTerminatedString {
assertHasLayout(ty, zcu);
const ip = &zcu.intern_pool;
return ip.loadEnumType(ty.toIntern()).field_names.get(ip)[field_index];
}
pub fn enumFieldIndex(ty: Type, field_name: InternPool.NullTerminatedString, zcu: *const Zcu) ?u32 {
assertHasLayout(ty, zcu);
const ip = &zcu.intern_pool;
const enum_type = ip.loadEnumType(ty.toIntern());
return enum_type.nameIndex(ip, field_name);
@ -2279,6 +2266,7 @@ pub fn enumFieldIndex(ty: Type, field_name: InternPool.NullTerminatedString, zcu
/// an integer which represents the enum value. Returns the field index in
/// declaration order, or `null` if `enum_tag` does not match any field.
pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, zcu: *const Zcu) ?u32 {
assertHasLayout(ty, zcu);
const ip = &zcu.intern_pool;
const enum_type = ip.loadEnumType(ty.toIntern());
const int_tag = switch (ip.indexToKey(enum_tag.toIntern())) {
@ -2293,28 +2281,40 @@ pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, zcu: *const Zcu) ?u32 {
/// Returns none in the case of a tuple which uses the integer index as the field name.
pub fn structFieldName(ty: Type, index: usize, zcu: *const Zcu) InternPool.OptionalNullTerminatedString {
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => ip.loadStructType(ty.toIntern()).field_names.get(ip)[index].toOptional(),
.tuple_type => .none,
switch (ip.indexToKey(ty.toIntern())) {
.struct_type => {
assertHasLayout(ty, zcu);
return ip.loadStructType(ty.toIntern()).field_names.get(ip)[index].toOptional();
},
.tuple_type => return .none,
else => unreachable,
};
}
}
pub fn structFieldCount(ty: Type, zcu: *const Zcu) u32 {
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => ip.loadStructType(ty.toIntern()).field_types.len,
.tuple_type => |tuple| tuple.types.len,
switch (ip.indexToKey(ty.toIntern())) {
.struct_type => {
assertHasLayout(ty, zcu);
return ip.loadStructType(ty.toIntern()).field_types.len;
},
.tuple_type => |tuple| return tuple.types.len,
else => unreachable,
};
}
}
/// Returns the field type. Supports structs and unions.
pub fn fieldType(ty: Type, index: usize, zcu: *const Zcu) Type {
const ip = &zcu.intern_pool;
const types = switch (ip.indexToKey(ty.toIntern())) {
.struct_type => ip.loadStructType(ty.toIntern()).field_types,
.union_type => ip.loadUnionType(ty.toIntern()).field_types,
.struct_type => types: {
assertHasLayout(ty, zcu);
break :types ip.loadStructType(ty.toIntern()).field_types;
},
.union_type => types: {
assertHasLayout(ty, zcu);
break :types ip.loadUnionType(ty.toIntern()).field_types;
},
.tuple_type => |tuple| tuple.types,
else => unreachable,
};
@ -2335,11 +2335,13 @@ pub fn resolvedFieldAlignment(ty: Type, index: usize, zcu: *const Zcu) Alignment
return switch (ip.indexToKey(ty.toIntern())) {
.tuple_type => |tuple| Type.fromInterned(tuple.types.get(ip)[index]).abiAlignment(zcu),
.struct_type => {
assertHasLayout(ty, zcu);
const struct_obj = ip.loadStructType(ty.toIntern());
const field_ty: Type = .fromInterned(struct_obj.field_types.get(ip)[index]);
return field_ty.defaultStructFieldAlignment(struct_obj.layout, zcu);
},
.union_type => {
assertHasLayout(ty, zcu);
const union_obj = ip.loadUnionType(ty.toIntern());
const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[index]);
return field_ty.abiAlignment(zcu);
@ -2353,12 +2355,14 @@ pub fn explicitFieldAlignment(ty: Type, index: usize, zcu: *const Zcu) Alignment
return switch (ip.indexToKey(ty.toIntern())) {
.tuple_type => .none,
.struct_type => {
assertHasLayout(ty, zcu);
const struct_obj = ip.loadStructType(ty.toIntern());
assert(struct_obj.layout != .@"packed");
if (struct_obj.field_aligns.len == 0) return .none;
return struct_obj.field_aligns.get(ip)[index];
},
.union_type => {
assertHasLayout(ty, zcu);
const union_obj = ip.loadUnionType(ty.toIntern());
assert(union_obj.layout != .@"packed");
if (union_obj.field_aligns.len == 0) return .none;
@ -2413,7 +2417,6 @@ pub fn structFieldValueComptime(ty: Type, pt: Zcu.PerThread, index: usize) !?Val
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
if (struct_type.field_is_comptime_bits.get(ip, index)) {
assertHasInits(ty, zcu);
return .fromInterned(struct_type.field_defaults.get(ip)[index]);
} else {
return Type.fromInterned(struct_type.field_types.get(ip)[index]).onePossibleValue(pt);
@ -2433,11 +2436,14 @@ pub fn structFieldValueComptime(ty: Type, pt: Zcu.PerThread, index: usize) !?Val
pub fn structFieldIsComptime(ty: Type, index: usize, zcu: *const Zcu) bool {
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => ip.loadStructType(ty.toIntern()).field_is_comptime_bits.get(ip, index),
.tuple_type => |tuple| tuple.values.get(ip)[index] != .none,
switch (ip.indexToKey(ty.toIntern())) {
.struct_type => {
assertHasLayout(ty, zcu);
return ip.loadStructType(ty.toIntern()).field_is_comptime_bits.get(ip, index);
},
.tuple_type => |tuple| return tuple.values.get(ip)[index] != .none,
else => unreachable,
};
}
}
pub const FieldOffset = struct {
@ -2850,8 +2856,79 @@ pub fn isNullFromType(ty: Type, zcu: *const Zcu) ?bool {
return null;
}
/// Returns true if `ty` is allowed in packed types.
pub fn packable(ty: Type, zcu: *const Zcu) bool {
/// Why a type may not participate in a packed struct/union.
/// Returned by `unpackable`; some variants carry the offending type for
/// use in error reporting.
pub const UnpackableReason = union(enum) {
    /// The type is comptime-only, so it has no runtime bit representation.
    comptime_only,
    /// A pointer, or a pointer-like optional.
    pointer,
    /// An enum whose integer tag type was inferred rather than explicit.
    enum_inferred_int_tag: Type,
    /// A struct whose layout is `auto` or `extern` rather than `packed`.
    non_packed_struct: Type,
    /// A union whose layout is `auto` or `extern` rather than `packed`.
    non_packed_union: Type,
    /// Any other disallowed type (e.g. array, vector, error set, frame).
    other,
};

/// Returns `null` iff `ty` is allowed in packed types.
pub fn unpackable(ty: Type, zcu: *const Zcu) ?UnpackableReason {
    return switch (ty.zigTypeTag(zcu)) {
        // These have a well-defined bit layout and are packable.
        .void,
        .bool,
        .float,
        .int,
        => null,
        // Comptime-only types cannot appear in a runtime bit layout.
        .type,
        .comptime_float,
        .comptime_int,
        .enum_literal,
        .undefined,
        .null,
        => .comptime_only,
        .noreturn,
        .@"opaque",
        .error_union,
        .error_set,
        .frame,
        .@"anyframe",
        .@"fn",
        .array,
        .vector,
        => .other,
        // Pointer-like optionals are reported as pointers; other optionals
        // fall under the generic reason.
        .optional => if (ty.isPtrLikeOptional(zcu))
            .pointer
        else
            .other,
        .pointer => .pointer,
        // An enum is packable only when its integer tag type was explicit.
        .@"enum" => switch (zcu.intern_pool.loadEnumType(ty.toIntern()).int_tag_mode) {
            .explicit => null,
            .auto => .{ .enum_inferred_int_tag = ty },
        },
        // Nested aggregates must themselves be `packed`.
        .@"struct" => switch (ty.containerLayout(zcu)) {
            .@"packed" => null,
            .auto, .@"extern" => .{ .non_packed_struct = ty },
        },
        .@"union" => switch (ty.containerLayout(zcu)) {
            .@"packed" => null,
            .auto, .@"extern" => .{ .non_packed_union = ty },
        },
    };
}
/// Where a type appears when checking whether it may be used in an
/// `extern` context; passed to `validateExtern`, whose rules differ per
/// position (e.g. `void` is fine as a return type but not a parameter).
pub const ExternPosition = enum {
    /// Function return type.
    ret_ty,
    /// Function parameter type.
    param_ty,
    /// Field of a union.
    union_field,
    /// Field of a struct.
    struct_field,
    /// Element of an array or vector.
    element,
    /// Any other position.
    other,
};
/// Returns true if `ty` is allowed in extern types.
/// Does not require `ty` to be resolved in any way.
/// Keep in sync with `Sema.explainWhyTypeIsNotExtern`.
pub fn validateExtern(ty: Type, position: ExternPosition, zcu: *const Zcu) bool {
return switch (ty.zigTypeTag(zcu)) {
.type,
.comptime_float,
@ -2862,22 +2939,83 @@ pub fn packable(ty: Type, zcu: *const Zcu) bool {
.error_union,
.error_set,
.frame,
.noreturn,
.@"opaque",
.@"anyframe",
.@"fn",
.array,
=> false,
.optional => return ty.isPtrLikeOptional(zcu),
.void,
.void => switch (position) {
.ret_ty,
.union_field,
.struct_field,
.element,
=> true,
.param_ty,
.other,
=> false,
},
.noreturn => position == .ret_ty,
.@"opaque",
.bool,
.float,
.int,
.vector,
.@"anyframe",
=> true,
.@"enum" => zcu.intern_pool.loadEnumType(ty.toIntern()).int_tag_is_explicit,
.pointer => !ty.isSlice(zcu),
.@"struct", .@"union" => ty.containerLayout(zcu) == .@"packed",
.pointer => {
if (ty.isSlice(zcu)) return false;
const child_ty = ty.childType(zcu);
if (child_ty.zigTypeTag(zcu) == .@"fn") {
return ty.isConstPtr(zcu) and child_ty.validateExtern(.other, zcu);
}
return true;
},
.int => switch (ty.intInfo(zcu).bits) {
0, 8, 16, 32, 64, 128 => true,
else => false,
},
.@"fn" => {
if (position != .other) return false;
// For now we want to authorize PTX kernel to use zig objects, even if we end up exposing the ABI.
// The goal is to experiment with more integrated CPU/GPU code.
if (ty.fnCallingConvention(zcu) == .nvptx_kernel) {
return true;
}
return !target_util.fnCallConvAllowsZigTypes(ty.fnCallingConvention(zcu));
},
.@"enum" => {
const enum_obj = zcu.intern_pool.loadEnumType(ty.toIntern());
return switch (enum_obj.int_tag_mode) {
.auto => false,
.explicit => Type.fromInterned(enum_obj.int_tag_type).validateExtern(position, zcu),
};
},
.@"struct" => {
const struct_obj = zcu.intern_pool.loadStructType(ty.toIntern());
return switch (struct_obj.layout) {
.auto => false,
.@"extern" => true,
.@"packed" => switch (struct_obj.packed_backing_mode) {
.auto => false,
.explicit => Type.fromInterned(struct_obj.packed_backing_int_type).validateExtern(position, zcu),
},
};
},
.@"union" => {
const union_obj = zcu.intern_pool.loadUnionType(ty.toIntern());
return switch (union_obj.layout) {
.auto => false,
.@"extern" => true,
.@"packed" => switch (union_obj.packed_backing_mode) {
.auto => false,
.explicit => Type.fromInterned(union_obj.packed_backing_int_type).validateExtern(position, zcu),
},
};
},
.array => {
if (position == .ret_ty or position == .param_ty) return false;
return ty.childType(zcu).validateExtern(.element, zcu);
},
.vector => ty.childType(zcu).validateExtern(.element, zcu),
.optional => ty.isPtrLikeOptional(zcu),
};
}
@ -2889,7 +3027,6 @@ pub fn assertHasLayout(ty: Type, zcu: *const Zcu) void {
.anyframe_type,
.simple_type,
.opaque_type,
.enum_type,
.error_set_type,
.inferred_error_set_type,
=> {},
@ -2906,12 +3043,11 @@ pub fn assertHasLayout(ty: Type, zcu: *const Zcu) void {
.tuple_type => |tuple| for (tuple.types.get(&zcu.intern_pool)) |field_ty| {
assertHasLayout(.fromInterned(field_ty), zcu);
},
.struct_type, .union_type => {
.struct_type, .union_type, .enum_type => {
const unit: InternPool.AnalUnit = .wrap(.{ .type_layout = ty.toIntern() });
assert(!zcu.outdated.contains(unit));
assert(!zcu.potentially_outdated.contains(unit));
},
else => unreachable, // assertion failure; not a struct or union
// values, not types
.simple_value,
@ -2930,23 +3066,13 @@ pub fn assertHasLayout(ty: Type, zcu: *const Zcu) void {
.opt,
.aggregate,
.un,
.undef,
// memoization, not types
.memoized_call,
=> unreachable,
}
}
/// Asserts that `ty` is an enum or struct type whose field values/defaults are resolved.
pub fn assertHasInits(ty: Type, zcu: *const Zcu) void {
    // Only structs and enums track field inits; anything else is a caller bug.
    switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
        .struct_type, .enum_type => {},
        else => unreachable,
    }
    // The `type_inits` analysis unit for this type must not be outdated (or
    // even potentially outdated) in the current incremental update.
    const unit: InternPool.AnalUnit = .wrap(.{ .type_inits = ty.toIntern() });
    assert(!zcu.outdated.contains(unit));
    assert(!zcu.potentially_outdated.contains(unit));
}
/// Recursively walks the type and marks for each subtype how many times it has been seen
fn collectSubtypes(ty: Type, pt: Zcu.PerThread, visited: *std.AutoArrayHashMapUnmanaged(Type, u16)) error{OutOfMemory}!void {
const zcu = pt.zcu;
@ -3116,6 +3242,7 @@ pub const Comparison = struct {
};
};
pub const @"u0": Type = .{ .ip_index = .u0_type };
pub const @"u1": Type = .{ .ip_index = .u1_type };
pub const @"u8": Type = .{ .ip_index = .u8_type };
pub const @"u16": Type = .{ .ip_index = .u16_type };

View file

@ -2207,7 +2207,7 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, pt: Zcu.PerTh
const ptr_ty_info = Type.fromInterned(ptr.ty).ptrInfo(zcu);
const need_child: Type = .fromInterned(ptr_ty_info.child);
if (need_child.comptimeOnly(zcu)) {
if (need_child.comptimeOnly(zcu) or need_child.zigTypeTag(zcu) == .@"opaque") {
// No refinement can happen - this pointer is presumably invalid.
// Just offset it.
const parent = try arena.create(PointerDeriveStep);
@ -2595,8 +2595,8 @@ pub fn uninterpret(val: anytype, ty: Type, pt: Zcu.PerThread) error{ OutOfMemory
pub fn doPointersOverlap(ptr_val_a: Value, ptr_val_b: Value, elem_count: u64, zcu: *const Zcu) bool {
const ip = &zcu.intern_pool;
const a_elem_ty = ptr_val_a.typeOf(zcu).indexablePtrElem(zcu);
const b_elem_ty = ptr_val_b.typeOf(zcu).indexablePtrElem(zcu);
const a_elem_ty = ptr_val_a.typeOf(zcu).indexableElem(zcu);
const b_elem_ty = ptr_val_b.typeOf(zcu).indexableElem(zcu);
const a_ptr = ip.indexToKey(ptr_val_a.toIntern()).ptr;
const b_ptr = ip.indexToKey(ptr_val_b.toIntern()).ptr;
@ -2682,3 +2682,58 @@ pub fn eqlScalarNum(lhs: Value, rhs: Value, zcu: *Zcu) bool {
const rhs_bigint = rhs.toBigInt(&rhs_bigint_space, zcu);
return lhs_bigint.eql(rhs_bigint);
}
/// Asserts the value is an integer, and the destination type is ComptimeInt or Int.
/// Vectors are also accepted. Vector results are reduced with AND.
///
/// If provided, `vector_index` reports the first element that failed the range check.
pub fn intFitsInType(
    val: Value,
    ty: Type,
    vector_index: ?*usize,
    zcu: *const Zcu,
) bool {
    // `comptime_int` is arbitrary-precision, so every integer value fits.
    if (ty.toIntern() == .comptime_int_type) return true;
    const info = ty.intInfo(zcu);
    switch (val.toIntern()) {
        // Fast path: zero fits in every integer type.
        .zero_usize, .zero_u8 => return true,
        else => switch (zcu.intern_pool.indexToKey(val.toIntern())) {
            .undef => return true,
            .variable, .@"extern", .func, .ptr => {
                // Address-like values: only known to fit when the destination
                // type can represent any value of pointer width.
                const target = zcu.getTarget();
                const ptr_bits = target.ptrBitWidth();
                return switch (info.signedness) {
                    // A signed type needs one bit beyond the address width.
                    .signed => info.bits > ptr_bits,
                    .unsigned => info.bits >= ptr_bits,
                };
            },
            .int => |int| {
                // Exact check: big-integer two's-complement range test.
                var buffer: InternPool.Key.Int.Storage.BigIntSpace = undefined;
                const big_int = int.storage.toBigInt(&buffer);
                return big_int.fitsInTwosComp(info.signedness, info.bits);
            },
            .aggregate => |aggregate| {
                // Vector: every element must fit; record the first failure
                // index in `vector_index` if requested.
                assert(ty.zigTypeTag(zcu) == .vector);
                return switch (aggregate.storage) {
                    .bytes => |bytes| for (bytes.toSlice(ty.vectorLen(zcu), &zcu.intern_pool), 0..) |byte, i| {
                        if (byte == 0) continue;
                        // Bits needed to hold `byte`, plus a sign bit for signed types.
                        const actual_needed_bits = std.math.log2(byte) + 1 + @intFromBool(info.signedness == .signed);
                        if (info.bits >= actual_needed_bits) continue;
                        if (vector_index) |vi| vi.* = i;
                        break false;
                    } else true,
                    .elems, .repeated_elem => for (switch (aggregate.storage) {
                        .bytes => unreachable,
                        .elems => |elems| elems,
                        .repeated_elem => |elem| @as(*const [1]InternPool.Index, &elem),
                    }, 0..) |elem, i| {
                        // Recurse on each element against the scalar type.
                        if (Value.fromInterned(elem).intFitsInType(ty.scalarType(zcu), null, zcu)) continue;
                        if (vector_index) |vi| vi.* = i;
                        break false;
                    } else true,
                };
            },
            else => unreachable,
        },
    }
}

View file

@ -1912,40 +1912,6 @@ pub const SrcLoc = struct {
const full = tree.fullPtrType(parent_node).?;
return tree.nodeToSpan(full.ast.bit_range_end.unwrap().?);
},
.node_offset_container_tag => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const parent_node = node_off.toAbsolute(src_loc.base_node);
switch (tree.nodeTag(parent_node)) {
.container_decl_arg, .container_decl_arg_trailing => {
const full = tree.containerDeclArg(parent_node);
const arg_node = full.ast.arg.unwrap().?;
return tree.nodeToSpan(arg_node);
},
.tagged_union_enum_tag, .tagged_union_enum_tag_trailing => {
const full = tree.taggedUnionEnumTag(parent_node);
const arg_node = full.ast.arg.unwrap().?;
return tree.tokensToSpan(
tree.firstToken(arg_node) - 2,
tree.lastToken(arg_node) + 1,
tree.nodeMainToken(arg_node),
);
},
else => unreachable,
}
},
.node_offset_field_default => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const parent_node = node_off.toAbsolute(src_loc.base_node);
const full: Ast.full.ContainerField = switch (tree.nodeTag(parent_node)) {
.container_field => tree.containerField(parent_node),
.container_field_init => tree.containerFieldInit(parent_node),
else => unreachable,
};
return tree.nodeToSpan(full.ast.value_expr.unwrap().?);
},
.node_offset_init_ty => |node_off| {
const tree = try src_loc.file_scope.getTree(zcu);
const parent_node = node_off.toAbsolute(src_loc.base_node);
@ -2021,6 +1987,14 @@ pub const SrcLoc = struct {
}
return tree.nodeToSpan(node);
},
.container_arg => {
const tree = try src_loc.file_scope.getTree(zcu);
const node = src_loc.base_node;
var buf: [2]Ast.Node.Index = undefined;
const container_decl = tree.fullContainerDecl(&buf, node) orelse return tree.nodeToSpan(node);
const arg_node = container_decl.ast.arg.unwrap() orelse return tree.nodeToSpan(node);
return tree.nodeToSpan(arg_node);
},
.container_field_name,
.container_field_value,
.container_field_type,
@ -2262,7 +2236,11 @@ pub const SrcLoc = struct {
var param_it = full.iterate(tree);
for (0..param_idx) |_| assert(param_it.next() != null);
const param = param_it.next().?;
return tree.nodeToSpan(param.type_expr.?);
if (param.anytype_ellipsis3) |tok| {
return tree.tokenToSpan(tok);
} else {
return tree.nodeToSpan(param.type_expr.?);
}
},
}
}
@ -2484,10 +2462,6 @@ pub const LazySrcLoc = struct {
node_offset_ptr_bitoffset: Ast.Node.Offset,
/// The source location points to the host size of a pointer.
node_offset_ptr_hostsize: Ast.Node.Offset,
/// The source location points to the tag type of an union or an enum.
node_offset_container_tag: Ast.Node.Offset,
/// The source location points to the default value of a field.
node_offset_field_default: Ast.Node.Offset,
/// The source location points to the type of an array or struct initializer.
node_offset_init_ty: Ast.Node.Offset,
/// The source location points to the LHS of an assignment (or assign-op, e.g. `+=`).
@ -2532,6 +2506,11 @@ pub const LazySrcLoc = struct {
fn_proto_param_type: FnProtoParam,
array_cat_lhs: ArrayCat,
array_cat_rhs: ArrayCat,
/// The source location points to the backing or tag type expression of
/// the container type declaration at the base node.
///
/// For 'union(enum(T))', this points to 'T', not 'enum(T)'.
container_arg,
/// The source location points to the name of the field at the given index
/// of the container type declaration at the base node.
container_field_name: u32,
@ -3149,7 +3128,7 @@ pub fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void {
.nav_val => |nav| try zcu.markPoDependeeUpToDate(.{ .nav_val = nav }),
.nav_ty => |nav| try zcu.markPoDependeeUpToDate(.{ .nav_ty = nav }),
.type_layout => |ty| try zcu.markPoDependeeUpToDate(.{ .type_layout = ty }),
.type_inits => |ty| try zcu.markPoDependeeUpToDate(.{ .type_inits = ty }),
.struct_defaults => |ty| try zcu.markPoDependeeUpToDate(.{ .struct_defaults = ty }),
.func => |func| try zcu.markPoDependeeUpToDate(.{ .func_ies = func }),
.memoized_state => |stage| try zcu.markPoDependeeUpToDate(.{ .memoized_state = stage }),
}
@ -3165,7 +3144,7 @@ fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUni
.nav_val => |nav| .{ .nav_val = nav },
.nav_ty => |nav| .{ .nav_ty = nav },
.type_layout => |ty| .{ .type_layout = ty },
.type_inits => |ty| .{ .type_inits = ty },
.struct_defaults => |ty| .{ .struct_defaults = ty },
.func => |func_index| .{ .func_ies = func_index },
.memoized_state => |stage| .{ .memoized_state = stage },
};
@ -3195,88 +3174,44 @@ fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUni
}
}
/// Selects an outdated `AnalUnit` to analyze next. Called from the main semantic analysis loop when
/// there is no work immediately queued. The unit is chosen such that it is unlikely to require any
/// recursive analysis (all of its previously-marked dependencies are already up-to-date), because
/// recursive analysis can cause over-analysis on incremental updates.
pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit {
if (!zcu.comp.config.incremental) return null;
if (zcu.outdated.count() == 0) {
// Any units in `potentially_outdated` must just be stuck in loops with one another: none of those
// units have had any outdated dependencies so far, and all of their remaining PO deps are triggered
// by other units in `potentially_outdated`. So, we can safety assume those units up-to-date.
zcu.potentially_outdated.clearRetainingCapacity();
log.debug("findOutdatedToAnalyze: no outdated depender", .{});
return null;
}
// Our goal is to find an outdated AnalUnit which itself has no outdated or
// PO dependencies. Most of the time, such an AnalUnit will exist - we track
// them in the `outdated_ready` set for efficiency. However, this is not
// necessarily the case, since the Decl dependency graph may contain loops
// via mutually recursive definitions:
// pub const A = struct { b: *B };
// pub const B = struct { b: *A };
// In this case, we must defer to more complex logic below.
if (zcu.outdated_ready.count() > 0) {
const unit = zcu.outdated_ready.keys()[0];
log.debug("findOutdatedToAnalyze: trivial {f}", .{zcu.fmtAnalUnit(unit)});
log.debug("findOutdatedToAnalyze: {f}", .{zcu.fmtAnalUnit(unit)});
return unit;
}
// There is no single AnalUnit which is ready for re-analysis. Instead, we must assume that some
// AnalUnit with PO dependencies is outdated -- e.g. in the above example we arbitrarily pick one of
// A or B. We should definitely not select a function, since a function can't be responsible for the
// loop (IES dependencies can't have loops). We should also, of course, not select a `comptime`
// declaration, since you can't depend on those!
// Usually, getting here means that everything is up-to-date, so there is no more work to do. We
// will see that `zcu.outdated` and `zcu.potentially_outdated` are both empty.
//
// However, if a previous update had a dependency loop compile error, there is a cycle in the
// dependency graph (which is usually acyclic), which can cause a scenario where no unit appears
// to be ready, because they're all waiting for the next in the loop to be up-to-date. In that
// case, we usually have to just bite the bullet and analyze one of them. An exception is if
// `zcu.outdated` is empty but `zcu.potentially_outdated` is non-empty: in that case, the only
// possible situation is a cycle where everything is actually up-to-date, so we can clear out
// `zcu.potentially_outdated` and we are done.
// The choice of this unit could have a big impact on how much total analysis we perform, since
// if analysis concludes any dependencies on its result are up-to-date, then other PO AnalUnit
// may be resolved as up-to-date. To hopefully avoid doing too much work, let's find a unit
// which the most things depend on - the idea is that this will resolve a lot of loops (but this
// is only a heuristic).
log.debug("findOutdatedToAnalyze: no trivial ready, using heuristic; {d} outdated, {d} PO", .{
zcu.outdated.count(),
zcu.potentially_outdated.count(),
});
const ip = &zcu.intern_pool;
var chosen_unit: ?AnalUnit = null;
var chosen_unit_dependers: u32 = undefined;
// MLUGG TODO: i'm 99% sure this is now impossible. check!!!
inline for (.{ zcu.outdated.keys(), zcu.potentially_outdated.keys() }) |outdated_units| {
for (outdated_units) |unit| {
var n: u32 = 0;
var it = ip.dependencyIterator(switch (unit.unwrap()) {
.func => continue, // a `func` definitely can't be causing the loop so it is a bad choice
.@"comptime" => continue, // a `comptime` block can't even be depended on so it is a terrible choice
.type_layout => |ty| .{ .type_layout = ty },
.type_inits => |ty| .{ .type_inits = ty },
.nav_val => |nav| .{ .nav_val = nav },
.nav_ty => |nav| .{ .nav_ty = nav },
.memoized_state => {
// If we've hit a loop and some `.memoized_state` is outdated, we should make that choice eagerly.
// In general, it's good to resolve this early on, since -- for instance -- almost every function
// references the panic handler.
return unit;
},
});
while (it.next()) |_| n += 1;
if (chosen_unit == null or n > chosen_unit_dependers) {
chosen_unit = unit;
chosen_unit_dependers = n;
}
}
if (zcu.outdated.count() == 0) {
// Everything is up-to-date. There could be lingering entries in `zcu.potentially_outdated`
// from a dependency loop on a previous update.
zcu.potentially_outdated.clearRetainingCapacity();
log.debug("findOutdatedToAnalyze: all up-to-date", .{});
return null;
}
log.debug("findOutdatedToAnalyze: heuristic returned '{f}' ({d} dependers)", .{
zcu.fmtAnalUnit(chosen_unit.?),
chosen_unit_dependers,
const unit = zcu.outdated.keys()[0];
log.debug("findOutdatedToAnalyze: dependency loop affecting {d} units, selected {f}", .{
zcu.outdated.count(),
zcu.fmtAnalUnit(unit),
});
return chosen_unit.?;
return unit;
}
/// During an incremental update, before semantic analysis, call this to flush all values from
@ -3356,12 +3291,59 @@ pub fn mapOldZirToNew(
}
while (match_stack.pop()) |match_item| {
// First, a check: if the number of captures of this type has changed, we can't map it, because
// we wouldn't know how to correlate type information with the last update.
// Synchronizes with logic in `Zcu.PerThread.recreateStructType` etc.
if (old_zir.typeCapturesLen(match_item.old_inst) != new_zir.typeCapturesLen(match_item.new_inst)) {
// Don't map this type or anything within it.
continue;
// There are some properties of type declarations which cannot change across incremental
// updates. If they have, we need to ignore this mapping. These properties are essentially
// everything passed into `InternPool.getDeclaredStructType` (likewise for unions, enums,
// and opaques).
const old_tag = old_zir.instructions.items(.data)[@intFromEnum(match_item.old_inst)].extended.opcode;
const new_tag = new_zir.instructions.items(.data)[@intFromEnum(match_item.new_inst)].extended.opcode;
if (old_tag != new_tag) continue;
switch (old_tag) {
.struct_decl => {
const old = old_zir.getStructDecl(match_item.old_inst);
const new = new_zir.getStructDecl(match_item.new_inst);
if (old.captures.len != new.captures.len) continue;
if (old.field_names.len != new.field_names.len) continue;
if (old.layout != new.layout) continue;
const old_any_field_aligns = old.field_align_body_lens != null;
const old_any_field_defaults = old.field_default_body_lens != null;
const old_any_comptime_fields = old.field_comptime_bits != null;
const old_explicit_backing_int = old.backing_int_type_body != null;
const new_any_field_aligns = new.field_align_body_lens != null;
const new_any_field_defaults = new.field_default_body_lens != null;
const new_any_comptime_fields = new.field_comptime_bits != null;
const new_explicit_backing_int = new.backing_int_type_body != null;
if (old_any_field_aligns != new_any_field_aligns) continue;
if (old_any_field_defaults != new_any_field_defaults) continue;
if (old_any_comptime_fields != new_any_comptime_fields) continue;
if (old_explicit_backing_int != new_explicit_backing_int) continue;
},
.union_decl => {
const old = old_zir.getUnionDecl(match_item.old_inst);
const new = new_zir.getUnionDecl(match_item.new_inst);
if (old.captures.len != new.captures.len) continue;
if (old.field_names.len != new.field_names.len) continue;
if (old.kind != new.kind) continue;
const old_any_field_aligns = old.field_align_body_lens != null;
const new_any_field_aligns = new.field_align_body_lens != null;
if (old_any_field_aligns != new_any_field_aligns) continue;
},
.enum_decl => {
const old = old_zir.getEnumDecl(match_item.old_inst);
const new = new_zir.getEnumDecl(match_item.new_inst);
if (old.captures.len != new.captures.len) continue;
if (old.field_names.len != new.field_names.len) continue;
if (old.nonexhaustive != new.nonexhaustive) continue;
const old_explicit_tag_type = old.tag_type_body != null;
const new_explicit_tag_type = new.tag_type_body != null;
if (old_explicit_tag_type != new_explicit_tag_type) continue;
},
.opaque_decl => {
const old = old_zir.getOpaqueDecl(match_item.old_inst);
const new = new_zir.getOpaqueDecl(match_item.new_inst);
if (old.captures.len != new.captures.len) continue;
},
else => unreachable,
}
// Match the namespace declaration itself
@ -4068,7 +4050,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoArrayHashMapUnmanaged(AnalUnit, ?R
}
if (has_inits) {
// this should only be referenced by the type
const unit: AnalUnit = .wrap(.{ .type_inits = ty });
const unit: AnalUnit = .wrap(.{ .struct_defaults = ty });
try units.putNoClobber(gpa, unit, referencer);
}
@ -4184,7 +4166,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoArrayHashMapUnmanaged(AnalUnit, ?R
const other: AnalUnit = .wrap(switch (unit.unwrap()) {
.nav_val => |n| .{ .nav_ty = n },
.nav_ty => |n| .{ .nav_val = n },
.@"comptime", .type_layout, .type_inits, .func, .memoized_state => break :queue_paired,
.@"comptime", .type_layout, .struct_defaults, .func, .memoized_state => break :queue_paired,
});
const gop = try units.getOrPut(gpa, other);
if (gop.found_existing) break :queue_paired;
@ -4305,6 +4287,16 @@ pub fn navFileScope(zcu: *Zcu, nav: InternPool.Nav.Index) *File {
return zcu.fileByIndex(zcu.navFileScopeIndex(nav));
}
/// Returns the alignment of the given `Nav`. If an explicit alignment was
/// resolved for the `Nav`, that value is returned; otherwise, falls back to
/// the ABI alignment of the `Nav`'s type.
/// Asserts that the `Nav`'s type has at least been resolved.
pub fn navAlignment(zcu: *Zcu, nav_index: InternPool.Nav.Index) InternPool.Alignment {
    const nav = zcu.intern_pool.getNav(nav_index);
    switch (nav.status) {
        .unresolved => unreachable, // caller guarantees type resolution
        .type_resolved => |r| {
            if (r.alignment != .none) return r.alignment;
            return Type.fromInterned(r.type).abiAlignment(zcu);
        },
        .fully_resolved => |r| {
            if (r.alignment != .none) return r.alignment;
            return Value.fromInterned(r.val).typeOf(zcu).abiAlignment(zcu);
        },
    }
}
pub fn fmtAnalUnit(zcu: *Zcu, unit: AnalUnit) std.fmt.Alt(FormatAnalUnit, formatAnalUnit) {
return .{ .data = .{ .unit = unit, .zcu = zcu } };
}
@ -4331,7 +4323,7 @@ fn formatAnalUnit(data: FormatAnalUnit, writer: *Io.Writer) Io.Writer.Error!void
}
},
.nav_val, .nav_ty => |nav, tag| return writer.print("{t}('{f}' [{}])", .{ tag, ip.getNav(nav).fqn.fmt(ip), @intFromEnum(nav) }),
.type_layout, .type_inits => |ty, tag| return writer.print("{t}('{f}' [{}])", .{ tag, Type.fromInterned(ty).containerTypeName(ip).fmt(ip), @intFromEnum(ty) }),
.type_layout, .struct_defaults => |ty, tag| return writer.print("{t}('{f}' [{}])", .{ tag, Type.fromInterned(ty).containerTypeName(ip).fmt(ip), @intFromEnum(ty) }),
.func => |func| {
const nav = zcu.funcInfo(func).owner_nav;
return writer.print("func('{f}' [{}])", .{ ip.getNav(nav).fqn.fmt(ip), @intFromEnum(func) });
@ -4357,7 +4349,7 @@ fn formatDependee(data: FormatDependee, writer: *Io.Writer) Io.Writer.Error!void
const fqn = ip.getNav(nav).fqn;
return writer.print("{t}('{f}')", .{ tag, fqn.fmt(ip) });
},
.type_layout, .type_inits => |ip_index, tag| {
.type_layout, .struct_defaults => |ip_index, tag| {
const name = Type.fromInterned(ip_index).containerTypeName(ip);
return writer.print("{t}('{f}')", .{ tag, name.fmt(ip) });
},

View file

@ -695,20 +695,46 @@ pub fn ensureFileAnalyzed(pt: Zcu.PerThread, file_index: Zcu.File.Index) (Alloca
.file = file_index,
.inst = .main_struct_inst,
});
const file_root_type = try Sema.analyzeStructDecl(
pt,
file_index,
&file.zir.?,
.none,
tracked_inst,
&struct_decl,
null,
&.{},
.{ .exact = .{
.name = try file.internFullyQualifiedName(pt),
.nav = .none,
} },
);
const wip: InternPool.WipContainerType = switch (try ip.getDeclaredStructType(gpa, io, pt.tid, .{
.zir_index = tracked_inst,
.captures = &.{},
.fields_len = @intCast(struct_decl.field_names.len),
.layout = struct_decl.layout,
.any_comptime_fields = struct_decl.field_comptime_bits != null,
.any_field_defaults = struct_decl.field_default_body_lens != null,
.any_field_aligns = struct_decl.field_align_body_lens != null,
.packed_backing_mode = if (struct_decl.backing_int_type_body != null) .explicit else .auto,
})) {
.existing => unreachable, // it would have been set as `zcu.fileRootType` already
.wip => |wip| wip,
};
errdefer wip.cancel(ip, pt.tid);
wip.setName(ip, try file.internFullyQualifiedName(pt), .none);
const new_namespace_index: InternPool.NamespaceIndex = try pt.createNamespace(.{
.parent = .none,
.owner_type = wip.index,
.file_scope = file_index,
.generation = zcu.generation,
});
errdefer pt.destroyNamespace(new_namespace_index);
try pt.scanNamespace(new_namespace_index, struct_decl.decls);
// MLUGG TODO: we could potentially revert this language change if we wanted? don't mind
try zcu.comp.queueJob(.{ .analyze_unit = .wrap(.{ .type_layout = wip.index }) });
try zcu.comp.queueJob(.{ .analyze_unit = .wrap(.{ .struct_defaults = wip.index }) });
if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newType(zcu, wip.index);
try zcu.outdated.ensureUnusedCapacity(gpa, 2);
try zcu.outdated_ready.ensureUnusedCapacity(gpa, 2);
errdefer comptime unreachable; // because we don't remove the `outdated` entries
zcu.outdated.putAssumeCapacityNoClobber(.wrap(.{ .type_layout = wip.index }), 0);
zcu.outdated.putAssumeCapacityNoClobber(.wrap(.{ .struct_defaults = wip.index }), 0);
zcu.outdated_ready.putAssumeCapacityNoClobber(.wrap(.{ .type_layout = wip.index }), {});
zcu.outdated_ready.putAssumeCapacityNoClobber(.wrap(.{ .struct_defaults = wip.index }), {});
const file_root_type: Type = .fromInterned(wip.finish(ip, new_namespace_index));
zcu.setFileRootType(file_index, file_root_type.toIntern());
if (zcu.comp.time_report) |*tr| tr.stats.n_imported_files += 1;
}
@ -1048,11 +1074,6 @@ pub fn ensureTypeLayoutUpToDate(pt: Zcu.PerThread, ty: Type) Zcu.SemaError!void
assert(!zcu.analysis_in_progress.contains(anal_unit));
// Determine whether or not this type is outdated. For this kind of `AnalUnit`, that's
// the only indicator as to whether or not analysis is required; when a struct/union is
// first created, it's marked as outdated.
// MLUGG TODO: make that actually true, it's a good strategy here!
const was_outdated = zcu.outdated.swapRemove(anal_unit) or
zcu.potentially_outdated.swapRemove(anal_unit);
@ -1113,17 +1134,11 @@ pub fn ensureTypeLayoutUpToDate(pt: Zcu.PerThread, ty: Type) Zcu.SemaError!void
};
defer sema.deinit();
const result = switch (ty.containerLayout(zcu)) {
.auto, .@"extern" => switch (ty.zigTypeTag(zcu)) {
.@"struct" => Sema.type_resolution.resolveStructLayout(&sema, ty),
.@"union" => Sema.type_resolution.resolveUnionLayout(&sema, ty),
else => unreachable,
},
.@"packed" => switch (ty.zigTypeTag(zcu)) {
.@"struct" => Sema.type_resolution.resolvePackedStructLayout(&sema, ty),
.@"union" => Sema.type_resolution.resolvePackedUnionLayout(&sema, ty),
else => unreachable,
},
const result = switch (ty.zigTypeTag(zcu)) {
.@"enum" => Sema.type_resolution.resolveEnumLayout(&sema, ty),
.@"struct" => Sema.type_resolution.resolveStructLayout(&sema, ty),
.@"union" => Sema.type_resolution.resolveUnionLayout(&sema, ty),
else => unreachable,
};
result catch |err| switch (err) {
error.AnalysisFail => {
@ -1145,36 +1160,31 @@ pub fn ensureTypeLayoutUpToDate(pt: Zcu.PerThread, ty: Type) Zcu.SemaError!void
sema.flushExports() catch |err| switch (err) {
error.OutOfMemory => |e| return e,
};
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
if (file.mod.?.strip) break :codegen_type;
zcu.comp.link_prog_node.increaseEstimatedTotalItems(1);
try zcu.comp.queueJob(.{ .link_type = ty.toIntern() });
}
}
/// Ensures that the default/tag values of the given `struct` or `enum` type are fully up-to-date,
/// performing re-analysis if necessary. Asserts that `ty` is a struct (not a tuple!) or an enum.
/// Returns `error.AnalysisFail` if an analysis error is encountered during resolution; the caller
/// is free to ignore this, since the error is already registered.
pub fn ensureTypeInitsUpToDate(pt: Zcu.PerThread, ty: Type) Zcu.SemaError!void {
/// Ensures that the default values of the given "declared" (not reified) `struct` type are fully
/// up-to-date, performing re-analysis if necessary. Asserts that `ty` is a struct (not tuple) type.
/// Returns `error.AnalysisFail` if an analysis error is encountered while resolving the default
/// field values; the caller is free to ignore this, since the error is already registered.
pub fn ensureStructDefaultsUpToDate(pt: Zcu.PerThread, ty: Type) Zcu.SemaError!void {
const tracy = trace(@src());
defer tracy.end();
const zcu = pt.zcu;
const gpa = zcu.gpa;
const anal_unit: AnalUnit = .wrap(.{ .type_inits = ty.toIntern() });
assert(ty.zigTypeTag(zcu) == .@"struct");
assert(!ty.isTuple(zcu));
log.debug("ensureTypeInitsUpToDate {f}", .{zcu.fmtAnalUnit(anal_unit)});
const anal_unit: AnalUnit = .wrap(.{ .struct_defaults = ty.toIntern() });
log.debug("ensureStructDefaultsUpToDate {f}", .{zcu.fmtAnalUnit(anal_unit)});
assert(!zcu.analysis_in_progress.contains(anal_unit));
// Determine whether or not this type is outdated. For this kind of `AnalUnit`, that's
// the only indicator as to whether or not analysis is required; when a struct/enum is
// first created, it's marked as outdated.
// MLUGG TODO: make that actually true, it's a good strategy here!
const was_outdated = zcu.outdated.swapRemove(anal_unit) or
zcu.potentially_outdated.swapRemove(anal_unit);
@ -1194,7 +1204,7 @@ pub fn ensureTypeInitsUpToDate(pt: Zcu.PerThread, ty: Type) Zcu.SemaError!void {
}
// For types, we already know that we have to invalidate all dependees.
// TODO: we actually *could* detect whether everything was the same. should we bother?
try zcu.markDependeeOutdated(.marked_po, .{ .type_inits = ty.toIntern() });
try zcu.markDependeeOutdated(.marked_po, .{ .struct_defaults = ty.toIntern() });
} else {
// We can trust the current information about this unit.
if (zcu.failed_analysis.contains(anal_unit)) return error.AnalysisFail;
@ -1236,12 +1246,7 @@ pub fn ensureTypeInitsUpToDate(pt: Zcu.PerThread, ty: Type) Zcu.SemaError!void {
};
defer sema.deinit();
const result = switch (ty.zigTypeTag(zcu)) {
.@"struct" => Sema.type_resolution.resolveStructDefaults(&sema, ty),
.@"enum" => Sema.type_resolution.resolveEnumValues(&sema, ty),
else => unreachable,
};
result catch |err| switch (err) {
Sema.type_resolution.resolveStructDefaults(&sema, ty) catch |err| switch (err) {
error.AnalysisFail => {
if (!zcu.failed_analysis.contains(anal_unit)) {
// If this unit caused the error, it would have an entry in `failed_analysis`.
@ -1270,20 +1275,6 @@ pub fn ensureNavValUpToDate(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu
const tracy = trace(@src());
defer tracy.end();
// TODO: document this elsewhere mlugg!
// For my own benefit, here's how a namespace update for a normal (non-file-root) type works:
// `const S = struct { ... };`
// We are adding or removing a declaration within this `struct`.
// * `S` registers a dependency on `.{ .src_hash = (declaration of S) }`
// * Any change to the `struct` body -- including changing a declaration -- invalidates this
// * `S` is re-analyzed, but notes:
// * there is an existing struct instance (at this `TrackedInst` with these captures)
// * the struct's resolution is up-to-date (because nothing about the fields changed)
// * so, it uses the same `struct`
// * but this doesn't stop it from updating the namespace!
// * we basically do `scanDecls`, updating the namespace as needed
// * so everyone lived happily ever after
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
@ -3033,7 +3024,7 @@ fn analyzeFuncBodyInner(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.Sem
const zir = file.zir.?;
try zcu.analysis_in_progress.putNoClobber(gpa, anal_unit, {});
errdefer _ = zcu.analysis_in_progress.swapRemove(anal_unit);
defer assert(zcu.analysis_in_progress.swapRemove(anal_unit));
func.setAnalyzed(ip, io);
if (func.analysisUnordered(ip).inferred_error_set) {
@ -3231,9 +3222,6 @@ fn analyzeFuncBodyInner(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.Sem
func.setResolvedErrorSet(ip, io, ies.resolved);
}
// MLUGG TODO: i think this can go away and the assert move to the defer?
assert(zcu.analysis_in_progress.swapRemove(anal_unit));
try sema.flushExports();
defer {
@ -3835,7 +3823,6 @@ pub fn enumValue(pt: Zcu.PerThread, ty: Type, tag_int: InternPool.Index) Allocat
/// declaration order.
pub fn enumValueFieldIndex(pt: Zcu.PerThread, ty: Type, field_index: u32) Allocator.Error!Value {
const ip = &pt.zcu.intern_pool;
ty.assertHasInits(pt.zcu);
const enum_type = ip.loadEnumType(ty.toIntern());
assert(field_index < enum_type.field_names.len);
@ -3859,7 +3846,9 @@ pub fn enumValueFieldIndex(pt: Zcu.PerThread, ty: Type, field_index: u32) Alloca
pub fn undefValue(pt: Zcu.PerThread, ty: Type) Allocator.Error!Value {
if (std.debug.runtime_safety) {
assert(try ty.onePossibleValue(pt) == null);
if (try ty.onePossibleValue(pt)) |opv| {
assert(opv.isUndef(pt.zcu));
}
}
return .fromInterned(try pt.intern(.{ .undef = ty.toIntern() }));
}
@ -3941,7 +3930,10 @@ pub fn aggregateValue(pt: Zcu.PerThread, ty: Type, elems: []const InternPool.Ind
for (elems) |elem| {
if (!Value.fromInterned(elem).isUndef(pt.zcu)) break;
} else if (elems.len > 0) {
return pt.undefValue(ty); // all-undef
// All undef, so return an undef struct. However, don't use `undefValue`, because its
// non-OPV assertion can loop on `[1]@TypeOf(undefined)`: that type has an OPV of
// `.{undefined}`, which here we normalize to `undefined`.
return .fromInterned(try pt.intern(.{ .undef = ty.toIntern() }));
}
return .fromInterned(try pt.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
@ -4096,19 +4088,6 @@ pub fn getExtern(pt: Zcu.PerThread, key: InternPool.Key.Extern) Allocator.Error!
return result.index;
}
// TODO: this shouldn't need a `PerThread`! Fix the signature of `Type.abiAlignment`.
// MLUGG TODO: that's done, move it!
/// Returns the alignment of the given `Nav`: the explicitly resolved
/// alignment if one was set, otherwise the ABI alignment of the `Nav`'s type.
/// Asserts that the `Nav`'s type has at least been resolved (`.unresolved` is
/// unreachable here).
pub fn navAlignment(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) InternPool.Alignment {
    const zcu = pt.zcu;
    // Pull out the type and any explicit alignment from whichever resolution
    // state the Nav is in. For `type_resolved` the type is stored directly;
    // for `fully_resolved` it is recovered from the resolved value.
    const ty: Type, const alignment = switch (zcu.intern_pool.getNav(nav_index).status) {
        .unresolved => unreachable,
        .type_resolved => |r| .{ .fromInterned(r.type), r.alignment },
        .fully_resolved => |r| .{ Value.fromInterned(r.val).typeOf(zcu), r.alignment },
    };
    // An explicit alignment (anything other than `.none`) wins over the
    // type's natural ABI alignment.
    if (alignment != .none) return alignment;
    return ty.abiAlignment(zcu);
}
/// Given a namespace, re-scan its declarations from the type definition if they have not
/// yet been re-scanned on this update.
/// If the type declaration instruction has been lost, returns `error.AnalysisFail`.
@ -4393,7 +4372,7 @@ pub fn resolveTypeForCodegen(pt: Zcu.PerThread, ty: Type) Zcu.SemaError!void {
.@"struct" => switch (ip.indexToKey(ty.toIntern())) {
.struct_type => {
try pt.ensureTypeLayoutUpToDate(ty);
try pt.ensureTypeInitsUpToDate(ty);
try pt.ensureStructDefaultsUpToDate(ty);
},
.tuple_type => |tuple| for (0..tuple.types.len) |i| {
const field_is_comptime = tuple.values.get(ip)[i] != .none;
@ -4405,7 +4384,7 @@ pub fn resolveTypeForCodegen(pt: Zcu.PerThread, ty: Type) Zcu.SemaError!void {
},
.@"union" => try pt.ensureTypeLayoutUpToDate(ty),
.@"enum" => try pt.ensureTypeInitsUpToDate(ty),
.@"enum" => try pt.ensureTypeLayoutUpToDate(ty),
}
}
pub fn resolveValueTypesForCodegen(pt: Zcu.PerThread, val: Value) Zcu.SemaError!void {

View file

@ -347,7 +347,6 @@ pub fn generateSymbol(
.void => unreachable, // non-runtime value
.null => unreachable, // non-runtime value
.@"unreachable" => unreachable, // non-runtime value
.empty_tuple => return,
.false, .true => try w.writeByte(switch (simple_value) {
.false => 0,
.true => 1,
@ -1065,20 +1064,20 @@ pub fn lowerValue(pt: Zcu.PerThread, val: Value, target: *const std.Target) Allo
const elem_ty = ty.childType(zcu);
const ptr = ip.indexToKey(val.toIntern()).ptr;
if (ptr.base_addr == .int) return .{ .immediate = ptr.byte_offset };
switch (ptr.base_addr) {
if (ptr.byte_offset == 0) switch (ptr.base_addr) {
.int => unreachable, // handled above
.nav => |nav| if (elem_ty.isFnOrHasRuntimeBits(zcu)) {
.nav => |nav| if (elem_ty.isRuntimeFnOrHasRuntimeBits(zcu)) {
return .{ .lea_nav = nav };
} else {
// Create the 0xaa bit pattern...
const undef_ptr_bits: u64 = @intCast((@as(u66, 1) << @intCast(target.ptrBitWidth() + 1)) / 3);
// ...but align the pointer
const alignment = pt.navAlignment(nav);
const alignment = zcu.navAlignment(nav);
return .{ .immediate = alignment.forward(undef_ptr_bits) };
},
.uav => |uav| if (elem_ty.isFnOrHasRuntimeBits(zcu)) {
.uav => |uav| if (elem_ty.isRuntimeFnOrHasRuntimeBits(zcu)) {
return .{ .lea_uav = uav };
} else {
// Create the 0xaa bit pattern...
@ -1089,7 +1088,7 @@ pub fn lowerValue(pt: Zcu.PerThread, val: Value, target: *const std.Target) Allo
},
else => {},
}
};
},
},
.int => {

View file

@ -6594,7 +6594,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
if (try isel.hasRepeatedByteRepr(.fromInterned(fill_val))) |fill_byte|
break :fill_byte .{ .constant = fill_byte };
}
switch (dst_ty.indexablePtrElem(zcu).abiSize(zcu)) {
switch (dst_ty.indexableElem(zcu).abiSize(zcu)) {
0 => unreachable,
1 => break :fill_byte .{ .value = bin_op.rhs },
2, 4, 8 => |size| {
@ -7217,7 +7217,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
const ptr_ra = try ptr_vi.value.defReg(isel) orelse break :unused;
const ty_nav = air.data(air.inst_index).ty_nav;
if (ZigType.fromInterned(ip.getNav(ty_nav.nav).typeOf(ip)).isFnOrHasRuntimeBits(zcu)) switch (true) {
if (ZigType.fromInterned(ip.getNav(ty_nav.nav).typeOf(ip)).isRuntimeFnOrHasRuntimeBits(zcu)) switch (true) {
false => {
try isel.nav_relocs.append(gpa, .{
.nav = ty_nav.nav,
@ -7240,7 +7240,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
});
try isel.emit(.adrp(ptr_ra.x(), 0));
},
} else try isel.movImmediate(ptr_ra.x(), isel.pt.navAlignment(ty_nav.nav).forward(0xaaaaaaaaaaaaaaaa));
} else try isel.movImmediate(ptr_ra.x(), zcu.navAlignment(ty_nav.nav).forward(0xaaaaaaaaaaaaaaaa));
}
if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
},
@ -10738,7 +10738,7 @@ pub const Value = struct {
} }),
}),
.simple_value => |simple_value| switch (simple_value) {
.undefined, .void, .null, .empty_tuple, .@"unreachable" => unreachable,
.undefined, .void, .null, .@"unreachable" => unreachable,
.true => continue :constant_key .{ .int = .{
.ty = .bool_type,
.storage = .{ .u64 = 1 },
@ -10931,7 +10931,7 @@ pub const Value = struct {
.ptr => |ptr| {
assert(offset == 0 and size == 8);
break :free switch (ptr.base_addr) {
.nav => |nav| if (ZigType.fromInterned(ip.getNav(nav).typeOf(ip)).isFnOrHasRuntimeBits(zcu)) switch (true) {
.nav => |nav| if (ZigType.fromInterned(ip.getNav(nav).typeOf(ip)).isRuntimeFnOrHasRuntimeBits(zcu)) switch (true) {
false => {
try isel.nav_relocs.append(zcu.gpa, .{
.nav = nav,
@ -10965,9 +10965,9 @@ pub const Value = struct {
},
} else continue :constant_key .{ .int = .{
.ty = .usize_type,
.storage = .{ .u64 = isel.pt.navAlignment(nav).forward(0xaaaaaaaaaaaaaaaa) },
.storage = .{ .u64 = zcu.navAlignment(nav).forward(0xaaaaaaaaaaaaaaaa) },
} },
.uav => |uav| if (ZigType.fromInterned(ip.typeOf(uav.val)).isFnOrHasRuntimeBits(zcu)) switch (true) {
.uav => |uav| if (ZigType.fromInterned(ip.typeOf(uav.val)).isRuntimeFnOrHasRuntimeBits(zcu)) switch (true) {
false => {
try isel.uav_relocs.append(zcu.gpa, .{
.uav = uav,

View file

@ -789,7 +789,7 @@ pub const DeclGen = struct {
// Render an undefined pointer if we have a pointer to a zero-bit or comptime type.
const ptr_ty: Type = .fromInterned(uav.orig_ty);
if (ptr_ty.isPtrAtRuntime(zcu) and !uav_ty.isFnOrHasRuntimeBits(zcu)) {
if (ptr_ty.isPtrAtRuntime(zcu) and !uav_ty.isRuntimeFnOrHasRuntimeBits(zcu)) {
return dg.writeCValue(w, .{ .undef = ptr_ty });
}
@ -862,7 +862,7 @@ pub const DeclGen = struct {
// Render an undefined pointer if we have a pointer to a zero-bit or comptime type.
const nav_ty: Type = .fromInterned(ip.getNav(owner_nav).typeOf(ip));
const ptr_ty = try pt.navPtrType(owner_nav);
if (!nav_ty.isFnOrHasRuntimeBits(zcu)) {
if (!nav_ty.isRuntimeFnOrHasRuntimeBits(zcu)) {
return dg.writeCValue(w, .{ .undef = ptr_ty });
}
@ -1043,7 +1043,6 @@ pub const DeclGen = struct {
.undefined => unreachable,
.void => unreachable,
.null => unreachable,
.empty_tuple => unreachable,
.@"unreachable" => unreachable,
.false => try w.writeAll("false"),
@ -3077,7 +3076,7 @@ pub fn genDecl(o: *Object) Error!void {
const nav = ip.getNav(o.dg.pass.nav);
const nav_ty: Type = .fromInterned(nav.typeOf(ip));
if (!nav_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return;
if (!nav_ty.hasRuntimeBits(zcu)) return;
switch (ip.indexToKey(nav.status.fully_resolved.val)) {
.@"extern" => |@"extern"| {
if (!ip.isFunctionType(nav_ty.toIntern())) return o.dg.renderFwdDecl(o.dg.pass.nav, .{
@ -3676,7 +3675,7 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue {
const inst_ty = f.typeOfIndex(inst);
const ptr_ty = f.typeOf(bin_op.lhs);
const elem_has_bits = ptr_ty.indexablePtrElem(zcu).hasRuntimeBitsIgnoreComptime(zcu);
const elem_has_bits = ptr_ty.indexableElem(zcu).hasRuntimeBitsIgnoreComptime(zcu);
const ptr = try f.resolveInst(bin_op.lhs);
const index = try f.resolveInst(bin_op.rhs);
@ -3792,7 +3791,7 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue {
const zcu = pt.zcu;
const inst_ty = f.typeOfIndex(inst);
const elem_ty = inst_ty.childType(zcu);
if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return .{ .undef = inst_ty };
if (!elem_ty.hasRuntimeBits(zcu)) return .{ .undef = inst_ty };
const local = try f.allocLocalValue(.{
.ctype = try f.ctypeFromType(elem_ty, .complete),
@ -3829,7 +3828,7 @@ fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue {
const zcu = pt.zcu;
const inst_ty = f.typeOfIndex(inst);
const elem_ty = inst_ty.childType(zcu);
if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return .{ .undef = inst_ty };
if (!elem_ty.hasRuntimeBits(zcu)) return .{ .undef = inst_ty };
const local = try f.allocLocalValue(.{
.ctype = try f.ctypeFromType(elem_ty, .complete),
@ -4502,7 +4501,7 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue {
const inst_ty = f.typeOfIndex(inst);
const inst_scalar_ty = inst_ty.scalarType(zcu);
const elem_ty = inst_scalar_ty.indexablePtrElem(zcu);
const elem_ty = inst_scalar_ty.indexableElem(zcu);
if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) return f.moveCValue(inst, inst_ty, lhs);
const inst_scalar_ctype = try f.ctypeFromType(inst_scalar_ty, .complete);
@ -7037,7 +7036,7 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index, function_paren: []const u8) !CV
try w.writeAll(", ");
try writeArrayLen(f, dest_ptr, dest_ty);
try w.writeAll(" * sizeof(");
try f.renderType(w, dest_ty.indexablePtrElem(zcu));
try f.renderType(w, dest_ty.indexableElem(zcu));
try w.writeAll("));");
try f.object.newline();

View file

@ -3725,7 +3725,6 @@ pub const Object = struct {
.undefined => unreachable, // non-runtime value
.void => unreachable, // non-runtime value
.null => unreachable, // non-runtime value
.empty_tuple => unreachable, // non-runtime value
.@"unreachable" => unreachable, // non-runtime value
.false => .false,
@ -4604,7 +4603,7 @@ pub const NavGen = struct {
_ = try o.resolveLlvmFunction(pt, owner_nav);
} else {
const variable_index = try o.resolveGlobalNav(pt, nav_index);
variable_index.setAlignment(pt.navAlignment(nav_index).toLlvm(), &o.builder);
variable_index.setAlignment(zcu.navAlignment(nav_index).toLlvm(), &o.builder);
if (resolved.@"linksection".toSlice(ip)) |section|
variable_index.setSection(try o.builder.string(section), &o.builder);
if (is_const) variable_index.setMutability(.constant, &o.builder);
@ -5953,7 +5952,7 @@ pub const FuncGen = struct {
return .none;
}
const have_block_result = inst_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu);
const have_block_result = inst_ty.hasRuntimeBits(zcu);
var breaks: BreakList = if (have_block_result) .{ .list = .{} } else .{ .len = 0 };
defer if (have_block_result) breaks.list.deinit(self.gpa);
@ -6000,7 +5999,7 @@ pub const FuncGen = struct {
// Add the values to the lists only if the break provides a value.
const operand_ty = self.typeOf(branch.operand);
if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
if (operand_ty.hasRuntimeBits(zcu)) {
const val = try self.resolveInst(branch.operand);
// For the phi node, we need the basic blocks and the values of the
@ -9581,7 +9580,7 @@ pub const FuncGen = struct {
const zcu = pt.zcu;
const ptr_ty = self.typeOfIndex(inst);
const pointee_type = ptr_ty.childType(zcu);
if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(zcu))
if (!pointee_type.hasRuntimeBits(zcu))
return (try o.lowerPtrToVoid(pt, ptr_ty)).toValue();
const pointee_llvm_ty = try o.lowerType(pt, pointee_type);
@ -9595,7 +9594,7 @@ pub const FuncGen = struct {
const zcu = pt.zcu;
const ptr_ty = self.typeOfIndex(inst);
const ret_ty = ptr_ty.childType(zcu);
if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu))
if (!ret_ty.hasRuntimeBits(zcu))
return (try o.lowerPtrToVoid(pt, ptr_ty)).toValue();
if (self.ret_ptr != .none) return self.ret_ptr;
const ret_llvm_ty = try o.lowerType(pt, ret_ty);
@ -9897,7 +9896,7 @@ pub const FuncGen = struct {
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ptr_ty = self.typeOf(bin_op.lhs);
const operand_ty = ptr_ty.childType(zcu);
if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return .none;
if (!operand_ty.hasRuntimeBits(zcu)) return .none;
const ptr = try self.resolveInst(bin_op.lhs);
var element = try self.resolveInst(bin_op.rhs);
const llvm_abi_ty = try o.getAtomicAbiType(pt, operand_ty, false);
@ -11478,7 +11477,7 @@ pub const FuncGen = struct {
const zcu = pt.zcu;
const info = ptr_ty.ptrInfo(zcu);
const elem_ty = Type.fromInterned(info.child);
if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
if (!elem_ty.hasRuntimeBits(zcu)) {
return;
}
const ptr_alignment = ptr_ty.ptrAlignment(zcu).toLlvm();

View file

@ -2673,7 +2673,7 @@ fn genBinOp(
defer func.register_manager.unlockReg(tmp_lock);
// RISC-V has no immediate mul, so we copy the size to a temporary register
const elem_size = lhs_ty.indexablePtrElem(zcu).abiSize(zcu);
const elem_size = lhs_ty.indexableElem(zcu).abiSize(zcu);
const elem_size_reg = try func.copyToTmpRegister(Type.u64, .{ .immediate = elem_size });
try func.genBinOp(
@ -3913,7 +3913,7 @@ fn airPtrElemVal(func: *Func, inst: Air.Inst.Index) !void {
const base_ptr_ty = func.typeOf(bin_op.lhs);
const result: MCValue = if (!is_volatile and func.liveness.isUnused(inst)) .unreach else result: {
const elem_ty = base_ptr_ty.indexablePtrElem(zcu);
const elem_ty = base_ptr_ty.indexableElem(zcu);
if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none;
const base_ptr_mcv = try func.resolveInst(bin_op.lhs);
const base_ptr_lock: ?RegisterLock = switch (base_ptr_mcv) {

View file

@ -821,7 +821,6 @@ fn constant(cg: *CodeGen, ty: Type, val: Value, repr: Repr) Error!Id {
.undefined,
.void,
.null,
.empty_tuple,
.@"unreachable",
=> unreachable, // non-runtime values
@ -1150,7 +1149,7 @@ fn constantUavRef(
}
// const is_fn_body = decl_ty.zigTypeTag(zcu) == .@"fn";
if (!uav_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
if (!uav_ty.hasRuntimeBits(zcu)) {
// Pointer to nothing - return undefined
return cg.module.constUndef(ty_id);
}
@ -1196,7 +1195,7 @@ fn constantNavRef(cg: *CodeGen, ty: Type, nav_index: InternPool.Nav.Index) !Id {
},
}
if (!nav_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
if (!nav_ty.hasRuntimeBits(zcu)) {
// Pointer to nothing - return undefined.
return cg.module.constUndef(ty_id);
}
@ -4381,7 +4380,7 @@ fn airSliceElemVal(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
fn ptrElemPtr(cg: *CodeGen, ptr_ty: Type, ptr_id: Id, index_id: Id) !Id {
const zcu = cg.module.zcu;
// Construct new pointer type for the resulting pointer
const elem_ty = ptr_ty.indexablePtrElem(zcu);
const elem_ty = ptr_ty.indexableElem(zcu);
const elem_ty_id = try cg.resolveType(elem_ty, .indirect);
const elem_ptr_ty_id = try cg.module.ptrType(elem_ty_id, cg.module.storageClass(ptr_ty.ptrAddressSpace(zcu)));
if (ptr_ty.isSinglePointer(zcu)) {
@ -5028,7 +5027,7 @@ fn lowerBlock(cg: *CodeGen, inst: Air.Inst.Index, body: []const Air.Inst.Index)
const gpa = cg.module.gpa;
const zcu = cg.module.zcu;
const ty = cg.typeOfIndex(inst);
const have_block_result = ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu);
const have_block_result = ty.hasRuntimeBits(zcu);
const cf = switch (cg.control_flow) {
.structured => |*cf| cf,
@ -5166,7 +5165,7 @@ fn airBr(cg: *CodeGen, inst: Air.Inst.Index) !void {
switch (cg.control_flow) {
.structured => |*cf| {
if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
if (operand_ty.hasRuntimeBits(zcu)) {
const operand_id = try cg.resolve(br.operand);
const block_result_var_id = cf.block_results.get(br.block_inst).?;
try cg.store(operand_ty, block_result_var_id, operand_id, .{});
@ -5177,7 +5176,7 @@ fn airBr(cg: *CodeGen, inst: Air.Inst.Index) !void {
},
.unstructured => |cf| {
const block = cf.blocks.get(br.block_inst).?;
if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
if (operand_ty.hasRuntimeBits(zcu)) {
const operand_id = try cg.resolve(br.operand);
// block_label should not be undefined here, lest there
// is a br or br_void in the function's body.

View file

@ -2099,7 +2099,7 @@ fn airRetPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const child_type = cg.typeOfIndex(inst).childType(zcu);
const result = result: {
if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
if (!child_type.hasRuntimeBits(zcu)) {
break :result try cg.allocStack(Type.usize); // create pointer to void
}
@ -3161,7 +3161,6 @@ fn lowerConstant(cg: *CodeGen, val: Value, ty: Type) InnerError!WValue {
.undefined,
.void,
.null,
.empty_tuple,
.@"unreachable",
=> unreachable, // non-runtime values
.false, .true => return .{ .imm32 = switch (simple_value) {

View file

@ -104121,7 +104121,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
},
.slice_elem_val, .ptr_elem_val => {
const bin_op = air_datas[@intFromEnum(inst)].bin_op;
const res_ty = cg.typeOf(bin_op.lhs).indexablePtrElem(zcu);
const res_ty = cg.typeOf(bin_op.lhs).indexableElem(zcu);
var ops = try cg.tempsFromOperands(inst, .{ bin_op.lhs, bin_op.rhs });
try ops[0].toSlicePtr(cg);
var res: [1]Temp = undefined;
@ -188179,8 +188179,8 @@ const Select = struct {
.signed => false,
.unsigned => size.bitSize(cg.target) >= int_info.bits,
} else false,
.elem_size_is => |size| size == ty.indexablePtrElem(zcu).abiSize(zcu),
.po2_elem_size => std.math.isPowerOfTwo(ty.indexablePtrElem(zcu).abiSize(zcu)),
.elem_size_is => |size| size == ty.indexableElem(zcu).abiSize(zcu),
.po2_elem_size => std.math.isPowerOfTwo(ty.indexableElem(zcu).abiSize(zcu)),
};
}
};
@ -189941,9 +189941,9 @@ const Select = struct {
op.flags.base.ref.typeOf(s).scalarType(s.cg.pt.zcu).abiSize(s.cg.pt.zcu),
@divExact(op.flags.base.size.bitSize(s.cg.target), 8),
)),
.elem_size => @intCast(op.flags.base.ref.typeOf(s).scalarType(s.cg.pt.zcu).abiSize(s.cg.pt.zcu)),
.src0_elem_size => @intCast(Select.Operand.Ref.src0.typeOf(s).scalarType(s.cg.pt.zcu).abiSize(s.cg.pt.zcu)),
.dst0_elem_size => @intCast(Select.Operand.Ref.dst0.typeOf(s).scalarType(s.cg.pt.zcu).abiSize(s.cg.pt.zcu)),
.elem_size => @intCast(op.flags.base.ref.typeOf(s).indexableElem(s.cg.pt.zcu).abiSize(s.cg.pt.zcu)),
.src0_elem_size => @intCast(Select.Operand.Ref.src0.typeOf(s).indexableElem(s.cg.pt.zcu).abiSize(s.cg.pt.zcu)),
.dst0_elem_size => @intCast(Select.Operand.Ref.dst0.typeOf(s).indexableElem(s.cg.pt.zcu).abiSize(s.cg.pt.zcu)),
.src0_elem_size_mul_src1 => @intCast(Select.Operand.Ref.src0.typeOf(s).indexableElem(s.cg.pt.zcu).abiSize(s.cg.pt.zcu) *
Select.Operand.Ref.src1.valueOf(s).immediate),
.vector_index => switch (op.flags.base.ref.typeOf(s).ptrInfo(s.cg.pt.zcu).flags.vector_index) {
@ -189953,7 +189953,7 @@ const Select = struct {
.src1 => @intCast(Select.Operand.Ref.src1.valueOf(s).immediate),
.src1_sub_bit_size => @as(SignedImm, @intCast(Select.Operand.Ref.src1.valueOf(s).immediate)) -
@as(SignedImm, @intCast(s.cg.nonBoolScalarBitSize(op.flags.base.ref.typeOf(s)))),
.log2_src0_elem_size => @intCast(std.math.log2(Select.Operand.Ref.src0.typeOf(s).scalarType(s.cg.pt.zcu).abiSize(s.cg.pt.zcu))),
.log2_src0_elem_size => @intCast(std.math.log2(Select.Operand.Ref.src0.typeOf(s).indexableElem(s.cg.pt.zcu).abiSize(s.cg.pt.zcu))),
.elem_mask => @as(u8, std.math.maxInt(u8)) >> @intCast(
8 - ((s.cg.unalignedSize(op.flags.base.ref.typeOf(s)) - 1) %
@divExact(op.flags.base.size.bitSize(s.cg.target), 8) + 1 >>

View file

@ -1552,7 +1552,7 @@ fn updateNavInner(coff: *Coff, pt: Zcu.PerThread, nav_index: InternPool.Nav.Inde
const sec_si = try coff.navSection(zcu, nav.status.fully_resolved);
try coff.nodes.ensureUnusedCapacity(gpa, 1);
const ni = try coff.mf.addLastChildNode(gpa, sec_si.node(coff), .{
.alignment = pt.navAlignment(nav_index).toStdMem(),
.alignment = zcu.navAlignment(nav_index).toStdMem(),
.moved = true,
});
coff.nodes.appendAssumeCapacity(.{ .nav = nmi });

View file

@ -1479,7 +1479,7 @@ fn updateTlv(
log.debug("updateTlv {f}({d})", .{ nav.fqn.fmt(ip), nav_index });
const required_alignment = pt.navAlignment(nav_index);
const required_alignment = zcu.navAlignment(nav_index);
const sym = self.symbol(sym_index);
const esym = &self.symtab.items(.elf_sym)[sym.esym_index];

View file

@ -2906,7 +2906,7 @@ fn updateNavInner(elf: *Elf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index)
try elf.nodes.ensureUnusedCapacity(gpa, 1);
const sec_si = elf.navSection(ip, nav.status.fully_resolved);
const ni = try elf.mf.addLastChildNode(gpa, sec_si.node(elf), .{
.alignment = pt.navAlignment(nav_index).toStdMem(),
.alignment = zcu.navAlignment(nav_index).toStdMem(),
.moved = true,
});
elf.nodes.appendAssumeCapacity(.{ .nav = nmi });

View file

@ -925,7 +925,7 @@ pub fn updateNav(
const sect_index = try self.getNavOutputSection(macho_file, zcu, nav_index, code);
if (isThreadlocal(macho_file, nav_index))
try self.updateTlv(macho_file, pt, nav_index, sym_index, sect_index, code)
try self.updateTlv(macho_file, zcu, nav_index, sym_index, sect_index, code)
else
try self.updateNavCode(macho_file, pt, nav_index, sym_index, sect_index, code);
@ -1030,13 +1030,13 @@ fn updateNavCode(
fn updateTlv(
self: *ZigObject,
macho_file: *MachO,
pt: Zcu.PerThread,
zcu: *Zcu,
nav_index: InternPool.Nav.Index,
sym_index: Symbol.Index,
sect_index: u8,
code: []const u8,
) !void {
const ip = &pt.zcu.intern_pool;
const ip = &zcu.intern_pool;
const nav = ip.getNav(nav_index);
log.debug("updateTlv {f} (0x{x})", .{ nav.fqn.fmt(ip), nav_index });
@ -1045,7 +1045,7 @@ fn updateTlv(
const init_sym_index = try self.createTlvInitializer(
macho_file,
nav.fqn.toSlice(ip),
pt.navAlignment(nav_index),
zcu.navAlignment(nav_index),
sect_index,
code,
);

View file

@ -72,8 +72,13 @@ pub fn print(
.undef => try writer.writeAll("undefined"),
.simple_value => |simple_value| switch (simple_value) {
.void => try writer.writeAll("{}"),
.empty_tuple => try writer.writeAll(".{}"),
else => try writer.writeAll(@tagName(simple_value)),
.undefined,
.null,
.true,
.false,
.@"unreachable",
=> try writer.writeAll(@tagName(simple_value)),
},
.variable => try writer.writeAll("(variable)"),
.@"extern" => |e| try writer.print("(extern '{f}')", .{e.name.fmt(ip)}),
@ -248,17 +253,26 @@ fn printAggregate(
const len = ty.arrayLen(zcu);
if (is_ref) try writer.writeByte('&');
try writer.writeAll(".{ ");
const max_len = @min(len, max_aggregate_items);
for (0..max_len) |i| {
if (i != 0) try writer.writeAll(", ");
try print(try val.fieldValue(pt, i), writer, level - 1, pt, opt_sema);
switch (len) {
0 => try writer.writeAll(".{}"),
1 => {
try writer.writeAll(".{");
try print(try val.fieldValue(pt, 0), writer, level - 1, pt, opt_sema);
try writer.writeByte('}');
},
else => {
try writer.writeAll(".{ ");
const max_len = @min(len, max_aggregate_items);
for (0..max_len) |i| {
if (i != 0) try writer.writeAll(", ");
try print(try val.fieldValue(pt, i), writer, level - 1, pt, opt_sema);
}
if (len > max_aggregate_items) {
try writer.writeAll(", ...");
}
try writer.writeAll(" }");
},
}
if (len > max_aggregate_items) {
try writer.writeAll(", ...");
}
return writer.writeAll(" }");
}
fn printPtr(

View file

@ -1439,10 +1439,10 @@ const Writer = struct {
try stream.print("{s}, ", .{@tagName(struct_decl.name_strategy)});
if (struct_decl.backing_int_type != .none) {
if (struct_decl.backing_int_type_body) |backing_int_type_body| {
assert(struct_decl.layout == .@"packed");
try stream.writeAll("packed(");
try self.writeInstRef(stream, struct_decl.backing_int_type);
try self.writeBracedDecl(stream, backing_int_type_body);
try stream.writeAll("), ");
} else {
try stream.print("{s}, ", .{@tagName(struct_decl.layout)});
@ -1507,18 +1507,18 @@ const Writer = struct {
.@"packed" => try stream.writeAll("packed, "),
.packed_explicit => {
try stream.writeAll("packed(");
try self.writeInstRef(stream, union_decl.arg_type);
try self.writeBracedDecl(stream, union_decl.arg_type_body.?);
try stream.writeAll("), ");
},
.tagged_explicit => {
try stream.writeAll("auto(");
try self.writeInstRef(stream, union_decl.arg_type);
try stream.writeAll("tagged(");
try self.writeBracedDecl(stream, union_decl.arg_type_body.?);
try stream.writeAll("), ");
},
.tagged_enum => try stream.writeAll("auto(enum)"),
.tagged_enum => try stream.writeAll("tagged(enum), "),
.tagged_enum_explicit => {
try stream.writeAll("auto(enum(");
try self.writeInstRef(stream, union_decl.arg_type);
try stream.writeAll("tagged(enum(");
try self.writeBracedDecl(stream, union_decl.arg_type_body.?);
try stream.writeAll(")), ");
},
}
@ -1577,7 +1577,11 @@ const Writer = struct {
try stream.print("{s}, ", .{@tagName(enum_decl.name_strategy)});
try self.writeFlag(stream, "nonexhaustive, ", enum_decl.nonexhaustive);
try self.writeInstRef(stream, enum_decl.tag_type);
if (enum_decl.tag_type_body) |tag_type_body| {
try stream.writeAll("tag(");
try self.writeBracedDecl(stream, tag_type_body);
try stream.writeAll("), ");
}
try self.writeCaptures(stream, enum_decl.captures, enum_decl.capture_names);
try stream.writeAll(", ");
@ -1585,9 +1589,9 @@ const Writer = struct {
try stream.writeAll(", ");
if (enum_decl.field_names.len == 0) {
try stream.writeAll(", {}) ");
try stream.writeAll("{}) ");
} else {
try stream.writeAll(", {\n");
try stream.writeAll("{\n");
self.indent += 2;
var it = enum_decl.iterateFields();