diff --git a/lib/std/zig.zig b/lib/std/zig.zig index ba799c650e..26abf81a11 100644 --- a/lib/std/zig.zig +++ b/lib/std/zig.zig @@ -837,6 +837,10 @@ pub const SimpleComptimeReason = enum(u32) { tuple_field_types, enum_field_names, enum_field_values, + union_enum_tag_type, + enum_int_tag_type, + packed_struct_backing_int_type, + packed_union_backing_int_type, // Evaluating at comptime because decl/field name must be comptime-known. decl_name, @@ -925,6 +929,11 @@ pub const SimpleComptimeReason = enum(u32) { .enum_field_names => "enum field names must be comptime-known", .enum_field_values => "enum field values must be comptime-known", + .union_enum_tag_type => "enum tag type of union must be comptime-known", + .enum_int_tag_type => "integer tag type of enum must be comptime-known", + .packed_struct_backing_int_type => "packed struct backing integer type must be comptime-known", + .packed_union_backing_int_type => "packed union backing integer type must be comptime-known", + .decl_name => "declaration name must be comptime-known", .field_name => "field name must be comptime-known", .tuple_field_index => "tuple field index must be comptime-known", diff --git a/lib/std/zig/AstGen.zig b/lib/std/zig/AstGen.zig index 6a6585192d..046330c46b 100644 --- a/lib/std/zig/AstGen.zig +++ b/lib/std/zig/AstGen.zig @@ -4922,24 +4922,14 @@ fn structDeclInner( astgen.advanceSourceCursorToNode(node); - const backing_int_type_ref: Zir.Inst.Ref = ty: { - const backing_int_node = maybe_backing_int_node.unwrap() orelse break :ty .none; - if (layout != .@"packed") return astgen.failNode( - backing_int_node, - "non-packed struct does not support backing integer type", - .{}, - ); - break :ty try typeExpr(gz, scope, backing_int_node); - }; - const decl_inst = try gz.reserveInstructionIndex(); - if (container_decl.ast.members.len == 0 and backing_int_type_ref == .none) { + if (container_decl.ast.members.len == 0 and maybe_backing_int_node == .none) { try gz.setStruct(decl_inst, .{ .src_node = 
node, .name_strat = name_strat, .layout = layout, - .backing_int_type = .none, + .backing_int_type_body_len = null, .decls_len = 0, .fields_len = 0, .any_field_aligns = false, @@ -4993,6 +4983,22 @@ fn structDeclInner( ); if (field_comptime_bits) |bits| @memset(bits.get(astgen), 0); + // Before any field bodies comes the backing int type, if specified. + const backing_int_type_body_len: ?u32 = if (maybe_backing_int_node.unwrap()) |backing_int_node| len: { + if (layout != .@"packed") return astgen.failNode( + backing_int_node, + "non-packed struct does not support backing integer type", + .{}, + ); + const type_ref = try typeExpr(&block_scope, &namespace.base, backing_int_node); + if (!block_scope.endsWithNoReturn()) { + _ = try block_scope.addBreak(.break_inline, decl_inst, type_ref); + } + const body_len = try scratch.appendBodyWithFixups(block_scope.instructionsSlice()); + block_scope.instructions.items.len = block_scope.instructions_top; + break :len body_len; + } else null; + const old_hasher = astgen.src_hasher; defer astgen.src_hasher = old_hasher; astgen.src_hasher = .init(.{}); @@ -5076,7 +5082,7 @@ fn structDeclInner( .src_node = node, .name_strat = name_strat, .layout = layout, - .backing_int_type = backing_int_type_ref, + .backing_int_type_body_len = backing_int_type_body_len, .decls_len = scan_result.decls_len, .fields_len = scan_result.fields_len, .any_field_aligns = scan_result.any_field_aligns, @@ -5220,11 +5226,6 @@ fn unionDeclInner( astgen.advanceSourceCursorToNode(node); - const arg_type_ref: Zir.Inst.Ref = ref: { - const arg_node = opt_arg_node.unwrap() orelse break :ref .none; - break :ref try typeExpr(gz, scope, arg_node); - }; - const decl_inst = try gz.reserveInstructionIndex(); var namespace: Scope.Namespace = .{ @@ -5262,6 +5263,17 @@ fn unionDeclInner( const field_align_body_lens = try scratch.addOptionalSlice(scan_result.any_field_aligns, scan_result.fields_len); const field_value_body_lens = try 
scratch.addOptionalSlice(scan_result.any_field_values, scan_result.fields_len); + // Before any field bodies comes the tag/backing type, if specified. + const arg_type_body_len: ?u32 = if (opt_arg_node.unwrap()) |arg_node| len: { + const type_ref = try typeExpr(&block_scope, &namespace.base, arg_node); + if (!block_scope.endsWithNoReturn()) { + _ = try block_scope.addBreak(.break_inline, decl_inst, type_ref); + } + const body_len = try scratch.appendBodyWithFixups(block_scope.instructionsSlice()); + block_scope.instructions.items.len = block_scope.instructions_top; + break :len body_len; + } else null; + const old_hasher = astgen.src_hasher; defer astgen.src_hasher = old_hasher; astgen.src_hasher = .init(.{}); @@ -5358,7 +5370,7 @@ fn unionDeclInner( .@"extern" => .@"extern", .@"packed" => if (opt_arg_node != .none) .packed_explicit else .@"packed", }, - .arg_type = arg_type_ref, + .arg_type_body_len = arg_type_body_len, .decls_len = scan_result.decls_len, .fields_len = scan_result.fields_len, .any_field_aligns = scan_result.any_field_aligns, @@ -5420,11 +5432,6 @@ fn containerDecl( astgen.advanceSourceCursorToNode(node); - const tag_type_ref: Zir.Inst.Ref = ref: { - const arg_node = container_decl.ast.arg.unwrap() orelse break :ref .none; - break :ref try typeExpr(gz, scope, arg_node); - }; - const decl_inst = try gz.reserveInstructionIndex(); var namespace: Scope.Namespace = .{ @@ -5461,6 +5468,17 @@ fn containerDecl( const field_names = try scratch.addSlice(fields_len); const field_value_body_lens = try scratch.addOptionalSlice(scan_result.any_field_values, fields_len); + // Before any field bodies comes the tag type, if specified. 
+ const tag_type_body_len: ?u32 = if (container_decl.ast.arg.unwrap()) |tag_type_node| len: { + const type_ref = try typeExpr(&block_scope, &namespace.base, tag_type_node); + if (!block_scope.endsWithNoReturn()) { + _ = try block_scope.addBreak(.break_inline, decl_inst, type_ref); + } + const body_len = try scratch.appendBodyWithFixups(block_scope.instructionsSlice()); + block_scope.instructions.items.len = block_scope.instructions_top; + break :len body_len; + } else null; + const old_hasher = astgen.src_hasher; defer astgen.src_hasher = old_hasher; astgen.src_hasher = .init(.{}); @@ -5508,7 +5526,7 @@ fn containerDecl( field_names.get(astgen)[field_idx] = @intFromEnum(try astgen.identAsString(member.ast.main_token)); if (member.ast.value_expr.unwrap()) |value_node| { - if (tag_type_ref == .none) { + if (tag_type_body_len == null) { return astgen.failNodeNotes(node, "explicitly valued enum missing integer tag type", .{}, &.{ try astgen.errNoteNode(value_node, "tag value specified here", .{}), }); @@ -5535,7 +5553,7 @@ fn containerDecl( try gz.setEnum(decl_inst, .{ .src_node = node, .name_strat = name_strat, - .tag_type = tag_type_ref, + .tag_type_body_len = tag_type_body_len, .nonexhaustive = scan_result.has_underscore_field, .decls_len = scan_result.decls_len, .fields_len = fields_len, @@ -12406,7 +12424,7 @@ const GenZir = struct { src_node: Ast.Node.Index, name_strat: Zir.Inst.NameStrategy, layout: std.builtin.Type.ContainerLayout, - backing_int_type: Zir.Inst.Ref, + backing_int_type_body_len: ?u32, decls_len: u32, fields_len: u32, any_field_aligns: bool, @@ -12430,7 +12448,7 @@ const GenZir = struct { const fields_hash_arr: [4]u32 = @bitCast(args.fields_hash); try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.StructDecl).@"struct".fields.len + - 4 + // `captures_len`, `decls_len`, `fields_len`, `backing_int_type` + 4 + // `captures_len`, `decls_len`, `fields_len`, `backing_int_type_body_len` captures_len * 2 + // `capture`, `capture_name` 
args.remaining.len); @@ -12446,7 +12464,7 @@ const GenZir = struct { if (captures_len != 0) astgen.extra.appendAssumeCapacity(captures_len); if (args.decls_len != 0) astgen.extra.appendAssumeCapacity(args.decls_len); if (args.fields_len != 0) astgen.extra.appendAssumeCapacity(args.fields_len); - if (args.backing_int_type != .none) astgen.extra.appendAssumeCapacity(@intFromEnum(args.backing_int_type)); + if (args.backing_int_type_body_len) |n| astgen.extra.appendAssumeCapacity(n); astgen.extra.appendSliceAssumeCapacity(@ptrCast(args.captures)); astgen.extra.appendSliceAssumeCapacity(@ptrCast(args.capture_names)); astgen.extra.appendSliceAssumeCapacity(args.remaining); @@ -12461,7 +12479,7 @@ const GenZir = struct { .has_fields_len = args.fields_len != 0, .name_strategy = args.name_strat, .layout = args.layout, - .has_backing_int_type = args.backing_int_type != .none, + .has_backing_int_type = args.backing_int_type_body_len != null, .any_field_aligns = args.any_field_aligns, .any_field_defaults = args.any_field_defaults, .any_comptime_fields = args.any_comptime_fields, @@ -12475,7 +12493,7 @@ const GenZir = struct { src_node: Ast.Node.Index, name_strat: Zir.Inst.NameStrategy, kind: Zir.Inst.UnionDecl.Kind, - arg_type: Zir.Inst.Ref, + arg_type_body_len: ?u32, decls_len: u32, fields_len: u32, any_field_aligns: bool, @@ -12497,7 +12515,7 @@ const GenZir = struct { const fields_hash_arr: [4]u32 = @bitCast(args.fields_hash); try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.UnionDecl).@"struct".fields.len + - 4 + // `captures_len`, `decls_len`, `fields_len`, `backing_int_type` + 4 + // `captures_len`, `decls_len`, `fields_len`, `arg_type_body_len` captures_len * 2 + // `capture`, `capture_name` args.remaining.len); @@ -12514,10 +12532,9 @@ const GenZir = struct { if (args.decls_len != 0) astgen.extra.appendAssumeCapacity(args.decls_len); if (args.fields_len != 0) astgen.extra.appendAssumeCapacity(args.fields_len); if (args.kind.hasArgType()) { - 
assert(args.arg_type != .none); - astgen.extra.appendAssumeCapacity(@intFromEnum(args.arg_type)); + astgen.extra.appendAssumeCapacity(args.arg_type_body_len.?); } else { - assert(args.arg_type == .none); + assert(args.arg_type_body_len == null); } astgen.extra.appendSliceAssumeCapacity(@ptrCast(args.captures)); astgen.extra.appendSliceAssumeCapacity(@ptrCast(args.capture_names)); @@ -12525,28 +12542,26 @@ const GenZir = struct { astgen.instructions.set(@intFromEnum(inst), .{ .tag = .extended, - .data = .{ - .extended = .{ - .opcode = .union_decl, - .small = @bitCast(Zir.Inst.UnionDecl.Small{ - .has_captures_len = captures_len != 0, - .has_decls_len = args.decls_len != 0, - .has_fields_len = args.fields_len != 0, - .name_strategy = args.name_strat, - .kind = args.kind, - .any_field_aligns = args.any_field_aligns, - .any_field_values = args.any_field_values, - }), - .operand = payload_index, - }, - }, + .data = .{ .extended = .{ + .opcode = .union_decl, + .small = @bitCast(Zir.Inst.UnionDecl.Small{ + .has_captures_len = captures_len != 0, + .has_decls_len = args.decls_len != 0, + .has_fields_len = args.fields_len != 0, + .name_strategy = args.name_strat, + .kind = args.kind, + .any_field_aligns = args.any_field_aligns, + .any_field_values = args.any_field_values, + }), + .operand = payload_index, + } }, }); } fn setEnum(gz: *GenZir, inst: Zir.Inst.Index, args: struct { src_node: Ast.Node.Index, name_strat: Zir.Inst.NameStrategy, - tag_type: Zir.Inst.Ref, + tag_type_body_len: ?u32, nonexhaustive: bool, decls_len: u32, fields_len: u32, @@ -12568,7 +12583,7 @@ const GenZir = struct { const fields_hash_arr: [4]u32 = @bitCast(args.fields_hash); try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.EnumDecl).@"struct".fields.len + - 4 + // `captures_len`, `decls_len`, `fields_len`, `tag_type` + 4 + // `captures_len`, `decls_len`, `fields_len`, `tag_type_body_len` captures_len * 2 + // `capture`, `capture_name` args.remaining.len); @@ -12584,7 +12599,7 @@ const 
GenZir = struct { if (captures_len != 0) astgen.extra.appendAssumeCapacity(captures_len); if (args.decls_len != 0) astgen.extra.appendAssumeCapacity(args.decls_len); if (args.fields_len != 0) astgen.extra.appendAssumeCapacity(args.fields_len); - if (args.tag_type != .none) astgen.extra.appendAssumeCapacity(@intFromEnum(args.tag_type)); + if (args.tag_type_body_len) |n| astgen.extra.appendAssumeCapacity(n); astgen.extra.appendSliceAssumeCapacity(@ptrCast(args.captures)); astgen.extra.appendSliceAssumeCapacity(@ptrCast(args.capture_names)); astgen.extra.appendSliceAssumeCapacity(args.remaining); @@ -12598,7 +12613,7 @@ const GenZir = struct { .has_decls_len = args.decls_len != 0, .has_fields_len = args.fields_len != 0, .name_strategy = args.name_strat, - .has_tag_type = args.tag_type != .none, + .has_tag_type = args.tag_type_body_len != null, .nonexhaustive = args.nonexhaustive, .any_field_values = args.any_field_values, }), diff --git a/lib/std/zig/Zir.zig b/lib/std/zig/Zir.zig index c0270a9e03..94c76b429c 100644 --- a/lib/std/zig/Zir.zig +++ b/lib/std/zig/Zir.zig @@ -3465,7 +3465,7 @@ pub const Inst = struct { /// 0. captures_len: u32 // if `has_captures_len` /// 1. decls_len: u32, // if `has_decls_len` /// 2. fields_len: u32, // if `has_fields_len` - /// 3. backing_int_type: Ref // if `has_backing_int` + /// 3. backing_int_body_len: u32 // if `has_backing_int` /// 4. capture: Capture // for every `captures_len` /// 5. capture_name: NullTerminatedString // for every `captures_len` /// 6. decl: Index, // for every `decls_len`; points to a `declaration` instruction @@ -3475,7 +3475,8 @@ pub const Inst = struct { /// 10. field_default_body_len: u32 // for every `fields_len` if `any_field_defaults` /// 11. field_comptime_bits: u32 // one bit per `fields_len` if `any_comptime_fields` /// // LSB is first field, minimum number of `u32` needed - /// 12. body_inst: Inst.Index // type body, then align body, then default body, for each field + /// 12. 
backing_int_body_inst: Inst.Index // for each `backing_int_body_len` + /// 13. body_inst: Inst.Index // type body, then align body, then default body, for each field pub const StructDecl = struct { // These fields should be concatenated and reinterpreted as a `std.zig.SrcHash`. // This hash contains the source of all fields, and any specified attributes (`extern`, backing type, etc). @@ -3622,13 +3623,14 @@ pub const Inst = struct { /// 0. captures_len: u32, // if has_captures_len /// 1. decls_len: u32, // if has_decls_len /// 2. fields_len: u32, // if has_fields_len - /// 3. tag_type: Ref, // if has_tag_type + /// 3. tag_type_body_len: u32, // if has_tag_type /// 4. capture: Capture // for every `captures_len` /// 5. capture_name: NullTerminatedString // for every `captures_len` /// 6. decl: Index, // for every `decls_len`; points to a `declaration` instruction /// 7. field_name: NullTerminatedString // for every `fields_len` /// 8. field_value_body_len: u32 // for every `fields_len` if `any_field_values` - /// 9. body_inst: Inst.Index // value body for each field + /// 9. tag_type_body_inst: Inst.Index // for each `tag_type_body_len` + /// 10. body_inst: Inst.Index // value body for each field pub const EnumDecl = struct { // These fields should be concatenated and reinterpreted as a `std.zig.SrcHash`. // This hash contains the source of all fields, and the backing type if specified. @@ -3656,7 +3658,7 @@ pub const Inst = struct { /// 0. captures_len: u32 // if `has_captures_len` /// 1. decls_len: u32, // if `has_decls_len` /// 2. fields_len: u32, // if `has_fields_len` - /// 3. arg_type: Ref, // if `kind.hasArgType()` + /// 3. arg_type_body_len: u32, // if `kind.hasArgType()` /// 4. capture: Capture // for every `captures_len` /// 5. capture_name: NullTerminatedString // for every `captures_len` /// 6. decl: Index, // for every `decls_len`; points to a `declaration` instruction @@ -3664,7 +3666,8 @@ pub const Inst = struct { /// 8. 
field_type_body_len: u32 // for every `fields_len` /// 9 . field_align_body_len: u32 // for every `fields_len` if `any_field_aligns` /// 10. field_value_body_len: u32 // for every `fields_len` if `any_field_values` - /// 11. body_inst: Inst.Index // type body, then align body, then value body, for each field + /// 11. arg_type_body_inst: Inst.Index // for each `arg_type_body_len` + /// 12. body_inst: Inst.Index // type body, then align body, then value body, for each field pub const UnionDecl = struct { // These fields should be concatenated and reinterpreted as a `std.zig.SrcHash`. // This hash contains the source of all fields, and any specified attributes (`extern` etc). @@ -5235,18 +5238,6 @@ pub fn assertTrackable(zir: Zir, inst_idx: Zir.Inst.Index) void { } } -/// MLUGG TODO: maybe delete these two? -pub fn typeCapturesLen(zir: Zir, type_decl: Inst.Index) u32 { - const inst = zir.instructions.get(@intFromEnum(type_decl)); - assert(inst.tag == .extended); - return switch (inst.data.extended.opcode) { - .struct_decl => @intCast(zir.getStructDecl(type_decl).captures.len), - .union_decl => @intCast(zir.getUnionDecl(type_decl).captures.len), - .enum_decl => @intCast(zir.getEnumDecl(type_decl).captures.len), - .opaque_decl => @intCast(zir.getOpaqueDecl(type_decl).captures.len), - else => unreachable, - }; -} pub fn typeDecls(zir: Zir, type_decl: Inst.Index) []const Zir.Inst.Index { const inst = zir.instructions.get(@intFromEnum(type_decl)); assert(inst.tag == .extended); @@ -5281,11 +5272,11 @@ pub fn getStructDecl(zir: *const Zir, struct_decl: Inst.Index) UnwrappedStructDe extra_index += 1; break :blk fields_len; } else 0; - const backing_int_type: Inst.Ref = if (small.has_backing_int_type) ty: { - const ty = zir.extra[extra_index]; + const backing_int_type_body_len: u32 = if (small.has_backing_int_type) len: { + const body_len = zir.extra[extra_index]; extra_index += 1; - break :ty @enumFromInt(ty); - } else .none; + break :len body_len; + } else 0; const 
captures: []const Inst.Capture = @ptrCast(zir.extra[extra_index..][0..captures_len]); extra_index += captures_len; const capture_names: []const NullTerminatedString = @ptrCast(zir.extra[extra_index..][0..captures_len]); @@ -5312,6 +5303,11 @@ pub fn getStructDecl(zir: *const Zir, struct_decl: Inst.Index) UnwrappedStructDe extra_index += bits_len; break :bits bits; } else null; + const backing_int_type_body: ?[]const Zir.Inst.Index = switch (backing_int_type_body_len) { + 0 => null, + else => |n| zir.bodySlice(extra_index, n), + }; + extra_index += backing_int_type_body_len; const field_bodies_overlong: []const Inst.Index = @ptrCast(zir.extra[extra_index..]); return .{ .src_line = extra.data.src_line, @@ -5321,7 +5317,7 @@ pub fn getStructDecl(zir: *const Zir, struct_decl: Inst.Index) UnwrappedStructDe .capture_names = capture_names, .decls = decls, .layout = small.layout, - .backing_int_type = backing_int_type, + .backing_int_type_body = backing_int_type_body, .field_names = field_names, .field_type_body_lens = field_type_body_lens, .field_align_body_lens = field_align_body_lens, @@ -5341,7 +5337,7 @@ pub const UnwrappedStructDecl = struct { decls: []const Inst.Index, layout: std.builtin.Type.ContainerLayout, - backing_int_type: Inst.Ref, + backing_int_type_body: ?[]const Inst.Index, field_names: []const NullTerminatedString, field_type_body_lens: []const u32, @@ -5427,11 +5423,11 @@ pub fn getUnionDecl(zir: *const Zir, union_decl: Inst.Index) UnwrappedUnionDecl extra_index += 1; break :blk fields_len; } else 0; - const arg_type: Inst.Ref = if (small.kind.hasArgType()) ty: { - const ty = zir.extra[extra_index]; + const arg_type_body_len: u32 = if (small.kind.hasArgType()) len: { + const body_len = zir.extra[extra_index]; extra_index += 1; - break :ty @enumFromInt(ty); - } else .none; + break :len body_len; + } else 0; const captures: []const Inst.Capture = @ptrCast(zir.extra[extra_index..][0..captures_len]); extra_index += captures_len; const capture_names: []const 
NullTerminatedString = @ptrCast(zir.extra[extra_index..][0..captures_len]); @@ -5452,6 +5448,11 @@ pub fn getUnionDecl(zir: *const Zir, union_decl: Inst.Index) UnwrappedUnionDecl extra_index += fields_len; break :lens @ptrCast(lens); } else null; + const arg_type_body: ?[]const Zir.Inst.Index = switch (arg_type_body_len) { + 0 => null, + else => |n| zir.bodySlice(extra_index, n), + }; + extra_index += arg_type_body_len; const field_bodies_overlong: []const Inst.Index = @ptrCast(zir.extra[extra_index..]); return .{ .src_line = extra.data.src_line, @@ -5461,7 +5462,7 @@ pub fn getUnionDecl(zir: *const Zir, union_decl: Inst.Index) UnwrappedUnionDecl .capture_names = capture_names, .decls = decls, .kind = small.kind, - .arg_type = arg_type, + .arg_type_body = arg_type_body, .field_names = field_names, .field_type_body_lens = field_type_body_lens, .field_align_body_lens = field_align_body_lens, @@ -5480,7 +5481,7 @@ pub const UnwrappedUnionDecl = struct { decls: []const Inst.Index, kind: Inst.UnionDecl.Kind, - arg_type: Inst.Ref, + arg_type_body: ?[]const Inst.Index, field_names: []const NullTerminatedString, field_type_body_lens: []const u32, @@ -5556,11 +5557,11 @@ pub fn getEnumDecl(zir: *const Zir, enum_decl: Inst.Index) UnwrappedEnumDecl { extra_index += 1; break :blk fields_len; } else 0; - const tag_type: Inst.Ref = if (small.has_tag_type) ty: { - const ty = zir.extra[extra_index]; + const tag_type_body_len: u32 = if (small.has_tag_type) len: { + const body_len = zir.extra[extra_index]; extra_index += 1; - break :ty @enumFromInt(ty); - } else .none; + break :len body_len; + } else 0; const captures: []const Inst.Capture = @ptrCast(zir.extra[extra_index..][0..captures_len]); extra_index += captures_len; const capture_names: []const NullTerminatedString = @ptrCast(zir.extra[extra_index..][0..captures_len]); @@ -5574,6 +5575,11 @@ pub fn getEnumDecl(zir: *const Zir, enum_decl: Inst.Index) UnwrappedEnumDecl { extra_index += fields_len; break :lens @ptrCast(lens); } 
else null; + const tag_type_body: ?[]const Zir.Inst.Index = switch (tag_type_body_len) { + 0 => null, + else => |n| zir.bodySlice(extra_index, n), + }; + extra_index += tag_type_body_len; const field_bodies_overlong: []const Inst.Index = @ptrCast(zir.extra[extra_index..]); return .{ .src_line = extra.data.src_line, @@ -5582,7 +5588,7 @@ pub fn getEnumDecl(zir: *const Zir, enum_decl: Inst.Index) UnwrappedEnumDecl { .captures = captures, .capture_names = capture_names, .decls = decls, - .tag_type = tag_type, + .tag_type_body = tag_type_body, .nonexhaustive = small.nonexhaustive, .field_names = field_names, .field_value_body_lens = field_value_body_lens, @@ -5599,7 +5605,7 @@ pub const UnwrappedEnumDecl = struct { decls: []const Inst.Index, - tag_type: Inst.Ref, + tag_type_body: ?[]const Inst.Index, nonexhaustive: bool, field_names: []const NullTerminatedString, diff --git a/src/Compilation.zig b/src/Compilation.zig index 4c033b32f8..9b6b3fa250 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -3713,7 +3713,7 @@ const Header = extern struct { nav_val_deps_len: u32, nav_ty_deps_len: u32, type_layout_deps_len: u32, - type_inits_deps_len: u32, + struct_defaults_deps_len: u32, func_ies_deps_len: u32, zon_file_deps_len: u32, embed_file_deps_len: u32, @@ -3763,7 +3763,7 @@ pub fn saveState(comp: *Compilation) !void { .nav_val_deps_len = @intCast(ip.nav_val_deps.count()), .nav_ty_deps_len = @intCast(ip.nav_ty_deps.count()), .type_layout_deps_len = @intCast(ip.type_layout_deps.count()), - .type_inits_deps_len = @intCast(ip.type_inits_deps.count()), + .struct_defaults_deps_len = @intCast(ip.struct_defaults_deps.count()), .func_ies_deps_len = @intCast(ip.func_ies_deps.count()), .zon_file_deps_len = @intCast(ip.zon_file_deps.count()), .embed_file_deps_len = @intCast(ip.embed_file_deps.count()), @@ -3800,8 +3800,8 @@ pub fn saveState(comp: *Compilation) !void { addBuf(&bufs, @ptrCast(ip.nav_ty_deps.values())); addBuf(&bufs, @ptrCast(ip.type_layout_deps.keys())); 
addBuf(&bufs, @ptrCast(ip.type_layout_deps.values())); - addBuf(&bufs, @ptrCast(ip.type_inits_deps.keys())); - addBuf(&bufs, @ptrCast(ip.type_inits_deps.values())); + addBuf(&bufs, @ptrCast(ip.struct_defaults_deps.keys())); + addBuf(&bufs, @ptrCast(ip.struct_defaults_deps.values())); addBuf(&bufs, @ptrCast(ip.func_ies_deps.keys())); addBuf(&bufs, @ptrCast(ip.func_ies_deps.values())); addBuf(&bufs, @ptrCast(ip.zon_file_deps.keys())); @@ -4481,7 +4481,7 @@ pub fn addModuleErrorMsg( const root_name: ?[]const u8 = switch (ref.referencer.unwrap()) { .@"comptime" => "comptime", .nav_val, .nav_ty => |nav| ip.getNav(nav).name.toSlice(ip), - .type_layout, .type_inits => |ty| Type.fromInterned(ty).containerTypeName(ip).toSlice(ip), + .type_layout, .struct_defaults => |ty| Type.fromInterned(ty).containerTypeName(ip).toSlice(ip), .func => |f| ip.getNav(zcu.funcInfo(f).owner_nav).name.toSlice(ip), .memoized_state => null, }; @@ -5251,7 +5251,7 @@ fn processOneJob(tid: Zcu.PerThread.Id, comp: *Compilation, job: Job) JobError!v .nav_ty => |nav| pt.ensureNavTypeUpToDate(nav), .nav_val => |nav| pt.ensureNavValUpToDate(nav), .type_layout => |ty| pt.ensureTypeLayoutUpToDate(.fromInterned(ty)), - .type_inits => |ty| pt.ensureTypeInitsUpToDate(.fromInterned(ty)), + .struct_defaults => |ty| pt.ensureStructDefaultsUpToDate(.fromInterned(ty)), .memoized_state => |stage| pt.ensureMemoizedStateUpToDate(stage), .func => |func| pt.ensureFuncBodyUpToDate(func), }; diff --git a/src/IncrementalDebugServer.zig b/src/IncrementalDebugServer.zig index ce40844057..7d0dc8e89b 100644 --- a/src/IncrementalDebugServer.zig +++ b/src/IncrementalDebugServer.zig @@ -307,7 +307,7 @@ fn handleCommand(zcu: *Zcu, w: *Io.Writer, cmd_str: []const u8, arg_str: []const switch (dependee) { .src_hash, .namespace, .namespace_name, .zon_file, .embed_file => try w.print("{f}", .{zcu.fmtDependee(dependee)}), .nav_val, .nav_ty => |nav| try w.print("{t} {d}", .{ dependee, @intFromEnum(nav) }), - .type_layout, .type_inits, 
.func_ies => |ip_index| try w.print("{t} {d}", .{ dependee, @intFromEnum(ip_index) }), + .type_layout, .struct_defaults, .func_ies => |ip_index| try w.print("{t} {d}", .{ dependee, @intFromEnum(ip_index) }), .memoized_state => |stage| try w.print("memoized_state {s}", .{@tagName(stage)}), } try w.writeByte('\n'); @@ -374,8 +374,8 @@ fn parseAnalUnit(str: []const u8) ?AnalUnit { return .wrap(.{ .nav_ty = @enumFromInt(parseIndex(idx_str) orelse return null) }); } else if (std.mem.eql(u8, kind, "type_layout")) { return .wrap(.{ .type_layout = @enumFromInt(parseIndex(idx_str) orelse return null) }); - } else if (std.mem.eql(u8, kind, "type_inits")) { - return .wrap(.{ .type_inits = @enumFromInt(parseIndex(idx_str) orelse return null) }); + } else if (std.mem.eql(u8, kind, "struct_defaults")) { + return .wrap(.{ .struct_defaults = @enumFromInt(parseIndex(idx_str) orelse return null) }); } else if (std.mem.eql(u8, kind, "func")) { return .wrap(.{ .func = @enumFromInt(parseIndex(idx_str) orelse return null) }); } else if (std.mem.eql(u8, kind, "memoized_state")) { diff --git a/src/InternPool.zig b/src/InternPool.zig index 28846c3f70..ce9622e54d 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -50,12 +50,12 @@ nav_ty_deps: std.AutoArrayHashMapUnmanaged(Nav.Index, DepEntry.Index), /// Dependencies on a function's inferred error set. Key is the function body, not the IES. /// Value is index into `dep_entries` of the first dependency on this function's IES. func_ies_deps: std.AutoArrayHashMapUnmanaged(Index, DepEntry.Index), -/// Dependencies on the resolved layout of a `struct` or `union` type. +/// Dependencies on the resolved layout of a `struct`, `union`, or `enum` type. /// Value is index into `dep_entries` of the first dependency on this type's layout. type_layout_deps: std.AutoArrayHashMapUnmanaged(Index, DepEntry.Index), -/// Dependencies on the resolved initializers of a `struct` or `enum` type. 
+/// Dependencies on the resolved default field values of a `struct` type. /// Value is index into `dep_entries` of the first dependency on this type's inits. -type_inits_deps: std.AutoArrayHashMapUnmanaged(Index, DepEntry.Index), +struct_defaults_deps: std.AutoArrayHashMapUnmanaged(Index, DepEntry.Index), /// Dependencies on a ZON file. Triggered by `@import` of ZON. /// Value is index into `dep_entries` of the first dependency on this ZON file. zon_file_deps: std.AutoArrayHashMapUnmanaged(FileIndex, DepEntry.Index), @@ -110,7 +110,7 @@ pub const empty: InternPool = .{ .nav_ty_deps = .empty, .func_ies_deps = .empty, .type_layout_deps = .empty, - .type_inits_deps = .empty, + .struct_defaults_deps = .empty, .zon_file_deps = .empty, .embed_file_deps = .empty, .namespace_deps = .empty, @@ -422,7 +422,7 @@ pub const AnalUnit = packed struct(u64) { nav_val, nav_ty, type_layout, - type_inits, + struct_defaults, func, memoized_state, }; @@ -434,11 +434,10 @@ pub const AnalUnit = packed struct(u64) { nav_val: Nav.Index, /// This `AnalUnit` resolves the type of the given `Nav`. nav_ty: Nav.Index, - /// This `AnalUnit` resolves the layout of the given `struct` or `union` type. + /// This `AnalUnit` resolves the layout of the given `struct`, `union`, or `enum` type. type_layout: InternPool.Index, - /// This `AnalUnit` resolves the field inits of the given `struct` or `enum` type. - /// The type may be a union's auto-generated tag enum, if the union has explicit field values. - type_inits: InternPool.Index, + /// This `AnalUnit` resolves the default field values of the given `struct` type. + struct_defaults: InternPool.Index, /// This `AnalUnit` analyzes the body of the given runtime function. func: InternPool.Index, /// This `AnalUnit` resolves all state which is memoized in fields on `Zcu`. @@ -852,7 +851,7 @@ pub const Dependee = union(enum) { /// Index is the function, not its IES. 
func_ies: Index, type_layout: Index, - type_inits: Index, + struct_defaults: Index, zon_file: FileIndex, embed_file: Zcu.EmbedFile.Index, namespace: TrackedInst.Index, @@ -906,7 +905,7 @@ pub fn dependencyIterator(ip: *const InternPool, dependee: Dependee) DependencyI .nav_ty => |x| ip.nav_ty_deps.get(x), .func_ies => |x| ip.func_ies_deps.get(x), .type_layout => |x| ip.type_layout_deps.get(x), - .type_inits => |x| ip.type_inits_deps.get(x), + .struct_defaults => |x| ip.struct_defaults_deps.get(x), .zon_file => |x| ip.zon_file_deps.get(x), .embed_file => |x| ip.embed_file_deps.get(x), .namespace => |x| ip.namespace_deps.get(x), @@ -981,7 +980,7 @@ pub fn addDependency(ip: *InternPool, gpa: Allocator, depender: AnalUnit, depend .nav_ty => ip.nav_ty_deps, .func_ies => ip.func_ies_deps, .type_layout => ip.type_layout_deps, - .type_inits => ip.type_inits_deps, + .struct_defaults => ip.struct_defaults_deps, .zon_file => ip.zon_file_deps, .embed_file => ip.embed_file_deps, .namespace => ip.namespace_deps, @@ -2248,10 +2247,6 @@ pub const Key = union(enum) { pub const Declared = struct { /// A `struct_decl`, `union_decl`, `enum_decl`, or `opaque_decl` instruction. zir_index: TrackedInst.Index, - /// If the type declaration had an argument type (tag type or packed backing type), this - /// is that type. Otherwise, this is `.none`. It is always `.none` for `opaque` types as - /// `opaque(T)` does not exist. - arg_ty: Index, /// The captured values of this type. These values must be fully resolved per the language spec. 
captures: union(enum) { owned: CaptureValue.Slice, @@ -2745,7 +2740,6 @@ pub const Key = union(enum) { switch (namespace_type) { .declared => |declared| { std.hash.autoHash(&hasher, declared.zir_index); - std.hash.autoHash(&hasher, declared.arg_ty); const captures = switch (declared.captures) { .owned => |cvs| cvs.get(ip), .external => |cvs| cvs, @@ -3155,7 +3149,6 @@ pub const Key = union(enum) { .declared => |a_d| { const b_d = b_info.declared; if (a_d.zir_index != b_d.zir_index) return false; - if (a_d.arg_ty != b_d.arg_ty) return false; const a_captures = switch (a_d.captures) { .owned => |s| s.get(ip), .external => |cvs| cvs, @@ -3295,7 +3288,6 @@ pub const Key = union(enum) { .void => .void_type, .null => .null_type, .false, .true => .bool_type, - .empty_tuple => .empty_tuple_type, .@"unreachable" => .noreturn_type, }, @@ -3308,6 +3300,7 @@ pub const LoadedStructType = struct { /// Index of the `struct_decl` or `reify` ZIR instruction. zir_index: TrackedInst.Index, captures: CaptureValue.Slice, + is_reified: bool, // TODO: the non-fqn will be needed by the new dwarf structure /// The name of this struct type. @@ -3319,10 +3312,9 @@ pub const LoadedStructType = struct { layout: std.builtin.Type.ContainerLayout, /// May be `undefined` if `layout != .@"packed"`. - packed_backing_mode: PackedBackingMode, - /// May be `undefined` if `layout != .@"packed", - packed_backing_int_type: Index, + packed_backing_mode: BackingTypeMode, + // The remaining fields are only valid once the struct's layout is resolved. field_name_map: MapIndex, field_names: NullTerminatedString.Slice, field_types: Index.Slice, @@ -3331,11 +3323,11 @@ pub const LoadedStructType = struct { field_is_comptime_bits: ComptimeBits, field_runtime_order: RuntimeOrder.Slice, field_offsets: Offsets, - - // These fields are only valid once the layout is resolved, and are never valid for `layout == .@"packed"`. 
+ packed_backing_int_type: Index, has_no_possible_value: bool, has_one_possible_value: bool, comptime_only: bool, + has_runtime_bits: bool, size: u32, alignment: Alignment, @@ -3478,6 +3470,7 @@ pub const LoadedUnionType = struct { /// Index of the `union_decl` or `reify` ZIR instruction. zir_index: TrackedInst.Index, captures: CaptureValue.Slice, + is_reified: bool, // TODO: the non-fqn will be needed by the new dwarf structure /// The name of this union type. @@ -3488,23 +3481,26 @@ pub const LoadedUnionType = struct { namespace: NamespaceIndex, layout: std.builtin.Type.ContainerLayout, + enum_tag_mode: BackingTypeMode, + /// May be `undefined` if `layout != .@"packed"`. + packed_backing_mode: BackingTypeMode, + + /// Only reified unions store field names; typically they should be loaded from `enum_tag_type` + /// instead. Reified unions store them because type resolution needs them in order to validate + /// or populate `enum_tag_type`. + reified_field_names: NullTerminatedString.Slice, + + // The remaining fields are only valid once the union's layout is resolved. + field_types: Index.Slice, + field_aligns: Alignment.Slice, runtime_tag: RuntimeTag, /// Even if `runtime_tag == .none`, this is populated with the union's "hypothetical" tag type. enum_tag_type: Index, - /// May be `undefined` if `layout != .@"packed"`. - packed_backing_mode: PackedBackingMode, - /// May be `undefined` if `layout != .@"packed", packed_backing_int_type: Index, - - // Field names are not stored here, because fields are guaranteed to map one-to-one to the - // fields of the enum tag type. If you need field names, load them from `enum_tag_type`. - field_types: Index.Slice, - field_aligns: Alignment.Slice, - - // These fields are only valid once the layout is resolved, and are never valid for `layout == .@"packed"`. 
has_no_possible_value: bool, has_one_possible_value: bool, comptime_only: bool, + has_runtime_bits: bool, size: u32, padding: u32, alignment: Alignment, @@ -3523,6 +3519,7 @@ pub const LoadedEnumType = struct { captures: CaptureValue.Slice, /// If `zir_index` is `.none`, this is the union type for which this enum is the tag type. owner_union: Index, + is_reified: bool, // TODO: the non-fqn will be needed by the new dwarf structure /// The name of this enum type. @@ -3532,19 +3529,14 @@ pub const LoadedEnumType = struct { name_nav: Nav.Index.Optional, namespace: NamespaceIndex, - /// An integer type which is used for the numerical value of the enum. Populated immediately, regardless - /// of whether the integer tag type was explicitly provided or inferred by the compiler. - int_tag_type: Index, - int_tag_is_explicit: bool, + int_tag_mode: BackingTypeMode, nonexhaustive: bool, - /// Uses `NullTerminatedString.Adapter` with `field_names`. + // The remaining fields are only valid once the enum's layout is resolved. + int_tag_type: Index, field_name_map: MapIndex, - /// If this is `.none`, the enum tag type is auto-generated and so the fields are auto-numbered. - /// Otherwise, uses `Index.Adapter` with `field_values`. - field_value_map: OptionalMapIndex, field_names: NullTerminatedString.Slice, - /// Empty if `field_value_map` is `.none`. + field_value_map: OptionalMapIndex, field_values: Index.Slice, /// Look up field index based on field name. @@ -3596,7 +3588,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { const extra_items = extra_list.view().items(.@"0"); const item = unwrapped_index.getItem(ip); // Exiting this `switch` means this is a `packed struct`. 
- const backing_mode: PackedBackingMode, const any_defaults: bool = switch (item.tag) { + const backing_mode: BackingTypeMode, const any_defaults: bool = switch (item.tag) { .type_struct_packed_auto => .{ .auto, false }, .type_struct_packed_explicit => .{ .explicit, false }, .type_struct_packed_auto_defaults => .{ .auto, true }, @@ -3667,6 +3659,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { return .{ .zir_index = extra.data.zir_index, .captures = captures, + .is_reified = extra.data.flags.any_captures == .reified, .name = extra.data.name, .name_nav = extra.data.name_nav, .namespace = extra.data.namespace, @@ -3675,7 +3668,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { .@"extern" => .@"extern", }, .packed_backing_mode = undefined, - .packed_backing_int_type = undefined, + .field_name_map = extra.data.field_name_map, .field_names = field_names, .field_types = field_types, @@ -3684,9 +3677,11 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { .field_is_comptime_bits = field_is_comptime_bits, .field_runtime_order = field_runtime_order, .field_offsets = field_offsets, + .packed_backing_int_type = .none, .has_no_possible_value = extra.data.flags.has_no_possible_value, .has_one_possible_value = extra.data.flags.has_one_possible_value, .comptime_only = extra.data.flags.comptime_only, + .has_runtime_bits = extra.data.flags.has_runtime_bits, .size = extra.data.size, .alignment = extra.data.flags.alignment, }; @@ -3728,12 +3723,13 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { return .{ .zir_index = extra.data.zir_index, .captures = captures, + .is_reified = extra.data.captures_len == .reified, .name = extra.data.name, .name_nav = extra.data.name_nav, .namespace = extra.data.namespace, .layout = .@"packed", .packed_backing_mode = backing_mode, - .packed_backing_int_type = extra.data.backing_int_type, + .field_name_map = 
extra.data.field_name_map, .field_names = field_names, .field_types = field_types, @@ -3742,9 +3738,11 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { .field_is_comptime_bits = .empty, .field_runtime_order = .empty, .field_offsets = .empty, + .packed_backing_int_type = extra.data.backing_int_type, .has_no_possible_value = undefined, .has_one_possible_value = undefined, .comptime_only = undefined, + .has_runtime_bits = undefined, .size = undefined, .alignment = undefined, }; @@ -3756,7 +3754,7 @@ pub fn loadUnionType(ip: *const InternPool, index: Index) LoadedUnionType { const extra_items = extra_list.view().items(.@"0"); const item = unwrapped_index.getItem(ip); // Exiting this `switch` means this is a `packed union`. - const backing_mode: PackedBackingMode = switch (item.tag) { + const backing_mode: BackingTypeMode = switch (item.tag) { .type_union_packed_auto => .auto, .type_union_packed_explicit => .explicit, .type_union => { @@ -3779,6 +3777,12 @@ pub fn loadUnionType(ip: *const InternPool, index: Index) LoadedUnionType { }, }; extra_index += captures.len; + const reified_field_names: NullTerminatedString.Slice = if (extra.data.flags.any_captures == .reified) .{ + .tid = unwrapped_index.tid, + .start = extra_index, + .len = extra.data.fields_len, + } else .empty; + extra_index += reified_field_names.len; const field_types: Index.Slice = .{ .tid = unwrapped_index.tid, .start = extra_index, @@ -3795,6 +3799,7 @@ pub fn loadUnionType(ip: *const InternPool, index: Index) LoadedUnionType { return .{ .zir_index = extra.data.zir_index, .captures = captures, + .is_reified = extra.data.flags.any_captures == .reified, .name = extra.data.name, .name_nav = extra.data.name_nav, .namespace = extra.data.namespace, @@ -3803,14 +3808,17 @@ pub fn loadUnionType(ip: *const InternPool, index: Index) LoadedUnionType { .@"extern" => .@"extern", }, .runtime_tag = extra.data.flags.runtime_tag, + .enum_tag_mode = extra.data.flags.enum_tag_mode, 
.enum_tag_type = extra.data.enum_tag_type, .packed_backing_mode = undefined, .packed_backing_int_type = undefined, + .reified_field_names = reified_field_names, .field_types = field_types, .field_aligns = field_aligns, .has_no_possible_value = extra.data.flags.has_no_possible_value, .has_one_possible_value = extra.data.flags.has_one_possible_value, .comptime_only = extra.data.flags.comptime_only, + .has_runtime_bits = extra.data.flags.has_runtime_bits, .size = extra.data.size, .padding = extra.data.padding, .alignment = extra.data.flags.alignment, @@ -3832,6 +3840,12 @@ pub fn loadUnionType(ip: *const InternPool, index: Index) LoadedUnionType { }, }; extra_index += captures.len; + const reified_field_names: NullTerminatedString.Slice = if (extra.data.captures_len == .reified) .{ + .tid = unwrapped_index.tid, + .start = extra_index, + .len = extra.data.fields_len, + } else .empty; + extra_index += reified_field_names.len; const field_types: Index.Slice = .{ .tid = unwrapped_index.tid, .start = extra_index, @@ -3841,19 +3855,23 @@ pub fn loadUnionType(ip: *const InternPool, index: Index) LoadedUnionType { return .{ .zir_index = extra.data.zir_index, .captures = captures, + .is_reified = extra.data.captures_len == .reified, .name = extra.data.name, .name_nav = extra.data.name_nav, .namespace = extra.data.namespace, .layout = .@"packed", .runtime_tag = .none, + .enum_tag_mode = .auto, .enum_tag_type = extra.data.enum_tag_type, .packed_backing_mode = backing_mode, .packed_backing_int_type = extra.data.backing_int_type, + .reified_field_names = reified_field_names, .field_types = field_types, .field_aligns = .empty, .has_no_possible_value = undefined, .has_one_possible_value = undefined, .comptime_only = undefined, + .has_runtime_bits = undefined, .size = undefined, .padding = undefined, .alignment = undefined, @@ -3917,12 +3935,13 @@ pub fn loadEnumType(ip: *const InternPool, index: Index) LoadedEnumType { return .{ .zir_index = zir_index, .captures = captures, + 
.is_reified = extra.data.captures_len == .reified, .owner_union = owner_union, .name = extra.data.name, .name_nav = extra.data.name_nav, .namespace = extra.data.namespace, .int_tag_type = extra.data.int_tag_type, - .int_tag_is_explicit = explicit_int_tag, + .int_tag_mode = if (explicit_int_tag) .explicit else .auto, .nonexhaustive = nonexhaustive, .field_name_map = extra.data.field_name_map, .field_value_map = field_value_map, @@ -4160,7 +4179,7 @@ pub const Index = enum(u32) { }; /// Used for a map of `Index` values to the index within a list of `Index` values. - pub const Adapter = struct { + const Adapter = struct { indexes: []const Index, pub fn eql(ctx: @This(), a: Index, b_void: void, b_map_index: usize) bool { @@ -4365,7 +4384,7 @@ pub const Index = enum(u32) { }) void { _ = self; const map_fields = @typeInfo(@typeInfo(@TypeOf(tag_to_encoding_map)).pointer.child).@"struct".fields; - @setEvalBranchQuota(2_000); + @setEvalBranchQuota(3_000); inline for (@typeInfo(Tag).@"enum".fields, 0..) |tag, start| { inline for (0..map_fields.len) |offset| { if (comptime std.mem.eql(u8, tag.name, map_fields[(start + offset) % map_fields.len].name)) break; @@ -4797,7 +4816,11 @@ pub const static_keys: [static_len]Key = .{ .{ .simple_value = .null }, .{ .simple_value = .true }, .{ .simple_value = .false }, - .{ .simple_value = .empty_tuple }, + + .{ .aggregate = .{ + .ty = .empty_tuple_type, + .storage = .{ .elems = &.{} }, + } }, }; /// How many items in the InternPool are statically known. @@ -5601,10 +5624,12 @@ pub const Tag = enum(u8) { has_no_possible_value: bool, /// Whether the struct is comptime-only. Always `false` until layout resolved. comptime_only: bool, + /// Whether the struct has runtime bits. Always `false` until layout resolved. + has_runtime_bits: bool, /// Alignment of the whole struct. Always `.none` until layout resolved. 
alignment: Alignment, - _: u17 = 0, + _: u16 = 0, }; }; @@ -5625,21 +5650,25 @@ pub const Tag = enum(u8) { name_nav: Nav.Index.Optional, namespace: NamespaceIndex, - /// The corresponding `PackedBackingMode` depends on the item's `Tag`. + /// The corresponding `BackingTypeMode` depends on the item's `Tag`. backing_int_type: Index, fields_len: u32, field_name_map: MapIndex, }; - /// Field names are intentionally omitted---they are available in `enum_tag_type`. + /// For declared unions, field names are intentionally omitted because they are available in + /// `enum_tag_type`. However, reified unions do store field names, because they are needed by + /// type resolution to create or validate the enum tag type (type resolution for declared unions + /// instead fetches field names from ZIR). /// /// Trailing: /// 0. type_hash: PackedU64 // if `any_captures == .reified` /// 1. captures_len: u32 // if `any_captures == .true` /// 2. capture: CaptureValue // if `any_captures == .true`; for each `captures_len` - /// 3. field_type: Index // for each `fields_len` - /// 4. field_align: Alignment // for each `fields_len` if `any_field_aligns` + /// 3. reified_field_name: NullTerminatedString // if `any_captures == .reified`; for each `fields_len` + /// 4. field_type: Index // for each `fields_len` + /// 5. field_align: Alignment // for each `fields_len` if `any_field_aligns` pub const TypeUnion = struct { zir_index: TrackedInst.Index, @@ -5652,7 +5681,6 @@ pub const Tag = enum(u8) { /// This could be provided through the tag type, but it is more convenient /// to store it directly. This is also necessary for `dumpStatsFallible` to /// work on unresolved types. - /// MLUGG TODO: reconsider, because we resolve the tag type eagerly now. fields_len: u32, /// Always 0 until layout resolved. 
@@ -5669,7 +5697,7 @@ pub const Tag = enum(u8) { /// /// For `union(enum(E))` syntax, this is `false`, but the generated enum tag type is /// considered to have an explicitly specified integer tag type. - explicit_tag_type: bool, + enum_tag_mode: BackingTypeMode, /// `packed` layout is represented separately by `TypeStructPacked`. layout: enum(u1) { auto, @"extern" }, @@ -5685,19 +5713,25 @@ pub const Tag = enum(u8) { has_no_possible_value: bool, /// Whether the union is comptime-only. Always `false` until layout resolved. comptime_only: bool, + /// Whether the union has runtime bits. Always `false` until layout resolved. + has_runtime_bits: bool, /// Alignment of the whole union. Always `.none` until layout resolved. alignment: Alignment, - _: u16 = 0, + _: u15 = 0, }; }; - /// Field names are intentionally omitted---they are available in `enum_tag_type`. + /// For declared unions, field names are intentionally omitted because they are available in + /// `enum_tag_type`. However, reified unions do store field names, because they are needed by + /// type resolution to create or validate the enum tag type (type resolution for declared unions + /// instead fetches field names from ZIR). /// /// Trailing: /// 0. type_hash: PackedU64 // if `captures_len == .reified` /// 1. capture: CaptureValue // if `captures_len != .reified`; for each `captures_len` - /// 2. field_type: Index // for each `fields_len` + /// 2. reified_field_name: NullTerminatedString // if `captures_len == .reified`; for each `fields_len` + /// 3. field_type: Index // for each `fields_len` pub const TypeUnionPacked = struct { zir_index: TrackedInst.Index, captures_len: enum(u32) { @@ -5709,7 +5743,7 @@ pub const Tag = enum(u8) { name_nav: Nav.Index.Optional, namespace: NamespaceIndex, - /// The corresponding `PackedBackingMode` depends on the item's `Tag`. + /// The corresponding `BackingTypeMode` depends on the item's `Tag`. 
backing_int_type: Index, /// Although packed unions do not semantically have a tag type, the compiler still assigns /// them a "hypothetical" tag type. @@ -5718,7 +5752,6 @@ pub const Tag = enum(u8) { /// This could be provided through the tag type, but it is more convenient /// to store it directly. This is also necessary for `dumpStatsFallible` to /// work on unresolved types. - /// MLUGG TODO: reconsider, because we resolve the tag type eagerly now. fields_len: u32, }; @@ -5742,8 +5775,7 @@ pub const Tag = enum(u8) { namespace: NamespaceIndex, /// An integer type which is used for the numerical value of the enum. Whether this was - /// user-provided or inferred by the compiler depends on the tag. Either way, the field - /// is populated immediately (i.e. does not require any type resolution). + /// user-provided or inferred by the compiler depends on the tag. int_tag_type: Index, fields_len: u32, @@ -5762,13 +5794,17 @@ pub const Tag = enum(u8) { }; }; -/// Differentiates between user-provided and compiler-generated backing types for packed aggregates. -pub const PackedBackingMode = enum(u1) { - /// The backing type was explicitly provided by the user, i.e. `packed struct(T)` or `packed union(T)`. - /// Type resolution simply *validates* that type. +/// Differentiates between user-provided and compiler-generated backing types for packed and tagged types. +pub const BackingTypeMode = enum(u1) { + /// The backing type was explicitly provided by the user. For instance: + /// union(T) + /// enum(T) + /// packed struct(T) + /// packed union(T) + /// Type layout resolution will evaluate the user-provided expression and validate that type. explicit, - /// No backing type was explicitly provided by the user. Type layout resolution will populate the - /// backing type based on the field types; before then it is invalid (probably `.none`). + /// No backing type was explicitly provided by the user. Type layout resolution will populate + /// an inferred/generated type. 
auto, }; @@ -5852,8 +5888,6 @@ pub const SimpleValue = enum(u32) { void = @intFromEnum(Index.void_value), /// This is untyped `null`. null = @intFromEnum(Index.null_value), - /// This is the untyped empty struct/array literal: `.{}` - empty_tuple = @intFromEnum(Index.empty_tuple), true = @intFromEnum(Index.bool_true), false = @intFromEnum(Index.bool_false), @"unreachable" = @intFromEnum(Index.unreachable_value), @@ -6395,7 +6429,7 @@ pub fn deinit(ip: *InternPool, gpa: Allocator, io: Io) void { ip.nav_ty_deps.deinit(gpa); ip.func_ies_deps.deinit(gpa); ip.type_layout_deps.deinit(gpa); - ip.type_inits_deps.deinit(gpa); + ip.struct_defaults_deps.deinit(gpa); ip.zon_file_deps.deinit(gpa); ip.embed_file_deps.deinit(gpa); ip.namespace_deps.deinit(gpa); @@ -6544,12 +6578,10 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { } }, .false => .{ .declared = .{ .zir_index = extra.data.zir_index, - .arg_ty = .none, .captures = .{ .owned = .empty }, } }, .true => .{ .declared = .{ .zir_index = extra.data.zir_index, - .arg_ty = .none, .captures = .{ .owned = .{ .tid = unwrapped_index.tid, .start = extra.end + 1, @@ -6572,11 +6604,6 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { } }, _ => .{ .declared = .{ .zir_index = extra.data.zir_index, - .arg_ty = switch (item.tag) { - .type_struct_packed_auto, .type_struct_packed_auto_defaults => .none, - .type_struct_packed_explicit, .type_struct_packed_explicit_defaults => extra.data.backing_int_type, - else => unreachable, - }, .captures = .{ .owned = .{ .tid = unwrapped_index.tid, .start = extra.end, @@ -6595,12 +6622,10 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { } }, .false => .{ .declared = .{ .zir_index = extra.data.zir_index, - .arg_ty = if (extra.data.flags.explicit_tag_type) extra.data.enum_tag_type else .none, .captures = .{ .owned = .empty }, } }, .true => .{ .declared = .{ .zir_index = extra.data.zir_index, - .arg_ty = if (extra.data.flags.explicit_tag_type) 
extra.data.enum_tag_type else .none, .captures = .{ .owned = .{ .tid = unwrapped_index.tid, .start = extra.end + 1, @@ -6619,11 +6644,6 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { } }, _ => .{ .declared = .{ .zir_index = extra.data.zir_index, - .arg_ty = switch (item.tag) { - .type_union_packed_auto => .none, - .type_union_packed_explicit => extra.data.backing_int_type, - else => unreachable, - }, .captures = .{ .owned = .{ .tid = unwrapped_index.tid, .start = extra.end, @@ -6645,11 +6665,6 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { } }, _ => .{ .declared = .{ .zir_index = @enumFromInt(extra_list.view().items(.@"0")[extra.end]), - .arg_ty = switch (item.tag) { - .type_enum_auto => .none, - .type_enum_explicit, .type_enum_nonexhaustive => extra.data.int_tag_type, - else => unreachable, - }, .captures = .{ .owned = .{ .tid = unwrapped_index.tid, .start = extra.end + 1, @@ -6662,7 +6677,6 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { const extra = extraDataTrail(unwrapped_index.getExtra(ip), Tag.TypeOpaque, data); break :ns .{ .declared = .{ .zir_index = extra.data.zir_index, - .arg_ty = .none, .captures = .{ .owned = .{ .tid = unwrapped_index.tid, .start = extra.end, @@ -6883,7 +6897,6 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { }, .type_array_small, .type_vector, - // MLUGG TODO: is this still possible? also, i hate .only_possible_value, it should die in a fire. .type_struct_packed_auto, .type_struct_packed_explicit, => .{ .aggregate = .{ @@ -6894,7 +6907,6 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { // There is only one possible value precisely due to the // fact that this values slice is fully populated! .type_struct, - // MLUGG TODO: is this still possible? also, i hate .only_possible_value, it should die in a fire. 
.type_struct_packed_auto_defaults, .type_struct_packed_explicit_defaults, => { @@ -7445,12 +7457,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread.Id, key: }); }, - .struct_type => unreachable, // use getStructType() instead - .tuple_type => unreachable, // use getTupleType() instead - .union_type => unreachable, // use getUnionType() instead - .opaque_type => unreachable, // use getOpaqueType() instead + .struct_type => unreachable, // instead use: getDeclaredStructType, getReifiedStructType + .union_type => unreachable, // instead use: getDeclaredUnionType, getReifiedUnionType + .enum_type => unreachable, // instead use: getDeclaredEnumType, getReifiedEnumType, getGeneratedEnumTagType + .opaque_type => unreachable, // instead use: getDeclaredOpaqueType - .enum_type => unreachable, // use getEnumType() instead + .tuple_type => unreachable, // use getTupleType() instead .func_type => unreachable, // use getFuncType() instead .@"extern" => unreachable, // use getExtern() instead .func => unreachable, // use getFuncInstance() or getFuncDecl() instead @@ -8072,43 +8084,37 @@ pub fn get(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread.Id, key: return gop.put(); } -pub fn getStructType(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread.Id, ini: struct { - fields_len: u32, - layout: std.builtin.Type.ContainerLayout, - /// The following only applies if `layout == .@"packed"`; this field is ignored otherwise. - /// - /// The explicitly specified backing integer type. `.none` means the backing integer is inferred - /// by the compiler. Asserts that this is an integer type. 
- explicit_packed_backing_type: Index, - any_comptime_fields: bool, - any_field_defaults: bool, - any_field_aligns: bool, - key: union(enum) { - declared: struct { - zir_index: TrackedInst.Index, - captures: []const CaptureValue, - }, - reified: struct { - zir_index: TrackedInst.Index, - type_hash: u64, - }, +pub fn getDeclaredStructType( + ip: *InternPool, + gpa: Allocator, + io: Io, + tid: Zcu.PerThread.Id, + ini: struct { + zir_index: TrackedInst.Index, + captures: []const CaptureValue, + + // If the value of any of the following fields would change on an incremental update, then logic + // in `Zcu.mapOldZirToNew` must detect that (these properties are all trivially known from ZIR) + // and refuse to map the type declaration. This causes `zir_index` to change so that a new type + // will be interned at a fresh index. + // + // In the future, it would be good to remove all of those fields from `ini`, and in fact just + // have a single function `getDeclaredContainer` which is suitable for all container types. + // However, this requires some major changes to how container types are represented in the + // InternPool, so that it is possible for their backing storage to be "reallocated" as needed + // during type resolution. 
+ fields_len: u32, + layout: std.builtin.Type.ContainerLayout, + any_comptime_fields: bool, + any_field_defaults: bool, + any_field_aligns: bool, + packed_backing_mode: BackingTypeMode, }, -}) Allocator.Error!WipContainerType.Result { - const key: Key = .{ .struct_type = switch (ini.key) { - .declared => |d| .{ .declared = .{ - .zir_index = d.zir_index, - .arg_ty = switch (ini.layout) { - .auto, .@"extern" => .none, - .@"packed" => ini.explicit_packed_backing_type, - }, - .captures = .{ .external = d.captures }, - } }, - .reified => |r| .{ .reified = .{ - .zir_index = r.zir_index, - .type_hash = r.type_hash, - } }, - } }; - var gop = try ip.getOrPutKey(gpa, io, tid, key); +) Allocator.Error!WipContainerType.Result { + var gop = try ip.getOrPutKey(gpa, io, tid, .{ .struct_type = .{ .declared = .{ + .zir_index = ini.zir_index, + .captures = .{ .external = ini.captures }, + } } }); defer gop.deinit(); if (gop == .existing) return .{ .existing = gop.existing }; @@ -8120,48 +8126,36 @@ pub fn getStructType(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread const field_name_map = try ip.addMap(gpa, io, tid, ini.fields_len); errdefer local.mutate.maps.len -= 1; - const zir_index, const type_hash_captures_extra_len = switch (ini.key) { - .declared => |d| .{ d.zir_index, d.captures.len + @intFromBool(ini.layout != .@"packed") }, - .reified => |r| .{ r.zir_index, 2 }, - }; - const is_extern = switch (ini.layout) { .auto => false, .@"extern" => true, .@"packed" => { try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeStructPacked).@"struct".fields.len + - type_hash_captures_extra_len + + ini.captures.len + // capture ini.fields_len + // field_name ini.fields_len + // field_type (if (ini.any_field_defaults) ini.fields_len else 0)); // field_default const extra_index = addExtraAssumeCapacity(extra, Tag.TypeStructPacked{ - .zir_index = zir_index, - .captures_len = switch (ini.key) { - .declared => |d| @enumFromInt(d.captures.len), - .reified => .reified, - }, + .zir_index 
= ini.zir_index, + .captures_len = @enumFromInt(ini.captures.len), .name = undefined, // set by `finish` .name_nav = undefined, // set by `finish` .namespace = undefined, // set by `finish` - .backing_int_type = ini.explicit_packed_backing_type, + .backing_int_type = .none, .fields_len = ini.fields_len, .field_name_map = field_name_map, }); - switch (ini.key) { - .declared => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}), - .reified => |r| _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)), - } - const field_names_start = extra.mutate.len; + extra.appendSliceAssumeCapacity(.{@ptrCast(ini.captures)}); // capture extra.appendNTimesAssumeCapacity(.{@intFromEnum(NullTerminatedString.empty)}, ini.fields_len); // field_name extra.appendNTimesAssumeCapacity(.{@intFromEnum(Index.none)}, ini.fields_len); // field_type if (ini.any_field_defaults) { extra.appendNTimesAssumeCapacity(.{@intFromEnum(Index.none)}, ini.fields_len); // field_default } items.appendAssumeCapacity(.{ - .tag = switch (ini.explicit_packed_backing_type) { - .none => if (ini.any_field_defaults) .type_struct_packed_auto_defaults else .type_struct_packed_auto, - else => if (ini.any_field_defaults) .type_struct_packed_explicit_defaults else .type_struct_packed_explicit, + .tag = switch (ini.packed_backing_mode) { + .auto => if (ini.any_field_defaults) .type_struct_packed_auto_defaults else .type_struct_packed_auto, + .explicit => if (ini.any_field_defaults) .type_struct_packed_explicit_defaults else .type_struct_packed_explicit, }, .data = extra_index, }); @@ -8171,17 +8165,18 @@ pub fn getStructType(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread .type_name_index = extra_index + std.meta.fieldIndex(Tag.TypeStructPacked, "name").?, .name_nav_index = extra_index + std.meta.fieldIndex(Tag.TypeStructPacked, "name_nav").?, .namespace_index = extra_index + std.meta.fieldIndex(Tag.TypeStructPacked, "namespace").?, - .tag_type_index = null, - .fields_len = ini.fields_len, - 
.field_name_map = field_name_map, - .field_names_start = field_names_start, - .field_comptime_bits_start = null, + .field_names = undefined, + .field_types = undefined, + .field_values = undefined, + .field_aligns = undefined, + .field_is_comptime_bits = undefined, } }; }, }; try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeStruct).@"struct".fields.len + - type_hash_captures_extra_len + + 1 + // captures_len + ini.captures.len + // capture ini.fields_len + // field_name ini.fields_len + // field_type (if (ini.any_field_defaults) ini.fields_len else 0) + // field_default @@ -8191,7 +8186,7 @@ pub fn getStructType(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread ini.fields_len); // field_offset const extra_index = addExtraAssumeCapacity(extra, Tag.TypeStruct{ - .zir_index = zir_index, + .zir_index = ini.zir_index, .name = undefined, // set by `finish` .name_nav = undefined, // set by `finish` .namespace = undefined, // set by `finish` @@ -8199,10 +8194,7 @@ pub fn getStructType(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread .field_name_map = field_name_map, .size = 0, .flags = .{ - .any_captures = switch (ini.key) { - .declared => |d| if (d.captures.len != 0) .true else .false, - .reified => .reified, - }, + .any_captures = if (ini.captures.len != 0) .true else .false, .layout = if (is_extern) .@"extern" else .auto, .any_comptime_fields = ini.any_comptime_fields, .any_field_defaults = ini.any_field_defaults, @@ -8210,17 +8202,14 @@ pub fn getStructType(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread .has_one_possible_value = false, .has_no_possible_value = false, .comptime_only = false, + .has_runtime_bits = false, .alignment = .none, }, }); - switch (ini.key) { - .declared => |d| if (d.captures.len != 0) { - extra.appendAssumeCapacity(.{@intCast(d.captures.len)}); - extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}); - }, - .reified => |r| _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)), + if (ini.captures.len 
!= 0) { + extra.appendAssumeCapacity(.{@intCast(ini.captures.len)}); // captures_len + extra.appendSliceAssumeCapacity(.{@ptrCast(ini.captures)}); // capture } - const field_names_start = extra.mutate.len; extra.appendNTimesAssumeCapacity(.{@intFromEnum(NullTerminatedString.empty)}, ini.fields_len); // field_name extra.appendNTimesAssumeCapacity(.{@intFromEnum(Index.none)}, ini.fields_len); // field_type if (ini.any_field_defaults) { @@ -8229,11 +8218,9 @@ pub fn getStructType(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread if (ini.any_field_aligns) { extra.appendNTimesAssumeCapacity(.{0}, (ini.fields_len + 3) / 4); // field_align } - const field_comptime_bits_start: ?u32 = if (ini.any_comptime_fields) start: { - const start = extra.mutate.len; + if (ini.any_comptime_fields) { extra.appendNTimesAssumeCapacity(.{0}, (ini.fields_len + 31) / 32); // field_is_comptime_bits - break :start start; - } else null; + } if (!is_extern) { extra.appendNTimesAssumeCapacity(.{@intFromEnum(LoadedStructType.RuntimeOrder.unresolved)}, ini.fields_len); // field_runtime_order } @@ -8248,58 +8235,29 @@ pub fn getStructType(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread .type_name_index = extra_index + std.meta.fieldIndex(Tag.TypeStruct, "name").?, .name_nav_index = extra_index + std.meta.fieldIndex(Tag.TypeStruct, "name_nav").?, .namespace_index = extra_index + std.meta.fieldIndex(Tag.TypeStruct, "namespace").?, - .tag_type_index = null, - .fields_len = ini.fields_len, - .field_name_map = field_name_map, - .field_names_start = field_names_start, - .field_comptime_bits_start = field_comptime_bits_start, + .field_names = undefined, + .field_types = undefined, + .field_values = undefined, + .field_aligns = undefined, + .field_is_comptime_bits = undefined, } }; } -pub fn getUnionType(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread.Id, ini: struct { +pub fn getReifiedStructType(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread.Id, ini: struct { 
+ zir_index: TrackedInst.Index, + type_hash: u64, fields_len: u32, layout: std.builtin.Type.ContainerLayout, - /// The explicitly specified backing integer type for a `packed union`. - /// `.none` means the backing integer is inferred by the compiler. If set, - /// must be an integer type. If the union is not packed, must be `.none`. - explicit_packed_backing_type: Index, - runtime_tag: LoadedUnionType.RuntimeTag, - /// `true` for `union(T)`, but `false` for anything else, including `union(enum(T))`. - have_explicit_enum_tag: bool, + any_comptime_fields: bool, + any_field_defaults: bool, any_field_aligns: bool, - key: union(enum) { - declared: struct { - zir_index: TrackedInst.Index, - captures: []const CaptureValue, - /// This is the `T` in one of the following: - /// * `union(T)` (enum tag type) - /// * `union(enum(T))` (int tag type) - /// * `packed union(T)` (int backing type) - /// Or `.none` otherwise. - arg_ty: InternPool.Index, - }, - reified: struct { - zir_index: TrackedInst.Index, - type_hash: u64, - }, - }, + /// Explicitly specified backing int type. `.none` if not packed or if backing type is inferred. 
+ packed_backing_int_type: Index, }) Allocator.Error!WipContainerType.Result { - if (ini.explicit_packed_backing_type != .none) { - assert(ip.zigTypeTag(ini.explicit_packed_backing_type) == .int); - if (ini.key == .declared) assert(ini.key.declared.arg_ty == ini.explicit_packed_backing_type); - } - const key: Key = .{ .union_type = switch (ini.key) { - .declared => |d| .{ .declared = .{ - .zir_index = d.zir_index, - .arg_ty = d.arg_ty, - .captures = .{ .external = d.captures }, - } }, - .reified => |r| .{ .reified = .{ - .zir_index = r.zir_index, - .type_hash = r.type_hash, - } }, - } }; - var gop = try ip.getOrPutKey(gpa, io, tid, key); + var gop = try ip.getOrPutKey(gpa, io, tid, .{ .struct_type = .{ .reified = .{ + .zir_index = ini.zir_index, + .type_hash = ini.type_hash, + } } }); defer gop.deinit(); if (gop == .existing) return .{ .existing = gop.existing }; @@ -8308,96 +8266,254 @@ pub fn getUnionType(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread. const extra = local.getMutableExtra(gpa, io); try items.ensureUnusedCapacity(1); - const zir_index, const type_hash_captures_extra_len = switch (ini.key) { - .declared => |d| .{ d.zir_index, d.captures.len + @intFromBool(ini.layout != .@"packed") }, - .reified => |r| .{ r.zir_index, 2 }, + const field_name_map = try ip.addMap(gpa, io, tid, ini.fields_len); + errdefer local.mutate.maps.len -= 1; + + const is_extern = switch (ini.layout) { + .auto => false, + .@"extern" => true, + .@"packed" => { + try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeStructPacked).@"struct".fields.len + + 2 + // type_hash + ini.fields_len + // field_name + ini.fields_len + // field_type + (if (ini.any_field_defaults) ini.fields_len else 0)); // field_default + + const extra_index = addExtraAssumeCapacity(extra, Tag.TypeStructPacked{ + .zir_index = ini.zir_index, + .captures_len = .reified, + .name = undefined, // set by `finish` + .name_nav = undefined, // set by `finish` + .namespace = undefined, // set by `finish` + 
.backing_int_type = ini.packed_backing_int_type, + .fields_len = ini.fields_len, + .field_name_map = field_name_map, + }); + _ = addExtraAssumeCapacity(extra, PackedU64.init(ini.type_hash)); // type_hash + const field_names_start = extra.mutate.len; + extra.appendNTimesAssumeCapacity(.{@intFromEnum(NullTerminatedString.empty)}, ini.fields_len); // field_name + const field_types_start = extra.mutate.len; + extra.appendNTimesAssumeCapacity(.{@intFromEnum(Index.none)}, ini.fields_len); // field_type + const field_defaults_start = extra.mutate.len; + if (ini.any_field_defaults) { + extra.appendNTimesAssumeCapacity(.{@intFromEnum(Index.none)}, ini.fields_len); // field_default + } + items.appendAssumeCapacity(.{ + .tag = switch (ini.packed_backing_int_type) { + .none => if (ini.any_field_defaults) .type_struct_packed_auto_defaults else .type_struct_packed_auto, + else => if (ini.any_field_defaults) .type_struct_packed_explicit_defaults else .type_struct_packed_explicit, + }, + .data = extra_index, + }); + return .{ .wip = .{ + .index = gop.put(), + .tid = tid, + .type_name_index = extra_index + std.meta.fieldIndex(Tag.TypeStructPacked, "name").?, + .name_nav_index = extra_index + std.meta.fieldIndex(Tag.TypeStructPacked, "name_nav").?, + .namespace_index = extra_index + std.meta.fieldIndex(Tag.TypeStructPacked, "namespace").?, + .field_names = .{ .tid = tid, .start = field_names_start, .len = ini.fields_len }, + .field_types = .{ .tid = tid, .start = field_types_start, .len = ini.fields_len }, + .field_values = if (ini.any_field_defaults) + .{ .tid = tid, .start = field_defaults_start, .len = ini.fields_len } + else + undefined, + .field_aligns = undefined, + .field_is_comptime_bits = undefined, + } }; + }, }; + try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeStruct).@"struct".fields.len + + 2 + // type_hash + ini.fields_len + // field_name + ini.fields_len + // field_type + (if (ini.any_field_defaults) ini.fields_len else 0) + // field_default + (if 
(ini.any_field_aligns) (ini.fields_len + 3) / 4 else 0) + // field_align + (if (ini.any_comptime_fields) (ini.fields_len + 31) / 32 else 0) + // field_is_comptime_bits + (if (!is_extern) ini.fields_len else 0) + // field_runtime_order + ini.fields_len); // field_offset + + const extra_index = addExtraAssumeCapacity(extra, Tag.TypeStruct{ + .zir_index = ini.zir_index, + .name = undefined, // set by `finish` + .name_nav = undefined, // set by `finish` + .namespace = undefined, // set by `finish` + .fields_len = ini.fields_len, + .field_name_map = field_name_map, + .size = 0, + .flags = .{ + .any_captures = .reified, + .layout = if (is_extern) .@"extern" else .auto, + .any_comptime_fields = ini.any_comptime_fields, + .any_field_defaults = ini.any_field_defaults, + .any_field_aligns = ini.any_field_aligns, + .has_one_possible_value = false, + .has_no_possible_value = false, + .comptime_only = false, + .has_runtime_bits = false, + .alignment = .none, + }, + }); + _ = addExtraAssumeCapacity(extra, PackedU64.init(ini.type_hash)); // type_hash + const field_names_start = extra.mutate.len; + extra.appendNTimesAssumeCapacity(.{@intFromEnum(NullTerminatedString.empty)}, ini.fields_len); // field_name + const field_types_start = extra.mutate.len; + extra.appendNTimesAssumeCapacity(.{@intFromEnum(Index.none)}, ini.fields_len); // field_type + const field_defaults_start = extra.mutate.len; + if (ini.any_field_defaults) { + extra.appendNTimesAssumeCapacity(.{@intFromEnum(Index.none)}, ini.fields_len); // field_default + } + const field_aligns_start = extra.mutate.len; + if (ini.any_field_aligns) { + extra.appendNTimesAssumeCapacity(.{0}, (ini.fields_len + 3) / 4); // field_align + } + const field_is_comptime_bits_start = extra.mutate.len; + if (ini.any_comptime_fields) { + extra.appendNTimesAssumeCapacity(.{0}, (ini.fields_len + 31) / 32); // field_is_comptime_bits + } + if (!is_extern) { + 
extra.appendNTimesAssumeCapacity(.{@intFromEnum(LoadedStructType.RuntimeOrder.unresolved)}, ini.fields_len); // field_runtime_order + } + extra.appendNTimesAssumeCapacity(.{0}, ini.fields_len); // field_offset + items.appendAssumeCapacity(.{ + .tag = .type_struct, + .data = extra_index, + }); + return .{ .wip = .{ + .index = gop.put(), + .tid = tid, + .type_name_index = extra_index + std.meta.fieldIndex(Tag.TypeStruct, "name").?, + .name_nav_index = extra_index + std.meta.fieldIndex(Tag.TypeStruct, "name_nav").?, + .namespace_index = extra_index + std.meta.fieldIndex(Tag.TypeStruct, "namespace").?, + .field_names = .{ .tid = tid, .start = field_names_start, .len = ini.fields_len }, + .field_types = .{ .tid = tid, .start = field_types_start, .len = ini.fields_len }, + .field_values = if (ini.any_field_defaults) + .{ .tid = tid, .start = field_defaults_start, .len = ini.fields_len } + else + undefined, + .field_aligns = if (ini.any_field_aligns) + .{ .tid = tid, .start = field_aligns_start, .len = ini.fields_len } + else + undefined, + .field_is_comptime_bits = if (ini.any_comptime_fields) + .{ .tid = tid, .start = field_is_comptime_bits_start, .len = (ini.fields_len + 31) / 32 } + else + undefined, + } }; +} + +pub fn getDeclaredUnionType( + ip: *InternPool, + gpa: Allocator, + io: Io, + tid: Zcu.PerThread.Id, + ini: struct { + zir_index: TrackedInst.Index, + captures: []const CaptureValue, + + // If the value of any of the following fields would change on an incremental update, then logic + // in `Zcu.mapOldZirToNew` must detect that (these properties are all trivially known from ZIR) + // and refuse to map the type declaration. This causes `zir_index` to change so that a new type + // will be interned at a fresh index. + // + // In the future, it would be good to remove all of those fields from `ini`, and in fact just + // have a single function `getDeclaredContainer` which is suitable for all container types. 
+ // However, this requires some major changes to how container types are represented in the + // InternPool, so that it is possible for their backing storage to be "reallocated" as needed + // during type resolution. + fields_len: u32, + layout: std.builtin.Type.ContainerLayout, + any_field_aligns: bool, + runtime_tag: LoadedUnionType.RuntimeTag, + enum_tag_mode: BackingTypeMode, + packed_backing_mode: BackingTypeMode, + }, +) Allocator.Error!WipContainerType.Result { + var gop = try ip.getOrPutKey(gpa, io, tid, .{ .union_type = .{ .declared = .{ + .zir_index = ini.zir_index, + .captures = .{ .external = ini.captures }, + } } }); + defer gop.deinit(); + if (gop == .existing) return .{ .existing = gop.existing }; + + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa, io); + const extra = local.getMutableExtra(gpa, io); + try items.ensureUnusedCapacity(1); + const is_extern = switch (ini.layout) { .auto => false, .@"extern" => true, .@"packed" => { try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeUnionPacked).@"struct".fields.len + - type_hash_captures_extra_len + + ini.captures.len + // capture ini.fields_len); // field_type const extra_index = addExtraAssumeCapacity(extra, Tag.TypeUnionPacked{ - .zir_index = zir_index, - .captures_len = switch (ini.key) { - .declared => |d| @enumFromInt(d.captures.len), - .reified => .reified, - }, + .zir_index = ini.zir_index, + .captures_len = @enumFromInt(ini.captures.len), .name = undefined, // set by `finish` .name_nav = undefined, // set by `finish` .namespace = undefined, // set by `finish` - .backing_int_type = ini.explicit_packed_backing_type, - .enum_tag_type = .none, // set by `setTagType` + .backing_int_type = .none, + .enum_tag_type = .none, .fields_len = ini.fields_len, }); - switch (ini.key) { - .declared => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}), - .reified => |r| _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)), - } + 
extra.appendSliceAssumeCapacity(.{@ptrCast(ini.captures)}); // capture extra.appendNTimesAssumeCapacity(.{@intFromEnum(Index.none)}, ini.fields_len); // field_type items.appendAssumeCapacity(.{ - .tag = switch (ini.explicit_packed_backing_type) { - .none => .type_union_packed_auto, - else => .type_union_packed_explicit, + .tag = switch (ini.packed_backing_mode) { + .auto => .type_union_packed_auto, + .explicit => .type_union_packed_explicit, }, .data = extra_index, }); - return .{ - .wip = .{ - .index = gop.put(), - .tid = tid, - .type_name_index = extra_index + std.meta.fieldIndex(Tag.TypeUnionPacked, "name").?, - .name_nav_index = extra_index + std.meta.fieldIndex(Tag.TypeUnionPacked, "name_nav").?, - .namespace_index = extra_index + std.meta.fieldIndex(Tag.TypeUnionPacked, "namespace").?, - .tag_type_index = extra_index + std.meta.fieldIndex(Tag.TypeUnionPacked, "enum_tag_type").?, - .fields_len = 0, // the fields come from the enum, so nothing to set - .field_name_map = undefined, - .field_names_start = undefined, - .field_comptime_bits_start = undefined, - }, - }; + return .{ .wip = .{ + .index = gop.put(), + .tid = tid, + .type_name_index = extra_index + std.meta.fieldIndex(Tag.TypeUnionPacked, "name").?, + .name_nav_index = extra_index + std.meta.fieldIndex(Tag.TypeUnionPacked, "name_nav").?, + .namespace_index = extra_index + std.meta.fieldIndex(Tag.TypeUnionPacked, "namespace").?, + .field_names = undefined, + .field_types = undefined, + .field_values = undefined, + .field_aligns = undefined, + .field_is_comptime_bits = undefined, + } }; }, }; try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeUnion).@"struct".fields.len + - type_hash_captures_extra_len + + 1 + // captures_len + ini.captures.len + // capture ini.fields_len + // field_type (if (ini.any_field_aligns) (ini.fields_len + 3) / 4 else 0)); // field_align const extra_index = addExtraAssumeCapacity(extra, Tag.TypeUnion{ - .zir_index = zir_index, + .zir_index = ini.zir_index, .name = undefined, // set 
by `finish` .name_nav = undefined, // set by `finish` .namespace = undefined, // set by `finish` - .enum_tag_type = .none, // set by `setTagType` + .enum_tag_type = .none, .fields_len = ini.fields_len, .size = 0, .padding = 0, .flags = .{ - .any_captures = switch (ini.key) { - .declared => |d| if (d.captures.len != 0) .true else .false, - .reified => .reified, - }, - .explicit_tag_type = ini.have_explicit_enum_tag, + .any_captures = if (ini.captures.len != 0) .true else .false, + .enum_tag_mode = ini.enum_tag_mode, .layout = if (is_extern) .@"extern" else .auto, .any_field_aligns = ini.any_field_aligns, .runtime_tag = ini.runtime_tag, .has_one_possible_value = false, .has_no_possible_value = false, .comptime_only = false, + .has_runtime_bits = false, .alignment = .none, }, }); - switch (ini.key) { - .declared => |d| if (d.captures.len != 0) { - extra.appendAssumeCapacity(.{@intCast(d.captures.len)}); - extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}); - }, - .reified => |r| _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)), + if (ini.captures.len > 0) { + extra.appendAssumeCapacity(.{@intCast(ini.captures.len)}); // captures_len + extra.appendSliceAssumeCapacity(.{@ptrCast(ini.captures)}); // capture } extra.appendNTimesAssumeCapacity(.{@intFromEnum(Index.none)}, ini.fields_len); // field_type if (ini.any_field_aligns) { @@ -8407,53 +8523,177 @@ pub fn getUnionType(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread. 
.tag = .type_union, .data = extra_index, }); - return .{ - .wip = .{ - .index = gop.put(), - .tid = tid, - .type_name_index = extra_index + std.meta.fieldIndex(Tag.TypeUnion, "name").?, - .name_nav_index = extra_index + std.meta.fieldIndex(Tag.TypeUnion, "name_nav").?, - .namespace_index = extra_index + std.meta.fieldIndex(Tag.TypeUnion, "namespace").?, - .tag_type_index = extra_index + std.meta.fieldIndex(Tag.TypeUnion, "enum_tag_type").?, - .fields_len = 0, // the fields come from the enum, so nothing to set - .field_name_map = undefined, - .field_names_start = undefined, - .field_comptime_bits_start = undefined, - }, - }; + return .{ .wip = .{ + .index = gop.put(), + .tid = tid, + .type_name_index = extra_index + std.meta.fieldIndex(Tag.TypeUnion, "name").?, + .name_nav_index = extra_index + std.meta.fieldIndex(Tag.TypeUnion, "name_nav").?, + .namespace_index = extra_index + std.meta.fieldIndex(Tag.TypeUnion, "namespace").?, + .field_names = undefined, + .field_types = undefined, + .field_values = undefined, + .field_aligns = undefined, + .field_is_comptime_bits = undefined, + } }; } -pub fn getEnumType(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread.Id, ini: struct { +pub fn getReifiedUnionType(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread.Id, ini: struct { + zir_index: TrackedInst.Index, + type_hash: u64, fields_len: u32, - /// For `enum(T)` or `union(enum(T))`, this is `T`. Asserts `T` is an integer type. - /// Otherwise, `.none`. - explicit_int_tag_type: Index, - nonexhaustive: bool, - key: union(enum) { - declared: struct { - zir_index: TrackedInst.Index, - captures: []const CaptureValue, - }, - reified: struct { - zir_index: TrackedInst.Index, - type_hash: u64, - }, - generated_union_tag: Index, - }, + layout: std.builtin.Type.ContainerLayout, + any_field_aligns: bool, + runtime_tag: LoadedUnionType.RuntimeTag, + /// Explicitly specified enum tag type. `.none` if `runtime_tag != .tagged`. 
+ enum_tag_type: Index, + /// Explicitly specified backing int type. `.none` if not packed or if backing type is inferred. + packed_backing_int_type: Index, }) Allocator.Error!WipContainerType.Result { - const key: Key = .{ .enum_type = switch (ini.key) { - .declared => |d| .{ .declared = .{ - .zir_index = d.zir_index, - .arg_ty = ini.explicit_int_tag_type, - .captures = .{ .external = d.captures }, - } }, - .reified => |r| .{ .reified = .{ - .zir_index = r.zir_index, - .type_hash = r.type_hash, - } }, - .generated_union_tag => |u| .{ .generated_union_tag = u }, + var gop = try ip.getOrPutKey(gpa, io, tid, .{ .union_type = .{ .reified = .{ + .zir_index = ini.zir_index, + .type_hash = ini.type_hash, + } } }); + defer gop.deinit(); + if (gop == .existing) return .{ .existing = gop.existing }; + + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa, io); + const extra = local.getMutableExtra(gpa, io); + try items.ensureUnusedCapacity(1); + + const is_extern = switch (ini.layout) { + .auto => false, + .@"extern" => true, + .@"packed" => { + try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeUnionPacked).@"struct".fields.len + + 2 + // type_hash + ini.fields_len + // reified_field_name + ini.fields_len); // field_type + + const extra_index = addExtraAssumeCapacity(extra, Tag.TypeUnionPacked{ + .zir_index = ini.zir_index, + .captures_len = .reified, + .name = undefined, // set by `finish` + .name_nav = undefined, // set by `finish` + .namespace = undefined, // set by `finish` + .backing_int_type = ini.packed_backing_int_type, + .enum_tag_type = .none, + .fields_len = ini.fields_len, + }); + _ = addExtraAssumeCapacity(extra, PackedU64.init(ini.type_hash)); // type_hash + const field_names_start = extra.mutate.len; + extra.appendNTimesAssumeCapacity(.{@intFromEnum(NullTerminatedString.empty)}, ini.fields_len); // reified_field_name + const field_types_start = extra.mutate.len; + extra.appendNTimesAssumeCapacity(.{@intFromEnum(Index.none)}, 
ini.fields_len); // field_type + items.appendAssumeCapacity(.{ + .tag = switch (ini.packed_backing_int_type) { + .none => .type_union_packed_auto, + else => .type_union_packed_explicit, + }, + .data = extra_index, + }); + return .{ .wip = .{ + .index = gop.put(), + .tid = tid, + .type_name_index = extra_index + std.meta.fieldIndex(Tag.TypeUnionPacked, "name").?, + .name_nav_index = extra_index + std.meta.fieldIndex(Tag.TypeUnionPacked, "name_nav").?, + .namespace_index = extra_index + std.meta.fieldIndex(Tag.TypeUnionPacked, "namespace").?, + .field_names = .{ .tid = tid, .start = field_names_start, .len = ini.fields_len }, + .field_types = .{ .tid = tid, .start = field_types_start, .len = ini.fields_len }, + .field_values = undefined, + .field_aligns = undefined, + .field_is_comptime_bits = undefined, + } }; + }, + }; + + try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeUnion).@"struct".fields.len + + 2 + // type_hash + ini.fields_len + // reified_field_name + ini.fields_len + // field_type + (if (ini.any_field_aligns) (ini.fields_len + 3) / 4 else 0)); // field_align + + const extra_index = addExtraAssumeCapacity(extra, Tag.TypeUnion{ + .zir_index = ini.zir_index, + .name = undefined, // set by `finish` + .name_nav = undefined, // set by `finish` + .namespace = undefined, // set by `finish` + .enum_tag_type = ini.enum_tag_type, + .fields_len = ini.fields_len, + .size = 0, + .padding = 0, + .flags = .{ + .any_captures = .reified, + .enum_tag_mode = if (ini.enum_tag_type == .none) .auto else .explicit, + .layout = if (is_extern) .@"extern" else .auto, + .any_field_aligns = ini.any_field_aligns, + .runtime_tag = ini.runtime_tag, + .has_one_possible_value = false, + .has_no_possible_value = false, + .comptime_only = false, + .has_runtime_bits = false, + .alignment = .none, + }, + }); + _ = addExtraAssumeCapacity(extra, PackedU64.init(ini.type_hash)); + const field_names_start = extra.mutate.len; + 
extra.appendNTimesAssumeCapacity(.{@intFromEnum(NullTerminatedString.empty)}, ini.fields_len); // reified_field_name + const field_types_start = extra.mutate.len; + extra.appendNTimesAssumeCapacity(.{@intFromEnum(Index.none)}, ini.fields_len); // field_type + const field_aligns_start = extra.mutate.len; + if (ini.any_field_aligns) { + extra.appendNTimesAssumeCapacity(.{0}, (ini.fields_len + 3) / 4); // field_align + } + items.appendAssumeCapacity(.{ + .tag = .type_union, + .data = extra_index, + }); + return .{ .wip = .{ + .index = gop.put(), + .tid = tid, + .type_name_index = extra_index + std.meta.fieldIndex(Tag.TypeUnion, "name").?, + .name_nav_index = extra_index + std.meta.fieldIndex(Tag.TypeUnion, "name_nav").?, + .namespace_index = extra_index + std.meta.fieldIndex(Tag.TypeUnion, "namespace").?, + .field_names = .{ .tid = tid, .start = field_names_start, .len = ini.fields_len }, + .field_types = .{ .tid = tid, .start = field_types_start, .len = ini.fields_len }, + .field_values = undefined, + .field_aligns = if (ini.any_field_aligns) + .{ .tid = tid, .start = field_aligns_start, .len = ini.fields_len } + else + undefined, + .field_is_comptime_bits = undefined, } }; - var gop = try ip.getOrPutKey(gpa, io, tid, key); +} + +pub fn getDeclaredEnumType( + ip: *InternPool, + gpa: Allocator, + io: Io, + tid: Zcu.PerThread.Id, + ini: struct { + zir_index: TrackedInst.Index, + captures: []const CaptureValue, + + // If the value of any of the following fields would change on an incremental update, then logic + // in `Zcu.mapOldZirToNew` must detect that (these properties are all trivially known from ZIR) + // and refuse to map the type declaration. This causes `zir_index` to change so that a new type + // will be interned at a fresh index. + // + // In the future, it would be good to remove all of those fields from `ini`, and in fact just + // have a single function `getDeclaredContainer` which is suitable for all container types. 
+ // However, this requires some major changes to how container types are represented in the + // InternPool, so that it is possible for their backing storage to be "reallocated" as needed + // during type resolution. + fields_len: u32, + nonexhaustive: bool, + /// For `enum(T)` this is `.explicit`. Otherwise this is `.none`. + int_tag_mode: BackingTypeMode, + }, +) Allocator.Error!WipContainerType.Result { + var gop = try ip.getOrPutKey(gpa, io, tid, .{ .enum_type = .{ .declared = .{ + .zir_index = ini.zir_index, + .captures = .{ .external = ini.captures }, + } } }); defer gop.deinit(); if (gop == .existing) return .{ .existing = gop.existing }; @@ -8464,7 +8704,7 @@ pub fn getEnumType(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread.I const tag: Tag, const have_values: bool = if (ini.nonexhaustive) .{ .type_enum_nonexhaustive, true } - else if (ini.explicit_int_tag_type != .none) + else if (ini.int_tag_mode == .explicit) .{ .type_enum_explicit, true } else .{ .type_enum_auto, false }; @@ -8476,43 +8716,24 @@ pub fn getEnumType(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread.I errdefer local.mutate.maps.len -= @intFromBool(have_values); try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeEnum).@"struct".fields.len + - switch (ini.key) { - .declared => |d| 1 + d.captures.len, // `zir_index` and `capture` - .reified => 3, // `zir_index` and `type_hash` - .generated_union_tag => 1, // owner_union - } + + 1 + // zir_index + ini.captures.len + // capture @intFromBool(have_values) + // field_value_map ini.fields_len + // field_name (if (have_values) ini.fields_len else 0)); // field_value const extra_index = addExtraAssumeCapacity(extra, Tag.TypeEnum{ - .captures_len = switch (ini.key) { - .declared => |d| @enumFromInt(d.captures.len), - .reified => .reified, - .generated_union_tag => .generated_union_tag, - }, + .captures_len = @enumFromInt(ini.captures.len), .name = undefined, // set by `finish` .name_nav = undefined, // set by `finish` .namespace 
= undefined, // set by `finish` - .int_tag_type = ini.explicit_int_tag_type, + .int_tag_type = .none, .fields_len = ini.fields_len, .field_name_map = field_name_map, }); - switch (ini.key) { - .declared => |d| { - extra.appendAssumeCapacity(.{@intFromEnum(d.zir_index)}); // zir_index - extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}); // capture - }, - .reified => |r| { - extra.appendAssumeCapacity(.{@intFromEnum(r.zir_index)}); // zir_index - _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)); // type_hash - }, - .generated_union_tag => |owner_union| { - extra.appendAssumeCapacity(.{@intFromEnum(owner_union)}); // owner_union - }, - } - if (have_values) extra.appendAssumeCapacity(.{@intFromEnum(field_value_map)}); - const field_names_start = extra.mutate.len; + extra.appendAssumeCapacity(.{@intFromEnum(ini.zir_index)}); // zir_index + extra.appendSliceAssumeCapacity(.{@ptrCast(ini.captures)}); // capture + if (have_values) extra.appendAssumeCapacity(.{@intFromEnum(field_value_map)}); // field_value_map extra.appendNTimesAssumeCapacity(.{@intFromEnum(NullTerminatedString.empty)}, ini.fields_len); // field_name if (have_values) extra.appendNTimesAssumeCapacity(.{@intFromEnum(Index.none)}, ini.fields_len); // field_value items.appendAssumeCapacity(.{ @@ -8525,22 +8746,165 @@ pub fn getEnumType(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread.I .type_name_index = extra_index + std.meta.fieldIndex(Tag.TypeEnum, "name").?, .name_nav_index = extra_index + std.meta.fieldIndex(Tag.TypeEnum, "name_nav").?, .namespace_index = extra_index + std.meta.fieldIndex(Tag.TypeEnum, "namespace").?, - .tag_type_index = extra_index + std.meta.fieldIndex(Tag.TypeEnum, "int_tag_type").?, - .fields_len = ini.fields_len, - .field_name_map = field_name_map, - .field_names_start = field_names_start, - .field_comptime_bits_start = null, + .field_names = undefined, + .field_types = undefined, + .field_values = undefined, + .field_aligns = undefined, + 
.field_is_comptime_bits = undefined, } }; } -pub fn getOpaqueType(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread.Id, ini: struct { +pub fn getReifiedEnumType(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread.Id, ini: struct { + zir_index: TrackedInst.Index, + type_hash: u64, + fields_len: u32, + nonexhaustive: bool, + /// Explicitly specified int tag type, or `.none` if the int tag type is inferred. + int_tag_type: Index, +}) Allocator.Error!WipContainerType.Result { + var gop = try ip.getOrPutKey(gpa, io, tid, .{ .enum_type = .{ .reified = .{ + .zir_index = ini.zir_index, + .type_hash = ini.type_hash, + } } }); + defer gop.deinit(); + if (gop == .existing) return .{ .existing = gop.existing }; + + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa, io); + const extra = local.getMutableExtra(gpa, io); + try items.ensureUnusedCapacity(1); + + const tag: Tag, const have_values: bool = if (ini.nonexhaustive) + .{ .type_enum_nonexhaustive, true } + else if (ini.int_tag_type != .none) + .{ .type_enum_explicit, true } + else + .{ .type_enum_auto, false }; + + const field_name_map = try ip.addMap(gpa, io, tid, ini.fields_len); + errdefer local.mutate.maps.len -= 1; + + const field_value_map = if (have_values) try ip.addMap(gpa, io, tid, ini.fields_len) else undefined; + errdefer local.mutate.maps.len -= @intFromBool(have_values); + + try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeEnum).@"struct".fields.len + + 1 + // zir_index + 2 + // type_hash + @intFromBool(have_values) + // field_value_map + ini.fields_len + // field_name + (if (have_values) ini.fields_len else 0)); // field_value + + const extra_index = addExtraAssumeCapacity(extra, Tag.TypeEnum{ + .captures_len = .reified, + .name = undefined, // set by `finish` + .name_nav = undefined, // set by `finish` + .namespace = undefined, // set by `finish` + .int_tag_type = ini.int_tag_type, + .fields_len = ini.fields_len, + .field_name_map = field_name_map, + }); + 
extra.appendAssumeCapacity(.{@intFromEnum(ini.zir_index)}); // zir_index + _ = addExtraAssumeCapacity(extra, PackedU64.init(ini.type_hash)); // type_hash + if (have_values) extra.appendAssumeCapacity(.{@intFromEnum(field_value_map)}); // field_value_map + const field_names_start = extra.mutate.len; + extra.appendNTimesAssumeCapacity(.{@intFromEnum(NullTerminatedString.empty)}, ini.fields_len); // field_name + const field_values_start = extra.mutate.len; + if (have_values) extra.appendNTimesAssumeCapacity(.{@intFromEnum(Index.none)}, ini.fields_len); // field_value + items.appendAssumeCapacity(.{ + .tag = tag, + .data = extra_index, + }); + return .{ .wip = .{ + .index = gop.put(), + .tid = tid, + .type_name_index = extra_index + std.meta.fieldIndex(Tag.TypeEnum, "name").?, + .name_nav_index = extra_index + std.meta.fieldIndex(Tag.TypeEnum, "name_nav").?, + .namespace_index = extra_index + std.meta.fieldIndex(Tag.TypeEnum, "namespace").?, + .field_names = .{ .tid = tid, .start = field_names_start, .len = ini.fields_len }, + .field_types = undefined, + .field_values = if (have_values) + .{ .tid = tid, .start = field_values_start, .len = ini.fields_len } + else + undefined, + .field_aligns = undefined, + .field_is_comptime_bits = undefined, + } }; +} + +pub fn getGeneratedEnumTagType(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread.Id, ini: struct { + /// The union type for which this enum is a generated tag. + union_type: Index, + /// For `union(enum(T))` this is `.explicit`. Otherwise this is `.none`. 
+ int_tag_mode: BackingTypeMode, + fields_len: u32, +}) Allocator.Error!WipContainerType.Result { + var gop = try ip.getOrPutKey(gpa, io, tid, .{ .enum_type = .{ .generated_union_tag = ini.union_type } }); + defer gop.deinit(); + if (gop == .existing) return .{ .existing = gop.existing }; + + const local = ip.getLocal(tid); + const items = local.getMutableItems(gpa, io); + const extra = local.getMutableExtra(gpa, io); + try items.ensureUnusedCapacity(1); + + const field_name_map = try ip.addMap(gpa, io, tid, ini.fields_len); + errdefer local.mutate.maps.len -= 1; + + const have_values = switch (ini.int_tag_mode) { + .explicit => true, + .auto => false, + }; + + const field_value_map = if (have_values) try ip.addMap(gpa, io, tid, ini.fields_len) else undefined; + errdefer local.mutate.maps.len -= @intFromBool(have_values); + + try extra.ensureUnusedCapacity(@typeInfo(Tag.TypeEnum).@"struct".fields.len + + 1 + // owner_union + @intFromBool(have_values) + // field_value_map + ini.fields_len + // field_name + (if (have_values) ini.fields_len else 0)); // field_value + + const extra_index = addExtraAssumeCapacity(extra, Tag.TypeEnum{ + .captures_len = .generated_union_tag, + .name = undefined, // set by `finish` + .name_nav = undefined, // set by `finish` + .namespace = undefined, // set by `finish` + .int_tag_type = .none, + .fields_len = ini.fields_len, + .field_name_map = field_name_map, + }); + extra.appendAssumeCapacity(.{@intFromEnum(ini.union_type)}); // owner_union + if (have_values) extra.appendAssumeCapacity(.{@intFromEnum(field_value_map)}); + extra.appendNTimesAssumeCapacity(.{@intFromEnum(NullTerminatedString.empty)}, ini.fields_len); // field_name + if (have_values) extra.appendNTimesAssumeCapacity(.{@intFromEnum(Index.none)}, ini.fields_len); // field_value + items.appendAssumeCapacity(.{ + .tag = switch (ini.int_tag_mode) { + .auto => .type_enum_auto, + .explicit => .type_enum_explicit, + }, + .data = extra_index, + }); + return .{ .wip = .{ + .index = 
gop.put(), + .tid = tid, + .type_name_index = extra_index + std.meta.fieldIndex(Tag.TypeEnum, "name").?, + .name_nav_index = extra_index + std.meta.fieldIndex(Tag.TypeEnum, "name_nav").?, + .namespace_index = extra_index + std.meta.fieldIndex(Tag.TypeEnum, "namespace").?, + .field_names = undefined, + .field_types = undefined, + .field_values = undefined, + .field_aligns = undefined, + .field_is_comptime_bits = undefined, + } }; +} + +pub fn getDeclaredOpaqueType(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread.Id, ini: struct { zir_index: TrackedInst.Index, captures: []const CaptureValue, }) Allocator.Error!WipContainerType.Result { var gop = try ip.getOrPutKey(gpa, io, tid, .{ .opaque_type = .{ .declared = .{ .zir_index = ini.zir_index, .captures = .{ .external = ini.captures }, - .arg_ty = .none, } } }); defer gop.deinit(); if (gop == .existing) return .{ .existing = gop.existing }; @@ -8569,11 +8933,11 @@ pub fn getOpaqueType(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread .type_name_index = extra_index + std.meta.fieldIndex(Tag.TypeOpaque, "name").?, .name_nav_index = extra_index + std.meta.fieldIndex(Tag.TypeOpaque, "name_nav").?, .namespace_index = extra_index + std.meta.fieldIndex(Tag.TypeOpaque, "namespace").?, - .tag_type_index = null, - .fields_len = 0, - .field_name_map = undefined, - .field_names_start = undefined, - .field_comptime_bits_start = undefined, + .field_names = undefined, + .field_types = undefined, + .field_values = undefined, + .field_aligns = undefined, + .field_is_comptime_bits = undefined, } }; } @@ -8584,12 +8948,15 @@ pub const WipContainerType = struct { name_nav_index: u32, namespace_index: u32, - tag_type_index: ?u32, - - fields_len: u32, - field_name_map: MapIndex, - field_names_start: u32, - field_comptime_bits_start: ?u32, + // These fields are only populated when creating reified types, because reified types populate + // field information immediately, with type resolution only handling validation. 
This is in + // contrast to declared types, where field information is populated by the type resolution + // process evaluating ZIR expressions. + field_names: NullTerminatedString.Slice, + field_types: Index.Slice, + field_values: Index.Slice, + field_aligns: Alignment.Slice, + field_is_comptime_bits: LoadedStructType.ComptimeBits, pub fn setName( wip: WipContainerType, @@ -8605,48 +8972,6 @@ pub const WipContainerType = struct { extra_items[wip.name_nav_index] = @intFromEnum(name_nav); } - pub fn setTagType( - wip: WipContainerType, - ip: *InternPool, - tag_ty: Index, - ) void { - const extra = ip.getLocalShared(wip.tid).extra.acquire(); - const extra_items = extra.view().items(.@"0"); - const i = wip.tag_type_index.?; - const old_val: InternPool.Index = @enumFromInt(extra_items[i]); - assert(old_val == .none); - assert(tag_ty != .none); - extra_items[i] = @intFromEnum(tag_ty); - } - - /// Returns the already-existing field with the same name, if any. - pub fn nextField( - wip: WipContainerType, - ip: *InternPool, - name: NullTerminatedString, - marked_comptime: bool, - ) ?u32 { - assert(wip.fields_len > 0); - const extra = ip.getLocalShared(wip.tid).extra.acquire(); - const extra_items = extra.view().items(.@"0"); - const map = wip.field_name_map.get(ip); - const field_idx = map.count(); - assert(field_idx < wip.fields_len); - const names: []NullTerminatedString = @ptrCast(extra_items[wip.field_names_start..][0..wip.fields_len]); - const adapter: NullTerminatedString.Adapter = .{ .strings = names[0..field_idx] }; - const gop = map.getOrPutAssumeCapacityAdapted(name, adapter); - if (gop.found_existing) return @intCast(gop.index); - names[field_idx] = name; - if (wip.field_comptime_bits_start) |start_idx| { - if (marked_comptime) { - extra_items[start_idx + field_idx / 32] |= @as(u32, 1) << @intCast(field_idx % 32); - } - } else { - assert(!marked_comptime); - } - return null; - } - pub fn finish( wip: WipContainerType, ip: *InternPool, @@ -8657,14 +8982,6 @@ pub 
const WipContainerType = struct { extra_items[wip.namespace_index] = @intFromEnum(namespace); - if (wip.fields_len > 0) { - assert(wip.field_name_map.get(ip).count() == wip.fields_len); - } - if (wip.tag_type_index) |i| { - const tag_ty: Index = @enumFromInt(extra_items[i]); - assert(tag_ty != .none); - } - return wip.index; } @@ -9504,19 +9821,6 @@ fn addStringsToMap( } } -fn addIndexesToMap( - ip: *InternPool, - map_index: MapIndex, - indexes: []const Index, -) void { - const map = map_index.get(ip); - const adapter: Index.Adapter = .{ .indexes = indexes }; - for (indexes) |index| { - const gop = map.getOrPutAssumeCapacityAdapted(index, adapter); - assert(!gop.found_existing); - } -} - fn addMap(ip: *InternPool, gpa: Allocator, io: Io, tid: Zcu.PerThread.Id, cap: usize) Allocator.Error!MapIndex { const maps = ip.getLocal(tid).getMutableMaps(gpa, io); const unwrapped: MapIndex.Unwrapped = .{ .tid = tid, .index = maps.mutate.len }; @@ -10260,10 +10564,78 @@ pub fn dump(ip: *const InternPool) void { const stderr = std.debug.lockStderr(&buffer); defer std.debug.unlockStderr(); const w = &stderr.file_writer.interface; + dumpDependencyStatsFallible(ip, w) catch return; dumpStatsFallible(ip, w, std.heap.page_allocator) catch return; dumpAllFallible(ip, w) catch return; } +fn dumpDependencyStatsFallible(ip: *const InternPool, w: *Io.Writer) !void { + const dep_entries_len = ip.dep_entries.items.len - ip.free_dep_entries.items.len; + const src_hash_deps_len = ip.src_hash_deps.count(); + const nav_val_deps_len = ip.nav_val_deps.count(); + const nav_ty_deps_len = ip.nav_ty_deps.count(); + const func_ies_deps_len = ip.func_ies_deps.count(); + const type_layout_deps_len = ip.type_layout_deps.count(); + const struct_defaults_deps_len = ip.struct_defaults_deps.count(); + const zon_file_deps_len = ip.zon_file_deps.count(); + const embed_file_deps_len = ip.embed_file_deps.count(); + const namespace_deps_len = ip.namespace_deps.count(); + const namespace_name_deps_len = 
ip.namespace_name_deps.count(); + const dep_entries_size = dep_entries_len * @sizeOf(DepEntry); + const src_hash_deps_size = src_hash_deps_len * 8; + const nav_val_deps_size = nav_val_deps_len * 8; + const nav_ty_deps_size = nav_ty_deps_len * 8; + const func_ies_deps_size = func_ies_deps_len * 8; + const type_layout_deps_size = type_layout_deps_len * 8; + const struct_defaults_deps_size = struct_defaults_deps_len * 8; + const zon_file_deps_size = zon_file_deps_len * 8; + const embed_file_deps_size = embed_file_deps_len * 8; + const namespace_deps_size = namespace_deps_len * 8; + const namespace_name_deps_size = namespace_name_deps_len * (@sizeOf(NamespaceNameKey) + 4); + + try w.print( + \\InternPool dependencies: {d} bytes + \\ {d} entries: {d} bytes + \\ {d} src_hash: {d} bytes + \\ {d} nav_val: {d} bytes + \\ {d} nav_ty: {d} bytes + \\ {d} func_ies: {d} bytes + \\ {d} type_layout: {d} bytes + \\ {d} struct_defaults: {d} bytes + \\ {d} zon_file: {d} bytes + \\ {d} embed_file: {d} bytes + \\ {d} namespace: {d} bytes + \\ {d} namespace_name: {d} bytes + \\ + , .{ + dep_entries_size + src_hash_deps_size + nav_val_deps_size + nav_ty_deps_size + + func_ies_deps_size + type_layout_deps_size + struct_defaults_deps_size + zon_file_deps_size + + embed_file_deps_size + namespace_deps_size + namespace_name_deps_size, + dep_entries_len, + dep_entries_size, + src_hash_deps_len, + src_hash_deps_size, + nav_val_deps_len, + nav_val_deps_size, + nav_ty_deps_len, + nav_ty_deps_size, + func_ies_deps_len, + func_ies_deps_size, + type_layout_deps_len, + type_layout_deps_size, + struct_defaults_deps_len, + struct_defaults_deps_size, + zon_file_deps_len, + zon_file_deps_size, + embed_file_deps_len, + embed_file_deps_size, + namespace_deps_len, + namespace_deps_size, + namespace_name_deps_len, + namespace_name_deps_size, + }); +} + fn dumpStatsFallible(ip: *const InternPool, w: *Io.Writer, arena: Allocator) !void { var items_len: usize = 0; var extra_len: usize = 0; @@ -10278,10 
+10650,10 @@ fn dumpStatsFallible(ip: *const InternPool, w: *Io.Writer, arena: Allocator) !vo const limbs_size = 8 * limbs_len; // TODO: map overhead size is not taken into account - const total_size = @sizeOf(InternPool) + items_size + extra_size + limbs_size; + const total_size = items_size + extra_size + limbs_size; - std.debug.print( - \\InternPool size: {d} bytes + try w.print( + \\InternPool values: {d} bytes \\ {d} items: {d} bytes \\ {d} extra: {d} bytes \\ {d} limbs: {d} bytes @@ -10302,6 +10674,8 @@ fn dumpStatsFallible(ip: *const InternPool, w: *Io.Writer, arena: Allocator) !vo }; var counts = std.AutoArrayHashMap(Tag, TagStats).init(arena); for (ip.locals) |*local| { + // Early check for length 0, because `view()` is invalid if capacity is 0 + if (local.mutate.items.len == 0) continue; const items = local.shared.items.view().slice(); const extra_list = local.shared.extra; const extra_items = extra_list.view().items(.@"0"); @@ -10562,6 +10936,8 @@ fn dumpStatsFallible(ip: *const InternPool, w: *Io.Writer, arena: Allocator) !vo fn dumpAllFallible(ip: *const InternPool, w: *Io.Writer) anyerror!void { for (ip.locals, 0..) |*local, tid| { + // Early check for length 0, because `view()` is invalid if capacity is 0 + if (local.mutate.items.len == 0) continue; const items = local.shared.items.view(); for ( items.items(.tag)[0..local.mutate.items.len], @@ -11981,22 +12357,42 @@ pub fn unwrapCoercedFunc(ip: *const InternPool, index: Index) Index { }; } -/// Returns the already-existing field with the same name, if any. +/// Puts `name` into `names_slice` at the next index (that being the current length of `map`). +/// Also inserts the name into `map`. If there is an existing field with this name, its index +/// is returned. Otherwise, `null` is returned. 
pub fn addFieldName( ip: *InternPool, - extra: Local.Extra, - names_map: MapIndex, - names_start: u32, + names: NullTerminatedString.Slice, + map: MapIndex, name: NullTerminatedString, ) ?u32 { - const extra_items = extra.view().items(.@"0"); - const map = names_map.get(ip); - const field_index = map.count(); - const strings = extra_items[names_start..][0..field_index]; - const adapter: NullTerminatedString.Adapter = .{ .strings = @ptrCast(strings) }; - const gop = map.getOrPutAssumeCapacityAdapted(name, adapter); + const m = map.get(ip); + const field_idx = m.count(); + const names_slice = names.get(ip); + names_slice[field_idx] = name; + const adapter: NullTerminatedString.Adapter = .{ .strings = names_slice[0..field_idx] }; + const gop = m.getOrPutAssumeCapacityAdapted(name, adapter); if (gop.found_existing) return @intCast(gop.index); - extra_items[names_start + field_index] = @intFromEnum(name); + assert(gop.index == field_idx); + return null; +} + +/// Like `addFieldName`, but instead of adding a field name to a struct, union, or enum, adds a +/// field tag value for an enum. 
+pub fn addFieldTagValue( + ip: *InternPool, + values: Index.Slice, + map: MapIndex, + value: Index, +) ?u32 { + const m = map.get(ip); + const field_idx = m.count(); + const values_slice = values.get(ip); + values_slice[field_idx] = value; + const adapter: Index.Adapter = .{ .indexes = values_slice[0..field_idx] }; + const gop = m.getOrPutAssumeCapacityAdapted(value, adapter); + if (gop.found_existing) return @intCast(gop.index); + assert(gop.index == field_idx); return null; } @@ -12295,6 +12691,7 @@ pub fn resolveStructLayout( has_no_possible_value: bool, has_one_possible_value: bool, comptime_only: bool, + has_runtime_bits: bool, ) void { const unwrapped_index = struct_type.unwrap(ip); @@ -12311,6 +12708,7 @@ pub fn resolveStructLayout( flags.has_no_possible_value = has_no_possible_value; flags.has_one_possible_value = has_one_possible_value; flags.comptime_only = comptime_only; + flags.has_runtime_bits = has_runtime_bits; flags.alignment = alignment; } @@ -12322,12 +12720,14 @@ pub fn resolveUnionLayout( ip: *InternPool, io: Io, union_type: Index, + enum_tag_type: Index, size: u32, padding: u32, alignment: Alignment, has_no_possible_value: bool, has_one_possible_value: bool, comptime_only: bool, + has_runtime_bits: bool, ) void { const unwrapped_index = union_type.unwrap(ip); @@ -12339,17 +12739,24 @@ pub fn resolveUnionLayout( const item = unwrapped_index.getItem(ip); assert(item.tag == .type_union); + extra_items[item.data + std.meta.fieldIndex(Tag.TypeUnion, "enum_tag_type").?] = @intFromEnum(enum_tag_type); extra_items[item.data + std.meta.fieldIndex(Tag.TypeUnion, "size").?] = size; extra_items[item.data + std.meta.fieldIndex(Tag.TypeUnion, "padding").?] 
= padding; const flags: *Tag.TypeUnion.Flags = @ptrCast(&extra_items[item.data + std.meta.fieldIndex(Tag.TypeUnion, "flags").?]); flags.has_no_possible_value = has_no_possible_value; flags.has_one_possible_value = has_one_possible_value; flags.comptime_only = comptime_only; + flags.has_runtime_bits = has_runtime_bits; flags.alignment = alignment; } /// Asserts that `struct_type` is a packed struct type. -pub fn resolvePackedStructBackingInt(ip: *InternPool, io: Io, struct_type: Index, backing_int_type: Index) void { +pub fn resolvePackedStructLayout( + ip: *InternPool, + io: Io, + struct_type: Index, + backing_int_type: Index, +) void { const unwrapped_index = struct_type.unwrap(ip); const local = ip.getLocal(unwrapped_index.tid); @@ -12371,7 +12778,13 @@ pub fn resolvePackedStructBackingInt(ip: *InternPool, io: Io, struct_type: Index } /// Asserts that `union_type` is a packed union type. -pub fn resolvePackedUnionBackingInt(ip: *InternPool, io: Io, union_type: Index, backing_int_type: Index) void { +pub fn resolvePackedUnionLayout( + ip: *InternPool, + io: Io, + union_type: Index, + enum_tag_type: Index, + backing_int_type: Index, +) void { const unwrapped_index = union_type.unwrap(ip); const local = ip.getLocal(unwrapped_index.tid); @@ -12387,5 +12800,32 @@ pub fn resolvePackedUnionBackingInt(ip: *InternPool, io: Io, union_type: Index, else => unreachable, } + extra_items[item.data + std.meta.fieldIndex(Tag.TypeUnionPacked, "enum_tag_type").?] = @intFromEnum(enum_tag_type); extra_items[item.data + std.meta.fieldIndex(Tag.TypeUnionPacked, "backing_int_type").?] = @intFromEnum(backing_int_type); } + +/// Asserts that `enum_type` is an enum type. 
+pub fn resolveEnumLayout( + ip: *InternPool, + io: Io, + enum_type: Index, + int_tag_type: Index, +) void { + const unwrapped_index = enum_type.unwrap(ip); + + const local = ip.getLocal(unwrapped_index.tid); + local.mutate.extra.mutex.lockUncancelable(io); + defer local.mutate.extra.mutex.unlock(io); + + const extra_items = local.shared.extra.view().items(.@"0"); + const item = unwrapped_index.getItem(ip); + switch (item.tag) { + .type_enum_auto, + .type_enum_explicit, + .type_enum_nonexhaustive, + => {}, + else => unreachable, + } + + extra_items[item.data + std.meta.fieldIndex(Tag.TypeEnum, "int_tag_type").?] = @intFromEnum(int_tag_type); +} diff --git a/src/Sema.zig b/src/Sema.zig index 55fb718a45..d1482b8681 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -397,7 +397,7 @@ pub const Block = struct { /// The name of the current "context" for naming namespace types. /// The interpretation of this depends on the name strategy in ZIR, but the name /// is always incorporated into the type name somehow. - /// See `Sema.createTypeName`. + /// See `Sema.setTypeName`. type_name_ctx: InternPool.NullTerminatedString, /// Create a `LazySrcLoc` based on an `Offset` from the code being analyzed in this block. 
@@ -1158,7 +1158,7 @@ fn analyzeBodyInner( }, inst }); } - const air_inst: Air.Inst.Ref = inst: switch (tags[@intFromEnum(inst)]) { + const air_ref: Air.Inst.Ref = inst: switch (tags[@intFromEnum(inst)]) { // zig fmt: off .alloc => try sema.zirAlloc(block, inst), .alloc_inferred => try sema.zirAllocInferred(block, true), @@ -1991,31 +1991,33 @@ fn analyzeBodyInner( break :blk .void_value; }, }; - if (sema.isNoReturn(air_inst)) { + if (sema.isNoReturn(air_ref)) { // We're going to assume that the body itself is noreturn, so let's ensure that now assert(block.instructions.items.len > 0); assert(sema.isNoReturn(block.instructions.items[block.instructions.items.len - 1].toRef())); break; } - // - if (air_inst.toIndex()) |air_inst_index| { - switch (sema.air_instructions.items(.tag)[@intFromEnum(air_inst_index)]) { - .inferred_alloc, .inferred_alloc_comptime => {}, - else => { - assert(sema.typeOf(air_inst).onePossibleValue(pt) catch @panic("") == null); - sema.typeOf(air_inst).assertHasLayout(zcu); - }, - } - } else { - switch (tags[@intFromEnum(inst)]) { - // MLUGG TODO: do we actually *want* this exception? we could arguably simplify things without it - // e.g. analyzeNavVal could stop doing ensureLayoutResolved in most cases (`extern` is an exception) and instead do `assertHasLayout` - .func, .func_inferred, .func_fancy => {}, // exception: we're in a func decl, layout will get resolved in a bit by `analyzeNavVal` - else => sema.typeOf(air_inst).assertHasLayout(zcu), + + // We must resolve the layout of a type before creating a value of that type. Therefore, + // the layout of the type of `air_ref` must already be resolved. 
+ check_type: { + if (air_ref.toIndex()) |air_inst| switch (sema.air_instructions.items(.tag)[@intFromEnum(air_inst)]) { + .inferred_alloc, .inferred_alloc_comptime => break :check_type, + else => {}, + }; + sema.typeOf(air_ref).assertHasLayout(zcu); + // If the type has an OPV, `air_ref` must be that OPV: there is no other interned value + // it could be, and it would be a bug for the value to not be comptime-known when it has + // an OPV. Behind a `std.debug.runtime_safety` check because `onePossibleValue` mutates + // the InternPool, so the call cannot be optimized out. + if (std.debug.runtime_safety) { + if (try sema.typeOf(air_ref).onePossibleValue(pt)) |opv| { + assert(air_ref == Air.Inst.Ref.fromValue(opv)); + } } } - // - map.putAssumeCapacity(inst, air_inst); + + map.putAssumeCapacity(inst, air_ref); i += 1; } } @@ -2097,7 +2099,7 @@ pub fn resolveConstStringIntern( fn resolveTypeOrPoison(sema: *Sema, block: *Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) !?Type { const air_inst = try sema.resolveInst(zir_ref); - const ty = try sema.analyzeAsType(block, src, air_inst); + const ty = try sema.analyzeAsType(block, src, .type, air_inst); if (ty.isGenericPoison()) return null; return ty; } @@ -2216,11 +2218,12 @@ pub fn analyzeAsType( sema: *Sema, block: *Block, src: LazySrcLoc, + reason: std.zig.SimpleComptimeReason, air_inst: Air.Inst.Ref, ) !Type { const wanted_type: Type = .type; const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); - const val = try sema.resolveConstDefinedValue(block, src, coerced_inst, .{ .simple = .type }); + const val = try sema.resolveConstDefinedValue(block, src, coerced_inst, .{ .simple = reason }); return val.toType(); } @@ -4112,9 +4115,11 @@ fn zirForLen(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. /// or error union pointed to, initializing these pointers along the way. /// Given a `*E!?T`, returns a (valid) `*T`. /// May invalidate already-stored payload data.
+/// Asserts that the layout of the pointer child type is already resolved. fn optEuBasePtrInit(sema: *Sema, block: *Block, ptr: Air.Inst.Ref, src: LazySrcLoc) CompileError!Air.Inst.Ref { const pt = sema.pt; const zcu = pt.zcu; + sema.typeOf(ptr).childType(zcu).assertHasLayout(zcu); var base_ptr = ptr; while (true) switch (sema.typeOf(base_ptr).childType(zcu).zigTypeTag(zcu)) { .error_union => base_ptr = try sema.analyzeErrUnionPayloadPtr(block, src, base_ptr, false, true), @@ -4128,6 +4133,7 @@ fn optEuBasePtrInit(sema: *Sema, block: *Block, ptr: Air.Inst.Ref, src: LazySrcL fn zirOptEuBasePtrInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const un_node = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const ptr = try sema.resolveInst(un_node.operand); + try sema.ensureLayoutResolved(sema.typeOf(ptr).childType(sema.pt.zcu)); return sema.optEuBasePtrInit(block, ptr, block.nodeOffset(un_node.src_node)); } @@ -4513,7 +4519,7 @@ fn validateStructInit( if (struct_ty.structFieldIsComptime(i, zcu)) continue; if (!struct_ty.isTuple(zcu)) { - try sema.ensureFieldInitsResolved(struct_ty); + try sema.ensureStructDefaultsResolved(struct_ty); } const default_val = struct_ty.structFieldDefaultValue(i, zcu) orelse { @@ -5737,7 +5743,7 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void } if (zcu.llvm_object != null and options.linkage == .internal) return; const export_ty = Value.fromInterned(uav.val).typeOf(zcu); - if (!try sema.validateExternType(export_ty, .other)) { + if (!export_ty.validateExtern(.other, zcu)) { return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "unable to export type '{f}'", .{export_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); @@ -5789,7 +5795,7 @@ pub fn analyzeExport( const exported_nav = ip.getNav(exported_nav_index); const export_ty: Type = .fromInterned(exported_nav.typeOf(ip)); - if (!try sema.validateExternType(export_ty, .other)) { + 
if (!export_ty.validateExtern(.other, zcu)) { return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "unable to export type '{f}'", .{export_ty.fmt(pt)}); errdefer msg.destroy(gpa); @@ -5827,7 +5833,7 @@ fn zirDisableInstrumentation(sema: *Sema) CompileError!void { .nav_val, .nav_ty, .type_layout, - .type_inits, + .struct_defaults, .memoized_state, => return, // does nothing outside a function }; @@ -5846,7 +5852,7 @@ fn zirDisableIntrinsics(sema: *Sema) CompileError!void { .nav_val, .nav_ty, .type_layout, - .type_inits, + .struct_defaults, .memoized_state, => return, // does nothing outside a function }; @@ -6729,8 +6735,28 @@ fn analyzeCall( } else func_src; const func_ty_info = zcu.typeToFunc(func_ty).?; - // MLUGG TODO: this isn't quite the check i want. this includes inline functions, which aren't *generic*... - const func_is_generic = !func_ty.fnHasRuntimeBits(zcu); + const any_comptime_params = func_ty_info.comptime_bits != 0 or ct: { + for (func_ty_info.param_types.get(ip)) |param_ty| { + if (Type.fromInterned(param_ty).comptimeOnly(zcu)) break :ct true; + } + break :ct Type.fromInterned(func_ty_info.return_type).comptimeOnly(zcu); + }; + const any_generic_types = generic: { + for (func_ty_info.param_types.get(ip)) |param_ty| { + if (param_ty == .generic_poison_type) break :generic true; + } + const ret_ty: Type = .fromInterned(func_ty_info.return_type); + if (ret_ty.toIntern() == .generic_poison_type) { + break :generic true; + } + if (ret_ty.zigTypeTag(zcu) == .error_union and + ret_ty.errorUnionPayload(zcu).toIntern() == .generic_poison_type) + { + break :generic true; + } + break :generic false; + }; + if (!callConvIsCallable(func_ty_info.cc)) { return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg( @@ -6766,7 +6792,7 @@ fn analyzeCall( else => unreachable, } else .{ null, false }; - if (func_is_generic and func_val == null) { + if ((any_generic_types or any_comptime_params) and func_val == null) { return 
sema.failWithNeededComptime(block, func_src, .{ .simple = .generic_call_target }); } @@ -6815,13 +6841,13 @@ fn analyzeCall( // This is the `inst_map` used when evaluating generic parameters and return types. var generic_inst_map: InstMap = .{}; defer generic_inst_map.deinit(gpa); - if (func_is_generic) { + if (any_generic_types) { try generic_inst_map.ensureSpaceForInstructions(gpa, fn_zir_info.param_body); } // This exists so that `generic_block` below can include a "called from here" note back to this // call site when analyzing generic parameter/return types. - var generic_inlining: Block.Inlining = if (func_is_generic) .{ + var generic_inlining: Block.Inlining = if (any_generic_types) .{ .call_block = block, .call_src = call_src, .func = func_val.?.toIntern(), @@ -6834,7 +6860,7 @@ fn analyzeCall( // This is the block in which we evaluate generic function components: that is, generic parameter // types and the generic return type. This must not be used if the function is not generic. // `comptime_reason` is set as needed. - var generic_block: Block = if (func_is_generic) .{ + var generic_block: Block = if (any_generic_types) .{ .parent = null, .sema = sema, .namespace = fn_nav.analysis.?.namespace, @@ -6843,9 +6869,9 @@ fn analyzeCall( .src_base_inst = fn_nav.analysis.?.zir_index, .type_name_ctx = fn_nav.fqn, } else undefined; - defer if (func_is_generic) generic_block.instructions.deinit(gpa); + defer if (any_generic_types) generic_block.instructions.deinit(gpa); - if (func_is_generic) { + if (any_generic_types) { // We certainly depend on the generic owner's signature! try sema.declareDependency(.{ .src_hash = fn_tracked_inst }); } @@ -6857,7 +6883,7 @@ fn analyzeCall( if (raw != .generic_poison_type) break :ty .fromInterned(raw); // We must discover the generic parameter type. 
- assert(func_is_generic); + assert(any_generic_types); const param_inst_idx = fn_zir_info.param_body[arg_idx]; const param_inst = fn_zir.instructions.get(@intFromEnum(param_inst_idx)); switch (param_inst.tag) { @@ -6888,7 +6914,7 @@ fn analyzeCall( } }; const ty_ref = try sema.resolveInlineBody(&generic_block, body, param_inst_idx); - const param_ty = try sema.analyzeAsType(&generic_block, param_src, ty_ref); + const param_ty = try sema.analyzeAsType(&generic_block, param_src, .fn_param_types, ty_ref); if (!param_ty.isValidParamType(zcu)) { const opaque_str = if (param_ty.zigTypeTag(zcu) == .@"opaque") "opaque " else ""; @@ -6906,7 +6932,7 @@ fn analyzeCall( return arg.*; // terminate analysis here } - if (func_is_generic) { + if (any_generic_types) { // We need to put the argument into `generic_inst_map` so that other parameters can refer to it. const param_inst_idx = fn_zir_info.param_body[arg_idx]; const declared_comptime = if (std.math.cast(u5, arg_idx)) |i| func_ty_info.paramIsComptime(i) else false; @@ -6948,7 +6974,7 @@ fn analyzeCall( // calls (where it should be the IES of the instantiation). However, it's how we print this // in error messages. const resolved_ret_ty: Type = ret_ty: { - if (!func_is_generic) break :ret_ty .fromInterned(func_ty_info.return_type); + if (!any_generic_types) break :ret_ty .fromInterned(func_ty_info.return_type); const maybe_poison_bare = if (fn_zir_info.inferred_error_set) maybe_poison: { break :maybe_poison ip.errorUnionPayload(func_ty_info.return_type); @@ -6958,7 +6984,7 @@ fn analyzeCall( // Evaluate the generic return type. As with generic parameters, we switch out `sema.code` and `sema.inst_map`. 
- assert(func_is_generic); + assert(any_generic_types); const old_code = sema.code; const old_inst_map = sema.inst_map; @@ -6981,7 +7007,7 @@ fn analyzeCall( } else bare: { assert(fn_zir_info.ret_ty_body.len != 0); const ty_ref = try sema.resolveInlineBody(&generic_block, fn_zir_info.ret_ty_body, fn_zir_inst); - break :bare try sema.analyzeAsType(&generic_block, func_ret_ty_src, ty_ref); + break :bare try sema.analyzeAsType(&generic_block, func_ret_ty_src, .fn_ret_ty, ty_ref); }; assert(bare_ty.toIntern() != .generic_poison_type); @@ -7035,7 +7061,7 @@ fn analyzeCall( }); if (func_ty_info.cc == .auto) { switch (sema.owner.unwrap()) { - .@"comptime", .nav_ty, .nav_val, .type_layout, .type_inits, .memoized_state => {}, + .@"comptime", .nav_ty, .nav_val, .type_layout, .struct_defaults, .memoized_state => {}, .func => |owner_func| ip.funcSetHasErrorTrace(io, owner_func, true), } } @@ -7043,7 +7069,7 @@ fn analyzeCall( try sema.validateRuntimeValue(block, args_info.argSrc(block, arg_idx), arg); } const runtime_func: Air.Inst.Ref, const runtime_args: []const Air.Inst.Ref = func: { - if (!func_is_generic) break :func .{ callee, args }; + if (!any_generic_types and !any_comptime_params) break :func .{ callee, args }; // Instantiate the generic function! 
@@ -7512,9 +7538,7 @@ fn zirArrayInitElemType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compil assert(indexable_ty.isIndexable(zcu)); // validated by a previous instruction const elem_ty = switch (indexable_ty.zigTypeTag(zcu)) { .@"struct" => indexable_ty.fieldType(@intFromEnum(bin.rhs), zcu), - .array, .vector => indexable_ty.childType(zcu), - .pointer => indexable_ty.indexablePtrElem(zcu), - else => unreachable, + else => indexable_ty.indexableElem(zcu), }; return .fromType(elem_ty); } @@ -7835,8 +7859,8 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr }; return sema.failWithOwnedErrorMsg(block, msg); } - const lhs_ty = try sema.analyzeAsType(block, lhs_src, lhs); - const rhs_ty = try sema.analyzeAsType(block, rhs_src, rhs); + const lhs_ty = try sema.analyzeAsType(block, lhs_src, .type, lhs); + const rhs_ty = try sema.analyzeAsType(block, rhs_src, .type, rhs); if (lhs_ty.zigTypeTag(zcu) != .error_set) return sema.fail(block, lhs_src, "expected error set type, found '{f}'", .{lhs_ty.fmt(pt)}); if (rhs_ty.zigTypeTag(zcu) != .error_set) @@ -8017,12 +8041,13 @@ fn zirEnumFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError if (dest_ty.zigTypeTag(zcu) != .@"enum") { return sema.fail(block, src, "expected enum, found '{f}'", .{dest_ty.fmt(pt)}); } + try sema.ensureLayoutResolved(dest_ty); _ = try sema.checkIntType(block, operand_src, operand_ty); if (try sema.resolveValue(operand)) |int_val| { if (dest_ty.isNonexhaustiveEnum(zcu)) { const int_tag_ty = dest_ty.intTagType(zcu); - if (try sema.intFitsInType(int_val, int_tag_ty, null)) { + if (int_val.intFitsInType(int_tag_ty, null, zcu)) { return Air.internedToRef((try pt.getCoerced(int_val, dest_ty)).toIntern()); } return sema.fail(block, src, "int value '{f}' out of range of non-exhaustive enum '{f}'", .{ @@ -8077,10 +8102,14 @@ fn zirOptionalPayloadPtr( const optional_ptr = try sema.resolveInst(inst_data.operand); const src = 
block.nodeOffset(inst_data.src_node); + const ptr_ty = sema.typeOf(optional_ptr); + assert(ptr_ty.zigTypeTag(sema.pt.zcu) == .pointer); + try sema.ensureLayoutResolved(ptr_ty.childType(sema.pt.zcu)); + return sema.analyzeOptionalPayloadPtr(block, src, optional_ptr, safety_check, false); } -/// MLUGG TODO: pre-resolved child? +/// Asserts that the layout of the pointer child type is already resolved. fn analyzeOptionalPayloadPtr( sema: *Sema, block: *Block, @@ -8095,12 +8124,12 @@ fn analyzeOptionalPayloadPtr( assert(optional_ptr_ty.zigTypeTag(zcu) == .pointer); const opt_type = optional_ptr_ty.childType(zcu); + opt_type.assertHasLayout(zcu); if (opt_type.zigTypeTag(zcu) != .optional) { return sema.failWithExpectedOptionalType(block, src, opt_type); } const child_type = opt_type.optionalChild(zcu); - try sema.ensureLayoutResolved(child_type); const child_pointer = try pt.ptrType(.{ .child = child_type.toIntern(), .flags = .{ @@ -8283,10 +8312,14 @@ fn zirErrUnionPayloadPtr( const operand = try sema.resolveInst(inst_data.operand); const src = block.nodeOffset(inst_data.src_node); + const ptr_ty = sema.typeOf(operand); + assert(ptr_ty.zigTypeTag(sema.pt.zcu) == .pointer); + try sema.ensureLayoutResolved(ptr_ty.childType(sema.pt.zcu)); + return sema.analyzeErrUnionPayloadPtr(block, src, operand, false, false); } -/// MLUGG TODO LAYOUT: already-resolved child? +/// Asserts that the layout of the pointer child type is already resolved. 
fn analyzeErrUnionPayloadPtr( sema: *Sema, block: *Block, @@ -8307,8 +8340,8 @@ fn analyzeErrUnionPayloadPtr( } const err_union_ty = operand_ty.childType(zcu); + err_union_ty.assertHasLayout(zcu); const payload_ty = err_union_ty.errorUnionPayload(zcu); - try sema.ensureLayoutResolved(payload_ty); const operand_pointer_ty = try pt.ptrType(.{ .child = payload_ty.toIntern(), .flags = .{ @@ -8744,7 +8777,7 @@ fn checkParamTypeCommon( } if (!param_ty.isGenericPoison() and !target_util.fnCallConvAllowsZigTypes(cc) and - !try sema.validateExternType(param_ty, .param_ty)) + !param_ty.validateExtern(.param_ty, zcu)) { return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(param_src, "parameter of type '{f}' not allowed in function with calling convention '{s}'", .{ @@ -8818,7 +8851,7 @@ fn checkReturnTypeAndCallConvCommon( } if (!bare_ret_ty.isGenericPoison() and !target_util.fnCallConvAllowsZigTypes(@"callconv") and - (inferred_error_set or !try sema.validateExternType(bare_ret_ty, .ret_ty))) + (inferred_error_set or !bare_ret_ty.validateExtern(.ret_ty, zcu))) { return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(ret_ty_src, "return type '{s}{f}' not allowed in function with calling convention '{s}'", .{ @@ -9042,7 +9075,7 @@ fn funcCommon( if (inferred_error_set) { assert(has_body); - return .fromIntern(try ip.getFuncDeclIes(gpa, io, pt.tid, .{ + const func_val: Value = .fromInterned(try ip.getFuncDeclIes(gpa, io, pt.tid, .{ .owner_nav = sema.owner.unwrap().nav_val, .param_types = param_types, @@ -9059,6 +9092,8 @@ fn funcCommon( .lbrace_column = @as(u16, @truncate(src_locs.columns)), .rbrace_column = @as(u16, @truncate(src_locs.columns >> 16)), })); + try sema.ensureLayoutResolved(func_val.typeOf(zcu)); + return .fromValue(func_val); } const func_ty = try ip.getFuncType(gpa, io, pt.tid, .{ @@ -9072,6 +9107,7 @@ fn funcCommon( }); if (has_body) { + try sema.ensureLayoutResolved(.fromInterned(func_ty)); return .fromIntern(try 
ip.getFuncDecl(gpa, io, pt.tid, .{ .owner_nav = sema.owner.unwrap().nav_val, .ty = func_ty, @@ -9109,7 +9145,7 @@ fn zirParam( } const param_ty_inst = try sema.resolveInlineBody(block, body, inst); - break :ty try sema.analyzeAsType(block, src, param_ty_inst); + break :ty try sema.analyzeAsType(block, src, .fn_param_types, param_ty_inst); }; try block.params.append(sema.arena, .{ @@ -9948,6 +9984,7 @@ fn zirSwitchBlockErrUnion(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp err_union_ty.fmt(pt), }); } + try sema.ensureLayoutResolved(err_union_ty); const non_err_cond = if (non_err_case.operand_is_ref) try sema.analyzePtrIsNonErr(block, operand_src, eu_maybe_ptr) @@ -12924,7 +12961,7 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air. const res_ty: InternPool.Index = b: { if (extra.res_ty == .none) break :b .none; const res_ty_inst = try sema.resolveInst(extra.res_ty); - const res_ty = try sema.analyzeAsType(block, operand_src, res_ty_inst); + const res_ty = try sema.analyzeAsType(block, operand_src, .type, res_ty_inst); if (res_ty.isGenericPoison()) break :b .none; break :b res_ty.toIntern(); }; @@ -15683,8 +15720,8 @@ fn zirCmpEq( return block.addBinOp(air_tag, lhs, rhs); } if (lhs_ty_tag == .type and rhs_ty_tag == .type) { - const lhs_as_type = try sema.analyzeAsType(block, lhs_src, lhs); - const rhs_as_type = try sema.analyzeAsType(block, rhs_src, rhs); + const lhs_as_type = try sema.analyzeAsType(block, lhs_src, .type, lhs); + const rhs_as_type = try sema.analyzeAsType(block, rhs_src, .type, rhs); return if (lhs_as_type.eql(rhs_as_type, zcu) == (op == .eq)) .bool_true else .bool_false; } return sema.analyzeCmp(block, src, lhs, rhs, op, lhs_src, rhs_src, true); @@ -15979,16 +16016,7 @@ fn zirThis( extended: Zir.Inst.Extended.InstData, ) CompileError!Air.Inst.Ref { _ = extended; - const zcu = sema.pt.zcu; - const namespace = zcu.namespacePtr(block.namespace); - - switch (zcu.intern_pool.indexToKey(namespace.owner_type)) { - 
.opaque_type, .struct_type, .union_type => {}, - // Enum inits are resolved eagerly. TODO MLUGG: honestly i don't think they SHOULD be lol - .enum_type => try sema.ensureFieldInitsResolved(.fromInterned(namespace.owner_type)), - else => unreachable, - } - return .fromIntern(namespace.owner_type); + return .fromIntern(sema.pt.zcu.namespacePtr(block.namespace).owner_type); } fn zirClosureGet(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { @@ -16224,12 +16252,12 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const type_info_ty = try sema.getBuiltinType(src, .Type); const type_info_tag_ty = type_info_ty.unionTagType(zcu).?; + try sema.ensureLayoutResolved(ty); + if (ty.typeDeclInst(zcu)) |type_decl_inst| { try sema.declareDependency(.{ .namespace = type_decl_inst }); } - try sema.ensureLayoutResolved(ty); - switch (ty.zigTypeTag(zcu)) { .type, .void, @@ -16240,7 +16268,14 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .undefined, .null, .enum_literal, - => |type_info_tag| return unionInitFromEnumTag(sema, block, src, type_info_ty, @intFromEnum(type_info_tag), .void_value), + => |type_info_tag| return .fromValue(try pt.unionValue( + type_info_ty, + Value.uninterpret(type_info_tag, type_info_tag_ty, pt) catch |err| switch (err) { + error.TypeMismatch => @panic("std.builtin is corrupt"), + error.OutOfMemory => |e| return e, + }, + .void, + )), .@"fn" => { const fn_info_ty = try sema.getBuiltinType(src, .@"Type.Fn"); @@ -16248,9 +16283,11 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai const func_ty_info = zcu.typeToFunc(ty).?; const param_vals = try sema.arena.alloc(InternPool.Index, func_ty_info.param_types.len); + var func_is_generic = false; for (param_vals, 0..) 
|*param_val, i| { const param_ty = func_ty_info.param_types.get(ip)[i]; const is_generic = param_ty == .generic_poison_type; + if (is_generic or Type.fromInterned(param_ty).comptimeOnly(zcu)) func_is_generic = true; const param_ty_val = try pt.intern(.{ .opt = .{ .ty = try pt.intern(.{ .opt_type = .type_type }), .val = if (is_generic) .none else param_ty, @@ -16300,18 +16337,21 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai } }); }; + const ret_ty_is_generic = generic: { + const ret_ty: Type = .fromInterned(func_ty_info.return_type); + if (ret_ty.toIntern() == .generic_poison_type) break :generic true; + if (ret_ty.zigTypeTag(zcu) == .error_union) { + if (ret_ty.errorUnionPayload(zcu).toIntern() == .generic_poison_type) { + break :generic true; + } + } + break :generic false; + }; + if (ret_ty_is_generic) func_is_generic = true; + const ret_ty_opt = try pt.intern(.{ .opt = .{ .ty = try pt.intern(.{ .opt_type = .type_type }), - .val = opt_val: { - const ret_ty: Type = .fromInterned(func_ty_info.return_type); - if (ret_ty.toIntern() == .generic_poison_type) break :opt_val .none; - if (ret_ty.zigTypeTag(zcu) == .error_union) { - if (ret_ty.errorUnionPayload(zcu).toIntern() == .generic_poison_type) { - break :opt_val .none; - } - } - break :opt_val ret_ty.toIntern(); - }, + .val = if (ret_ty_is_generic) .none else func_ty_info.return_type, } }); const callconv_ty = try sema.getBuiltinType(src, .CallingConvention); @@ -16320,9 +16360,6 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai error.OutOfMemory => |e| return e, }; - // MLUGG TODO - const func_is_generic = false; - const field_values: [5]InternPool.Index = .{ // calling_convention: CallingConvention, callconv_val.toIntern(), @@ -16837,7 +16874,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai .struct_type => ip.loadStructType(ty.toIntern()), else => unreachable, }; - try sema.ensureFieldInitsResolved(ty); // 
can't do this sooner, since it's not allowed on tuples + try sema.ensureStructDefaultsResolved(ty); // can't do this sooner, since it's not allowed on tuples struct_field_vals = try gpa.alloc(InternPool.Index, struct_type.field_types.len); for (struct_field_vals, 0..) |*field_val, field_index| { @@ -18193,7 +18230,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air const elem_ty = blk: { const air_inst = try sema.resolveInst(extra.data.elem_type); - const ty = sema.analyzeAsType(block, elem_ty_src, air_inst) catch |err| { + const ty = sema.analyzeAsType(block, elem_ty_src, .type, air_inst) catch |err| { if (err == error.AnalysisFail and sema.err != null and sema.typeOf(air_inst).isSinglePointer(zcu)) { try sema.errNote(elem_ty_src, sema.err.?, "use '.*' to dereference pointer", .{}); } @@ -18274,7 +18311,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air } else if (inst_data.size != .one and elem_ty.zigTypeTag(zcu) == .@"opaque") { return sema.fail(block, elem_ty_src, "indexable pointer to opaque type '{f}' not allowed", .{elem_ty.fmt(pt)}); } else if (inst_data.size == .c) { - if (!try sema.validateExternType(elem_ty, .other)) { + if (!elem_ty.validateExtern(.other, zcu)) { const msg = msg: { const msg = try sema.errMsg(elem_ty_src, "C pointers cannot point to non-C-ABI-compatible type '{f}'", .{elem_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); @@ -18288,11 +18325,11 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air } } - if (host_size != 0 and !elem_ty.packable(zcu)) { - return sema.failWithOwnedErrorMsg(block, msg: { + if (host_size != 0) { + if (elem_ty.unpackable(zcu)) |reason| return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(elem_ty_src, "bit-pointer cannot refer to value of type '{f}'", .{elem_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); - try sema.explainWhyTypeIsNotPackable(msg, elem_ty_src, elem_ty); + try 
sema.explainWhyTypeIsUnpackable(msg, elem_ty_src, reason); break :msg msg; }); } @@ -18455,63 +18492,32 @@ fn arrayInitEmpty(sema: *Sema, block: *Block, src: LazySrcLoc, obj_ty: Type) Com fn zirUnionInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const pt = sema.pt; + const zcu = pt.zcu; + const ip = &zcu.intern_pool; const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const ty_src = block.builtinCallArgSrc(inst_data.src_node, 0); const field_src = block.builtinCallArgSrc(inst_data.src_node, 1); - const init_src = block.builtinCallArgSrc(inst_data.src_node, 2); + const payload_src = block.builtinCallArgSrc(inst_data.src_node, 2); const extra = sema.code.extraData(Zir.Inst.UnionInit, inst_data.payload_index).data; const union_ty = try sema.resolveType(block, ty_src, extra.union_type); if (union_ty.zigTypeTag(pt.zcu) != .@"union") { return sema.fail(block, ty_src, "expected union type, found '{f}'", .{union_ty.fmt(pt)}); } + union_ty.assertHasLayout(zcu); // from a previous `field_type_ref` instruction const field_name = try sema.resolveConstStringIntern(block, field_src, extra.field_name, .{ .simple = .union_field_names }); - const init = try sema.resolveInst(extra.init); - return sema.unionInit(block, init, init_src, union_ty, ty_src, field_name, field_src); -} - -fn unionInit( - sema: *Sema, - block: *Block, - uncasted_init: Air.Inst.Ref, - init_src: LazySrcLoc, - union_ty: Type, - union_ty_src: LazySrcLoc, - field_name: InternPool.NullTerminatedString, - field_src: LazySrcLoc, -) CompileError!Air.Inst.Ref { - const pt = sema.pt; - const zcu = pt.zcu; - const ip = &zcu.intern_pool; const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_src); const field_ty: Type = .fromInterned(zcu.typeToUnion(union_ty).?.field_types.get(ip)[field_index]); - const init = try sema.coerce(block, field_ty, uncasted_init, init_src); - _ = union_ty_src; - return unionInitFromEnumTag(sema, block, 
init_src, union_ty, field_index, init); -} -fn unionInitFromEnumTag( - sema: *Sema, - block: *Block, - init_src: LazySrcLoc, - union_ty: Type, - field_index: u32, - init: Air.Inst.Ref, -) !Air.Inst.Ref { - const pt = sema.pt; - const zcu = pt.zcu; + const payload = try sema.coerce(block, field_ty, try sema.resolveInst(extra.init), payload_src); - if (try sema.resolveValue(init)) |init_val| { + if (try sema.resolveValue(payload)) |payload_val| { const tag_ty = union_ty.unionTagTypeHypothetical(zcu); const tag_val = try pt.enumValueFieldIndex(tag_ty, field_index); - return Air.internedToRef((try pt.internUnion(.{ - .ty = union_ty.toIntern(), - .tag = tag_val.toIntern(), - .val = init_val.toIntern(), - }))); + return .fromValue(try pt.unionValue(union_ty, tag_val, payload_val)); } - try sema.requireRuntimeBlock(block, init_src, null); - return block.addUnionInit(union_ty, field_index, init); + try sema.requireRuntimeBlock(block, payload_src, null); + return block.addUnionInit(union_ty, field_index, payload); } fn zirStructInit( @@ -18588,9 +18594,6 @@ fn zirStructInit( const field_ty = resolved_ty.fieldType(field_index, zcu); field_inits[field_index] = try sema.coerce(block, field_ty, uncoerced_init, field_src); if (resolved_ty.structFieldIsComptime(field_index, zcu)) { - if (!resolved_ty.isTuple(zcu)) { - try sema.ensureFieldInitsResolved(resolved_ty); - } const default_value = (try resolved_ty.structFieldValueComptime(pt, field_index)).?; const init_val = (try sema.resolveValue(field_inits[field_index])) orelse { return sema.failWithNeededComptime(block, field_src, .{ .simple = .stored_to_comptime_field }); @@ -18744,7 +18747,12 @@ fn finishStructInit( continue; } - try sema.ensureFieldInitsResolved(struct_ty); + if (struct_type.field_is_comptime_bits.get(ip, i)) { + field_inits[i] = .fromIntern(struct_type.field_defaults.get(ip)[i]); + continue; + } + + try sema.ensureStructDefaultsResolved(struct_ty); const field_default: InternPool.Index = d: { if 
(struct_type.field_defaults.len == 0) break :d .none; @@ -18935,55 +18943,54 @@ fn structInitAnon( break :hash hasher.final(); }; const tracked_inst = try block.trackZir(inst); - const struct_ty: Type = switch (try ip.getStructType(gpa, io, pt.tid, .{ + const struct_ty: Type = switch (try ip.getReifiedStructType(gpa, io, pt.tid, .{ + .zir_index = tracked_inst, + .type_hash = type_hash, .fields_len = extra_data.fields_len, .layout = .auto, - .explicit_packed_backing_type = .none, .any_comptime_fields = any_values, .any_field_defaults = any_values, .any_field_aligns = false, - .key = .{ .reified = .{ - .zir_index = tracked_inst, - .type_hash = type_hash, - } }, + .packed_backing_int_type = .none, })) { + .existing => |ty| .fromInterned(ty), .wip => |wip| ty: { errdefer wip.cancel(ip, pt.tid); - // MLUGG TODO obvs this sux - const anon_prefix = (try sema.createTypeName(block, .anon, "struct", inst)).anon_prefix; - wip.setName(ip, try ip.getOrPutStringFmt(gpa, io, pt.tid, "{s}_{d}", .{ anon_prefix, @intFromEnum(wip.index) }, .no_embedded_nulls), .none); + try sema.setTypeName(block, &wip, .anon, "struct", inst); - const struct_type = ip.loadStructType(wip.index); - - for (names, values) |name, init_val| { - assert(wip.nextField(ip, name, init_val != .none) == null); // AstGen validated no duplicates for us + // Reified structs have field information populated immediately. + @memcpy(wip.field_names.get(ip), names); + @memcpy(wip.field_types.get(ip), types); + if (any_values) { + @memcpy(wip.field_values.get(ip), values); + @memset(wip.field_is_comptime_bits.getAll(ip), 0); + for (values, 0..) |val, field_index| { + if (val == .none) continue; + const bit_bag_index = field_index / 32; + const mask = @as(u32, 1) << @intCast(field_index % 32); + wip.field_is_comptime_bits.getAll(ip)[bit_bag_index] |= mask; + } } - // Populating these means the type is already resolved; we don't need to add it to `zcu.outdated` or anything. 
- // That's important because type resolution relies on types being declared. - @memcpy(struct_type.field_types.get(ip), types); - @memcpy(struct_type.field_defaults.get(ip), if (any_values) values else @as([]const InternPool.Index, &.{})); - - try type_resolution.finishStructLayout(sema, block, src, wip.index, &struct_type); - const new_namespace_index = try pt.createNamespace(.{ .parent = block.namespace.toOptional(), .owner_type = wip.index, .file_scope = block.getFileScopeIndex(zcu), .generation = zcu.generation, }); - codegen_type: { - if (zcu.comp.config.use_llvm) break :codegen_type; - if (block.ownerModule().strip) break :codegen_type; - zcu.comp.link_prog_node.increaseEstimatedTotalItems(1); - try zcu.comp.queueJob(.{ .link_type = wip.index }); - } if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newType(zcu, wip.index); + + try zcu.outdated.ensureUnusedCapacity(gpa, 1); + try zcu.outdated_ready.ensureUnusedCapacity(gpa, 1); + errdefer comptime unreachable; // because we don't remove the `outdated` entries + zcu.outdated.putAssumeCapacityNoClobber(.wrap(.{ .type_layout = wip.index }), 0); + zcu.outdated_ready.putAssumeCapacityNoClobber(.wrap(.{ .type_layout = wip.index }), {}); + break :ty .fromInterned(wip.finish(ip, new_namespace_index)); }, - .existing => |ty| .fromInterned(ty), }; try sema.addTypeReferenceEntry(src, struct_ty); + try sema.ensureLayoutResolved(struct_ty); _ = opt_runtime_index orelse { const struct_val = try pt.aggregateValue(struct_ty, values); @@ -19338,6 +19345,7 @@ fn fieldType( const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; + aggregate_ty.assertHasLayout(zcu); var cur_ty = aggregate_ty; while (true) { switch (cur_ty.zigTypeTag(zcu)) { @@ -19397,7 +19405,7 @@ fn getErrorReturnTrace(sema: *Sema, block: *Block) CompileError!Air.Inst.Ref { .func => |func| if (ip.funcAnalysisUnordered(func).has_error_trace and block.ownerModule().error_tracing) { return block.addTy(.err_return_trace, 
opt_ptr_stack_trace_ty); }, - .@"comptime", .nav_ty, .nav_val, .type_layout, .type_inits, .memoized_state => {}, + .@"comptime", .nav_ty, .nav_val, .type_layout, .struct_defaults, .memoized_state => {}, } return Air.internedToRef(try pt.intern(.{ .opt = .{ .ty = opt_ptr_stack_trace_ty.toIntern(), @@ -19823,7 +19831,7 @@ fn zirReifyPointer( else => {}, } - if (size == .c and !try sema.validateExternType(elem_ty, .other)) { + if (size == .c and !elem_ty.validateExtern(.other, zcu)) { return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(src, "C pointers cannot point to non-C-ABI-compatible type '{f}'", .{elem_ty.fmt(pt)}); errdefer msg.destroy(gpa); @@ -19988,6 +19996,7 @@ fn zirReifyStruct( const name_strategy: Zir.Inst.NameStrategy = @enumFromInt(extended.small); const extra = sema.code.extraData(Zir.Inst.ReifyStruct, extended.operand).data; const tracked_inst = try block.trackZir(inst); + const src: LazySrcLoc = .{ .base_node_inst = tracked_inst, .offset = .nodeOffset(.zero), @@ -20039,7 +20048,7 @@ fn zirReifyStruct( const backing_int_ty_uncoerced = try sema.resolveInst(extra.backing_ty); const backing_int_ty_coerced = try sema.coerce(block, .optional_type, backing_int_ty_uncoerced, backing_ty_src); - const backing_int_ty_val = try sema.resolveConstDefinedValue(block, backing_ty_src, backing_int_ty_coerced, .{ .simple = .type }); + const backing_int_ty_val = try sema.resolveConstDefinedValue(block, backing_ty_src, backing_int_ty_coerced, .{ .simple = .packed_struct_backing_int_type }); const field_names_uncoerced = try sema.resolveInst(extra.field_names); const field_names_coerced = try sema.coerce(block, .slice_const_slice_const_u8, field_names_uncoerced, field_names_src); @@ -20079,19 +20088,30 @@ fn zirReifyStruct( return sema.failWithUseOfUndef(block, backing_ty_src, null); } - // The validation work here is non-trivial, and it's possible the type already exists. 
- // So in this first pass, let's just construct a hash to optimize for this case. If the - // inputs turn out to be invalid, we can cancel the WIP type later. + // Most validation of this type happens during type resolution. We basically need to do the work + // which AstGen would normally do. An exception is checking for duplicate field names, which is + // handled by type resolution---it just simplifies some logic a little. + + // As well as validation, we're going to gather some information about the fields, and construct + // a hash representing the inputs for deduplication purposes. var any_comptime_fields = false; - var any_default_inits = false; - var any_aligned_fields = false; + var any_field_defaults = false; + var any_field_aligns = false; - // For deduplication purposes, we must create a hash including all details of this type. // TODO: use a longer hash! var hasher = std.hash.Wyhash.init(0); std.hash.autoHash(&hasher, layout); std.hash.autoHash(&hasher, backing_int_ty_val); + + const backing_int_ty: ?Type = if (backing_int_ty_val.optionalValue(zcu)) |backing| ty: { + switch (layout) { + .auto, .@"extern" => return sema.fail(block, backing_ty_src, "non-packed struct does not support backing integer type", .{}), + .@"packed" => {}, + } + break :ty backing.toType(); + } else null; + // The field *type* array has already been deduplicated for us thanks to the InternPool! 
std.hash.autoHash(&hasher, field_types_arr); // However, for field names and attributes, we need to actually iterate the individual fields, @@ -20126,201 +20146,126 @@ fn zirReifyStruct( field_attrs_src, .{ .simple = .struct_field_default_value }, ); + if (deref_val.canMutateComptimeVarState(zcu)) { + return sema.failWithContainsReferenceToComptimeVar(block, field_attrs_src, field_name, "field default value", deref_val); + } + any_field_defaults = true; break :d deref_val.toIntern(); }; + if (field_attr_comptime.toBool()) { + if (field_default == .none) { + return sema.fail(block, field_attrs_src, "comptime field without default initialization value", .{}); + } + if (layout != .auto) { + return sema.fail(block, field_attrs_src, "{t} struct fields cannot be marked comptime", .{layout}); + } + any_comptime_fields = true; + } + + if (field_attr_align.optionalValue(zcu)) |align_val| { + if (layout == .@"packed") { + return sema.fail(block, field_attrs_src, "packed struct fields cannot be aligned", .{}); + } + // Trigger a compile error if the alignment is invalid. + _ = try sema.validateAlign(block, field_attrs_src, align_val.toUnsignedInt(zcu)); + any_field_aligns = true; + } + std.hash.autoHash(&hasher, .{ field_name, field_attr_comptime, field_attr_align, field_default, }); - - if (field_attr_comptime.toBool()) any_comptime_fields = true; - if (field_attr_align.optionalValue(zcu)) |_| any_aligned_fields = true; - if (field_default != .none) any_default_inits = true; } - // Some basic validation to avoid a bogus `getStructType` call... 
- const backing_int_ty: ?Type = if (backing_int_ty_val.optionalValue(zcu)) |backing| ty: { - switch (layout) { - .auto, .@"extern" => return sema.fail(block, backing_ty_src, "non-packed struct does not support backing integer type", .{}), - .@"packed" => {}, - } - break :ty backing.toType(); - } else null; - if (any_aligned_fields and layout == .@"packed") { - return sema.fail(block, field_attrs_src, "packed struct fields cannot be aligned", .{}); - } - if (any_comptime_fields and layout != .auto) { - return sema.fail(block, field_attrs_src, "{t} struct fields cannot be marked comptime", .{layout}); - } - - const wip_ty = switch (try ip.getStructType(gpa, io, pt.tid, .{ + switch (try ip.getReifiedStructType(gpa, io, pt.tid, .{ + .zir_index = tracked_inst, + .type_hash = hasher.final(), .fields_len = @intCast(fields_len), .layout = layout, - .explicit_packed_backing_type = if (backing_int_ty) |t| t.toIntern() else .none, .any_comptime_fields = any_comptime_fields, - .any_field_defaults = any_default_inits, - .any_field_aligns = any_aligned_fields, - .key = .{ .reified = .{ - .zir_index = tracked_inst, - .type_hash = hasher.final(), - } }, + .any_field_defaults = any_field_defaults, + .any_field_aligns = any_field_aligns, + .packed_backing_int_type = if (backing_int_ty) |ty| ty.toIntern() else .none, })) { - .wip => |wip| wip, .existing => |ty| { try sema.addTypeReferenceEntry(src, .fromInterned(ty)); return .fromIntern(ty); }, - }; - errdefer wip_ty.cancel(ip, pt.tid); + .wip => |wip| { + errdefer wip.cancel(ip, pt.tid); + try sema.setTypeName(block, &wip, name_strategy, "struct", inst); + for (0..fields_len) |field_idx| { + const field_name_val = try field_names_arr.elemValue(pt, field_idx); + const field_attrs_val = try field_attrs_arr.elemValue(pt, field_idx); - _ = try (try sema.createTypeName( - block, - name_strategy, - "struct", - inst, - )).apply(&wip_ty, pt); + // No source location or reason; first loop checked this is valid. 
+ const field_name = try sema.sliceToIpString(block, .unneeded, field_name_val, undefined); + wip.field_names.get(ip)[field_idx] = field_name; - const wip_struct_type = ip.loadStructType(wip_ty.index); + const field_ty = (try field_types_arr.elemValue(pt, field_idx)).toType(); + wip.field_types.get(ip)[field_idx] = field_ty.toIntern(); - for (0..fields_len) |field_idx| { - const field_name_val = try field_names_arr.elemValue(pt, field_idx); - const field_attrs_val = try field_attrs_arr.elemValue(pt, field_idx); + const field_attr_comptime = try field_attrs_val.fieldValue(pt, std.meta.fieldIndex( + std.builtin.Type.StructField.Attributes, + "comptime", + ).?); + const field_attr_align = try field_attrs_val.fieldValue(pt, std.meta.fieldIndex( + std.builtin.Type.StructField.Attributes, + "align", + ).?); + const field_attr_default_value_ptr = try field_attrs_val.fieldValue(pt, std.meta.fieldIndex( + std.builtin.Type.StructField.Attributes, + "default_value_ptr", + ).?); - // Don't pass a reason; first loop acts as a check that this is valid. 
- const field_name = try sema.sliceToIpString(block, field_names_src, field_name_val, undefined); - const field_ty = (try field_types_arr.elemValue(pt, field_idx)).toType(); - const field_attr_comptime = try field_attrs_val.fieldValue(pt, std.meta.fieldIndex( - std.builtin.Type.StructField.Attributes, - "comptime", - ).?); - const field_attr_align = try field_attrs_val.fieldValue(pt, std.meta.fieldIndex( - std.builtin.Type.StructField.Attributes, - "align", - ).?); - const field_attr_default_value_ptr = try field_attrs_val.fieldValue(pt, std.meta.fieldIndex( - std.builtin.Type.StructField.Attributes, - "default_value_ptr", - ).?); + if (field_attr_comptime.toBool()) { + const bit_bag_index = field_idx / 32; + const mask = @as(u32, 1) << @intCast(field_idx % 32); + wip.field_is_comptime_bits.getAll(ip)[bit_bag_index] |= mask; + } - if (wip_ty.nextField(ip, field_name, field_attr_comptime.toBool())) |prev_index| { - _ = prev_index; // TODO: better source location - return sema.fail(block, field_names_src, "duplicate struct field name {f}", .{field_name.fmt(ip)}); - } + if (field_attr_default_value_ptr.optionalValue(zcu)) |ptr_val| { + const ptr_ty = try pt.singleConstPtrType(field_ty); + // No source location; first loop checked this is valid. + const deref_val = (try sema.pointerDeref(block, .unneeded, ptr_val, ptr_ty)).?; + wip.field_values.get(ip)[field_idx] = deref_val.toIntern(); + } else if (any_field_defaults) { + wip.field_values.get(ip)[field_idx] = .none; + } - const field_default: InternPool.Index = d: { - const ptr_val = field_attr_default_value_ptr.optionalValue(zcu) orelse break :d .none; - assert(any_default_inits); - const ptr_ty = try pt.singleConstPtrType(field_ty); - // The first loop checked that this is comptime-dereferencable. - const deref_val = (try sema.pointerDeref(block, field_attrs_src, ptr_val, ptr_ty)).?; - // ...but we've not checked this yet! 
- if (deref_val.canMutateComptimeVarState(zcu)) { - return sema.failWithContainsReferenceToComptimeVar(block, field_attrs_src, field_name, "field default value", deref_val); + if (field_attr_align.optionalValue(zcu)) |field_align_val| { + const bytes = field_align_val.toUnsignedInt(zcu); + // No source location; first loop checked this is valid. + const a = try sema.validateAlign(block, .unneeded, bytes); + wip.field_aligns.get(ip)[field_idx] = a; + } else if (any_field_aligns) { + wip.field_aligns.get(ip)[field_idx] = .none; + } } - break :d deref_val.toIntern(); - }; - if (field_attr_comptime.toBool() and field_default == .none) { - return sema.fail(block, field_attrs_src, "comptime field without default initialization value", .{}); - } + const new_namespace_index = try pt.createNamespace(.{ + .parent = block.namespace.toOptional(), + .owner_type = wip.index, + .file_scope = block.getFileScopeIndex(zcu), + .generation = zcu.generation, + }); + try sema.addTypeReferenceEntry(src, .fromInterned(wip.index)); + if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newType(zcu, wip.index); + // MLUGG TODO: we could potentially revert this language change if we wanted? 
don't mind + try zcu.comp.queueJob(.{ .analyze_unit = .wrap(.{ .type_layout = wip.index }) }); - switch (field_ty.zigTypeTag(zcu)) { - .@"opaque" => return sema.failWithOwnedErrorMsg(block, msg: { - const msg = try sema.errMsg(field_types_src, "opaque types have unknown size and therefore cannot be directly embedded in structs", .{}); - errdefer msg.destroy(gpa); - try sema.addDeclaredHereNote(msg, field_ty); - break :msg msg; - }), - .noreturn => return sema.failWithOwnedErrorMsg(block, msg: { - const msg = try sema.errMsg(field_types_src, "struct fields cannot be 'noreturn'", .{}); - errdefer msg.destroy(gpa); - try sema.addDeclaredHereNote(msg, field_ty); - break :msg msg; - }), - else => {}, - } + try zcu.outdated.ensureUnusedCapacity(gpa, 1); + try zcu.outdated_ready.ensureUnusedCapacity(gpa, 1); + errdefer comptime unreachable; // because we don't remove the `outdated` entries + zcu.outdated.putAssumeCapacityNoClobber(.wrap(.{ .type_layout = wip.index }), 0); + zcu.outdated_ready.putAssumeCapacityNoClobber(.wrap(.{ .type_layout = wip.index }), {}); - switch (layout) { - .auto => {}, - .@"extern" => if (!try sema.validateExternType(field_ty, .struct_field)) { - return sema.failWithOwnedErrorMsg(block, msg: { - const msg = try sema.errMsg(field_types_src, "extern structs cannot contain fields of type '{f}'", .{field_ty.fmt(pt)}); - errdefer msg.destroy(gpa); - try sema.explainWhyTypeIsNotExtern(msg, field_types_src, field_ty, .struct_field); - try sema.addDeclaredHereNote(msg, field_ty); - break :msg msg; - }); - }, - .@"packed" => if (!field_ty.packable(zcu)) { - return sema.failWithOwnedErrorMsg(block, msg: { - const msg = try sema.errMsg(field_types_src, "packed structs cannot contain fields of type '{f}'", .{field_ty.fmt(pt)}); - errdefer msg.destroy(gpa); - try sema.explainWhyTypeIsNotPackable(msg, field_types_src, field_ty); - try sema.addDeclaredHereNote(msg, field_ty); - break :msg msg; - }); - }, - } - - wip_struct_type.field_types.get(ip)[field_idx] = 
field_ty.toIntern(); - if (field_default != .none) { - wip_struct_type.field_defaults.get(ip)[field_idx] = field_default; - } - - if (field_attr_align.optionalValue(zcu)) |field_align_val| { - assert(layout != .@"packed"); - const bytes = field_align_val.toUnsignedInt(zcu); - const a = try sema.validateAlign(block, field_attrs_src, bytes); - wip_struct_type.field_aligns.get(ip)[field_idx] = a; - } else if (any_aligned_fields) { - assert(layout != .@"packed"); - wip_struct_type.field_aligns.get(ip)[field_idx] = .none; - } + return .fromIntern(wip.finish(ip, new_namespace_index)); + }, } - - if (layout == .@"packed") { - var field_bits: u64 = 0; - for (0..fields_len) |field_idx| { - const field_ty: Type = .fromInterned(wip_struct_type.field_types.get(ip)[field_idx]); - try sema.ensureLayoutResolved(field_ty); - field_bits += field_ty.bitSize(zcu); - } - try type_resolution.resolvePackedStructBackingInt( - sema, - block, - field_bits, - .fromInterned(wip_ty.index), - &wip_struct_type, - ); - } else { - try type_resolution.finishStructLayout( - sema, - block, - src, - wip_ty.index, - &wip_struct_type, - ); - } - - const new_namespace_index = try pt.createNamespace(.{ - .parent = block.namespace.toOptional(), - .owner_type = wip_ty.index, - .file_scope = block.getFileScopeIndex(zcu), - .generation = zcu.generation, - }); - - codegen_type: { - if (zcu.comp.config.use_llvm) break :codegen_type; - if (block.ownerModule().strip) break :codegen_type; - zcu.comp.link_prog_node.increaseEstimatedTotalItems(1); - try zcu.comp.queueJob(.{ .link_type = wip_ty.index }); - } - try sema.addTypeReferenceEntry(src, .fromInterned(wip_ty.index)); - if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newType(zcu, wip_ty.index); - return .fromIntern(wip_ty.finish(ip, new_namespace_index)); } fn zirReifyUnion( @@ -20390,7 +20335,10 @@ fn zirReifyUnion( const arg_ty_uncoerced = try sema.resolveInst(extra.arg_ty); const arg_ty_coerced = try sema.coerce(block, .optional_type, 
arg_ty_uncoerced, arg_ty_src); - const arg_ty_val = try sema.resolveConstDefinedValue(block, arg_ty_src, arg_ty_coerced, .{ .simple = .type }); + const arg_ty_val = try sema.resolveConstDefinedValue(block, arg_ty_src, arg_ty_coerced, switch (layout) { + .@"packed" => .{ .simple = .packed_union_backing_int_type }, + .auto, .@"extern" => .{ .simple = .union_enum_tag_type }, + }); const field_names_uncoerced = try sema.resolveInst(extra.field_names); const field_names_coerced = try sema.coerce(block, .slice_const_slice_const_u8, field_names_uncoerced, field_names_src); @@ -20430,17 +20378,29 @@ fn zirReifyUnion( return sema.failWithUseOfUndef(block, arg_ty_src, null); } - // The validation work here is non-trivial, and it's possible the type already exists. - // So in this first pass, let's just construct a hash to optimize for this case. If the - // inputs turn out to be invalid, we can cancel the WIP type later. + // Most validation of this type happens during type resolution. We basically need to do the work + // which AstGen would normally do. An exception is checking for duplicate field names, which is + // handled by type resolution---it just simplifies some logic a little. - var any_aligned_fields = false; + // As well as validation, we're going to gather some information about the fields, and construct + // a hash representing the inputs for deduplication purposes. + + var any_field_aligns = false; - // For deduplication purposes, we must create a hash including all details of this type. // TODO: use a longer hash! 
var hasher = std.hash.Wyhash.init(0); std.hash.autoHash(&hasher, layout); std.hash.autoHash(&hasher, arg_ty_val); + + const explicit_tag_ty: ?Type, const explicit_packed_backing_type: ?Type = ty: { + const arg_ty = arg_ty_val.optionalValue(zcu) orelse break :ty .{ null, null }; + switch (layout) { + .@"extern" => return sema.fail(block, arg_ty_src, "extern union does not support enum tag type", .{}), + .@"packed" => break :ty .{ null, arg_ty.toType() }, + .auto => break :ty .{ arg_ty.toType(), null }, + } + }; + // `field_types_arr` and `field_attrs_arr` are already deduplicated by the InternPool! std.hash.autoHash(&hasher, field_types_arr); std.hash.autoHash(&hasher, field_attrs_arr); @@ -20457,249 +20417,84 @@ fn zirReifyUnion( try field_attrs_arr.elemValue(pt, field_idx), std.builtin.Type.UnionField.Attributes, ); - if (field_attrs.@"align" != null) { - any_aligned_fields = true; + if (field_attrs.@"align") |bytes| { + if (layout == .@"packed") { + return sema.fail(block, field_attrs_src, "packed union fields cannot be aligned", .{}); + } + // Trigger a compile error if the alignment is invalid. + _ = try sema.validateAlign(block, field_attrs_src, bytes); + any_field_aligns = true; } } - // Some basic validation to avoid a bogus `getUnionType` call... 
- const explicit_tag_ty: ?Type, const explicit_packed_backing_type: ?Type = ty: { - const arg_ty = arg_ty_val.optionalValue(zcu) orelse break :ty .{ null, null }; - switch (layout) { - .@"extern" => return sema.fail(block, arg_ty_src, "extern union does not support enum tag type", .{}), - .@"packed" => break :ty .{ null, arg_ty.toType() }, - .auto => break :ty .{ arg_ty.toType(), null }, - } - }; - if (any_aligned_fields and layout == .@"packed") { - return sema.fail(block, field_attrs_src, "packed union fields cannot be aligned", .{}); - } - - const wip_ty = switch (try ip.getUnionType(gpa, io, pt.tid, .{ + switch (try ip.getReifiedUnionType(gpa, io, pt.tid, .{ + .zir_index = tracked_inst, + .type_hash = hasher.final(), .fields_len = @intCast(fields_len), .layout = layout, - .explicit_packed_backing_type = if (explicit_packed_backing_type) |t| t.toIntern() else .none, + .any_field_aligns = any_field_aligns, .runtime_tag = rt: { if (explicit_tag_ty != null) break :rt .tagged; if (layout == .auto and block.wantSafeTypes()) break :rt .safety; break :rt .none; }, - .have_explicit_enum_tag = explicit_tag_ty != null, - .any_field_aligns = any_aligned_fields, - .key = .{ .reified = .{ - .zir_index = tracked_inst, - .type_hash = hasher.final(), - } }, + .enum_tag_type = if (explicit_tag_ty) |ty| ty.toIntern() else .none, + .packed_backing_int_type = if (explicit_packed_backing_type) |ty| ty.toIntern() else .none, })) { - .wip => |wip| wip, .existing => |ty| { try sema.addTypeReferenceEntry(src, .fromInterned(ty)); return .fromIntern(ty); }, - }; - errdefer wip_ty.cancel(ip, pt.tid); + .wip => |wip| { + errdefer wip.cancel(ip, pt.tid); + try sema.setTypeName(block, &wip, name_strategy, "union", inst); - const type_name = try (try sema.createTypeName( - block, - name_strategy, - "union", - inst, - )).apply(&wip_ty, pt); + for (0..fields_len) |field_idx| { + const field_name_val = try field_names_arr.elemValue(pt, field_idx); + // No source location or reason; first loop 
checked this is valid. + const field_name = try sema.sliceToIpString(block, .unneeded, field_name_val, undefined); + wip.field_names.get(ip)[field_idx] = field_name; - const loaded_union = ip.loadUnionType(wip_ty.index); + const field_ty = (try field_types_arr.elemValue(pt, field_idx)).toType(); + wip.field_types.get(ip)[field_idx] = field_ty.toIntern(); - const generated_tag_ty: InternPool.Index = if (explicit_tag_ty) |enum_tag_ty| generated_tag: { - if (enum_tag_ty.zigTypeTag(zcu) != .@"enum") { - return sema.fail(block, arg_ty_src, "tag type must be an enum type", .{}); - } - - const tag_ty_fields_len = enum_tag_ty.enumFieldCount(zcu); - - for (0..fields_len) |field_idx| { - const field_name_val = try field_names_arr.elemValue(pt, field_idx); - // Don't pass a reason; first loop acts as a check that this is valid. - const field_name = try sema.sliceToIpString(block, field_names_src, field_name_val, undefined); - - if (field_idx >= tag_ty_fields_len) { - return sema.fail(block, field_names_src, "no field named '{f}' in enum '{f}'", .{ - field_name.fmt(ip), enum_tag_ty.fmt(pt), - }); + // No source location; first loop checked this is valid. + const field_attrs = try sema.interpretBuiltinType( + block, + .unneeded, + try field_attrs_arr.elemValue(pt, field_idx), + std.builtin.Type.UnionField.Attributes, + ); + if (field_attrs.@"align") |bytes| { + // No source location; first loop checked this is valid. 
+ const a = try sema.validateAlign(block, .unneeded, bytes); + wip.field_aligns.get(ip)[field_idx] = a; + } else if (any_field_aligns) { + wip.field_aligns.get(ip)[field_idx] = .none; + } } - const enum_field_name = enum_tag_ty.enumFieldName(field_idx, zcu); - if (enum_field_name != field_name) { - return sema.fail(block, field_names_src, "union field name '{f}' does not match enum field name '{f}'", .{ - field_name.fmt(ip), enum_field_name.fmt(ip), - }); - } - } - if (tag_ty_fields_len > fields_len) return sema.failWithOwnedErrorMsg(block, msg: { - const msg = try sema.errMsg(field_names_src, "{d} enum fields missing in union", .{ - tag_ty_fields_len - fields_len, + const new_namespace_index = try pt.createNamespace(.{ + .parent = block.namespace.toOptional(), + .owner_type = wip.index, + .file_scope = block.getFileScopeIndex(zcu), + .generation = zcu.generation, }); - errdefer msg.destroy(gpa); - for (fields_len..tag_ty_fields_len) |enum_field_idx| { - try sema.addFieldErrNote(enum_tag_ty, enum_field_idx, msg, "field '{f}' missing, declared here", .{ - enum_tag_ty.enumFieldName(enum_field_idx, zcu).fmt(ip), - }); - } - try sema.addDeclaredHereNote(msg, enum_tag_ty); - break :msg msg; - }); - wip_ty.setTagType(ip, enum_tag_ty.toIntern()); - break :generated_tag .none; - } else generated_tag: { - // Generate the union's hypothetical tag type. 
- const wip_tag_ty = switch (try ip.getEnumType(gpa, io, pt.tid, .{ - .fields_len = @intCast(fields_len), - .explicit_int_tag_type = .none, - .nonexhaustive = false, - .key = .{ .generated_union_tag = wip_ty.index }, - })) { - .existing => unreachable, // enum type is keyed on this union type which we're only just creating - .wip => |wip_tag_ty| wip_tag_ty, - }; - errdefer wip_tag_ty.cancel(ip, pt.tid); + if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newType(zcu, wip.index); + try sema.addTypeReferenceEntry(src, .fromInterned(wip.index)); - // Set its name based on the union's name - _ = wip_tag_ty.setName(ip, try ip.getOrPutStringFmt( - gpa, - io, - pt.tid, - "@typeInfo({f}).@\"union\".tag_type.?", - .{type_name.fmt(ip)}, - .no_embedded_nulls, - ), .none); + // MLUGG TODO: we could potentially revert this language change if we wanted? don't mind + try zcu.comp.queueJob(.{ .analyze_unit = .wrap(.{ .type_layout = wip.index }) }); - // Populate its fields (and report any duplicates) - for (0..fields_len) |field_idx| { - const field_name_val = try field_names_arr.elemValue(pt, field_idx); - // Don't pass a reason; first loop acts as a check that this is valid. 
- const field_name = try sema.sliceToIpString(block, field_names_src, field_name_val, undefined); - if (wip_tag_ty.nextField(ip, field_name, false)) |prev_field_idx| return sema.failWithOwnedErrorMsg(block, msg: { - const msg = try sema.errMsg(src, "duplicate union field '{f}' at index '{d}", .{ field_name.fmt(ip), field_idx }); - errdefer msg.destroy(gpa); - try sema.errNote(src, msg, "previous field at index '{d}'", .{prev_field_idx}); - break :msg msg; - }); - } + try zcu.outdated.ensureUnusedCapacity(gpa, 1); + try zcu.outdated_ready.ensureUnusedCapacity(gpa, 1); + errdefer comptime unreachable; // because we don't remove the `outdated` entry + zcu.outdated.putAssumeCapacityNoClobber(.wrap(.{ .type_layout = wip.index }), 0); + zcu.outdated_ready.putAssumeCapacityNoClobber(.wrap(.{ .type_layout = wip.index }), {}); - // Populate the enum tag type's *integer* tag type - wip_tag_ty.setTagType(ip, int_tag_ty: { - // Infer the int tag type from the field count - const bits = Type.smallestUnsignedBits(fields_len -| 1); - break :int_tag_ty (try pt.intType(.unsigned, bits)).toIntern(); - }); - - // Lastly, it needs a dummy namespace - const enum_tag_type_namespace = try pt.createNamespace(.{ - .parent = block.namespace.toOptional(), - .owner_type = wip_tag_ty.index, - .file_scope = block.getFileScopeIndex(zcu), - .generation = zcu.generation, - }); - errdefer pt.destroyNamespace(enum_tag_type_namespace); - - wip_ty.setTagType(ip, wip_tag_ty.index); - - break :generated_tag wip_tag_ty.finish(ip, enum_tag_type_namespace); - }; - // If we fail to create the union type, we must delete the generated enum tag type, since it - // would hold a reference to the deleted union. 
- errdefer if (generated_tag_ty != .none) ip.remove(pt.tid, generated_tag_ty); - - for (0..fields_len) |field_idx| { - const field_ty = (try field_types_arr.elemValue(pt, field_idx)).toType(); - const field_attrs = try sema.interpretBuiltinType( - block, - field_attrs_src, - try field_attrs_arr.elemValue(pt, field_idx), - std.builtin.Type.UnionField.Attributes, - ); - - if (field_ty.zigTypeTag(zcu) == .@"opaque") { - return sema.failWithOwnedErrorMsg(block, msg: { - const msg = try sema.errMsg(field_types_src, "opaque types have unknown size and therefore cannot be directly embedded in unions", .{}); - errdefer msg.destroy(gpa); - try sema.addDeclaredHereNote(msg, field_ty); - break :msg msg; - }); - } - - switch (layout) { - .auto => {}, - .@"extern" => if (!try sema.validateExternType(field_ty, .union_field)) { - return sema.failWithOwnedErrorMsg(block, msg: { - const msg = try sema.errMsg(field_types_src, "extern unions cannot contain fields of type '{f}'", .{field_ty.fmt(pt)}); - errdefer msg.destroy(gpa); - - try sema.explainWhyTypeIsNotExtern(msg, field_types_src, field_ty, .union_field); - - try sema.addDeclaredHereNote(msg, field_ty); - break :msg msg; - }); - }, - .@"packed" => if (!field_ty.packable(zcu)) { - return sema.failWithOwnedErrorMsg(block, msg: { - const msg = try sema.errMsg(field_types_src, "packed unions cannot contain fields of type '{f}'", .{field_ty.fmt(pt)}); - errdefer msg.destroy(gpa); - - try sema.explainWhyTypeIsNotPackable(msg, field_types_src, field_ty); - - try sema.addDeclaredHereNote(msg, field_ty); - break :msg msg; - }); - }, - } - - loaded_union.field_types.get(ip)[field_idx] = field_ty.toIntern(); - if (field_attrs.@"align") |bytes| { - assert(layout != .@"packed"); - const a = try sema.validateAlign(block, field_attrs_src, bytes); - loaded_union.field_aligns.get(ip)[field_idx] = a; - } else if (any_aligned_fields) { - assert(layout != .@"packed"); - loaded_union.field_aligns.get(ip)[field_idx] = .none; - } + return 
.fromIntern(wip.finish(ip, new_namespace_index)); + }, } - - if (layout == .@"packed") { - try type_resolution.resolvePackedUnionBackingInt( - sema, - block, - .fromInterned(wip_ty.index), - &loaded_union, - true, - ); - } else { - try type_resolution.finishUnionLayout( - sema, - block, - src, - wip_ty.index, - &loaded_union, - explicit_tag_ty orelse .fromInterned(generated_tag_ty), - ); - } - - const new_namespace_index = try pt.createNamespace(.{ - .parent = block.namespace.toOptional(), - .owner_type = wip_ty.index, - .file_scope = block.getFileScopeIndex(zcu), - .generation = zcu.generation, - }); - - codegen_type: { - if (zcu.comp.config.use_llvm) break :codegen_type; - if (block.ownerModule().strip) break :codegen_type; - zcu.comp.link_prog_node.increaseEstimatedTotalItems(1); - try zcu.comp.queueJob(.{ .link_type = wip_ty.index }); - } - try sema.addTypeReferenceEntry(src, .fromInterned(wip_ty.index)); - if (zcu.comp.debugIncremental()) { - try zcu.incremental_debug_state.newType(zcu, wip_ty.index); - } - return .fromIntern(wip_ty.finish(ip, new_namespace_index)); } fn zirReifyEnum( @@ -20754,10 +20549,10 @@ fn zirReifyEnum( const enum_mode_ty = try sema.getBuiltinType(mode_src, .@"Type.Enum.Mode"); - const tag_ty = try sema.resolveType(block, tag_ty_src, extra.tag_ty); - if (tag_ty.zigTypeTag(zcu) != .int) { - return sema.fail(block, tag_ty_src, "tag type must be an integer type", .{}); - } + const tag_ty_uncoerced = try sema.resolveInst(extra.tag_ty); + const tag_ty_coerced = try sema.coerce(block, .type, tag_ty_uncoerced, tag_ty_src); + const tag_ty_val = try sema.resolveConstDefinedValue(block, tag_ty_src, tag_ty_coerced, .{ .simple = .enum_int_tag_type }); + const tag_ty = tag_ty_val.toType(); const mode_uncoerced = try sema.resolveInst(extra.mode); const mode_coerced = try sema.coerce(block, enum_mode_ty, mode_uncoerced, mode_src); @@ -20790,11 +20585,13 @@ fn zirReifyEnum( } // We don't need to check `field_names_arr`, because `sliceToIpString` will 
check that for us. - // The validation work here is non-trivial, and it's possible the type already exists. - // So in this first pass, let's just construct a hash to optimize for this case. If the - // inputs turn out to be invalid, we can cancel the WIP type later. + // Most validation of this type happens during type resolution. We basically need to do the work + // which AstGen would normally do. An exception is checking for duplicate field names, which is + // handled by type resolution---it just simplifies some logic a little. + + // As well as validation, we're going to gather some information about the fields, and construct + // a hash representing the inputs for deduplication purposes. - // For deduplication purposes, we must create a hash including all details of this type. // TODO: use a longer hash! var hasher = std.hash.Wyhash.init(0); std.hash.autoHash(&hasher, tag_ty.toIntern()); @@ -20810,85 +20607,55 @@ fn zirReifyEnum( std.hash.autoHash(&hasher, field_name); } - const wip_ty = switch (try ip.getEnumType(gpa, io, pt.tid, .{ + switch (try ip.getReifiedEnumType(gpa, io, pt.tid, .{ + .zir_index = tracked_inst, + .type_hash = hasher.final(), .fields_len = @intCast(fields_len), - .explicit_int_tag_type = tag_ty.toIntern(), .nonexhaustive = nonexhaustive, - .key = .{ .reified = .{ - .zir_index = tracked_inst, - .type_hash = hasher.final(), - } }, + .int_tag_type = tag_ty.toIntern(), })) { - .wip => |wip| wip, .existing => |ty| { try sema.addTypeReferenceEntry(src, .fromInterned(ty)); return .fromIntern(ty); }, - }; - errdefer wip_ty.cancel(ip, pt.tid); + .wip => |wip| { + errdefer wip.cancel(ip, pt.tid); - _ = try (try sema.createTypeName( - block, - name_strategy, - "enum", - inst, - )).apply(&wip_ty, pt); + try sema.setTypeName(block, &wip, name_strategy, "enum", inst); - for (0..fields_len) |field_idx| { - const field_name_val = try field_names_arr.elemValue(pt, field_idx); - // Don't pass a reason; first loop acts as a check that this is valid. 
- const field_name = try sema.sliceToIpString(block, field_names_src, field_name_val, undefined); - if (wip_ty.nextField(ip, field_name, false)) |prev_field_idx| return sema.failWithOwnedErrorMsg(block, msg: { - const msg = try sema.errMsg(field_names_src, "duplicate enum field '{f}' at index '{d}'", .{ field_name.fmt(ip), field_idx }); - errdefer msg.destroy(gpa); - try sema.errNote(field_names_src, msg, "previous field at index '{d}'", .{prev_field_idx}); - break :msg msg; - }); - } + // Populate field names and values. Duplicate checking will be handled by type resolution. + for (0..fields_len) |field_index| { + const field_name_val = try field_names_arr.elemValue(pt, field_index); + // No source location or reason; first loop checked this is valid. + const field_name = try sema.sliceToIpString(block, .unneeded, field_name_val, undefined); + wip.field_names.get(ip)[field_index] = field_name; - const enum_obj = ip.loadEnumType(wip_ty.index); - const field_value_map = enum_obj.field_value_map.unwrap().?; - for (0..fields_len) |field_idx| { - const field_val = try field_values_arr.elemValue(pt, field_idx); - const field_values = enum_obj.field_values.get(ip); - field_values[field_idx] = field_val.toIntern(); - const adapter: InternPool.Index.Adapter = .{ .indexes = field_values[0..field_idx] }; - const gop = field_value_map.get(ip).getOrPutAssumeCapacityAdapted(field_val.toIntern(), adapter); - if (gop.found_existing) return sema.failWithOwnedErrorMsg(block, msg: { - const field_names = enum_obj.field_names.get(ip); - const this_field_name = field_names[field_idx]; - const prev_field_name = field_names[gop.index]; - const msg = try sema.errMsg(field_names_src, "duplicate enum tag value '{f}' in field '{f}'", .{ - field_val.fmtValueSema(pt, sema), - this_field_name.fmt(ip), + const field_val = try field_values_arr.elemValue(pt, field_index); + wip.field_values.get(ip)[field_index] = field_val.toIntern(); + } + + const new_namespace_index = try pt.createNamespace(.{ 
+ .parent = block.namespace.toOptional(), + .owner_type = wip.index, + .file_scope = block.getFileScopeIndex(zcu), + .generation = zcu.generation, }); - errdefer msg.destroy(gpa); - try sema.errNote(field_names_src, msg, "previous usage in field '{f}'", .{prev_field_name.fmt(ip)}); - break :msg msg; - }); + + try sema.addTypeReferenceEntry(src, .fromInterned(wip.index)); + if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newType(zcu, wip.index); + + // MLUGG TODO: we could potentially revert this language change if we wanted? don't mind + try zcu.comp.queueJob(.{ .analyze_unit = .wrap(.{ .type_layout = wip.index }) }); + + try zcu.outdated.ensureUnusedCapacity(gpa, 1); + try zcu.outdated_ready.ensureUnusedCapacity(gpa, 1); + errdefer comptime unreachable; // because we don't remove the `outdated` entry + zcu.outdated.putAssumeCapacityNoClobber(.wrap(.{ .type_layout = wip.index }), 0); + zcu.outdated_ready.putAssumeCapacityNoClobber(.wrap(.{ .type_layout = wip.index }), {}); + + return .fromIntern(wip.finish(ip, new_namespace_index)); + }, } - - if (nonexhaustive and fields_len > 1 and std.math.log2_int(u64, fields_len) == tag_ty.bitSize(zcu)) { - return sema.fail(block, src, "non-exhaustive enum specified every value", .{}); - } - - const new_namespace_index = try pt.createNamespace(.{ - .parent = block.namespace.toOptional(), - .owner_type = wip_ty.index, - .file_scope = block.getFileScopeIndex(zcu), - .generation = zcu.generation, - }); - - codegen_type: { - if (zcu.comp.config.use_llvm) break :codegen_type; - if (block.ownerModule().strip) break :codegen_type; - zcu.comp.link_prog_node.increaseEstimatedTotalItems(1); - try zcu.comp.queueJob(.{ .link_type = wip_ty.index }); - } - - try sema.addTypeReferenceEntry(src, .fromInterned(wip_ty.index)); - if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newType(zcu, wip_ty.index); - return .fromIntern(wip_ty.finish(ip, new_namespace_index)); } fn resolveVaListRef(sema: *Sema, block: 
*Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) CompileError!Air.Inst.Ref { @@ -20909,7 +20676,7 @@ fn zirCVaArg(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) C const va_list_ref = try sema.resolveVaListRef(block, va_list_src, extra.lhs); const arg_ty = try sema.resolveType(block, ty_src, extra.rhs); - if (!try sema.validateExternType(arg_ty, .param_ty)) { + if (!arg_ty.validateExtern(.param_ty, sema.pt.zcu)) { const msg = msg: { const msg = try sema.errMsg(ty_src, "cannot get '{f}' from variadic argument", .{arg_ty.fmt(sema.pt)}); errdefer msg.destroy(sema.gpa); @@ -24205,8 +23972,8 @@ fn zirMemcpy( return sema.failWithOwnedErrorMsg(block, msg); } - const dest_elem_ty = dest_ty.indexablePtrElem(zcu); - const src_elem_ty = src_ty.indexablePtrElem(zcu); + const dest_elem_ty = dest_ty.indexableElem(zcu); + const src_elem_ty = src_ty.indexableElem(zcu); try sema.ensureLayoutResolved(dest_elem_ty); try sema.ensureLayoutResolved(src_elem_ty); @@ -24906,7 +24673,7 @@ fn zirBuiltinExtern( if (!ty.isPtrAtRuntime(zcu)) { return sema.fail(block, ty_src, "expected (optional) pointer", .{}); } - if (!try sema.validateExternType(ty, .other)) { + if (!ty.validateExtern(.other, zcu)) { const msg = msg: { const msg = try sema.errMsg(ty_src, "extern symbol cannot have type '{f}'", .{ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); @@ -24954,7 +24721,7 @@ fn zirBuiltinExtern( // So, for now, just use our containing `declaration`. 
.zir_index = switch (sema.owner.unwrap()) { .@"comptime" => |cu| ip.getComptimeUnit(cu).zir_index, - .type_layout, .type_inits => |owner_ty| Type.fromInterned(owner_ty).typeDeclInstAllowGeneratedTag(zcu).?, + .type_layout, .struct_defaults => |owner_ty| Type.fromInterned(owner_ty).typeDeclInstAllowGeneratedTag(zcu).?, .memoized_state => unreachable, .nav_ty, .nav_val => |nav| ip.getNav(nav).analysis.?.zir_index, .func => |func| zir_index: { @@ -25060,6 +24827,7 @@ fn zirBuiltinValue(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD // Values are handled here. .calling_convention_c => { const callconv_ty = try sema.getBuiltinType(src, .CallingConvention); + // Cannot use `Value.uninterpret` because `c` is a *declaration* whose value depends on the target. return try sema.namespaceLookupVal( block, src, @@ -25068,17 +24836,15 @@ fn zirBuiltinValue(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD ) orelse @panic("std.builtin is corrupt"); }, .calling_convention_inline => { - comptime assert(@typeInfo(std.builtin.CallingConvention.Tag).@"enum".tag_type == u8); const callconv_ty = try sema.getBuiltinType(src, .CallingConvention); - const callconv_tag_ty = callconv_ty.unionTagType(zcu) orelse @panic("std.builtin is corrupt"); - const inline_tag_val = try pt.enumValue( - callconv_tag_ty, - (try pt.intValue( - .u8, - @intFromEnum(std.builtin.CallingConvention.@"inline"), - )).toIntern(), - ); - return sema.coerce(block, callconv_ty, Air.internedToRef(inline_tag_val.toIntern()), src); + return .fromValue(Value.uninterpret( + @as(std.builtin.CallingConvention, .@"inline"), + callconv_ty, + pt, + ) catch |err| switch (err) { + error.TypeMismatch => @panic("std.builtin is corrupt"), + error.OutOfMemory => |e| return e, + }); }, }; return .fromType(try sema.getBuiltinType(src, builtin_type)); @@ -25180,7 +24946,7 @@ pub fn validateVarType( const zcu = pt.zcu; var_ty.assertHasLayout(zcu); if (is_extern) { - if (!try sema.validateExternType(var_ty, 
.other)) { + if (!var_ty.validateExtern(.other, zcu)) { const msg = msg: { const msg = try sema.errMsg(src, "extern variable cannot have type '{f}'", .{var_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); @@ -25296,124 +25062,17 @@ fn explainWhyTypeIsComptime( } } -const ExternPosition = enum { - ret_ty, - param_ty, - union_field, - struct_field, - element, - other, -}; - -/// Returns true if `ty` is allowed in extern types. -/// Does not require `ty` to be resolved in any way. -pub fn validateExternType( - sema: *Sema, - ty: Type, - position: ExternPosition, -) !bool { - const pt = sema.pt; - const zcu = pt.zcu; - switch (ty.zigTypeTag(zcu)) { - .type, - .comptime_float, - .comptime_int, - .enum_literal, - .undefined, - .null, - .error_union, - .error_set, - .frame, - => return false, - .void => return switch (position) { - .ret_ty, - .union_field, - .struct_field, - .element, - => true, - .param_ty, - .other, - => false, - }, - .noreturn => return position == .ret_ty, - .@"opaque", - .bool, - .float, - .@"anyframe", - => return true, - .pointer => { - if (ty.isSlice(zcu)) return false; - const child_ty = ty.childType(zcu); - if (child_ty.zigTypeTag(zcu) == .@"fn") { - return ty.isConstPtr(zcu) and try sema.validateExternType(child_ty, .other); - } - return true; - }, - .int => switch (ty.intInfo(zcu).bits) { - 0, 8, 16, 32, 64, 128 => return true, - else => return false, - }, - .@"fn" => { - if (position != .other) return false; - // For now we want to authorize PTX kernel to use zig objects, even if we end up exposing the ABI. - // The goal is to experiment with more integrated CPU/GPU code. 
- if (ty.fnCallingConvention(zcu) == .nvptx_kernel) { - return true; - } - return !target_util.fnCallConvAllowsZigTypes(ty.fnCallingConvention(zcu)); - }, - .@"enum" => { - const enum_obj = zcu.intern_pool.loadEnumType(ty.toIntern()); - if (!enum_obj.int_tag_is_explicit) return false; - return sema.validateExternType(.fromInterned(enum_obj.int_tag_type), position); - }, - .@"struct" => { - const struct_obj = zcu.intern_pool.loadStructType(ty.toIntern()); - return switch (struct_obj.layout) { - .auto => false, - .@"extern" => true, - .@"packed" => switch (struct_obj.packed_backing_mode) { - .auto => false, - .explicit => try sema.validateExternType(.fromInterned(struct_obj.packed_backing_int_type), position), - }, - }; - }, - .@"union" => { - const union_obj = zcu.intern_pool.loadUnionType(ty.toIntern()); - return switch (union_obj.layout) { - .auto => false, - .@"extern" => true, - .@"packed" => switch (union_obj.packed_backing_mode) { - .auto => false, - .explicit => try sema.validateExternType(.fromInterned(union_obj.packed_backing_int_type), position), - }, - }; - }, - .array => { - if (position == .ret_ty or position == .param_ty) return false; - return sema.validateExternType(ty.childType(zcu), .element); - }, - .vector => return sema.validateExternType(ty.childType(zcu), .element), - .optional => return ty.isPtrLikeOptional(zcu), - } -} - +/// Keep in sync with `Type.validateExtern`. 
pub fn explainWhyTypeIsNotExtern( sema: *Sema, msg: *Zcu.ErrorMsg, src_loc: LazySrcLoc, ty: Type, - position: ExternPosition, + position: Type.ExternPosition, ) CompileError!void { const pt = sema.pt; const zcu = pt.zcu; switch (ty.zigTypeTag(zcu)) { - .@"opaque", - .bool, - .float, - .@"anyframe", - => return, - .type, .comptime_float, .comptime_int, @@ -25425,101 +25084,110 @@ pub fn explainWhyTypeIsNotExtern( .frame, => return, - .pointer => { - if (ty.isSlice(zcu)) { - try sema.errNote(src_loc, msg, "slices have no guaranteed in-memory representation", .{}); + .void => try sema.errNote(src_loc, msg, "'void' is a zero bit type", .{}), + .noreturn => try sema.errNote(src_loc, msg, "'noreturn' is only allowed as a return type", .{}), + + .@"opaque", + .bool, + .float, + .@"anyframe", + => unreachable, // these *are* allowed + + .pointer => if (ty.isSlice(zcu)) { + try sema.errNote(src_loc, msg, "slices have no guaranteed in-memory representation", .{}); + } else { + assert(ty.childType(zcu).zigTypeTag(zcu) == .@"fn"); + if (!ty.isConstPtr(zcu)) { + try sema.errNote(src_loc, msg, "pointer to extern function must be 'const'", .{}); } else { - const pointee_ty = ty.childType(zcu); - if (!ty.isConstPtr(zcu) and pointee_ty.zigTypeTag(zcu) == .@"fn") { - try sema.errNote(src_loc, msg, "pointer to extern function must be 'const'", .{}); - } - try sema.explainWhyTypeIsNotExtern(msg, src_loc, pointee_ty, .other); + try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.childType(zcu), .other); } }, - .void => try sema.errNote(src_loc, msg, "'void' is a zero bit type; for C 'void' use 'anyopaque'", .{}), - .noreturn => try sema.errNote(src_loc, msg, "'noreturn' is only allowed as a return type", .{}), .int => if (!std.math.isPowerOfTwo(ty.intInfo(zcu).bits)) { try sema.errNote(src_loc, msg, "only integers with 0 or power of two bits are extern compatible", .{}); } else { try sema.errNote(src_loc, msg, "only integers with 0, 8, 16, 32, 64 and 128 bits are extern compatible", 
.{}); }, - .@"fn" => { - if (position != .other) { - try sema.errNote(src_loc, msg, "type has no guaranteed in-memory representation", .{}); - try sema.errNote(src_loc, msg, "use '*const ' to make a function pointer type", .{}); - return; - } - switch (ty.fnCallingConvention(zcu)) { - .auto => try sema.errNote(src_loc, msg, "extern function must specify calling convention", .{}), - .async => try sema.errNote(src_loc, msg, "async function cannot be extern", .{}), - .@"inline" => try sema.errNote(src_loc, msg, "inline function cannot be extern", .{}), - else => return, - } + .@"fn" => if (position != .other) { + try sema.errNote(src_loc, msg, "type has no guaranteed in-memory representation", .{}); + try sema.errNote(src_loc, msg, "use '*const ' to make a function pointer type", .{}); + } else switch (ty.fnCallingConvention(zcu)) { + .auto => try sema.errNote(src_loc, msg, "extern function must specify calling convention", .{}), + else => |cc| try sema.errNote(src_loc, msg, "{t} function cannot be extern", .{cc}), }, .@"enum" => { const tag_ty = ty.intTagType(zcu); try sema.errNote(src_loc, msg, "enum tag type '{f}' is not extern compatible", .{tag_ty.fmt(pt)}); try sema.explainWhyTypeIsNotExtern(msg, src_loc, tag_ty, position); }, - // MLUGG TODO: these notes are bad now (because ABI sized packed type also needs explicit backing type) - .@"struct" => try sema.errNote(src_loc, msg, "only extern structs and ABI sized packed structs are extern compatible", .{}), - .@"union" => try sema.errNote(src_loc, msg, "only extern unions and ABI sized packed unions are extern compatible", .{}), - .array => { - if (position == .ret_ty) { - return sema.errNote(src_loc, msg, "arrays are not allowed as a return type", .{}); - } else if (position == .param_ty) { - return sema.errNote(src_loc, msg, "arrays are not allowed as a parameter type", .{}); + .@"struct" => { + const struct_obj = zcu.intern_pool.loadStructType(ty.toIntern()); + switch (struct_obj.layout) { + .auto => try 
sema.errNote(src_loc, msg, "struct with automatic layout has no guaranteed in-memory representation", .{}), + .@"extern" => unreachable, + .@"packed" => switch (struct_obj.packed_backing_mode) { + .auto => try sema.errNote(src_loc, msg, "inferred backing integer of packed struct has unspecified signedness", .{}), + .explicit => { + const backing_int_ty: Type = .fromInterned(struct_obj.packed_backing_int_type); + try sema.errNote(src_loc, msg, "packed struct backing integer type '{f}' is not extern compatible", .{backing_int_ty.fmt(pt)}); + try sema.explainWhyTypeIsNotExtern(msg, src_loc, backing_int_ty, position); + }, + }, + } - try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.childType(zcu), .element); + }, + .@"union" => { + const union_obj = zcu.intern_pool.loadUnionType(ty.toIntern()); + switch (union_obj.layout) { + .auto => try sema.errNote(src_loc, msg, "union with automatic layout has no guaranteed in-memory representation", .{}), + .@"extern" => unreachable, + .@"packed" => switch (union_obj.packed_backing_mode) { + .auto => try sema.errNote(src_loc, msg, "inferred backing integer of packed union has unspecified signedness", .{}), + .explicit => { + const backing_int_ty: Type = .fromInterned(union_obj.packed_backing_int_type); + try sema.errNote(src_loc, msg, "packed union backing integer type '{f}' is not extern compatible", .{backing_int_ty.fmt(pt)}); + try sema.explainWhyTypeIsNotExtern(msg, src_loc, backing_int_ty, position); + }, + }, + } + }, + .array => switch (position) { + .ret_ty => try sema.errNote(src_loc, msg, "arrays are not allowed as a return type", .{}), + .param_ty => try sema.errNote(src_loc, msg, "arrays are not allowed as a parameter type", .{}), + else => try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.childType(zcu), .element), }, .vector => try sema.explainWhyTypeIsNotExtern(msg, src_loc, ty.childType(zcu), .element), - .optional => try sema.errNote(src_loc, msg, "only pointer like optionals are extern compatible", .{}), + 
.optional => try sema.errNote(src_loc, msg, "non-pointer optionals have no guaranteed in-memory representation", .{}), } } -pub fn explainWhyTypeIsNotPackable( +pub fn explainWhyTypeIsUnpackable( sema: *Sema, msg: *Zcu.ErrorMsg, - src_loc: LazySrcLoc, - ty: Type, + src: LazySrcLoc, + reason: Type.UnpackableReason, ) CompileError!void { const pt = sema.pt; const zcu = pt.zcu; - switch (ty.zigTypeTag(zcu)) { - .void, - .bool, - .float, - .int, - .vector, - .@"enum", - => return, - .type, - .comptime_float, - .comptime_int, - .enum_literal, - .undefined, - .null, - .frame, - .noreturn, - .@"opaque", - .error_union, - .error_set, - .@"anyframe", - .optional, - .array, - => try sema.errNote(src_loc, msg, "type has no guaranteed in-memory representation", .{}), - .pointer => if (ty.isSlice(zcu)) { - try sema.errNote(src_loc, msg, "slices have no guaranteed in-memory representation", .{}); - } else { - try sema.errNote(src_loc, msg, "comptime-only pointer has no guaranteed in-memory representation", .{}); - try sema.explainWhyTypeIsComptime(msg, src_loc, ty); + switch (reason) { + .comptime_only => try sema.errNote(src, msg, "comptime-only types have no bit-packed representation", .{}), + .pointer => { + try sema.errNote(src, msg, "pointers cannot be directly bitpacked", .{}); + try sema.errNote(src, msg, "consider using 'usize' and '@intFromPtr'", .{}); }, - .@"fn" => { - try sema.errNote(src_loc, msg, "type has no guaranteed in-memory representation", .{}); - try sema.errNote(src_loc, msg, "use '*const ' to make a function pointer type", .{}); + .enum_inferred_int_tag => |enum_ty| { + const enum_src = enum_ty.srcLoc(zcu); + try sema.errNote(enum_src, msg, "integer tag type of enum is inferred", .{}); + try sema.errNote(enum_src, msg, "consider explicitly specifying the integer tag type", .{}); }, - .@"struct" => try sema.errNote(src_loc, msg, "struct in packed type must have packed layout", .{}), - .@"union" => try sema.errNote(src_loc, msg, "union in packed type must 
have packed layout", .{}), + .non_packed_struct => |struct_ty| { + try sema.errNote(src, msg, "non-packed structs do not have a bit-packed representation", .{}); + try sema.addDeclaredHereNote(msg, struct_ty); + }, + .non_packed_union => |union_ty| { + try sema.errNote(src, msg, "non-packed unions do not have a bit-packed representation", .{}); + try sema.addDeclaredHereNote(msg, union_ty); + }, + .other => try sema.errNote(src, msg, "type does not have a bit-packed representation", .{}), } } @@ -25545,7 +25213,7 @@ fn getPanicIdFunc(sema: *Sema, src: LazySrcLoc, panic_id: Zcu.SimplePanicId) !In try sema.ensureMemoizedStateResolved(src, .panic); const panic_fn_index = zcu.builtin_decl_values.get(panic_id.toBuiltin()); switch (sema.owner.unwrap()) { - .@"comptime", .nav_ty, .nav_val, .type_layout, .type_inits, .memoized_state => {}, + .@"comptime", .nav_ty, .nav_val, .type_layout, .struct_defaults, .memoized_state => {}, .func => |owner_func| zcu.intern_pool.funcSetHasErrorTrace(io, owner_func, true), } return panic_fn_index; @@ -25962,6 +25630,7 @@ fn fieldVal( if (try sema.namespaceLookupVal(block, src, child_type.getNamespaceIndex(zcu), field_name)) |inst| { return inst; } + try sema.ensureLayoutResolved(child_type); if (child_type.unionTagType(zcu)) |enum_ty| { if (enum_ty.enumFieldIndex(field_name, zcu)) |field_index_usize| { const field_index: u32 = @intCast(field_index_usize); @@ -25974,6 +25643,7 @@ fn fieldVal( if (try sema.namespaceLookupVal(block, src, child_type.getNamespaceIndex(zcu), field_name)) |inst| { return inst; } + try sema.ensureLayoutResolved(child_type); const field_index_usize = child_type.enumFieldIndex(field_name, zcu) orelse return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); const field_index: u32 = @intCast(field_index_usize); @@ -26195,6 +25865,7 @@ fn fieldPtr( if (try sema.namespaceLookupRef(block, src, child_type.getNamespaceIndex(zcu), field_name)) |inst| { return inst; } + try 
sema.ensureLayoutResolved(child_type); if (child_type.unionTagType(zcu)) |enum_ty| { if (enum_ty.enumFieldIndex(field_name, zcu)) |field_index| { const field_index_u32: u32 = @intCast(field_index); @@ -26208,6 +25879,7 @@ fn fieldPtr( if (try sema.namespaceLookupRef(block, src, child_type.getNamespaceIndex(zcu), field_name)) |inst| { return inst; } + try sema.ensureLayoutResolved(child_type); const field_index = child_type.enumFieldIndex(field_name, zcu) orelse { return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); }; @@ -26444,9 +26116,6 @@ fn finishFieldCallBind( const container_ty = ptr_ty.childType(zcu); if (container_ty.zigTypeTag(zcu) == .@"struct") { if (container_ty.structFieldIsComptime(field_index, zcu)) { - if (!container_ty.isTuple(zcu)) { - try sema.ensureFieldInitsResolved(container_ty); - } const default_val = (try container_ty.structFieldValueComptime(pt, field_index)).?; return .{ .direct = Air.internedToRef(default_val.toIntern()) }; } @@ -26623,7 +26292,7 @@ fn structFieldPtrByIndex( const ptr_field_ty = try pt.ptrType(ptr_ty_data); if (field_is_comptime) { - try sema.ensureFieldInitsResolved(struct_ty); + assert(struct_type.field_defaults.get(ip)[field_index] != .none); const val = try pt.intern(.{ .ptr = .{ .ty = ptr_field_ty.toIntern(), .base_addr = .{ .comptime_field = struct_type.field_defaults.get(ip)[field_index] }, @@ -26647,6 +26316,8 @@ fn structFieldVal( const zcu = pt.zcu; const ip = &zcu.intern_pool; assert(struct_ty.zigTypeTag(zcu) == .@"struct"); + assert(sema.typeOf(struct_byval).toIntern() == struct_ty.toIntern()); + struct_ty.assertHasLayout(zcu); switch (ip.indexToKey(struct_ty.toIntern())) { .struct_type => { @@ -26655,7 +26326,6 @@ fn structFieldVal( const field_index = struct_type.nameIndex(ip, field_name) orelse return sema.failWithBadStructFieldAccess(block, struct_ty, struct_type, field_name_src, field_name); if (struct_type.field_is_comptime_bits.get(ip, field_index)) { - try 
sema.ensureFieldInitsResolved(struct_ty); return .fromIntern(struct_type.field_defaults.get(ip)[field_index]); } @@ -26886,6 +26556,8 @@ fn unionFieldVal( const zcu = pt.zcu; const ip = &zcu.intern_pool; assert(union_ty.zigTypeTag(zcu) == .@"union"); + assert(sema.typeOf(union_byval).toIntern() == union_ty.toIntern()); + union_ty.assertHasLayout(zcu); const union_obj = zcu.typeToUnion(union_ty).?; const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src); @@ -27149,11 +26821,11 @@ fn validateRuntimeElemAccess( const msg = try sema.errMsg( elem_index_src, "values of type '{f}' must be comptime-known, but index value is runtime-known", - .{parent_ty.fmt(sema.pt)}, + .{elem_ty.fmt(sema.pt)}, ); errdefer msg.destroy(sema.gpa); - try sema.explainWhyTypeIsComptime(msg, parent_src, parent_ty); + try sema.explainWhyTypeIsComptime(msg, parent_src, elem_ty); break :msg msg; }; @@ -27885,18 +27557,14 @@ fn coerceExtra( // empty tuple to zero-length slice // note that this allows coercing to a mutable slice. if (inst_child_ty.structFieldCount(zcu) == 0) { - // TODO MLUGG: this is *unacceptably* stupid. 
we're resolving the child for the alignment value - try sema.ensureLayoutResolved(dest_ty.childType(zcu)); - const align_val = dest_ty.ptrAlignment(zcu); - return Air.internedToRef(try pt.intern(.{ .slice = .{ - .ty = dest_ty.toIntern(), - .ptr = try pt.intern(.{ .ptr = .{ - .ty = dest_ty.slicePtrFieldType(zcu).toIntern(), - .base_addr = .int, - .byte_offset = align_val.toByteUnits().?, - } }), - .len = .zero_usize, - } })); + const empty_array_ty = try pt.arrayType(.{ + .len = 0, + .child = dest_info.child, + .sentinel = dest_info.sentinel, + }); + const empty_array_val = try pt.aggregateValue(empty_array_ty, &.{}); + const empty_array_ptr = try sema.uavRef(empty_array_val.toIntern()); + return sema.coerceArrayPtrToSlice(block, dest_ty, empty_array_ptr, inst_src); } // pointer to tuple to slice @@ -27955,7 +27623,7 @@ fn coerceExtra( .int, .comptime_int => { if (maybe_inst_val) |val| { // comptime-known integer to other number - if (!(try sema.intFitsInType(val, dest_ty, null))) { + if (!val.intFitsInType(dest_ty, null, zcu)) { if (!opts.report_err) return error.NotCoercible; return sema.fail(block, inst_src, "type '{f}' cannot represent integer value '{f}'", .{ dest_ty.fmt(pt), val.fmtValueSema(pt, sema) }); } @@ -28039,28 +27707,26 @@ fn coerceExtra( } break :int; }; + if (val.isUndef(zcu)) { + return .fromValue(try pt.undefValue(dest_ty)); + } const result_val = try pt.floatValue(dest_ty, val.toFloat(f128, zcu)); - const fits: bool = switch (ip.indexToKey(result_val.toIntern())) { - else => unreachable, - .undef => true, - .float => |float| fits: { - var buffer: InternPool.Key.Int.Storage.BigIntSpace = undefined; - const operand_big_int = val.toBigInt(&buffer, zcu); - switch (float.storage) { - inline else => |x| { - if (!std.math.isFinite(x)) break :fits false; - var result_big_int: std.math.big.int.Mutable = .{ - .limbs = try sema.arena.alloc(std.math.big.Limb, std.math.big.int.calcLimbLen(x)), - .len = undefined, - .positive = undefined, - }; - switch 
(result_big_int.setFloat(x, .nearest_even)) { - .inexact => break :fits false, - .exact => {}, - } - break :fits result_big_int.toConst().eql(operand_big_int); - }, + const float = ip.indexToKey(result_val.toIntern()).float; + var buffer: InternPool.Key.Int.Storage.BigIntSpace = undefined; + const operand_big_int = val.toBigInt(&buffer, zcu); + const fits = switch (float.storage) { + inline else => |x| fits: { + if (!std.math.isFinite(x)) break :fits false; + var result_big_int: std.math.big.int.Mutable = .{ + .limbs = try sema.arena.alloc(std.math.big.Limb, std.math.big.int.calcLimbLen(x)), + .len = undefined, + .positive = undefined, + }; + switch (result_big_int.setFloat(x, .nearest_even)) { + .inexact => break :fits false, + .exact => {}, } + break :fits result_big_int.toConst().eql(operand_big_int); }, }; if (!fits) return sema.fail( @@ -28699,7 +28365,7 @@ pub fn coerceInMemoryAllowed( // Comptime int to regular int. if (dest_tag == .int and src_tag == .comptime_int) { if (src_val) |val| { - if (!(try sema.intFitsInType(val, dest_ty, null))) { + if (!val.intFitsInType(dest_ty, null, zcu)) { return .{ .comptime_int_not_coercible = .{ .wanted = dest_ty, .actual = val } }; } } @@ -29333,7 +28999,7 @@ fn coerceVarArgParam( } }, else => if (uncasted_ty.isAbiInt(zcu)) int: { - if (!try sema.validateExternType(uncasted_ty, .param_ty)) break :int inst; + if (!uncasted_ty.validateExtern(.param_ty, zcu)) break :int inst; const target = zcu.getTarget(); const uncasted_info = uncasted_ty.intInfo(zcu); if (uncasted_info.bits <= target.cTypeBitSize(switch (uncasted_info.signedness) { @@ -29362,7 +29028,7 @@ fn coerceVarArgParam( }; const coerced_ty = sema.typeOf(coerced); - if (!try sema.validateExternType(coerced_ty, .param_ty)) { + if (!coerced_ty.validateExtern(.param_ty, zcu)) { const msg = msg: { const msg = try sema.errMsg(inst_src, "cannot pass '{f}' to variadic function", .{coerced_ty.fmt(pt)}); errdefer msg.destroy(sema.gpa); @@ -33283,12 +32949,12 @@ fn 
typeIsArrayLike(sema: *Sema, ty: Type) ?ArrayLike { .elem_ty = ty.childType(zcu), }, .@"struct" => { + if (!ty.isTuple(zcu)) return null; const field_count = ty.structFieldCount(zcu); if (field_count == 0) return .{ .len = 0, .elem_ty = .noreturn, }; - if (!ty.isTuple(zcu)) return null; const elem_ty = ty.fieldType(0, zcu); for (1..field_count) |i| { if (!ty.fieldType(i, zcu).eql(elem_ty, zcu)) { @@ -33700,6 +33366,7 @@ fn usizeCast(sema: *Sema, block: *Block, src: LazySrcLoc, int: u64) CompileError return std.math.cast(usize, int) orelse return sema.fail(block, src, "expression produces integer value '{d}' which is too big for this compiler implementation to handle", .{int}); } +/// Asserts that the layout of `union_ty` is already resolved. fn unionFieldIndex( sema: *Sema, block: *Block, @@ -33717,6 +33384,7 @@ fn unionFieldIndex( return @intCast(field_index); } +/// Asserts that the layout of `struct_ty` is already resolved. fn structFieldIndex( sema: *Sema, block: *Block, @@ -33811,64 +33479,6 @@ fn intFromFloatScalar( return pt.getCoerced(cti_result, int_ty); } -/// Asserts the value is an integer, and the destination type is ComptimeInt or Int. -/// Vectors are also accepted. Vector results are reduced with AND. -/// -/// If provided, `vector_index` reports the first element that failed the range check. -/// MLUGG TODO: move to `Value` or `Type`? 
-fn intFitsInType( - sema: *Sema, - val: Value, - ty: Type, - vector_index: ?*usize, -) CompileError!bool { - const pt = sema.pt; - const zcu = pt.zcu; - if (ty.toIntern() == .comptime_int_type) return true; - const info = ty.intInfo(zcu); - switch (val.toIntern()) { - .zero_usize, .zero_u8 => return true, - else => switch (zcu.intern_pool.indexToKey(val.toIntern())) { - .undef => return true, - .variable, .@"extern", .func, .ptr => { - const target = zcu.getTarget(); - const ptr_bits = target.ptrBitWidth(); - return switch (info.signedness) { - .signed => info.bits > ptr_bits, - .unsigned => info.bits >= ptr_bits, - }; - }, - .int => |int| { - var buffer: InternPool.Key.Int.Storage.BigIntSpace = undefined; - const big_int = int.storage.toBigInt(&buffer); - return big_int.fitsInTwosComp(info.signedness, info.bits); - }, - .aggregate => |aggregate| { - assert(ty.zigTypeTag(zcu) == .vector); - return switch (aggregate.storage) { - .bytes => |bytes| for (bytes.toSlice(ty.vectorLen(zcu), &zcu.intern_pool), 0..) |byte, i| { - if (byte == 0) continue; - const actual_needed_bits = std.math.log2(byte) + 1 + @intFromBool(info.signedness == .signed); - if (info.bits >= actual_needed_bits) continue; - if (vector_index) |vi| vi.* = i; - break false; - } else true, - .elems, .repeated_elem => for (switch (aggregate.storage) { - .bytes => unreachable, - .elems => |elems| elems, - .repeated_elem => |elem| @as(*const [1]InternPool.Index, &elem), - }, 0..) 
|elem, i| { - if (try sema.intFitsInType(Value.fromInterned(elem), ty.scalarType(zcu), null)) continue; - if (vector_index) |vi| vi.* = i; - break false; - } else true, - }; - }, - else => unreachable, - }, - } -} - fn intInRange(sema: *Sema, tag_ty: Type, int_val: Value, end: usize) !bool { const pt = sema.pt; if (!int_val.compareAllWithZero(.gte, pt.zcu)) return false; @@ -33886,7 +33496,7 @@ fn enumHasInt(sema: *Sema, ty: Type, int: Value) CompileError!bool { // The `tagValueIndex` function call below relies on the type being the integer tag type. // `getCoerced` assumes the value will fit the new type. const int_tag_ty: Type = .fromInterned(enum_type.int_tag_type); - if (!try sema.intFitsInType(int, int_tag_ty, null)) return false; + if (!int.intFitsInType(int_tag_ty, null, zcu)) return false; const int_coerced = try pt.getCoerced(int, int_tag_ty); return enum_type.tagValueIndex(&zcu.intern_pool, int_coerced.toIntern()) != null; } @@ -33919,7 +33529,6 @@ fn compareAll( } /// Asserts the values are comparable. Both operands have type `ty`. -/// MLUGG TODO: move to `Value`? 
fn compareScalar( sema: *Sema, lhs: Value, @@ -34422,7 +34031,7 @@ const ComptimeStoreResult = @import("Sema/comptime_ptr_access.zig").ComptimeStor // MLUGG TODO: decide how to do the namespacing here pub const type_resolution = @import("Sema/type_resolution.zig"); pub const ensureLayoutResolved = type_resolution.ensureLayoutResolved; -pub const ensureFieldInitsResolved = type_resolution.ensureFieldInitsResolved; +pub const ensureStructDefaultsResolved = type_resolution.ensureStructDefaultsResolved; pub fn getBuiltinType(sema: *Sema, src: LazySrcLoc, decl: Zcu.BuiltinDecl) SemaError!Type { assert(decl.kind() == .type); @@ -34644,48 +34253,14 @@ fn getExpectedBuiltinFnType(sema: *Sema, decl: Zcu.BuiltinDecl) CompileError!Typ }; } -/// TODO MLUGG: this is a gnarly hack -const PartialTypeName = union(enum) { - exact: struct { - name: InternPool.NullTerminatedString, - nav: InternPool.Nav.Index.Optional, - }, - anon_prefix: []const u8, - fn apply( - name: PartialTypeName, - wip: *const InternPool.WipContainerType, - pt: Zcu.PerThread, - ) (Allocator.Error || std.Io.Cancelable)!InternPool.NullTerminatedString { - const zcu = pt.zcu; - const comp = zcu.comp; - const ip = &zcu.intern_pool; - switch (name) { - .exact => |e| { - wip.setName(ip, e.name, e.nav); - return e.name; - }, - .anon_prefix => |prefix| { - const resolved_name = try ip.getOrPutStringFmt( - comp.gpa, - comp.io, - pt.tid, - "{s}_{d}", - .{ prefix, @intFromEnum(wip.index) }, - .no_embedded_nulls, - ); - wip.setName(ip, resolved_name, .none); - return resolved_name; - }, - } - } -}; -pub fn createTypeName( +fn setTypeName( sema: *Sema, block: *Block, + wip: *const InternPool.WipContainerType, name_strategy: Zir.Inst.NameStrategy, anon_prefix: []const u8, inst: Zir.Inst.Index, -) CompileError!PartialTypeName { +) CompileError!void { const pt = sema.pt; const zcu = pt.zcu; const comp = zcu.comp; @@ -34693,13 +34268,26 @@ pub fn createTypeName( const io = comp.io; const ip = &zcu.intern_pool; - switch 
(name_strategy) { - .anon => {}, // handled after switch - .parent => return .{ .exact = .{ - .name = block.type_name_ctx, - .nav = sema.owner.unwrap().nav_val.toOptional(), - } }, - .func => func_strat: { + strat: switch (name_strategy) { + .anon => { + // It would be neat to have "struct:line:column" but this name has + // to survive incremental updates, where it may have been shifted down + // or up to a different line, but unchanged, and thus not unnecessarily + // semantically analyzed. + // TODO: that would be possible, by detecting line number changes and renaming + // types appropriately. However, `@typeName` becomes a problem then. If we remove + // that builtin from the language, we can consider this. + wip.setName(ip, try ip.getOrPutStringFmt( + gpa, + io, + pt.tid, + "{f}__{s}_{d}", + .{ block.type_name_ctx.fmt(ip), anon_prefix, @intFromEnum(wip.index) }, + .no_embedded_nulls, + ), .none); + }, + .parent => wip.setName(ip, block.type_name_ctx, sema.owner.unwrap().nav_val.toOptional()), + .func => { const fn_info = sema.code.getFnInfo(ip.funcZirBodyInst(sema.func_index).resolve(ip) orelse return error.AnalysisFail); const zir_tags = sema.code.instructions.items(.tag); @@ -34717,7 +34305,9 @@ pub fn createTypeName( // If not then this is a struct type being returned from a non-generic // function and the name doesn't matter since it will later // result in a compile error. 
- const arg_val = try sema.resolveValue(arg) orelse break :func_strat; // fall through to anon strat + const arg_val = try sema.resolveValue(arg) orelse { + continue :strat .anon; + }; if (arg_i != 0) w.writeByte(',') catch return error.OutOfMemory; @@ -34739,431 +34329,28 @@ pub fn createTypeName( }; w.writeByte(')') catch return error.OutOfMemory; - return .{ .exact = .{ - .name = try ip.getOrPutString(gpa, io, pt.tid, aw.written(), .no_embedded_nulls), - .nav = .none, - } }; + const name = try ip.getOrPutString(gpa, io, pt.tid, aw.written(), .no_embedded_nulls); + wip.setName(ip, name, .none); }, .dbg_var => { // TODO: this logic is questionable. We ideally should be traversing the `Block` rather than relying on the order of AstGen instructions. const ref = inst.toRef(); const zir_tags = sema.code.instructions.items(.tag); const zir_data = sema.code.instructions.items(.data); - for (@intFromEnum(inst)..zir_tags.len) |i| switch (zir_tags[i]) { + const var_name = for (@intFromEnum(inst)..zir_tags.len) |i| switch (zir_tags[i]) { .dbg_var_ptr, .dbg_var_val => if (zir_data[i].str_op.operand == ref) { - return .{ .exact = .{ - .name = try ip.getOrPutStringFmt(gpa, io, pt.tid, "{f}.{s}", .{ - block.type_name_ctx.fmt(ip), zir_data[i].str_op.getStr(sema.code), - }, .no_embedded_nulls), - .nav = .none, - } }; + break zir_data[i].str_op.getStr(sema.code); }, else => {}, + } else { + continue :strat .anon; }; - // fall through to anon strat + const name = try ip.getOrPutStringFmt(gpa, io, pt.tid, "{f}.{s}", .{ + block.type_name_ctx.fmt(ip), var_name, + }, .no_embedded_nulls); + wip.setName(ip, name, .none); }, } - - // anon strat handling - - // It would be neat to have "struct:line:column" but this name has - // to survive incremental updates, where it may have been shifted down - // or up to a different line, but unchanged, and thus not unnecessarily - // semantically analyzed. 
- // TODO: that would be possible, by detecting line number changes and renaming - // types appropriately. However, `@typeName` becomes a problem then. If we remove - // that builtin from the language, we can consider this. - - return .{ .anon_prefix = try std.fmt.allocPrint( - sema.arena, - "{f}__{s}", - .{ block.type_name_ctx.fmt(ip), anon_prefix }, - ) }; -} - -pub fn analyzeStructDecl( - pt: Zcu.PerThread, - file_index: Zcu.File.Index, - zir: *const Zir, - parent_namespace: InternPool.OptionalNamespaceIndex, - tracked_inst: InternPool.TrackedInst.Index, - struct_decl: *const Zir.UnwrappedStructDecl, - explicit_backing_type: ?Type, - captures: []const InternPool.CaptureValue, - type_name: PartialTypeName, -) (Allocator.Error || std.Io.Cancelable)!Type { - const zcu = pt.zcu; - const comp = zcu.comp; - const gpa = comp.gpa; - const io = comp.io; - const ip = &zcu.intern_pool; - - const wip = switch (try ip.getStructType(gpa, io, pt.tid, .{ - .fields_len = @intCast(struct_decl.field_names.len), - .layout = struct_decl.layout, - .explicit_packed_backing_type = if (explicit_backing_type) |ty| ty.toIntern() else .none, - .any_comptime_fields = struct_decl.field_comptime_bits != null, - .any_field_defaults = struct_decl.field_default_body_lens != null, - .any_field_aligns = struct_decl.field_align_body_lens != null, - .key = .{ .declared = .{ - .zir_index = tracked_inst, - .captures = captures, - } }, - })) { - .existing => |ty| return .fromInterned(ty), - .wip => |wip| wip, - }; - errdefer wip.cancel(ip, pt.tid); - - _ = try type_name.apply(&wip, pt); - - var field_it = struct_decl.iterateFields(); - while (field_it.next()) |field| { - const name_slice = zir.nullTerminatedString(field.name); - const name = try ip.getOrPutString(gpa, io, pt.tid, name_slice, .no_embedded_nulls); - assert(wip.nextField(ip, name, field.is_comptime) == null); // AstGen validated this for us - } - - const new_namespace_index: InternPool.NamespaceIndex = try pt.createNamespace(.{ - .parent 
= parent_namespace, - .owner_type = wip.index, - .file_scope = file_index, - .generation = zcu.generation, - }); - errdefer pt.destroyNamespace(new_namespace_index); - - try pt.scanNamespace(new_namespace_index, struct_decl.decls); - - // MLUGG TODO: we could potentially revert this language change if we wanted? don't mind - try zcu.comp.queueJob(.{ .analyze_unit = .wrap(.{ .type_layout = wip.index }) }); - try zcu.comp.queueJob(.{ .analyze_unit = .wrap(.{ .type_inits = wip.index }) }); - - if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newType(zcu, wip.index); - - try zcu.outdated.ensureUnusedCapacity(gpa, 2); - try zcu.outdated_ready.ensureUnusedCapacity(gpa, 2); - errdefer comptime unreachable; // because we don't remove the `outdated` entries - zcu.outdated.putAssumeCapacityNoClobber(.wrap(.{ .type_layout = wip.index }), 0); - zcu.outdated.putAssumeCapacityNoClobber(.wrap(.{ .type_inits = wip.index }), 0); - zcu.outdated_ready.putAssumeCapacityNoClobber(.wrap(.{ .type_layout = wip.index }), {}); - zcu.outdated_ready.putAssumeCapacityNoClobber(.wrap(.{ .type_inits = wip.index }), {}); - - return .fromInterned(wip.finish(ip, new_namespace_index)); -} -const AnalyzeUnionDeclError = error{ - OutOfMemory, - Canceled, - /// `packed union(T)` syntax was used, but `T` was not an integer type. - ExplicitBackingNotInt, - /// `union(enum(T))` syntax was used, but `T` was not an integer type. - ExplicitTagNotInt, - /// `union(T)` syntax was used, but `T` was not an enum type. - ExplicitTagNotEnum, - /// `union(T)` syntax was used, but the fields of the union do not exactly - /// correspond to the fields of the enum `T`. 
- ExplicitTagFieldMismatch, -}; -fn analyzeUnionDecl( - pt: Zcu.PerThread, - file_index: Zcu.File.Index, - zir: *const Zir, - parent_namespace: InternPool.OptionalNamespaceIndex, - want_safe_types: bool, - tracked_inst: InternPool.TrackedInst.Index, - union_decl: *const Zir.UnwrappedUnionDecl, - arg_type: ?Type, - captures: []const InternPool.CaptureValue, - type_name: PartialTypeName, -) AnalyzeUnionDeclError!Type { - const zcu = pt.zcu; - const comp = zcu.comp; - const gpa = comp.gpa; - const io = comp.io; - const ip = &zcu.intern_pool; - - switch (union_decl.kind) { - .tagged_explicit => if (arg_type.?.zigTypeTag(zcu) != .@"enum") { - return error.ExplicitTagNotEnum; - }, - .tagged_enum_explicit => if (arg_type.?.zigTypeTag(zcu) != .int) { - return error.ExplicitTagNotInt; - }, - .packed_explicit => if (arg_type.?.zigTypeTag(zcu) != .int) { - return error.ExplicitBackingNotInt; - }, - .auto, - .tagged_enum, - .@"extern", - .@"packed", - => assert(arg_type == null), - } - - const wip = switch (try ip.getUnionType(gpa, io, pt.tid, .{ - .fields_len = @intCast(union_decl.field_names.len), - .layout = union_decl.kind.layout(), - .explicit_packed_backing_type = switch (union_decl.kind) { - .packed_explicit => arg_type.?.toIntern(), - else => .none, - }, - .runtime_tag = switch (union_decl.kind) { - .auto => if (want_safe_types) .safety else .none, - - .tagged_explicit, - .tagged_enum, - .tagged_enum_explicit, - => .tagged, - - .@"extern", - .@"packed", - .packed_explicit, - => .none, - }, - .have_explicit_enum_tag = union_decl.kind == .tagged_explicit, - .any_field_aligns = union_decl.field_align_body_lens != null, - .key = .{ .declared = .{ - .zir_index = tracked_inst, - .captures = captures, - .arg_ty = if (arg_type) |t| t.toIntern() else .none, - } }, - })) { - .existing => |ty| return .fromInterned(ty), - .wip => |wip| wip, - }; - errdefer wip.cancel(ip, pt.tid); - - const resolved_type_name = try type_name.apply(&wip, pt); - - const generated_tag_ty: 
InternPool.Index = if (union_decl.kind == .tagged_explicit) generated_tag_ty: { - const tag_type = arg_type.?; - const enum_field_names = ip.loadEnumType(tag_type.toIntern()).field_names; - // Check that the enum field names match the union field names - if (union_decl.field_names.len != enum_field_names.len) { - return error.ExplicitTagFieldMismatch; - } - for (union_decl.field_names, enum_field_names.get(ip)) |union_field_zir, enum_field_ip| { - const union_field_name = zir.nullTerminatedString(union_field_zir); - const enum_field_name = enum_field_ip.toSlice(ip); - if (!std.mem.eql(u8, union_field_name, enum_field_name)) { - return error.ExplicitTagFieldMismatch; - } - } - wip.setTagType(ip, tag_type.toIntern()); - break :generated_tag_ty .none; - } else generated_tag_ty: { - // Generate a tag type. Even if the union is untagged (`.none`), we still generate a - // hypothetical tag type. - const wip_tag_ty = switch (try ip.getEnumType(gpa, io, pt.tid, .{ - .fields_len = @intCast(union_decl.field_names.len), - .explicit_int_tag_type = switch (union_decl.kind) { - .tagged_enum_explicit => arg_type.?.toIntern(), - else => .none, - }, - .nonexhaustive = false, - .key = .{ .generated_union_tag = wip.index }, - })) { - .existing => unreachable, // enum type is keyed on this union type which we're only just creating - .wip => |wip_tag_ty| wip_tag_ty, - }; - errdefer wip_tag_ty.cancel(ip, pt.tid); - // Populate the generated tag type's name - const tag_type_name = try ip.getOrPutStringFmt( - gpa, - io, - pt.tid, - "@typeInfo({f}).@\"union\".tag_type.?", - .{resolved_type_name.fmt(ip)}, - .no_embedded_nulls, - ); - wip_tag_ty.setName(ip, tag_type_name, .none); - // Populate the generated tag type's field names - for (union_decl.field_names) |zir_name| { - const name_slice = zir.nullTerminatedString(zir_name); - const name = try ip.getOrPutString(gpa, io, pt.tid, name_slice, .no_embedded_nulls); - assert(wip_tag_ty.nextField(ip, name, false) == null); // AstGen validated 
this for us - } - // If not explicitly given, populate the generated tag type's *integer* tag type - switch (union_decl.kind) { - .tagged_enum_explicit => {}, // already set by `getEnumType` - else => { - // Infer the int tag type from the field count - const bits = Type.smallestUnsignedBits(union_decl.field_names.len -| 1); - const int_tag_type = try pt.intType(.unsigned, bits); - wip_tag_ty.setTagType(ip, int_tag_type.toIntern()); - }, - } - // Create a dummy namespace for the generated tag type - const new_namespace_index = try pt.createNamespace(.{ - .parent = parent_namespace, - .owner_type = wip_tag_ty.index, - .file_scope = file_index, - .generation = zcu.generation, - }); - errdefer pt.destroyNamespace(new_namespace_index); - wip.setTagType(ip, wip_tag_ty.index); - break :generated_tag_ty wip_tag_ty.finish(ip, new_namespace_index); - }; - // If we fail to create the union type, we must delete the generated enum tag type, since it - // would hold a reference to the deleted union. - errdefer if (generated_tag_ty != .none) ip.remove(pt.tid, generated_tag_ty); - - const new_namespace_index: InternPool.NamespaceIndex = try pt.createNamespace(.{ - .parent = parent_namespace, - .owner_type = wip.index, - .file_scope = file_index, - .generation = zcu.generation, - }); - errdefer pt.destroyNamespace(new_namespace_index); - - try pt.scanNamespace(new_namespace_index, union_decl.decls); - - // MLUGG TODO: we could potentially revert this language change if we wanted? 
don't mind - try zcu.comp.queueJob(.{ .analyze_unit = .wrap(.{ .type_layout = wip.index }) }); - if (generated_tag_ty != .none) { - try zcu.comp.queueJob(.{ .analyze_unit = .wrap(.{ .type_inits = generated_tag_ty }) }); - } - - if (zcu.comp.debugIncremental()) { - try zcu.incremental_debug_state.newType(zcu, wip.index); - if (generated_tag_ty != .none) { - try zcu.incremental_debug_state.newType(zcu, generated_tag_ty); - } - } - - try zcu.outdated.ensureUnusedCapacity(gpa, 2); - try zcu.outdated_ready.ensureUnusedCapacity(gpa, 2); - errdefer comptime unreachable; // because we don't remove the `outdated` entry - zcu.outdated.putAssumeCapacityNoClobber(.wrap(.{ .type_layout = wip.index }), 0); - zcu.outdated_ready.putAssumeCapacityNoClobber(.wrap(.{ .type_layout = wip.index }), {}); - if (generated_tag_ty != .none) { - zcu.outdated.putAssumeCapacityNoClobber(.wrap(.{ .type_inits = generated_tag_ty }), 0); - zcu.outdated_ready.putAssumeCapacityNoClobber(.wrap(.{ .type_inits = generated_tag_ty }), {}); - } - - return .fromInterned(wip.finish(ip, new_namespace_index)); -} -const AnalyzeEnumDeclError = error{ - OutOfMemory, - Canceled, - /// `enum(T)` syntax was used, but `T` was not an integer type. - ExplicitTagNotInt, -}; -fn analyzeEnumDecl( - pt: Zcu.PerThread, - file_index: Zcu.File.Index, - zir: *const Zir, - parent_namespace: InternPool.OptionalNamespaceIndex, - tracked_inst: InternPool.TrackedInst.Index, - enum_decl: *const Zir.UnwrappedEnumDecl, - explicit_tag_type: ?Type, - captures: []const InternPool.CaptureValue, - type_name: PartialTypeName, -) AnalyzeEnumDeclError!Type { - const zcu = pt.zcu; - const comp = zcu.comp; - const gpa = comp.gpa; - const io = comp.io; - const ip = &zcu.intern_pool; - - if (explicit_tag_type) |ty| { - // MLUGG TODO: make a final call on whether comptime_int is a valid int tag type, and follow it everywhere. 
- // i think not in the name of simplicity, but my opinion might depend on whether it's broken in practice today - switch (ty.zigTypeTag(zcu)) { - .int, .comptime_int => {}, - else => return error.ExplicitTagNotInt, - } - } - - const wip = switch (try ip.getEnumType(gpa, io, pt.tid, .{ - .fields_len = @intCast(enum_decl.field_names.len), - .explicit_int_tag_type = if (explicit_tag_type) |ty| ty.toIntern() else .none, - .nonexhaustive = enum_decl.nonexhaustive, - .key = .{ .declared = .{ - .zir_index = tracked_inst, - .captures = captures, - } }, - })) { - .existing => |ty| return .fromInterned(ty), - .wip => |wip| wip, - }; - errdefer wip.cancel(ip, pt.tid); - - _ = try type_name.apply(&wip, pt); - - var field_it = enum_decl.iterateFields(); - while (field_it.next()) |field| { - const name_slice = zir.nullTerminatedString(field.name); - const name = try ip.getOrPutString(gpa, io, pt.tid, name_slice, .no_embedded_nulls); - assert(wip.nextField(ip, name, false) == null); // AstGen validated this for us - } - - if (explicit_tag_type == null) { - // Infer the int tag type from the field count - const bits = Type.smallestUnsignedBits(enum_decl.field_names.len -| 1); - const int_tag_ty = try pt.intType(.unsigned, bits); - wip.setTagType(ip, int_tag_ty.toIntern()); - } - - const new_namespace_index: InternPool.NamespaceIndex = try pt.createNamespace(.{ - .parent = parent_namespace, - .owner_type = wip.index, - .file_scope = file_index, - .generation = zcu.generation, - }); - errdefer pt.destroyNamespace(new_namespace_index); - - try pt.scanNamespace(new_namespace_index, enum_decl.decls); - - // MLUGG TODO: we could potentially revert this language change if we wanted? 
don't mind - try zcu.comp.queueJob(.{ .analyze_unit = .wrap(.{ .type_inits = wip.index }) }); - - if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newType(zcu, wip.index); - - try zcu.outdated.ensureUnusedCapacity(gpa, 1); - try zcu.outdated_ready.ensureUnusedCapacity(gpa, 1); - errdefer comptime unreachable; // because we don't remove the `outdated` entry - zcu.outdated.putAssumeCapacityNoClobber(.wrap(.{ .type_inits = wip.index }), 0); - zcu.outdated_ready.putAssumeCapacityNoClobber(.wrap(.{ .type_inits = wip.index }), {}); - - return .fromInterned(wip.finish(ip, new_namespace_index)); -} -fn analyzeOpaqueDecl( - pt: Zcu.PerThread, - file_index: Zcu.File.Index, - parent_namespace: InternPool.OptionalNamespaceIndex, - tracked_inst: InternPool.TrackedInst.Index, - opaque_decl: *const Zir.UnwrappedOpaqueDecl, - captures: []const InternPool.CaptureValue, - type_name: PartialTypeName, -) (Allocator.Error || std.Io.Cancelable)!Type { - const zcu = pt.zcu; - const comp = zcu.comp; - const gpa = comp.gpa; - const io = comp.io; - const ip = &zcu.intern_pool; - - const wip = switch (try ip.getOpaqueType(gpa, io, pt.tid, .{ - .zir_index = tracked_inst, - .captures = captures, - })) { - .existing => |ty| return .fromInterned(ty), - .wip => |wip| wip, - }; - errdefer wip.cancel(ip, pt.tid); - - _ = try type_name.apply(&wip, pt); - - const new_namespace_index: InternPool.NamespaceIndex = try pt.createNamespace(.{ - .parent = parent_namespace, - .owner_type = wip.index, - .file_scope = file_index, - .generation = zcu.generation, - }); - errdefer pt.destroyNamespace(new_namespace_index); - - try pt.scanNamespace(new_namespace_index, opaque_decl.decls); - - if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newType(zcu, wip.index); - return .fromInterned(wip.finish(ip, new_namespace_index)); } fn zirStructDecl( @@ -35173,6 +34360,10 @@ fn zirStructDecl( ) CompileError!Air.Inst.Ref { const pt = sema.pt; const zcu = pt.zcu; + const comp = zcu.comp; + 
const gpa = comp.gpa; + const io = comp.io; + const ip = &zcu.intern_pool; const tracked_inst = try block.trackZir(inst); @@ -35180,32 +34371,50 @@ fn zirStructDecl( .base_node_inst = tracked_inst, .offset = .nodeOffset(.zero), }; - const backing_ty_src: LazySrcLoc = .{ - .base_node_inst = tracked_inst, - .offset = .{ .node_offset_container_tag = .zero }, - }; const struct_decl = sema.code.getStructDecl(inst); const captures = try sema.getCaptures(block, src, struct_decl.captures, struct_decl.capture_names); - const backing_int_type: ?Type = ty: { - if (struct_decl.backing_int_type == .none) break :ty null; - break :ty try sema.resolveType(block, backing_ty_src, struct_decl.backing_int_type); - // MLUGG TODO validate it's an int! - }; + const ty: Type = switch (try ip.getDeclaredStructType(gpa, io, pt.tid, .{ + .zir_index = tracked_inst, + .captures = captures, + .fields_len = @intCast(struct_decl.field_names.len), + .layout = struct_decl.layout, + .any_comptime_fields = struct_decl.field_comptime_bits != null, + .any_field_defaults = struct_decl.field_default_body_lens != null, + .any_field_aligns = struct_decl.field_align_body_lens != null, + .packed_backing_mode = if (struct_decl.backing_int_type_body != null) .explicit else .auto, + })) { + .existing => |ty| .fromInterned(ty), + .wip => |wip| ty: { + errdefer wip.cancel(ip, pt.tid); + try sema.setTypeName(block, &wip, struct_decl.name_strategy, "struct", inst); + const new_namespace_index: InternPool.NamespaceIndex = try pt.createNamespace(.{ + .parent = block.namespace.toOptional(), + .owner_type = wip.index, + .file_scope = block.getFileScopeIndex(zcu), + .generation = zcu.generation, + }); + errdefer pt.destroyNamespace(new_namespace_index); + try pt.scanNamespace(new_namespace_index, struct_decl.decls); + // MLUGG TODO: we could potentially revert this language change if we wanted? 
don't mind + try zcu.comp.queueJob(.{ .analyze_unit = .wrap(.{ .type_layout = wip.index }) }); + try zcu.comp.queueJob(.{ .analyze_unit = .wrap(.{ .struct_defaults = wip.index }) }); - const ty = try analyzeStructDecl( - pt, - block.getFileScopeIndex(zcu), - &sema.code, - block.namespace.toOptional(), - tracked_inst, - &struct_decl, - backing_int_type, - captures, - try sema.createTypeName(block, struct_decl.name_strategy, "struct", inst), - ); + if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newType(zcu, wip.index); + + try zcu.outdated.ensureUnusedCapacity(gpa, 2); + try zcu.outdated_ready.ensureUnusedCapacity(gpa, 2); + errdefer comptime unreachable; // because we don't remove the `outdated` entries + zcu.outdated.putAssumeCapacityNoClobber(.wrap(.{ .type_layout = wip.index }), 0); + zcu.outdated.putAssumeCapacityNoClobber(.wrap(.{ .struct_defaults = wip.index }), 0); + zcu.outdated_ready.putAssumeCapacityNoClobber(.wrap(.{ .type_layout = wip.index }), {}); + zcu.outdated_ready.putAssumeCapacityNoClobber(.wrap(.{ .struct_defaults = wip.index }), {}); + + break :ty .fromInterned(wip.finish(ip, new_namespace_index)); + }, + }; try sema.addTypeReferenceEntry(src, ty); @@ -35234,125 +34443,69 @@ fn zirUnionDecl( .base_node_inst = tracked_inst, .offset = .nodeOffset(.zero), }; - const arg_ty_src: LazySrcLoc = .{ - .base_node_inst = tracked_inst, - .offset = .{ .node_offset_container_tag = .zero }, - }; const union_decl = sema.code.getUnionDecl(inst); const captures = try sema.getCaptures(block, src, union_decl.captures, union_decl.capture_names); - const arg_type: ?Type = ty: { - if (union_decl.arg_type == .none) break :ty null; - break :ty try sema.resolveType(block, arg_ty_src, union_decl.arg_type); - }; + const ty: Type = switch (try ip.getDeclaredUnionType(gpa, io, pt.tid, .{ + .zir_index = tracked_inst, + .captures = captures, + .fields_len = @intCast(union_decl.field_names.len), + .layout = union_decl.kind.layout(), + .any_field_aligns = 
union_decl.field_align_body_lens != null, + .runtime_tag = switch (union_decl.kind) { + .auto => if (block.wantSafeTypes()) .safety else .none, - const ty = analyzeUnionDecl( - pt, - block.getFileScopeIndex(zcu), - &sema.code, - block.namespace.toOptional(), - block.wantSafeTypes(), - tracked_inst, - &union_decl, - arg_type, - captures, - try sema.createTypeName(block, union_decl.name_strategy, "union", inst), - ) catch |err| switch (err) { - error.OutOfMemory, - error.Canceled, - => |e| return e, + .tagged_explicit, + .tagged_enum, + .tagged_enum_explicit, + => .tagged, - error.ExplicitBackingNotInt => return sema.fail( - block, - arg_ty_src, - "expected integer backing type, found '{f}'", - .{arg_type.?.fmt(pt)}, - ), - error.ExplicitTagNotInt => return sema.fail( - block, - arg_ty_src, - "expected integer tag type, found '{f}'", - .{arg_type.?.fmt(pt)}, - ), - error.ExplicitTagNotEnum => return sema.fail( - block, - arg_ty_src, - "expected enum tag type, found '{f}'", - .{arg_type.?.fmt(pt)}, - ), - error.ExplicitTagFieldMismatch => { - const enum_obj = ip.loadEnumType(arg_type.?.toIntern()); - const enum_to_union_map = try sema.arena.alloc(?u32, enum_obj.field_names.len); - @memset(enum_to_union_map, null); - for (union_decl.field_names, 0..) 
|field_name_zir, union_field_idx| { - const field_name_ip = try ip.getOrPutString(gpa, io, pt.tid, sema.code.nullTerminatedString(field_name_zir), .no_embedded_nulls); - if (enum_obj.nameIndex(ip, field_name_ip)) |enum_field_idx| { - enum_to_union_map[enum_field_idx] = @intCast(union_field_idx); - continue; - } - const union_field_src: LazySrcLoc = .{ - .base_node_inst = tracked_inst, - .offset = .{ .container_field_name = @intCast(union_field_idx) }, - }; - return sema.failWithOwnedErrorMsg(block, msg: { - const msg = try sema.errMsg(union_field_src, "no field named '{f}' in enum '{f}'", .{ field_name_ip.fmt(ip), arg_type.?.fmt(pt) }); - errdefer msg.destroy(gpa); - try sema.addDeclaredHereNote(msg, arg_type.?); - break :msg msg; - }); - } - for (enum_to_union_map, 0..) |union_field_idx, enum_field_idx| { - if (union_field_idx != null) continue; - const field_name_ip = enum_obj.field_names.get(ip)[enum_field_idx]; - const enum_field_src: LazySrcLoc = .{ - .base_node_inst = arg_type.?.typeDeclInstAllowGeneratedTag(zcu).?, - .offset = .{ .container_field_name = @intCast(enum_field_idx) }, - }; - return sema.failWithOwnedErrorMsg(block, msg: { - const msg = try sema.errMsg(src, "enum field '{f}' missing from union", .{field_name_ip.fmt(ip)}); - errdefer msg.destroy(gpa); - try sema.errNote(enum_field_src, msg, "enum field here", .{}); - break :msg msg; - }); - } - for (enum_to_union_map, 0..) |union_field_idx, enum_field_idx| { - if (union_field_idx.? == enum_field_idx) continue; - const field_name = sema.code.nullTerminatedString( - union_decl.field_names[union_field_idx.?], - ); - const union_field_src: LazySrcLoc = .{ - .base_node_inst = tracked_inst, - .offset = .{ .container_field_name = union_field_idx.? 
}, - }; - const enum_field_src: LazySrcLoc = .{ - .base_node_inst = arg_type.?.typeDeclInstAllowGeneratedTag(zcu).?, - .offset = .{ .container_field_name = @intCast(enum_field_idx) }, - }; - return sema.failWithOwnedErrorMsg(block, msg: { - const msg = try sema.errMsg(src, "union field order does not match tag enum field order", .{}); - errdefer msg.destroy(gpa); - try sema.errNote(union_field_src, msg, "union field '{s}' is index {d}", .{ field_name, union_field_idx.? }); - try sema.errNote(enum_field_src, msg, "enum field '{s}' is index {d}", .{ field_name, enum_field_idx }); - break :msg msg; - }); - } - unreachable; + .@"extern", + .@"packed", + .packed_explicit, + => .none, + }, + .enum_tag_mode = switch (union_decl.kind) { + .tagged_explicit => .explicit, + else => .auto, + }, + .packed_backing_mode = switch (union_decl.kind) { + .packed_explicit => .explicit, + else => .auto, + }, + })) { + .existing => |ty| .fromInterned(ty), + .wip => |wip| ty: { + errdefer wip.cancel(ip, pt.tid); + try sema.setTypeName(block, &wip, union_decl.name_strategy, "union", inst); + + const new_namespace_index: InternPool.NamespaceIndex = try pt.createNamespace(.{ + .parent = block.namespace.toOptional(), + .owner_type = wip.index, + .file_scope = block.getFileScopeIndex(zcu), + .generation = zcu.generation, + }); + errdefer pt.destroyNamespace(new_namespace_index); + + try pt.scanNamespace(new_namespace_index, union_decl.decls); + + // MLUGG TODO: we could potentially revert this language change if we wanted? 
don't mind + try zcu.comp.queueJob(.{ .analyze_unit = .wrap(.{ .type_layout = wip.index }) }); + + if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newType(zcu, wip.index); + + try zcu.outdated.ensureUnusedCapacity(gpa, 1); + try zcu.outdated_ready.ensureUnusedCapacity(gpa, 1); + errdefer comptime unreachable; // because we don't remove the `outdated` entry + zcu.outdated.putAssumeCapacityNoClobber(.wrap(.{ .type_layout = wip.index }), 0); + zcu.outdated_ready.putAssumeCapacityNoClobber(.wrap(.{ .type_layout = wip.index }), {}); + + break :ty .fromInterned(wip.finish(ip, new_namespace_index)); }, }; - const enum_tag_ty = ty.unionTagTypeHypothetical(zcu); - switch (ip.indexToKey(enum_tag_ty.toIntern()).enum_type) { - .declared, .reified => {}, - .generated_union_tag => |owner_union_ty| { - assert(owner_union_ty == ty.toIntern()); - // generated tag type [MLUGG] - // Enum inits are resolved eagerly. TODO MLUGG: honestly i don't think they SHOULD be lol - try sema.ensureFieldInitsResolved(.fromInterned(ip.loadUnionType(ty.toIntern()).enum_tag_type)); - }, - } - try sema.addTypeReferenceEntry(src, ty); // Make sure we update the namespace if the declaration is re-analyzed, to pick @@ -35369,6 +34522,10 @@ fn zirEnumDecl( ) CompileError!Air.Inst.Ref { const pt = sema.pt; const zcu = pt.zcu; + const comp = zcu.comp; + const gpa = comp.gpa; + const io = comp.io; + const ip = &zcu.intern_pool; const tracked_inst = try block.trackZir(inst); @@ -35376,46 +34533,49 @@ fn zirEnumDecl( .base_node_inst = tracked_inst, .offset = .nodeOffset(.zero), }; - const tag_ty_src: LazySrcLoc = .{ - .base_node_inst = tracked_inst, - .offset = .{ .node_offset_container_tag = .zero }, - }; const enum_decl = sema.code.getEnumDecl(inst); const captures = try sema.getCaptures(block, src, enum_decl.captures, enum_decl.capture_names); - const tag_type: ?Type = ty: { - if (enum_decl.tag_type == .none) break :ty null; - break :ty try sema.resolveType(block, tag_ty_src, 
enum_decl.tag_type); + const ty: Type = switch (try ip.getDeclaredEnumType(gpa, io, pt.tid, .{ + .zir_index = tracked_inst, + .captures = captures, + .fields_len = @intCast(enum_decl.field_names.len), + .nonexhaustive = enum_decl.nonexhaustive, + .int_tag_mode = if (enum_decl.tag_type_body != null) .explicit else .auto, + })) { + .existing => |ty| .fromInterned(ty), + .wip => |wip| ty: { + errdefer wip.cancel(ip, pt.tid); + + try sema.setTypeName(block, &wip, enum_decl.name_strategy, "enum", inst); + + const new_namespace_index: InternPool.NamespaceIndex = try pt.createNamespace(.{ + .parent = block.namespace.toOptional(), + .owner_type = wip.index, + .file_scope = block.getFileScopeIndex(zcu), + .generation = zcu.generation, + }); + errdefer pt.destroyNamespace(new_namespace_index); + + try pt.scanNamespace(new_namespace_index, enum_decl.decls); + + // MLUGG TODO: we could potentially revert this language change if we wanted? don't mind + try zcu.comp.queueJob(.{ .analyze_unit = .wrap(.{ .type_layout = wip.index }) }); + + if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newType(zcu, wip.index); + + try zcu.outdated.ensureUnusedCapacity(gpa, 1); + try zcu.outdated_ready.ensureUnusedCapacity(gpa, 1); + errdefer comptime unreachable; // because we don't remove the `outdated` entry + zcu.outdated.putAssumeCapacityNoClobber(.wrap(.{ .type_layout = wip.index }), 0); + zcu.outdated_ready.putAssumeCapacityNoClobber(.wrap(.{ .type_layout = wip.index }), {}); + + break :ty .fromInterned(wip.finish(ip, new_namespace_index)); + }, }; - const ty = analyzeEnumDecl( - pt, - block.getFileScopeIndex(zcu), - &sema.code, - block.namespace.toOptional(), - tracked_inst, - &enum_decl, - tag_type, - captures, - try sema.createTypeName(block, enum_decl.name_strategy, "enum", inst), - ) catch |err| switch (err) { - error.OutOfMemory, - error.Canceled, - => |e| return e, - - error.ExplicitTagNotInt => return sema.fail( - block, - tag_ty_src, - "expected integer tag type, 
found '{f}'", - .{tag_type.?.fmt(pt)}, - ), - }; - - // Enum inits are resolved eagerly. TODO MLUGG: honestly i don't think they SHOULD be lol - try sema.ensureFieldInitsResolved(ty); - try sema.addTypeReferenceEntry(src, ty); // Make sure we update the namespace if the declaration is re-analyzed, to pick @@ -35432,6 +34592,10 @@ fn zirOpaqueDecl( ) CompileError!Air.Inst.Ref { const pt = sema.pt; const zcu = pt.zcu; + const comp = zcu.comp; + const gpa = comp.gpa; + const io = comp.io; + const ip = &zcu.intern_pool; const tracked_inst = try block.trackZir(inst); @@ -35444,15 +34608,26 @@ fn zirOpaqueDecl( const captures = try sema.getCaptures(block, src, opaque_decl.captures, opaque_decl.capture_names); - const ty = try analyzeOpaqueDecl( - pt, - block.getFileScopeIndex(zcu), - block.namespace.toOptional(), - tracked_inst, - &opaque_decl, - captures, - try sema.createTypeName(block, opaque_decl.name_strategy, "opaque", inst), - ); + const ty: Type = switch (try ip.getDeclaredOpaqueType(gpa, io, pt.tid, .{ + .zir_index = tracked_inst, + .captures = captures, + })) { + .existing => |ty| .fromInterned(ty), + .wip => |wip| ty: { + errdefer wip.cancel(ip, pt.tid); + try sema.setTypeName(block, &wip, opaque_decl.name_strategy, "opaque", inst); + const new_namespace_index: InternPool.NamespaceIndex = try pt.createNamespace(.{ + .parent = block.namespace.toOptional(), + .owner_type = wip.index, + .file_scope = block.getFileScopeIndex(zcu), + .generation = zcu.generation, + }); + errdefer pt.destroyNamespace(new_namespace_index); + try pt.scanNamespace(new_namespace_index, opaque_decl.decls); + if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newType(zcu, wip.index); + break :ty .fromInterned(wip.finish(ip, new_namespace_index)); + }, + }; try sema.addTypeReferenceEntry(src, ty); diff --git a/src/Sema/LowerZon.zig b/src/Sema/LowerZon.zig index 78d1d8d1df..bb10a39729 100644 --- a/src/Sema/LowerZon.zig +++ b/src/Sema/LowerZon.zig @@ -769,7 +769,7 @@ fn 
lowerStruct(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool const ip = &pt.zcu.intern_pool; try self.sema.ensureLayoutResolved(res_ty); - try self.sema.ensureFieldInitsResolved(res_ty); + try self.sema.ensureStructDefaultsResolved(res_ty); const struct_info = self.sema.pt.zcu.typeToStruct(res_ty).?; const fields: @FieldType(Zoir.Node, "struct_literal") = switch (node.get(self.file.zoir.?)) { diff --git a/src/Sema/type_resolution.zig b/src/Sema/type_resolution.zig index f54a7b944e..f346d217ae 100644 --- a/src/Sema/type_resolution.zig +++ b/src/Sema/type_resolution.zig @@ -18,10 +18,6 @@ const arith = @import("arith.zig"); /// `ty` may be any type; its layout is resolved *recursively* if necessary. /// Adds incremental dependencies tracking any required type resolution. /// MLUGG TODO: to make the langspec non-stupid, we need to call this from WAY fewer places (the conditions need to be less specific). -/// e.g. I think creating the type `fn (A, B) C` should force layout resolution of `A`,`B`,`C`, which will simplify some `analyzeCall` logic. -/// wait i just realised that's probably a terrible idea, fns are a common cause of dep loops rn... so maybe not lol idk... -/// perhaps "layout resolution" for a function should resolve layout of ret ty and stuff, idk. justification: the "layout" of a function is whether -/// fnHasRuntimeBits, which depends whether the ret ty is comptime-only, i.e. 
the ret ty layout /// MLUGG TODO: to be clear, i should audit EVERY use of this before PRing pub fn ensureLayoutResolved(sema: *Sema, ty: Type) SemaError!void { const pt = sema.pt; @@ -33,7 +29,6 @@ pub fn ensureLayoutResolved(sema: *Sema, ty: Type) SemaError!void { .anyframe_type, .simple_type, .opaque_type, - .enum_type, .error_set_type, .inferred_error_set_type, => {}, @@ -52,7 +47,7 @@ pub fn ensureLayoutResolved(sema: *Sema, ty: Type) SemaError!void { .tuple_type => |tuple| for (tuple.types.get(ip)) |field_ty| { try ensureLayoutResolved(sema, .fromInterned(field_ty)); }, - .struct_type, .union_type => { + .struct_type, .union_type, .enum_type => { try sema.declareDependency(.{ .type_layout = ty.toIntern() }); if (zcu.analysis_in_progress.contains(.wrap(.{ .type_layout = ty.toIntern() }))) { // TODO: better error message @@ -89,36 +84,36 @@ pub fn ensureLayoutResolved(sema: *Sema, ty: Type) SemaError!void { } } -/// Asserts that `ty` is either a `struct` type, or an `enum` type. -/// If `ty` is a struct, ensures that fields' default values are resolved. -/// If `ty` is an enum, ensures that fields' integer tag valus are resolved. -/// Adds incremental dependencies tracking the required type resolution. -pub fn ensureFieldInitsResolved(sema: *Sema, ty: Type) SemaError!void { +/// Asserts that `ty` is a non-tuple `struct` type, and ensures that its fields' default values +/// are resolved. Adds incremental dependencies tracking the required type resolution. +/// +/// It is not necessary to call this function to query the values of comptime fields: those values +/// are available from type *layout* resolution, see `ensureLayoutResolved`. 
+pub fn ensureStructDefaultsResolved(sema: *Sema, ty: Type) SemaError!void { const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; - switch (ip.indexToKey(ty.toIntern())) { - .struct_type, .enum_type => {}, - else => unreachable, // assertion failure - } + assert(ip.indexToKey(ty.toIntern()) == .struct_type); - try sema.declareDependency(.{ .type_inits = ty.toIntern() }); - if (zcu.analysis_in_progress.contains(.wrap(.{ .type_inits = ty.toIntern() }))) { + try sema.declareDependency(.{ .struct_defaults = ty.toIntern() }); + if (zcu.analysis_in_progress.contains(.wrap(.{ .struct_defaults = ty.toIntern() }))) { // TODO: better error message return sema.failWithOwnedErrorMsg(null, try sema.errMsg( ty.srcLoc(zcu), - "{s} '{f}' depends on itself", - .{ @tagName(ty.zigTypeTag(zcu)), ty.fmt(pt) }, + "struct '{f}' depends on itself", + .{ty.fmt(pt)}, )); } - try pt.ensureTypeInitsUpToDate(ty); + try pt.ensureStructDefaultsUpToDate(ty); } + /// Asserts that `struct_ty` is a non-packed non-tuple struct, and that `sema.owner` is that type. /// This function *does* register the `src_hash` dependency on the struct. 
pub fn resolveStructLayout(sema: *Sema, struct_ty: Type) CompileError!void { const pt = sema.pt; const zcu = pt.zcu; const comp = zcu.comp; + const io = comp.io; const gpa = comp.gpa; const ip = &zcu.intern_pool; @@ -127,10 +122,6 @@ pub fn resolveStructLayout(sema: *Sema, struct_ty: Type) CompileError!void { const struct_obj = ip.loadStructType(struct_ty.toIntern()); const zir_index = struct_obj.zir_index.resolve(ip).?; - assert(struct_obj.layout != .@"packed"); - - try sema.declareDependency(.{ .src_hash = struct_obj.zir_index }); - var block: Block = .{ .parent = null, .sema = sema, @@ -143,40 +134,93 @@ pub fn resolveStructLayout(sema: *Sema, struct_ty: Type) CompileError!void { }; defer assert(block.instructions.items.len == 0); - const zir_struct = sema.code.getStructDecl(zir_index); - var field_it = zir_struct.iterateFields(); - while (field_it.next()) |zir_field| { - const field_ty_src: LazySrcLoc = .{ - .base_node_inst = struct_obj.zir_index, - .offset = .{ .container_field_type = zir_field.idx }, - }; - const field_align_src: LazySrcLoc = .{ - .base_node_inst = struct_obj.zir_index, - .offset = .{ .container_field_align = zir_field.idx }, - }; + // There may be old field names in here from a previous update. + struct_obj.field_name_map.get(ip).clearRetainingCapacity(); - const field_ty: Type = field_ty: { - block.comptime_reason = .{ .reason = .{ - .src = field_ty_src, - .r = .{ .simple = .struct_field_types }, - } }; - const type_ref = try sema.resolveInlineBody(&block, zir_field.type_body, zir_index); - break :field_ty try sema.analyzeAsType(&block, field_ty_src, type_ref); - }; + if (struct_obj.is_reified) { + // The field names are populated, but we haven't checked for duplicates (nor populated the map) yet. 
+ for (0..struct_obj.field_names.len) |field_index| { + const name = struct_obj.field_names.get(ip)[field_index]; + if (ip.addFieldName(struct_obj.field_names, struct_obj.field_name_map, name)) |prev_field_index| { + return sema.failWithOwnedErrorMsg(&block, msg: { + const src = block.nodeOffset(.zero); + const msg = try sema.errMsg(src, "duplicate struct field '{f}' at index '{d}'", .{ name.fmt(ip), field_index }); + errdefer msg.destroy(gpa); + try sema.errNote(src, msg, "previous field at index '{d}'", .{prev_field_index}); + break :msg msg; + }); + } + } + } else { + // Declared structs do not yet have field information populated: + // * field names + // * field comptime-ness + // * field types + // * field aligns + // It's our job to populate these now. + try sema.declareDependency(.{ .src_hash = struct_obj.zir_index }); + + // Likewise, comptime bits may be set. We clear them all first because it avoids needing + // "unset bit with AND" logic below (instead we only need the "set bit with OR" case). 
+ @memset(struct_obj.field_is_comptime_bits.getAll(ip), 0); + + const zir_struct = sema.code.getStructDecl(zir_index); + var field_it = zir_struct.iterateFields(); + while (field_it.next()) |zir_field| { + { + const name_slice = sema.code.nullTerminatedString(zir_field.name); + const name = try ip.getOrPutString(gpa, io, pt.tid, name_slice, .no_embedded_nulls); + assert(ip.addFieldName(struct_obj.field_names, struct_obj.field_name_map, name) == null); // AstGen validated this for us + } + + if (zir_field.is_comptime) { + const bit_bag_index = zir_field.idx / 32; + const mask = @as(u32, 1) << @intCast(zir_field.idx % 32); + struct_obj.field_is_comptime_bits.getAll(ip)[bit_bag_index] |= mask; + } + + { + const field_ty_src = block.src(.{ .container_field_type = zir_field.idx }); + const field_ty: Type = field_ty: { + block.comptime_reason = .{ .reason = .{ + .src = field_ty_src, + .r = .{ .simple = .struct_field_types }, + } }; + const type_ref = try sema.resolveInlineBody(&block, zir_field.type_body, zir_index); + break :field_ty try sema.analyzeAsType(&block, field_ty_src, .struct_field_types, type_ref); + }; + struct_obj.field_types.get(ip)[zir_field.idx] = field_ty.toIntern(); + } + + if (struct_obj.field_aligns.len == 0) { + assert(zir_field.align_body == null); + } else { + const field_align_src = block.src(.{ .container_field_align = zir_field.idx }); + const field_align: Alignment = a: { + block.comptime_reason = .{ .reason = .{ + .src = field_align_src, + .r = .{ .simple = .struct_field_attrs }, + } }; + const align_body = zir_field.align_body orelse break :a .none; + const align_ref = try sema.resolveInlineBody(&block, align_body, zir_index); + break :a try sema.analyzeAsAlign(&block, field_align_src, align_ref); + }; + struct_obj.field_aligns.get(ip)[zir_field.idx] = field_align; + } + } + } + + if (struct_obj.layout == .@"packed") { + return resolvePackedStructLayout(sema, &block, struct_ty, &struct_obj); + } + + // Resolve the layout of all fields, and 
check their types are allowed. + for (struct_obj.field_types.get(ip), 0..) |field_ty_ip, field_index| { + const field_ty: Type = .fromInterned(field_ty_ip); assert(!field_ty.isGenericPoison()); - + const field_ty_src = block.src(.{ .container_field_type = @intCast(field_index) }); try sema.ensureLayoutResolved(field_ty); - const explicit_field_align: Alignment = a: { - block.comptime_reason = .{ .reason = .{ - .src = field_align_src, - .r = .{ .simple = .struct_field_attrs }, - } }; - const align_body = zir_field.align_body orelse break :a .none; - const align_ref = try sema.resolveInlineBody(&block, align_body, zir_index); - break :a try sema.analyzeAsAlign(&block, field_align_src, align_ref); - }; - if (field_ty.zigTypeTag(zcu) == .@"opaque") { return sema.failWithOwnedErrorMsg(&block, msg: { const msg = try sema.errMsg(field_ty_src, "cannot directly embed opaque type '{f}' in struct", .{field_ty.fmt(pt)}); @@ -186,7 +230,8 @@ pub fn resolveStructLayout(sema: *Sema, struct_ty: Type) CompileError!void { break :msg msg; }); } - if (struct_obj.layout == .@"extern" and !try sema.validateExternType(field_ty, .struct_field)) { + + if (struct_obj.layout == .@"extern" and !field_ty.validateExtern(.struct_field, zcu)) { return sema.failWithOwnedErrorMsg(&block, msg: { const msg = try sema.errMsg(field_ty_src, "extern structs cannot contain fields of type '{f}'", .{field_ty.fmt(pt)}); errdefer msg.destroy(gpa); @@ -195,35 +240,14 @@ pub fn resolveStructLayout(sema: *Sema, struct_ty: Type) CompileError!void { break :msg msg; }); } - - struct_obj.field_types.get(ip)[zir_field.idx] = field_ty.toIntern(); - if (struct_obj.field_aligns.len != 0) { - struct_obj.field_aligns.get(ip)[zir_field.idx] = explicit_field_align; - } else { - assert(explicit_field_align == .none); - } } - try finishStructLayout(sema, &block, struct_ty.srcLoc(zcu), struct_ty.toIntern(), &struct_obj); -} + // Fields are okay. Now we need to resolve the struct's overall layout (size, field offsets, etc). 
-/// Called after populating field types and alignments; populates field offsets, runtime order, and -/// overall struct layout information (size, alignment, comptime-only state, etc). -pub fn finishStructLayout( - sema: *Sema, - /// Only used to report compile errors. - block: *Block, - struct_src: LazySrcLoc, - struct_ty: InternPool.Index, - struct_obj: *const InternPool.LoadedStructType, -) SemaError!void { - const pt = sema.pt; - const zcu = pt.zcu; - const comp = zcu.comp; - const io = comp.io; - const ip = &zcu.intern_pool; + var any_comptime_fields = false; var comptime_only = false; var one_possible_value = true; + var has_runtime_bits = false; var struct_align: Alignment = .@"1"; // Unlike `struct_obj.field_aligns`, these are not `.none`. const resolved_field_aligns = try sema.arena.alloc(Alignment, struct_obj.field_names.len); @@ -240,12 +264,15 @@ pub fn finishStructLayout( // Non-`comptime` fields contribute to the struct's layout. struct_align = struct_align.maxStrict(field_align); if (field_ty.comptimeOnly(zcu)) comptime_only = true; + if (field_ty.hasRuntimeBits(zcu)) has_runtime_bits = true; if (try field_ty.onePossibleValue(pt) == null) one_possible_value = false; if (struct_obj.layout == .auto) { struct_obj.field_runtime_order.get(ip)[field_idx] = @enumFromInt(field_idx); } - } else if (struct_obj.layout == .auto) { + } else { + assert(struct_obj.layout == .auto); // comptime fields not allowed in extern or packed structs struct_obj.field_runtime_order.get(ip)[field_idx] = .omitted; // comptime fields are not in the runtime order + any_comptime_fields = true; } align_out.* = field_align; } @@ -297,75 +324,53 @@ pub fn finishStructLayout( cur_offset = offset + field_ty.abiSize(zcu); } const struct_size = std.math.cast(u32, struct_align.forward(cur_offset)) orelse return sema.fail( - block, - struct_src, + &block, + struct_ty.srcLoc(zcu), "struct layout requires size {d}, this compiler implementation supports up to {d}", .{ 
struct_align.forward(cur_offset), std.math.maxInt(u32) }, ); ip.resolveStructLayout( io, - struct_ty, + struct_ty.toIntern(), struct_size, struct_align, false, // MLUGG TODO XXX NPV one_possible_value, comptime_only, + has_runtime_bits, ); + + if (any_comptime_fields and !struct_obj.is_reified) { + // We also resolve field inits in this case. MLUGG TODO: this sucks, see TODO in resolveStructDefaults + return resolveStructDefaultsInner(sema, &block, &struct_obj); + } } /// Asserts that `struct_ty` is a packed struct, and that `sema.owner` is that type. /// This function *does* register the `src_hash` dependency on the struct. -pub fn resolvePackedStructLayout(sema: *Sema, struct_ty: Type) CompileError!void { +fn resolvePackedStructLayout( + sema: *Sema, + block: *Block, + struct_ty: Type, + struct_obj: *const InternPool.LoadedStructType, +) CompileError!void { const pt = sema.pt; const zcu = pt.zcu; const comp = zcu.comp; + const io = comp.io; const gpa = comp.gpa; const ip = &zcu.intern_pool; - assert(sema.owner.unwrap().type_layout == struct_ty.toIntern()); - - const struct_obj = ip.loadStructType(struct_ty.toIntern()); - const zir_index = struct_obj.zir_index.resolve(ip).?; - - assert(struct_obj.layout == .@"packed"); - - try sema.declareDependency(.{ .src_hash = struct_obj.zir_index }); - - var block: Block = .{ - .parent = null, - .sema = sema, - .namespace = struct_obj.namespace, - .instructions = .{}, - .inlining = null, - .comptime_reason = undefined, // always set before using `block` - .src_base_inst = struct_obj.zir_index, - .type_name_ctx = struct_obj.name, - }; - defer assert(block.instructions.items.len == 0); - + // Resolve the layout of all fields, and check their types are allowed. + // Also count the number of bits while we're at it. 
var field_bits: u64 = 0; - const zir_struct = sema.code.getStructDecl(zir_index); - var field_it = zir_struct.iterateFields(); - while (field_it.next()) |zir_field| { - const field_ty_src: LazySrcLoc = .{ - .base_node_inst = struct_obj.zir_index, - .offset = .{ .container_field_type = zir_field.idx }, - }; - const field_ty: Type = field_ty: { - block.comptime_reason = .{ .reason = .{ - .src = field_ty_src, - .r = .{ .simple = .struct_field_types }, - } }; - const type_ref = try sema.resolveInlineBody(&block, zir_field.type_body, zir_index); - break :field_ty try sema.analyzeAsType(&block, field_ty_src, type_ref); - }; + for (struct_obj.field_types.get(ip), 0..) |field_ty_ip, field_index| { + const field_ty: Type = .fromInterned(field_ty_ip); assert(!field_ty.isGenericPoison()); - struct_obj.field_types.get(ip)[zir_field.idx] = field_ty.toIntern(); - + const field_ty_src = block.src(.{ .container_field_type = @intCast(field_index) }); try sema.ensureLayoutResolved(field_ty); - if (field_ty.zigTypeTag(zcu) == .@"opaque") { - return sema.failWithOwnedErrorMsg(&block, msg: { + return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(field_ty_src, "cannot directly embed opaque type '{f}' in struct", .{field_ty.fmt(pt)}); errdefer msg.destroy(gpa); try sema.errNote(field_ty_src, msg, "opaque types have unknown size", .{}); @@ -373,62 +378,73 @@ pub fn resolvePackedStructLayout(sema: *Sema, struct_ty: Type) CompileError!void break :msg msg; }); } - if (!field_ty.packable(zcu)) { - return sema.failWithOwnedErrorMsg(&block, msg: { - const msg = try sema.errMsg(field_ty_src, "packed structs cannot contain fields of type '{f}'", .{field_ty.fmt(pt)}); - errdefer msg.destroy(gpa); - try sema.explainWhyTypeIsNotPackable(msg, field_ty_src, field_ty); - try sema.addDeclaredHereNote(msg, field_ty); - break :msg msg; - }); - } + if (field_ty.unpackable(zcu)) |reason| return sema.failWithOwnedErrorMsg(block, msg: { + const msg = try sema.errMsg(field_ty_src, 
"packed structs cannot contain fields of type '{f}'", .{field_ty.fmt(pt)}); + errdefer msg.destroy(gpa); + try sema.explainWhyTypeIsUnpackable(msg, field_ty_src, reason); + try sema.addDeclaredHereNote(msg, field_ty); + break :msg msg; + }); assert(!field_ty.comptimeOnly(zcu)); // packable types are not comptime-only field_bits += field_ty.bitSize(zcu); } - try resolvePackedStructBackingInt(sema, &block, field_bits, struct_ty, &struct_obj); -} + const explicit_backing_int_ty: ?Type = if (struct_obj.is_reified) ty: { + break :ty switch (struct_obj.packed_backing_mode) { + .explicit => .fromInterned(struct_obj.packed_backing_int_type), + .auto => null, + }; + } else ty: { + const zir_index = struct_obj.zir_index.resolve(ip).?; + const zir_struct = sema.code.getStructDecl(zir_index); + const backing_int_type_body = zir_struct.backing_int_type_body orelse { + break :ty null; // inferred backing type + }; + // Explicitly specified, so evaluate the backing int type expression. + const backing_int_type_src = block.src(.container_arg); + block.comptime_reason = .{ .reason = .{ + .src = backing_int_type_src, + .r = .{ .simple = .packed_struct_backing_int_type }, + } }; + const type_ref = try sema.resolveInlineBody(block, backing_int_type_body, zir_index); + break :ty try sema.analyzeAsType(block, backing_int_type_src, .packed_struct_backing_int_type, type_ref); + }; -pub fn resolvePackedStructBackingInt( - sema: *Sema, - block: *Block, - field_bits: u64, - struct_ty: Type, - struct_obj: *const InternPool.LoadedStructType, -) SemaError!void { - const pt = sema.pt; - const zcu = pt.zcu; - const comp = zcu.comp; - const gpa = comp.gpa; - const io = comp.io; - const ip = &zcu.intern_pool; - - switch (struct_obj.packed_backing_mode) { - .explicit => { - // We only need to validate the type. 
- const backing_ty: Type = .fromInterned(struct_obj.packed_backing_int_type); - assert(backing_ty.zigTypeTag(zcu) == .int); - if (field_bits != backing_ty.intInfo(zcu).bits) return sema.failWithOwnedErrorMsg(block, msg: { - const src = struct_ty.srcLoc(zcu); - const msg = try sema.errMsg(src, "backing integer bit width does not match total bit width of fields", .{}); - errdefer msg.destroy(gpa); - try sema.errNote(src, msg, "backing integer '{f}' has bit width '{d}'", .{ backing_ty.fmt(pt), backing_ty.bitSize(zcu) }); - try sema.errNote(src, msg, "struct fields have total bit width '{d}'", .{field_bits}); - break :msg msg; - }); - }, - .auto => { - // We need to generate the inferred tag. - const want_bits = std.math.cast(u16, field_bits) orelse return sema.fail( - block, - struct_ty.srcLoc(zcu), - "packed struct bit width '{d}' exceeds maximum bit width of 65535", - .{field_bits}, - ); - const backing_int = try pt.intType(.unsigned, want_bits); - ip.resolvePackedStructBackingInt(io, struct_ty.toIntern(), backing_int.toIntern()); - }, - } + // Finally, either validate or infer the backing int type. + const backing_int_ty: Type = if (explicit_backing_int_ty) |backing_ty| ty: { + // We only need to validate the type. 
+            if (backing_ty.zigTypeTag(zcu) != .int) return sema.failWithOwnedErrorMsg(block, msg: {
+                const src = struct_ty.srcLoc(zcu);
+                const msg = try sema.errMsg(src, "expected backing integer type, found '{f}'", .{backing_ty.fmt(pt)});
+                errdefer msg.destroy(gpa);
+                break :msg msg;
+            });
+            if (field_bits != backing_ty.intInfo(zcu).bits) return sema.failWithOwnedErrorMsg(block, msg: {
+                const src = struct_ty.srcLoc(zcu);
+                const msg = try sema.errMsg(src, "backing integer bit width does not match total bit width of fields", .{});
+                errdefer msg.destroy(gpa);
+                try sema.errNote(src, msg, "backing integer '{f}' has bit width '{d}'", .{ backing_ty.fmt(pt), backing_ty.bitSize(zcu) });
+                try sema.errNote(src, msg, "struct fields have total bit width '{d}'", .{field_bits});
+                break :msg msg;
+            });
+            break :ty backing_ty;
+        } else ty: {
+            // We need to infer the backing integer type from the total field bit count.
+            const backing_int_bits = std.math.cast(u16, field_bits) orelse return sema.fail(
+                block,
+                struct_ty.srcLoc(zcu),
+                "packed struct bit width '{d}' exceeds maximum bit width of 65535",
+                .{field_bits},
+            );
+            break :ty try pt.intType(.unsigned, backing_int_bits);
+        };
+        ip.resolvePackedStructLayout(
+            io,
+            struct_ty.toIntern(),
+            backing_int_ty.toIntern(),
+        );
 }
 
 /// Asserts that `struct_ty` is a non-tuple struct, and that `sema.owner` is that type.
@@ -436,25 +452,33 @@ pub fn resolvePackedStructBackingInt( pub fn resolveStructDefaults(sema: *Sema, struct_ty: Type) CompileError!void { const pt = sema.pt; const zcu = pt.zcu; - const comp = zcu.comp; - const gpa = comp.gpa; const ip = &zcu.intern_pool; - assert(sema.owner.unwrap().type_inits == struct_ty.toIntern()); + assert(sema.owner.unwrap().struct_defaults == struct_ty.toIntern()); try sema.ensureLayoutResolved(struct_ty); const struct_obj = ip.loadStructType(struct_ty.toIntern()); - const zir_index = struct_obj.zir_index.resolve(ip).?; try sema.declareDependency(.{ .src_hash = struct_obj.zir_index }); + // This logic isn't used for reified structs, because the signature of `@Struct` requires that + // default values are populated and correctly typed from the moment the struct type is interned + // (because `Sema.zirReifyStruct` had to dereference the default value from a pointer). + assert(!struct_obj.is_reified); + if (struct_obj.field_defaults.len == 0) { // The struct has no default field values, so the slice has been omitted. return; } - const field_types = struct_obj.field_types.get(ip); + for (struct_obj.field_is_comptime_bits.getAll(ip)) |bit_bag| { + if (bit_bag != 0) { + // There is a comptime field, so layout resolution already filled in the defaults for us! + // MLUGG TODO: perhaps a better idea would be for layout resolution to populate only the defaults *for comptime fields*. 
+ return; + } + } var block: Block = .{ .parent = null, @@ -468,16 +492,30 @@ pub fn resolveStructDefaults(sema: *Sema, struct_ty: Type) CompileError!void { }; defer assert(block.instructions.items.len == 0); + return resolveStructDefaultsInner(sema, &block, &struct_obj); +} +/// MLUGG TODO: i dislike this, see the 'TODO' in the prev func +fn resolveStructDefaultsInner( + sema: *Sema, + block: *Block, + struct_obj: *const InternPool.LoadedStructType, +) CompileError!void { + const pt = sema.pt; + const zcu = pt.zcu; + const comp = zcu.comp; + const gpa = comp.gpa; + const ip = &zcu.intern_pool; + // We'll need to map the struct decl instruction to provide result types + const zir_index = struct_obj.zir_index.resolve(ip).?; try sema.inst_map.ensureSpaceForInstructions(gpa, &.{zir_index}); + const field_types = struct_obj.field_types.get(ip); + const zir_struct = sema.code.getStructDecl(zir_index); var field_it = zir_struct.iterateFields(); while (field_it.next()) |zir_field| { - const default_val_src: LazySrcLoc = .{ - .base_node_inst = struct_obj.zir_index, - .offset = .{ .container_field_value = zir_field.idx }, - }; + const default_val_src = block.src(.{ .container_field_value = zir_field.idx }); block.comptime_reason = .{ .reason = .{ .src = default_val_src, .r = .{ .simple = .struct_field_default_value }, @@ -491,13 +529,13 @@ pub fn resolveStructDefaults(sema: *Sema, struct_ty: Type) CompileError!void { // Provide the result type sema.inst_map.putAssumeCapacity(zir_index, .fromIntern(field_ty.toIntern())); defer assert(sema.inst_map.remove(zir_index)); - break :ref try sema.resolveInlineBody(&block, default_body, zir_index); + break :ref try sema.resolveInlineBody(block, default_body, zir_index); }; - const coerced = try sema.coerce(&block, field_ty, uncoerced, default_val_src); - const default_val = try sema.resolveConstValue(&block, default_val_src, coerced, null); + const coerced = try sema.coerce(block, field_ty, uncoerced, default_val_src); + const 
default_val = try sema.resolveConstValue(block, default_val_src, coerced, null); if (default_val.canMutateComptimeVarState(zcu)) { const field_name = struct_obj.field_names.get(ip)[zir_field.idx]; - return sema.failWithContainsReferenceToComptimeVar(&block, default_val_src, field_name, "field default value", default_val); + return sema.failWithContainsReferenceToComptimeVar(block, default_val_src, field_name, "field default value", default_val); } struct_obj.field_defaults.get(ip)[zir_field.idx] = default_val.toIntern(); } @@ -508,6 +546,7 @@ pub fn resolveUnionLayout(sema: *Sema, union_ty: Type) CompileError!void { const pt = sema.pt; const zcu = pt.zcu; const comp = zcu.comp; + const io = comp.io; const gpa = comp.gpa; const ip = &zcu.intern_pool; @@ -516,10 +555,6 @@ pub fn resolveUnionLayout(sema: *Sema, union_ty: Type) CompileError!void { const union_obj = ip.loadUnionType(union_ty.toIntern()); const zir_index = union_obj.zir_index.resolve(ip).?; - assert(union_obj.layout != .@"packed"); - - try sema.declareDependency(.{ .src_hash = union_obj.zir_index }); - var block: Block = .{ .parent = null, .sema = sema, @@ -532,48 +567,169 @@ pub fn resolveUnionLayout(sema: *Sema, union_ty: Type) CompileError!void { }; defer assert(block.instructions.items.len == 0); - const zir_union = sema.code.getUnionDecl(zir_index); - var field_it = zir_union.iterateFields(); - while (field_it.next()) |zir_field| { - const field_ty_src: LazySrcLoc = .{ - .base_node_inst = union_obj.zir_index, - .offset = .{ .container_field_type = zir_field.idx }, - }; - const field_align_src: LazySrcLoc = .{ - .base_node_inst = union_obj.zir_index, - .offset = .{ .container_field_align = zir_field.idx }, + // MLUGG TODO: this is fucking ugly bro + const explicit_enum_tag_ty: ?Type = if (union_obj.is_reified) ty: { + break :ty switch (union_obj.enum_tag_mode) { + .explicit => .fromInterned(union_obj.enum_tag_type), + .auto => null, }; + } else ty: { + const zir_union = 
sema.code.getUnionDecl(zir_index); + if (zir_union.kind != .tagged_explicit) { + break :ty null; // enum tag type will be automatically generated + } + // Explicitly specified, so evaluate the enum tag type expression. + const tag_type_body = zir_union.arg_type_body.?; + const tag_type_src = block.src(.container_arg); + block.comptime_reason = .{ .reason = .{ + .src = tag_type_src, + .r = .{ .simple = .union_enum_tag_type }, + } }; + const type_ref = try sema.resolveInlineBody(&block, tag_type_body, zir_index); + break :ty try sema.analyzeAsType(&block, tag_type_src, .union_enum_tag_type, type_ref); + }; + const enum_tag_ty: Type = if (explicit_enum_tag_ty) |enum_tag_ty| ty: { + if (enum_tag_ty.zigTypeTag(zcu) != .@"enum") return sema.fail( + &block, + block.src(.container_arg), + "expected enum tag type, found '{f}'", + .{enum_tag_ty.fmt(pt)}, + ); + break :ty enum_tag_ty; + } else switch (try ip.getGeneratedEnumTagType(gpa, io, pt.tid, .{ + .union_type = union_ty.toIntern(), + // MLUGG TODO: a bit hacky icl + .int_tag_mode = mode: { + if (union_obj.is_reified) break :mode .auto; + const zir_union = sema.code.getUnionDecl(zir_index); + if (zir_union.kind != .tagged_enum_explicit) break :mode .auto; + break :mode .explicit; + }, + .fields_len = @intCast(union_obj.field_types.len), + })) { + .existing => |tag_ty| .fromInterned(tag_ty), + .wip => |wip| tag_ty: { + errdefer wip.cancel(ip, pt.tid); + _ = wip.setName(ip, try ip.getOrPutStringFmt( + gpa, + io, + pt.tid, + "@typeInfo({f}).@\"union\".tag_type.?", + .{union_obj.name.fmt(ip)}, + .no_embedded_nulls, + ), .none); + const new_namespace_index: InternPool.NamespaceIndex = try pt.createNamespace(.{ + .parent = union_obj.namespace.toOptional(), + .owner_type = wip.index, + .file_scope = zcu.namespacePtr(union_obj.namespace).file_scope, + .generation = zcu.generation, + }); + if (comp.debugIncremental()) try zcu.incremental_debug_state.newType(zcu, wip.index); + try zcu.outdated.ensureUnusedCapacity(gpa, 1); + try 
zcu.outdated_ready.ensureUnusedCapacity(gpa, 1); + errdefer comptime unreachable; // because we don't remove the `outdated` entry + zcu.outdated.putAssumeCapacityNoClobber(.wrap(.{ .type_layout = wip.index }), 0); + zcu.outdated_ready.putAssumeCapacityNoClobber(.wrap(.{ .type_layout = wip.index }), {}); + break :tag_ty .fromInterned(wip.finish(ip, new_namespace_index)); + }, + }; - const field_ty: Type = field_ty: { - block.comptime_reason = .{ .reason = .{ - .src = field_ty_src, - .r = .{ .simple = .union_field_types }, - } }; - const type_body = zir_field.type_body orelse break :field_ty .void; - const type_ref = try sema.resolveInlineBody(&block, type_body, zir_index); - break :field_ty try sema.analyzeAsType(&block, field_ty_src, type_ref); - }; - assert(!field_ty.isGenericPoison()); - union_obj.field_types.get(ip)[zir_field.idx] = field_ty.toIntern(); + try sema.ensureLayoutResolved(enum_tag_ty); + const enum_obj = ip.loadEnumType(enum_tag_ty.toIntern()); - try sema.ensureLayoutResolved(field_ty); + if (union_obj.is_reified) { + // We have field names in `union_obj.reified_field_names`, but we haven't + // checked them against the backing type yet. + const union_field_names = union_obj.reified_field_names.get(ip); + match_fields: { + // We can efficiently *check* if the fields match... + if (union_field_names.len == enum_obj.field_names.len) { + for (union_field_names, enum_obj.field_names.get(ip)) |union_field_name, enum_field_name| { + if (!std.mem.eql(u8, union_field_name.toSlice(ip), enum_field_name.toSlice(ip))) break; + } else { + break :match_fields; + } + } + // ...but if they don't, reporting a nice error is a little more involved. If some field + // is present in the enum but not the union, or vice versa, we will report that instead + // of a generic "field order mismatch" error. Of course, this error is impossible for a + // generated tag type, because we populated that from the union ZIR! 
+ assert(enum_obj.owner_union != union_ty.toIntern()); + return failUnionFieldMismatch(sema, &block, union_field_names, enum_tag_ty, &enum_obj); + } + } else { + // Declared unions do not have field types or aligns populated yet. + // We also need to check the field names match the backing enum. + try sema.declareDependency(.{ .src_hash = union_obj.zir_index }); + const zir_union = sema.code.getUnionDecl(zir_index); - const explicit_field_align: Alignment = a: { - block.comptime_reason = .{ .reason = .{ - .src = field_align_src, - .r = .{ .simple = .union_field_attrs }, - } }; - const align_body = zir_field.align_body orelse break :a .none; - const align_ref = try sema.resolveInlineBody(&block, align_body, zir_index); - break :a try sema.analyzeAsAlign(&block, field_align_src, align_ref); - }; - - if (union_obj.field_aligns.len != 0) { - union_obj.field_aligns.get(ip)[zir_field.idx] = explicit_field_align; - } else { - assert(explicit_field_align == .none); + // We'll first check the field names against the backing enum, and only analyze the types + // once we know the fields match one-to-one. + match_fields: { + // We can efficiently *check* if the fields match... + if (zir_union.field_names.len == enum_obj.field_names.len) { + for (zir_union.field_names, enum_obj.field_names.get(ip)) |union_field_name_zir, enum_field_name| { + const union_field_name_slice = sema.code.nullTerminatedString(union_field_name_zir); + if (!std.mem.eql(u8, union_field_name_slice, enum_field_name.toSlice(ip))) break; + } else { + break :match_fields; + } + } + // ...but if they don't, reporting a nice error is a little more involved. If some field + // is present in the enum but not the union, or vice versa, we will report that instead + // of a generic "field order mismatch" error. Of course, this error is impossible for a + // generated tag type, because we populated that from the union ZIR! 
+ assert(enum_obj.owner_union != union_ty.toIntern()); + const union_field_names = try sema.arena.alloc(InternPool.NullTerminatedString, zir_union.field_names.len); + for (zir_union.field_names, union_field_names) |name_zir, *name| { + name.* = try ip.getOrPutString(gpa, io, pt.tid, sema.code.nullTerminatedString(name_zir), .no_embedded_nulls); + } + return failUnionFieldMismatch(sema, &block, union_field_names, enum_tag_ty, &enum_obj); } + // Field names okay; populate types and aligns. + var field_it = zir_union.iterateFields(); + while (field_it.next()) |zir_field| { + const field_ty_src = block.src(.{ .container_field_type = zir_field.idx }); + const field_ty: Type = field_ty: { + block.comptime_reason = .{ .reason = .{ + .src = field_ty_src, + .r = .{ .simple = .union_field_types }, + } }; + const type_body = zir_field.type_body orelse break :field_ty .void; + const type_ref = try sema.resolveInlineBody(&block, type_body, zir_index); + break :field_ty try sema.analyzeAsType(&block, field_ty_src, .union_field_types, type_ref); + }; + union_obj.field_types.get(ip)[zir_field.idx] = field_ty.toIntern(); + + const field_align_src = block.src(.{ .container_field_align = zir_field.idx }); + const explicit_field_align: Alignment = a: { + block.comptime_reason = .{ .reason = .{ + .src = field_align_src, + .r = .{ .simple = .union_field_attrs }, + } }; + const align_body = zir_field.align_body orelse break :a .none; + const align_ref = try sema.resolveInlineBody(&block, align_body, zir_index); + break :a try sema.analyzeAsAlign(&block, field_align_src, align_ref); + }; + if (union_obj.field_aligns.len != 0) { + union_obj.field_aligns.get(ip)[zir_field.idx] = explicit_field_align; + } else { + assert(explicit_field_align == .none); + } + } + } + + if (union_obj.layout == .@"packed") { + return resolvePackedUnionLayout(sema, &block, union_ty, &union_obj, enum_tag_ty); + } + + // Resolve the layout of all fields, and check their types are allowed. 
+ for (union_obj.field_types.get(ip), 0..) |field_ty_ip, field_index| { + const field_ty: Type = .fromInterned(field_ty_ip); + assert(!field_ty.isGenericPoison()); + const field_ty_src = block.src(.{ .container_field_type = @intCast(field_index) }); + try sema.ensureLayoutResolved(field_ty); if (field_ty.zigTypeTag(zcu) == .@"opaque") { return sema.failWithOwnedErrorMsg(&block, msg: { const msg = try sema.errMsg(field_ty_src, "cannot directly embed opaque type '{f}' in union", .{field_ty.fmt(pt)}); @@ -583,7 +739,7 @@ pub fn resolveUnionLayout(sema: *Sema, union_ty: Type) CompileError!void { break :msg msg; }); } - if (union_obj.layout == .@"extern" and !try sema.validateExternType(field_ty, .union_field)) { + if (union_obj.layout == .@"extern" and !field_ty.validateExtern(.union_field, zcu)) { return sema.failWithOwnedErrorMsg(&block, msg: { const msg = try sema.errMsg(field_ty_src, "extern unions cannot contain fields of type '{f}'", .{field_ty.fmt(pt)}); errdefer msg.destroy(gpa); @@ -594,36 +750,11 @@ pub fn resolveUnionLayout(sema: *Sema, union_ty: Type) CompileError!void { } } - try finishUnionLayout( - sema, - &block, - union_ty.srcLoc(zcu), - union_ty.toIntern(), - &union_obj, - .fromInterned(union_obj.enum_tag_type), - ); -} - -/// Called after populating field types and alignments; populates overall union layout -/// information (size, alignment, comptime-only state, etc). -pub fn finishUnionLayout( - sema: *Sema, - /// Only used to report compile errors. - block: *Block, - union_src: LazySrcLoc, - union_ty: InternPool.Index, - union_obj: *const InternPool.LoadedUnionType, - enum_tag_ty: Type, -) SemaError!void { - const pt = sema.pt; - const zcu = pt.zcu; - const comp = zcu.comp; - const io = comp.io; - const ip = &zcu.intern_pool; - + // Fields are okay. Now we need to resolve the union's overall layout (size, alignment, etc). 
var payload_align: Alignment = .@"1"; var payload_size: u64 = 0; var comptime_only = false; + var has_runtime_bits = union_obj.runtime_tag != .none and enum_tag_ty.hasRuntimeBits(zcu); var possible_values: enum { none, one, many } = .none; for (0..union_obj.field_types.len) |field_idx| { const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[field_idx]); @@ -637,6 +768,7 @@ pub fn finishUnionLayout( payload_align = payload_align.maxStrict(field_align); payload_size = @max(payload_size, field_ty.abiSize(zcu)); if (field_ty.comptimeOnly(zcu)) comptime_only = true; + if (field_ty.hasRuntimeBits(zcu)) has_runtime_bits = true; if (!field_ty.isNoReturn(zcu)) { if (try field_ty.onePossibleValue(pt) != null) { possible_values = .many; // this field alone has many possible values @@ -664,78 +796,100 @@ pub fn finishUnionLayout( }; const casted_size = std.math.cast(u32, size) orelse return sema.fail( - block, - union_src, + &block, + union_ty.srcLoc(zcu), "union layout requires size {d}, this compiler implementation supports up to {d}", .{ size, std.math.maxInt(u32) }, ); ip.resolveUnionLayout( io, - union_ty, + union_ty.toIntern(), + enum_tag_ty.toIntern(), casted_size, @intCast(padding), // okay because padding is no greater than size alignment, possible_values == .none, // MLUGG TODO: make sure queries use `LoadedUnionType.has_no_possible_value`! 
possible_values == .one, comptime_only, + has_runtime_bits, ); } - -pub fn resolvePackedUnionLayout(sema: *Sema, union_ty: Type) CompileError!void { +fn failUnionFieldMismatch(sema: *Sema, block: *Block, union_field_names: []const InternPool.NullTerminatedString, enum_tag_ty: Type, enum_obj: *const InternPool.LoadedEnumType) CompileError { const pt = sema.pt; const zcu = pt.zcu; const comp = zcu.comp; const gpa = comp.gpa; const ip = &zcu.intern_pool; - - assert(sema.owner.unwrap().type_layout == union_ty.toIntern()); - - const union_obj = ip.loadUnionType(union_ty.toIntern()); - const zir_index = union_obj.zir_index.resolve(ip).?; - - assert(union_obj.layout == .@"packed"); - - try sema.declareDependency(.{ .src_hash = union_obj.zir_index }); - - var block: Block = .{ - .parent = null, - .sema = sema, - .namespace = union_obj.namespace, - .instructions = .{}, - .inlining = null, - .comptime_reason = undefined, // always set before using `block` - .src_base_inst = union_obj.zir_index, - .type_name_ctx = union_obj.name, - }; - defer assert(block.instructions.items.len == 0); - - const zir_union = sema.code.getUnionDecl(zir_index); - var field_it = zir_union.iterateFields(); - while (field_it.next()) |zir_field| { - const field_ty_src: LazySrcLoc = .{ - .base_node_inst = union_obj.zir_index, - .offset = .{ .container_field_type = zir_field.idx }, + const enum_to_union_map = try sema.arena.alloc(?u32, enum_obj.field_names.len); + @memset(enum_to_union_map, null); + for (union_field_names, 0..) 
|field_name, union_field_index| { + if (enum_obj.nameIndex(ip, field_name)) |enum_field_index| { + enum_to_union_map[enum_field_index] = @intCast(union_field_index); + continue; + } + const union_field_src = block.src(.{ .container_field_name = @intCast(union_field_index) }); + return sema.failWithOwnedErrorMsg(block, msg: { + const msg = try sema.errMsg(union_field_src, "no field named '{f}' in enum '{f}'", .{ field_name.fmt(ip), enum_tag_ty.fmt(pt) }); + errdefer msg.destroy(gpa); + try sema.addDeclaredHereNote(msg, enum_tag_ty); + break :msg msg; + }); + } + for (enum_to_union_map, 0..) |union_field_index, enum_field_index| { + if (union_field_index != null) continue; + const field_name_ip = enum_obj.field_names.get(ip)[enum_field_index]; + const enum_field_src: LazySrcLoc = .{ + .base_node_inst = enum_tag_ty.typeDeclInstAllowGeneratedTag(zcu).?, + .offset = .{ .container_field_name = @intCast(enum_field_index) }, }; - const field_ty: Type = field_ty: { - block.comptime_reason = .{ .reason = .{ - .src = field_ty_src, - .r = .{ .simple = .union_field_types }, - } }; - // MLUGG TODO: i think this should probably be a compile error? (if so, it's an astgen one, right?) - const type_body = zir_field.type_body orelse break :field_ty .void; - const type_ref = try sema.resolveInlineBody(&block, type_body, zir_index); - break :field_ty try sema.analyzeAsType(&block, field_ty_src, type_ref); + return sema.failWithOwnedErrorMsg(block, msg: { + const msg = try sema.errMsg(block.nodeOffset(.zero), "enum field '{f}' missing from union", .{field_name_ip.fmt(ip)}); + errdefer msg.destroy(gpa); + try sema.errNote(enum_field_src, msg, "enum field here", .{}); + break :msg msg; + }); + } + // The only problem is the field ordering. + for (enum_to_union_map, 0..) |union_field_index, enum_field_index| { + if (union_field_index.? 
== enum_field_index) continue; + const field_name = enum_obj.field_names.get(ip)[enum_field_index]; + const union_field_src = block.src(.{ .container_field_name = union_field_index.? }); + const enum_field_src: LazySrcLoc = .{ + .base_node_inst = enum_tag_ty.typeDeclInstAllowGeneratedTag(zcu).?, + .offset = .{ .container_field_name = @intCast(enum_field_index) }, }; + return sema.failWithOwnedErrorMsg(block, msg: { + const msg = try sema.errMsg(block.nodeOffset(.zero), "union field order does not match tag enum field order", .{}); + errdefer msg.destroy(gpa); + try sema.errNote(union_field_src, msg, "union field '{f}' is index {d}", .{ field_name.fmt(ip), union_field_index.? }); + try sema.errNote(enum_field_src, msg, "enum field '{f}' is index {d}", .{ field_name.fmt(ip), enum_field_index }); + break :msg msg; + }); + } + unreachable; // we already determined that *something* is wrong +} +fn resolvePackedUnionLayout( + sema: *Sema, + block: *Block, + union_ty: Type, + union_obj: *const InternPool.LoadedUnionType, + enum_tag_ty: Type, +) CompileError!void { + const pt = sema.pt; + const zcu = pt.zcu; + const comp = zcu.comp; + const io = comp.io; + const gpa = comp.gpa; + const ip = &zcu.intern_pool; + + // Resolve the layout of all fields, and check their types are allowed. + for (union_obj.field_types.get(ip), 0..) 
|field_ty_ip, field_index| { + const field_ty: Type = .fromInterned(field_ty_ip); assert(!field_ty.isGenericPoison()); - union_obj.field_types.get(ip)[zir_field.idx] = field_ty.toIntern(); - - assert(zir_field.align_body == null); // packed union fields cannot be aligned - assert(zir_field.value_body == null); // packed union fields cannot have tag values - + const field_ty_src = block.src(.{ .container_field_type = @intCast(field_index) }); try sema.ensureLayoutResolved(field_ty); - if (field_ty.zigTypeTag(zcu) == .@"opaque") { - return sema.failWithOwnedErrorMsg(&block, msg: { + return sema.failWithOwnedErrorMsg(block, msg: { const msg = try sema.errMsg(field_ty_src, "cannot directly embed opaque type '{f}' in union", .{field_ty.fmt(pt)}); errdefer msg.destroy(gpa); try sema.errNote(field_ty_src, msg, "opaque types have unknown size", .{}); @@ -743,132 +897,109 @@ pub fn resolvePackedUnionLayout(sema: *Sema, union_ty: Type) CompileError!void { break :msg msg; }); } - if (!field_ty.packable(zcu)) { - return sema.failWithOwnedErrorMsg(&block, msg: { - const msg = try sema.errMsg(field_ty_src, "packed unions cannot contain fields of type '{f}'", .{field_ty.fmt(pt)}); - errdefer msg.destroy(gpa); - try sema.explainWhyTypeIsNotPackable(msg, field_ty_src, field_ty); - try sema.addDeclaredHereNote(msg, field_ty); - break :msg msg; - }); - } + if (field_ty.unpackable(zcu)) |reason| return sema.failWithOwnedErrorMsg(block, msg: { + const msg = try sema.errMsg(field_ty_src, "packed unions cannot contain fields of type '{f}'", .{field_ty.fmt(pt)}); + errdefer msg.destroy(gpa); + try sema.explainWhyTypeIsUnpackable(msg, field_ty_src, reason); + try sema.addDeclaredHereNote(msg, field_ty); + break :msg msg; + }); assert(!field_ty.comptimeOnly(zcu)); // packable types are not comptime-only } - try resolvePackedUnionBackingInt(sema, &block, union_ty, &union_obj, false); + const explicit_backing_int_ty: ?Type = if (union_obj.is_reified) ty: { + switch 
(union_obj.packed_backing_mode) { + .explicit => break :ty .fromInterned(union_obj.packed_backing_int_type), + .auto => break :ty null, + } + } else ty: { + const zir_index = union_obj.zir_index.resolve(ip).?; + const zir_union = sema.code.getUnionDecl(zir_index); + const backing_int_type_body = zir_union.arg_type_body orelse { + break :ty null; // inferred backing type + }; + // Explicitly specified, so evaluate the backing int type expression. + const backing_int_type_src = block.src(.container_arg); + block.comptime_reason = .{ .reason = .{ + .src = backing_int_type_src, + .r = .{ .simple = .packed_union_backing_int_type }, + } }; + const type_ref = try sema.resolveInlineBody(block, backing_int_type_body, zir_index); + break :ty try sema.analyzeAsType(block, backing_int_type_src, .packed_union_backing_int_type, type_ref); + }; + + // Finally, either validate or infer the backing int type. + const backing_int_ty: Type = if (explicit_backing_int_ty) |backing_ty| ty: { + const backing_int_bits = backing_ty.intInfo(zcu).bits; + for (union_obj.field_types.get(ip), 0..) 
|field_type_ip, field_idx| { + const field_type: Type = .fromInterned(field_type_ip); + const field_bits = field_type.bitSize(zcu); + if (field_bits != backing_int_bits) return sema.failWithOwnedErrorMsg(block, msg: { + const field_ty_src = block.src(.{ .container_field_type = @intCast(field_idx) }); + const msg = try sema.errMsg(field_ty_src, "field bit width does not match backing integer", .{}); + errdefer msg.destroy(gpa); + try sema.errNote(field_ty_src, msg, "field type '{f}' has bit width '{d}'", .{ field_type.fmt(pt), field_bits }); + try sema.errNote(field_ty_src, msg, "backing integer '{f}' has bit width '{d}'", .{ backing_ty.fmt(pt), backing_int_bits }); + try sema.errNote(field_ty_src, msg, "all fields in a packed union must have the same bit width", .{}); + break :msg msg; + }); + } + break :ty backing_ty; + } else if (union_obj.field_types.len == 0) ty: { + // Special case: there is no first field to infer the type from. Treat the union as empty (zero-bit). + break :ty .u0; + } else ty: { + const field_types = union_obj.field_types.get(ip); + const first_field_type: Type = .fromInterned(field_types[0]); + const first_field_bits = first_field_type.bitSize(zcu); + for (field_types[1..], 1..) 
|field_type_ip, field_idx| { + const field_type: Type = .fromInterned(field_type_ip); + const field_bits = field_type.bitSize(zcu); + if (field_bits != first_field_bits) return sema.failWithOwnedErrorMsg(block, msg: { + const first_field_ty_src = block.src(.{ .container_field_type = 0 }); + const field_ty_src = block.src(.{ .container_field_type = @intCast(field_idx) }); + const msg = try sema.errMsg(field_ty_src, "field bit width does not match earlier field", .{}); + errdefer msg.destroy(gpa); + try sema.errNote(field_ty_src, msg, "field type '{f}' has bit width '{d}'", .{ field_type.fmt(pt), field_bits }); + try sema.errNote(first_field_ty_src, msg, "other field type '{f}' has bit width '{d}'", .{ first_field_type.fmt(pt), first_field_bits }); + try sema.errNote(field_ty_src, msg, "all fields in a packed union must have the same bit width", .{}); + break :msg msg; + }); + } + const backing_int_bits = std.math.cast(u16, first_field_bits) orelse return sema.fail( + block, + union_ty.srcLoc(zcu), + "packed union bit width '{d}' exceeds maximum bit width of 65535", + .{first_field_bits}, + ); + break :ty try pt.intType(.unsigned, backing_int_bits); + }; + ip.resolvePackedUnionLayout( + io, + union_ty.toIntern(), + enum_tag_ty.toIntern(), + backing_int_ty.toIntern(), + ); } -/// MLUGG TODO doc comment; asserts all fields are resolved or whatever -pub fn resolvePackedUnionBackingInt( - sema: *Sema, - block: *Block, - union_ty: Type, - union_obj: *const InternPool.LoadedUnionType, - is_reified: bool, -) SemaError!void { +pub fn resolveEnumLayout(sema: *Sema, enum_ty: Type) CompileError!void { const pt = sema.pt; const zcu = pt.zcu; const comp = zcu.comp; - const gpa = comp.gpa; const io = comp.io; - const ip = &zcu.intern_pool; - switch (union_obj.packed_backing_mode) { - .explicit => { - const backing_int_type: Type = .fromInterned(union_obj.packed_backing_int_type); - const backing_int_bits = backing_int_type.intInfo(zcu).bits; - for (union_obj.field_types.get(ip), 
0..) |field_type_ip, field_idx| { - const field_type: Type = .fromInterned(field_type_ip); - const field_bits = field_type.bitSize(zcu); - if (field_bits != backing_int_bits) return sema.failWithOwnedErrorMsg(block, msg: { - const field_ty_src: LazySrcLoc = .{ - .base_node_inst = union_obj.zir_index, - .offset = if (is_reified) - .nodeOffset(.zero) - else - .{ .container_field_type = @intCast(field_idx) }, - }; - const msg = try sema.errMsg(field_ty_src, "field bit width does not match backing integer", .{}); - errdefer msg.destroy(gpa); - try sema.errNote(field_ty_src, msg, "field type '{f}' has bit width '{d}'", .{ field_type.fmt(pt), field_bits }); - try sema.errNote(field_ty_src, msg, "backing integer '{f}' has bit width '{d}'", .{ backing_int_type.fmt(pt), backing_int_bits }); - try sema.errNote(field_ty_src, msg, "all fields in a packed union must have the same bit width", .{}); - break :msg msg; - }); - } - }, - .auto => switch (union_obj.field_types.len) { - 0 => ip.resolvePackedUnionBackingInt(io, union_ty.toIntern(), .u0_type), - else => { - const field_types = union_obj.field_types.get(ip); - const first_field_type: Type = .fromInterned(field_types[0]); - const first_field_bits = first_field_type.bitSize(zcu); - for (field_types[1..], 1..) 
|field_type_ip, field_idx| { - const field_type: Type = .fromInterned(field_type_ip); - const field_bits = field_type.bitSize(zcu); - if (field_bits != first_field_bits) return sema.failWithOwnedErrorMsg(block, msg: { - const first_field_ty_src: LazySrcLoc = .{ - .base_node_inst = union_obj.zir_index, - .offset = if (is_reified) - .nodeOffset(.zero) - else - .{ .container_field_type = 0 }, - }; - const field_ty_src: LazySrcLoc = .{ - .base_node_inst = union_obj.zir_index, - .offset = if (is_reified) - .nodeOffset(.zero) - else - .{ .container_field_type = @intCast(field_idx) }, - }; - const msg = try sema.errMsg(field_ty_src, "field bit width does not match earlier field", .{}); - errdefer msg.destroy(gpa); - try sema.errNote(field_ty_src, msg, "field type '{f}' has bit width '{d}'", .{ field_type.fmt(pt), field_bits }); - try sema.errNote(first_field_ty_src, msg, "other field type '{f}' has bit width '{d}'", .{ first_field_type.fmt(pt), first_field_bits }); - try sema.errNote(field_ty_src, msg, "all fields in a packed union must have the same bit width", .{}); - break :msg msg; - }); - } - const backing_int_bits = std.math.cast(u16, first_field_bits) orelse return sema.fail( - block, - block.nodeOffset(.zero), - "packed union bit width '{d}' exceeds maximum bit width of 65535", - .{first_field_bits}, - ); - const backing_int_type = try pt.intType(.unsigned, backing_int_bits); - ip.resolvePackedUnionBackingInt(io, union_ty.toIntern(), backing_int_type.toIntern()); - }, - }, - } -} - -/// Asserts that `enum_ty` is an enum and that `sema.owner` is that type. -/// This function *does* register the `src_hash` dependency on the enum. 
-pub fn resolveEnumValues(sema: *Sema, enum_ty: Type) CompileError!void { - const pt = sema.pt; - const zcu = pt.zcu; - const comp = zcu.comp; const gpa = comp.gpa; const ip = &zcu.intern_pool; - assert(sema.owner.unwrap().type_inits == enum_ty.toIntern()); + assert(sema.owner.unwrap().type_layout == enum_ty.toIntern()); const enum_obj = ip.loadEnumType(enum_ty.toIntern()); - // We'll populate this map. - const field_value_map = enum_obj.field_value_map.unwrap() orelse { - // The enum has an automatically generated tag and is auto-numbered. We know that we have - // generated a suitably large type in `analyzeEnumDecl`, so we have no work to do. - return; - }; - const maybe_parent_union_obj: ?InternPool.LoadedUnionType = un: { if (enum_obj.owner_union == .none) break :un null; break :un ip.loadUnionType(enum_obj.owner_union); }; - const tracked_inst = enum_obj.zir_index.unwrap() orelse maybe_parent_union_obj.?.zir_index; - const zir_index = tracked_inst.resolve(ip).?; - try sema.declareDependency(.{ .src_hash = tracked_inst }); + const tracked_inst = enum_obj.zir_index.unwrap() orelse maybe_parent_union_obj.?.zir_index; var block: Block = .{ .parent = null, @@ -882,7 +1013,139 @@ pub fn resolveEnumValues(sema: *Sema, enum_ty: Type) CompileError!void { }; defer assert(block.instructions.items.len == 0); - const int_tag_ty: Type = .fromInterned(enum_obj.int_tag_type); + // There may be old field names in the map from a previous update. + enum_obj.field_name_map.get(ip).clearRetainingCapacity(); + + if (maybe_parent_union_obj) |*union_obj| { + if (union_obj.is_reified) { + // In the case of reification, the union stores the field names, just for us to copy. + @memcpy(enum_obj.field_names.get(ip), union_obj.reified_field_names.get(ip)); + // The list of field names is now populated, but we haven't checked for duplicates yet, + // nor have we populated the hash map. 
+ for (0..enum_obj.field_names.len) |field_index| { + const name = enum_obj.field_names.get(ip)[field_index]; + if (ip.addFieldName(enum_obj.field_names, enum_obj.field_name_map, name)) |prev_field_index| { + return sema.failWithOwnedErrorMsg(&block, msg: { + const src = block.nodeOffset(.zero); + const msg = try sema.errMsg(src, "duplicate union field '{f}' at index '{d}'", .{ name.fmt(ip), field_index }); + errdefer msg.destroy(gpa); + try sema.errNote(src, msg, "previous field at index '{d}'", .{prev_field_index}); + break :msg msg; + }); + } + } + } else { + // Generated tag enums for declared unions do not yet have field names populated. It is + // our job to populate them now. + try sema.declareDependency(.{ .src_hash = union_obj.zir_index }); + const zir_union = sema.code.getUnionDecl(union_obj.zir_index.resolve(ip).?); + for (zir_union.field_names) |zir_field_name| { + const name_slice = sema.code.nullTerminatedString(zir_field_name); + const name = try ip.getOrPutString(gpa, io, pt.tid, name_slice, .no_embedded_nulls); + assert(ip.addFieldName(enum_obj.field_names, enum_obj.field_name_map, name) == null); // AstGen validated this for us + } + } + } else { + if (enum_obj.is_reified) { + // The field names are populated, but we haven't checked for duplicates (nor populated the map) yet. + for (0..enum_obj.field_names.len) |field_index| { + const name = enum_obj.field_names.get(ip)[field_index]; + if (ip.addFieldName(enum_obj.field_names, enum_obj.field_name_map, name)) |prev_field_index| { + return sema.failWithOwnedErrorMsg(&block, msg: { + const src = block.nodeOffset(.zero); + const msg = try sema.errMsg(src, "duplicate enum field '{f}' at index '{d}'", .{ name.fmt(ip), field_index }); + errdefer msg.destroy(gpa); + try sema.errNote(src, msg, "previous field at index '{d}'", .{prev_field_index}); + break :msg msg; + }); + } + } + } else { + // Declared enums do not yet have field names populated. It is our job to populate them now.
+ try sema.declareDependency(.{ .src_hash = enum_obj.zir_index.unwrap().? }); + const zir_enum = sema.code.getEnumDecl(enum_obj.zir_index.unwrap().?.resolve(ip).?); + for (zir_enum.field_names) |zir_field_name| { + const name_slice = sema.code.nullTerminatedString(zir_field_name); + const name = try ip.getOrPutString(gpa, io, pt.tid, name_slice, .no_embedded_nulls); + assert(ip.addFieldName(enum_obj.field_names, enum_obj.field_name_map, name) == null); // AstGen validated this for us + } + } + } + + // Field names populated; now deal with the backing integer type. If explicitly provided, + // validate it; otherwise, infer it. + + const explicit_int_tag_ty: ?Type = if (enum_obj.is_reified) ty: { + break :ty switch (enum_obj.int_tag_mode) { + .explicit => .fromInterned(enum_obj.int_tag_type), + .auto => null, + }; + } else if (maybe_parent_union_obj) |*union_obj| ty: { + if (union_obj.is_reified) { + // Reification has no equivalent of 'union(enum(T))'. + break :ty null; + } + const zir_index = union_obj.zir_index.resolve(ip).?; + const zir_union = sema.code.getUnionDecl(zir_index); + if (zir_union.kind != .tagged_enum_explicit) { + break :ty null; // int tag type will be inferred + } + // Explicitly specified, so evaluate the int tag type expression. + const tag_type_body = zir_union.arg_type_body.?; + const tag_type_src = block.src(.container_arg); + block.comptime_reason = .{ .reason = .{ + .src = tag_type_src, + .r = .{ .simple = .enum_int_tag_type }, + } }; + const type_ref = try sema.resolveInlineBody(&block, tag_type_body, zir_index); + break :ty try sema.analyzeAsType(&block, tag_type_src, .enum_int_tag_type, type_ref); + } else ty: { + const zir_index = enum_obj.zir_index.unwrap().?.resolve(ip).?; + const zir_enum = sema.code.getEnumDecl(zir_index); + const tag_type_body = zir_enum.tag_type_body orelse { + break :ty null; // int tag type will be inferred + }; + // Explicitly specified, so evaluate the int tag type expression. 
+ const tag_type_src = block.src(.container_arg); + block.comptime_reason = .{ .reason = .{ + .src = tag_type_src, + .r = .{ .simple = .enum_int_tag_type }, + } }; + const type_ref = try sema.resolveInlineBody(&block, tag_type_body, zir_index); + break :ty try sema.analyzeAsType(&block, tag_type_src, .enum_int_tag_type, type_ref); + }; + const int_tag_ty: Type = if (explicit_int_tag_ty) |int_tag_ty| ty: { + if (int_tag_ty.zigTypeTag(zcu) != .int) return sema.fail( + &block, + block.src(.container_arg), + "expected integer tag type, found '{f}'", + .{int_tag_ty.fmt(pt)}, + ); + break :ty int_tag_ty; + } else ty: { + // Infer the int tag type from the field count + const bits = Type.smallestUnsignedBits(enum_obj.field_names.len -| 1); + break :ty try pt.intType(.unsigned, bits); + }; + + ip.resolveEnumLayout(io, enum_ty.toIntern(), int_tag_ty.toIntern()); + + // Finally, deal with field values. For declared types we need to analyze the expressions, while + // reified types already have them populated; but either way, we need to populate the hash map + // (and validate the values along the way). + + // We'll populate this map. + const field_value_map = enum_obj.field_value_map.unwrap() orelse { + // The enum is auto-numbered with an inferred tag type. We know that the tag type generated + // earlier is sufficient for the number of fields, so we have nothing more to do. + assert(enum_obj.int_tag_mode == .auto); + return; + }; + + // There may be old field values in here from a previous update. + field_value_map.get(ip).clearRetainingCapacity(); + + const zir_index = tracked_inst.resolve(ip).?; // Map the enum (or union) decl instruction to provide the tag type as the result type try sema.inst_map.ensureSpaceForInstructions(gpa, &.{zir_index}); @@ -891,36 +1154,38 @@ pub fn resolveEnumValues(sema: *Sema, enum_ty: Type) CompileError!void { // First, populate any explicitly provided values. 
This is the part that actually depends on // the ZIR, and hence depends on whether this is a declared or generated enum. If any explicit - // value is invalid, we'll emit an error here. + // value is straight-up invalid, we'll emit an error here. if (maybe_parent_union_obj) |union_obj| { - const zir_union = sema.code.getUnionDecl(zir_index); - var field_it = zir_union.iterateFields(); - while (field_it.next()) |zir_field| { - const field_val_src: LazySrcLoc = .{ - .base_node_inst = union_obj.zir_index, - .offset = .{ .container_field_value = zir_field.idx }, - }; - block.comptime_reason = .{ .reason = .{ - .src = field_val_src, - .r = .{ .simple = .enum_field_values }, - } }; - const value_body = zir_field.value_body orelse { - enum_obj.field_values.get(ip)[zir_field.idx] = .none; - continue; - }; - const uncoerced = try sema.resolveInlineBody(&block, value_body, zir_index); - const coerced = try sema.coerce(&block, int_tag_ty, uncoerced, field_val_src); - const val = try sema.resolveConstValue(&block, field_val_src, coerced, null); - enum_obj.field_values.get(ip)[zir_field.idx] = val.toIntern(); + if (union_obj.is_reified) { + // Generated tag type for reified union; values already populated. + } else { + // Generated tag type for declared union; evaluate the expressions given in the union declaration. 
+ const zir_union = sema.code.getUnionDecl(zir_index); + var field_it = zir_union.iterateFields(); + while (field_it.next()) |zir_field| { + const field_val_src = block.src(.{ .container_field_value = zir_field.idx }); + block.comptime_reason = .{ .reason = .{ + .src = field_val_src, + .r = .{ .simple = .enum_field_values }, + } }; + const value_body = zir_field.value_body orelse { + enum_obj.field_values.get(ip)[zir_field.idx] = .none; + continue; + }; + const uncoerced = try sema.resolveInlineBody(&block, value_body, zir_index); + const coerced = try sema.coerce(&block, int_tag_ty, uncoerced, field_val_src); + const val = try sema.resolveConstValue(&block, field_val_src, coerced, null); + enum_obj.field_values.get(ip)[zir_field.idx] = val.toIntern(); + } } + } else if (enum_obj.is_reified) { + // Reified enum; values already populated. } else { + // Declared enum; evaluate the expressions given in the enum declaration. const zir_enum = sema.code.getEnumDecl(zir_index); var field_it = zir_enum.iterateFields(); while (field_it.next()) |zir_field| { - const field_val_src: LazySrcLoc = .{ - .base_node_inst = enum_obj.zir_index.unwrap().?, - .offset = .{ .container_field_value = zir_field.idx }, - }; + const field_val_src = block.src(.{ .container_field_value = zir_field.idx }); block.comptime_reason = .{ .reason = .{ .src = field_val_src, .r = .{ .simple = .enum_field_values }, @@ -940,14 +1205,14 @@ pub fn resolveEnumValues(sema: *Sema, enum_ty: Type) CompileError!void { // field values. This is also where we'll detect duplicates. for (0..enum_obj.field_names.len) |field_idx| { - const field_val_src: LazySrcLoc = .{ - .base_node_inst = tracked_inst, - .offset = .{ .container_field_value = @intCast(field_idx) }, - }; + const field_val_src = block.src(.{ .container_field_value = @intCast(field_idx) }); // If the field value was not specified, compute the implicit value. 
const field_val = val: { const explicit_val = enum_obj.field_values.get(ip)[field_idx]; - if (explicit_val != .none) break :val explicit_val; + if (explicit_val != .none) { + assert(ip.typeOf(explicit_val) == int_tag_ty.toIntern()); + break :val explicit_val; + } if (field_idx == 0) { // Implicit value is 0, which is valid for every integer type. const val = (try pt.intValue(int_tag_ty, 0)).toIntern(); @@ -967,23 +1232,23 @@ pub fn resolveEnumValues(sema: *Sema, enum_ty: Type) CompileError!void { enum_obj.field_values.get(ip)[field_idx] = val; break :val val; }; - const adapter: InternPool.Index.Adapter = .{ .indexes = enum_obj.field_values.get(ip)[0..field_idx] }; - const gop = field_value_map.get(ip).getOrPutAssumeCapacityAdapted(field_val, adapter); - if (!gop.found_existing) continue; - const prev_field_val_src: LazySrcLoc = .{ - .base_node_inst = tracked_inst, - .offset = .{ .container_field_value = @intCast(gop.index) }, - }; - return sema.failWithOwnedErrorMsg(&block, msg: { - const msg = try sema.errMsg(field_val_src, "enum tag value '{f}' already taken", .{ - Value.fromInterned(field_val).fmtValueSema(pt, sema), + if (ip.addFieldTagValue(enum_obj.field_values, field_value_map, field_val)) |prev_field_index| { + return sema.failWithOwnedErrorMsg(&block, msg: { + const prev_field_val_src = block.src(.{ .container_field_value = prev_field_index }); + const msg = try sema.errMsg(field_val_src, "enum tag value '{f}' for field '{f}' already taken", .{ + Value.fromInterned(field_val).fmtValueSema(pt, sema), + enum_obj.field_names.get(ip)[field_idx].fmt(ip), + }); + errdefer msg.destroy(gpa); + try sema.errNote(prev_field_val_src, msg, "previous occurrence in field '{f}'", .{ + enum_obj.field_names.get(ip)[prev_field_index].fmt(ip), + }); + break :msg msg; }); - errdefer msg.destroy(gpa); - try sema.errNote(prev_field_val_src, msg, "previous occurrence here", .{}); - break :msg msg; - }); + } } + // MLUGG TODO: fate of this line rests on whether comptime_int is a 
valid int tag type if (enum_obj.nonexhaustive and int_tag_ty.toIntern() != .comptime_int_type) { const fields_len = enum_obj.field_names.len; if (fields_len >= 1 and std.math.log2_int(u64, fields_len) == int_tag_ty.bitSize(zcu)) { diff --git a/src/Type.zig b/src/Type.zig index 52dc5ed1eb..111f6347ed 100644 --- a/src/Type.zig +++ b/src/Type.zig @@ -437,6 +437,7 @@ pub fn toValue(self: Type) Value { /// - an enum with an explicit tag type has the ABI size of the integer tag type, /// making it one-possible-value only if the integer tag type has 0 bits. pub fn hasRuntimeBits(ty: Type, zcu: *const Zcu) bool { + ty.assertHasLayout(zcu); const ip = &zcu.intern_pool; return switch (ip.indexToKey(ty.toIntern())) { .int_type => |int_type| int_type.bits != 0, @@ -499,14 +500,18 @@ pub fn hasRuntimeBits(ty: Type, zcu: *const Zcu) bool { .generic_poison => unreachable, }, .struct_type => { - // TODO MLUGG: memoize this state when resolving struct? const struct_obj = ip.loadStructType(ty.toIntern()); - for (struct_obj.field_types.get(ip), 0..) |field_ty_ip, field_idx| { - if (struct_obj.field_is_comptime_bits.get(ip, field_idx)) continue; - const field_ty: Type = .fromInterned(field_ty_ip); - if (field_ty.hasRuntimeBits(zcu)) return true; + switch (struct_obj.layout) { + .auto, .@"extern" => return struct_obj.has_runtime_bits, + .@"packed" => return Type.fromInterned(struct_obj.packed_backing_int_type).hasRuntimeBits(zcu), + } + }, + .union_type => { + const union_obj = ip.loadUnionType(ty.toIntern()); + switch (union_obj.layout) { + .auto, .@"extern" => return union_obj.has_runtime_bits, + .@"packed" => return Type.fromInterned(union_obj.packed_backing_int_type).hasRuntimeBits(zcu), } - return false; }, .tuple_type => |tuple| { for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| { @@ -515,23 +520,8 @@ pub fn hasRuntimeBits(ty: Type, zcu: *const Zcu) bool { } return false; }, - .union_type => { - // TODO MLUGG: memoize this state when resolving union? 
- const union_obj = ip.loadUnionType(ty.toIntern()); - switch (union_obj.runtime_tag) { - .none => {}, - .safety, .tagged => { - if (Type.fromInterned(union_obj.enum_tag_type).hasRuntimeBits(zcu)) return true; - }, - } - for (union_obj.field_types.get(ip)) |field_ty_ip| { - const field_ty: Type = .fromInterned(field_ty_ip); - if (field_ty.hasRuntimeBits(zcu)) return true; - } - return false; - }, - // MLUGG TODO: i think this can go away and the assert move to the defer? + // MLUGG TODO: this answer was already here but... does it actually make sense? .opaque_type => true, .enum_type => Type.fromInterned(ip.loadEnumType(ty.toIntern()).int_tag_type).hasRuntimeBits(zcu), @@ -618,17 +608,18 @@ pub fn hasWellDefinedLayout(ty: Type, zcu: *const Zcu) bool { .generic_poison, => false, }, - .struct_type => ip.loadStructType(ty.toIntern()).layout != .auto, - .union_type => { - const union_obj = ip.loadUnionType(ty.toIntern()); - if (union_obj.layout == .auto) return false; - return switch (union_obj.runtime_tag) { - .none => true, - .tagged => false, - .safety => unreachable, // well-defined layout can't have a safety tag - }; + .struct_type => switch (ip.loadStructType(ty.toIntern()).layout) { + .auto => false, + .@"extern", .@"packed" => true, + }, + .union_type => switch (ip.loadUnionType(ty.toIntern()).layout) { + .auto => false, + .@"extern", .@"packed" => true, + }, + .enum_type => switch (ip.loadEnumType(ty.toIntern()).int_tag_mode) { + .explicit => true, + .auto => false, }, - .enum_type => ip.loadEnumType(ty.toIntern()).int_tag_is_explicit, // values, not types .undef, @@ -664,28 +655,29 @@ pub fn fnHasRuntimeBits(fn_ty: Type, zcu: *Zcu) bool { if (param_ty == .generic_poison_type) return false; if (Type.fromInterned(param_ty).comptimeOnly(zcu)) return false; } + const ret_ty: Type = .fromInterned(fn_info.return_type); + if (ret_ty.toIntern() == .generic_poison_type) { + return false; + } + if (ret_ty.zigTypeTag(zcu) == .error_union and + 
ret_ty.errorUnionPayload(zcu).toIntern() == .generic_poison_type) + { + return false; + } if (fn_info.return_type == .generic_poison_type) return false; if (Type.fromInterned(fn_info.return_type).comptimeOnly(zcu)) return false; if (fn_info.cc == .@"inline") return false; return true; } -pub fn isFnOrHasRuntimeBits(ty: Type, zcu: *Zcu) bool { +/// Like `hasRuntimeBits`, but also returns `true` for runtime functions. +pub fn isRuntimeFnOrHasRuntimeBits(ty: Type, zcu: *Zcu) bool { switch (ty.zigTypeTag(zcu)) { .@"fn" => return ty.fnHasRuntimeBits(zcu), else => return ty.hasRuntimeBits(zcu), } } -/// Same as `isFnOrHasRuntimeBits` but comptime-only types may return a false positive. -/// MLUGG TODO: this function is a bit silly now... -pub fn isFnOrHasRuntimeBitsIgnoreComptime(ty: Type, zcu: *Zcu) bool { - return switch (ty.zigTypeTag(zcu)) { - .@"fn" => true, - else => return ty.hasRuntimeBits(zcu), - }; -} - pub fn isNoReturn(ty: Type, zcu: *const Zcu) bool { return zcu.intern_pool.isNoReturn(ty.toIntern()); } @@ -711,7 +703,6 @@ pub fn ptrAddressSpace(ty: Type, zcu: *const Zcu) std.builtin.AddressSpace { } /// Never returns `none`. Asserts that all necessary type resolution is already done. 
-/// MLUGG TODO: check that it really does never return `.none` pub fn abiAlignment(ty: Type, zcu: *const Zcu) Alignment { const ip = &zcu.intern_pool; const target = zcu.getTarget(); @@ -810,7 +801,7 @@ pub fn abiAlignment(ty: Type, zcu: *const Zcu) Alignment { for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| { if (val != .none) continue; // comptime field const field_align = Type.fromInterned(field_ty).abiAlignment(zcu); - big_align = big_align.max(field_align); + big_align = big_align.maxStrict(field_align); } return big_align; }, @@ -818,14 +809,20 @@ pub fn abiAlignment(ty: Type, zcu: *const Zcu) Alignment { const struct_obj = ip.loadStructType(ty.toIntern()); switch (struct_obj.layout) { .@"packed" => return Type.fromInterned(struct_obj.packed_backing_int_type).abiAlignment(zcu), - .auto, .@"extern" => return struct_obj.alignment, + .auto, .@"extern" => { + assert(struct_obj.alignment != .none); + return struct_obj.alignment; + }, } }, .union_type => { const union_obj = ip.loadUnionType(ty.toIntern()); switch (union_obj.layout) { .@"packed" => return Type.fromInterned(union_obj.packed_backing_int_type).abiAlignment(zcu), - .auto, .@"extern" => return getUnionLayout(union_obj, zcu).abi_align, + .auto, .@"extern" => { + assert(union_obj.alignment != .none); + return union_obj.alignment; + }, } }, .enum_type => Type.fromInterned(ip.loadEnumType(ty.toIntern()).int_tag_type).abiAlignment(zcu), @@ -1277,38 +1274,17 @@ pub fn nullablePtrElem(ty: Type, zcu: *const Zcu) Type { } } -/// Given that `ty` is an indexable pointer, returns its element type. Specifically: -/// * for `*[n]T`, returns `T` -/// * for `*@Vector(n, T)`, returns `T` -/// * for `[]T`, returns `T` -/// * for `[*]T`, returns `T` -/// * for `[*c]T`, returns `T` +/// Asserts that `ty` is an indexable type, and returns its element type. Tuples (and pointers to +/// tuples) are not supported because they do not have a single element type. 
/// -/// Tuples are not supported because they do not have a single element type. -/// -/// MLUGG TODO: should i even have this one? it's a subset of indexableElem -pub fn indexablePtrElem(ty: Type, zcu: *const Zcu) Type { - const ip = &zcu.intern_pool; - const ptr_type = ip.indexToKey(ty.toIntern()).ptr_type; - return switch (ptr_type.flags.size) { - .many, .slice, .c => return .fromInterned(ptr_type.child), - .one => switch (ip.indexToKey(ptr_type.child)) { - inline .array_type, .vector_type => |arr| return .fromInterned(arr.child), - else => unreachable, - }, - }; -} - -/// Given that `ty` is an indexable type, returns its element type. Specifically: -/// * for `[n]T`, returns `T` -/// * for `@Vector(n, T)`, returns `T` -/// * for `*[n]T`, returns `T` -/// * for `*@Vector(n, T)`, returns `T` -/// * for `[]T`, returns `T` -/// * for `[*]T`, returns `T` -/// * for `[*c]T`, returns `T` -/// -/// Tuples are not supported because they do not have a single element type. +/// Returns `T` for each of the following types: +/// * `[n]T` +/// * `@Vector(n, T)` +/// * `*[n]T` +/// * `*@Vector(n, T)` +/// * `[]T` +/// * `[*]T` +/// * `[*c]T` pub fn indexableElem(ty: Type, zcu: *const Zcu) Type { const ip = &zcu.intern_pool; return switch (ip.indexToKey(ty.toIntern())) { @@ -1348,6 +1324,7 @@ pub fn optionalChild(ty: Type, zcu: *const Zcu) Type { /// Returns the tag type of a union, if the type is a union and it has a tag type. /// Otherwise, returns `null`. pub fn unionTagType(ty: Type, zcu: *const Zcu) ?Type { + assertHasLayout(ty, zcu); const ip = &zcu.intern_pool; switch (ip.indexToKey(ty.toIntern())) { .union_type => {}, @@ -1363,6 +1340,7 @@ pub fn unionTagType(ty: Type, zcu: *const Zcu) ?Type { /// Same as `unionTagType` but includes safety tag. /// Codegen should use this version. 
pub fn unionTagTypeSafety(ty: Type, zcu: *const Zcu) ?Type { + assertHasLayout(ty, zcu); const ip = &zcu.intern_pool; return switch (ip.indexToKey(ty.toIntern())) { .union_type => { @@ -1377,11 +1355,13 @@ pub fn unionTagTypeSafety(ty: Type, zcu: *const Zcu) ?Type { /// Asserts the type is a union; returns the tag type, even if the tag will /// not be stored at runtime. pub fn unionTagTypeHypothetical(ty: Type, zcu: *const Zcu) Type { + assertHasLayout(ty, zcu); const union_obj = zcu.typeToUnion(ty).?; return Type.fromInterned(union_obj.enum_tag_type); } pub fn unionFieldType(ty: Type, enum_tag: Value, zcu: *const Zcu) ?Type { + assertHasLayout(ty, zcu); const ip = &zcu.intern_pool; const union_obj = zcu.typeToUnion(ty).?; const union_fields = union_obj.field_types.get(ip); @@ -1390,17 +1370,20 @@ pub fn unionFieldType(ty: Type, enum_tag: Value, zcu: *const Zcu) ?Type { } pub fn unionFieldTypeByIndex(ty: Type, index: usize, zcu: *const Zcu) Type { + assertHasLayout(ty, zcu); const ip = &zcu.intern_pool; const union_obj = zcu.typeToUnion(ty).?; return Type.fromInterned(union_obj.field_types.get(ip)[index]); } pub fn unionTagFieldIndex(ty: Type, enum_tag: Value, zcu: *const Zcu) ?u32 { + assertHasLayout(ty, zcu); const union_obj = zcu.typeToUnion(ty).?; return zcu.unionTagFieldIndex(union_obj, enum_tag); } pub fn unionHasAllZeroBitFieldTypes(ty: Type, zcu: *Zcu) bool { + assertHasLayout(ty, zcu); const ip = &zcu.intern_pool; const union_obj = zcu.typeToUnion(ty).?; for (union_obj.field_types.get(ip)) |field_ty| { @@ -1413,14 +1396,17 @@ pub fn unionHasAllZeroBitFieldTypes(ty: Type, zcu: *Zcu) bool { /// Asserts the type is either an extern or packed union. 
pub fn unionBackingType(ty: Type, pt: Zcu.PerThread) !Type { const zcu = pt.zcu; - return switch (ty.containerLayout(zcu)) { + assertHasLayout(ty, zcu); + const loaded_union = zcu.intern_pool.loadUnionType(ty.toIntern()); + return switch (loaded_union.layout) { .@"extern" => try pt.arrayType(.{ .len = ty.abiSize(zcu), .child = .u8_type }), - .@"packed" => try pt.intType(.unsigned, @intCast(ty.bitSize(zcu))), + .@"packed" => .fromInterned(loaded_union.packed_backing_int_type), .auto => unreachable, }; } pub fn unionGetLayout(ty: Type, zcu: *const Zcu) Zcu.UnionLayout { + assertHasLayout(ty, zcu); const union_obj = zcu.intern_pool.loadUnionType(ty.toIntern()); return Type.getUnionLayout(union_obj, zcu); } @@ -1865,11 +1851,8 @@ pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value { for (field_vals, 0..) |*field_val, i_usize| { const i: u32 = @intCast(i_usize); if (struct_obj.field_is_comptime_bits.get(ip, i)) { - // MLUGG TODO: this is kinda a problem... we don't necessarily know the opv field vals! - // for now i'm just not letting structs with comptime fields be opv :) - if (true) return null; - assertHasInits(ty, zcu); field_val.* = struct_obj.field_defaults.get(ip)[i]; + assert(field_val.* != .none); continue; } const field_ty = Type.fromInterned(struct_obj.field_types.get(ip)[i]); @@ -2257,19 +2240,23 @@ pub fn errorSetNames(ty: Type, zcu: *const Zcu) InternPool.NullTerminatedString. 
} pub fn enumFields(ty: Type, zcu: *const Zcu) InternPool.NullTerminatedString.Slice { + assertHasLayout(ty, zcu); return zcu.intern_pool.loadEnumType(ty.toIntern()).field_names; } pub fn enumFieldCount(ty: Type, zcu: *const Zcu) usize { + assertHasLayout(ty, zcu); return zcu.intern_pool.loadEnumType(ty.toIntern()).field_names.len; } pub fn enumFieldName(ty: Type, field_index: usize, zcu: *const Zcu) InternPool.NullTerminatedString { + assertHasLayout(ty, zcu); const ip = &zcu.intern_pool; return ip.loadEnumType(ty.toIntern()).field_names.get(ip)[field_index]; } pub fn enumFieldIndex(ty: Type, field_name: InternPool.NullTerminatedString, zcu: *const Zcu) ?u32 { + assertHasLayout(ty, zcu); const ip = &zcu.intern_pool; const enum_type = ip.loadEnumType(ty.toIntern()); return enum_type.nameIndex(ip, field_name); @@ -2279,6 +2266,7 @@ pub fn enumFieldIndex(ty: Type, field_name: InternPool.NullTerminatedString, zcu /// an integer which represents the enum value. Returns the field index in /// declaration order, or `null` if `enum_tag` does not match any field. pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, zcu: *const Zcu) ?u32 { + assertHasLayout(ty, zcu); const ip = &zcu.intern_pool; const enum_type = ip.loadEnumType(ty.toIntern()); const int_tag = switch (ip.indexToKey(enum_tag.toIntern())) { @@ -2293,28 +2281,40 @@ pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, zcu: *const Zcu) ?u32 { /// Returns none in the case of a tuple which uses the integer index as the field name. 
pub fn structFieldName(ty: Type, index: usize, zcu: *const Zcu) InternPool.OptionalNullTerminatedString { const ip = &zcu.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .struct_type => ip.loadStructType(ty.toIntern()).field_names.get(ip)[index].toOptional(), - .tuple_type => .none, + switch (ip.indexToKey(ty.toIntern())) { + .struct_type => { + assertHasLayout(ty, zcu); + return ip.loadStructType(ty.toIntern()).field_names.get(ip)[index].toOptional(); + }, + .tuple_type => return .none, else => unreachable, - }; + } } pub fn structFieldCount(ty: Type, zcu: *const Zcu) u32 { const ip = &zcu.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .struct_type => ip.loadStructType(ty.toIntern()).field_types.len, - .tuple_type => |tuple| tuple.types.len, + switch (ip.indexToKey(ty.toIntern())) { + .struct_type => { + assertHasLayout(ty, zcu); + return ip.loadStructType(ty.toIntern()).field_types.len; + }, + .tuple_type => |tuple| return tuple.types.len, else => unreachable, - }; + } } /// Returns the field type. Supports structs and unions. 
pub fn fieldType(ty: Type, index: usize, zcu: *const Zcu) Type { const ip = &zcu.intern_pool; const types = switch (ip.indexToKey(ty.toIntern())) { - .struct_type => ip.loadStructType(ty.toIntern()).field_types, - .union_type => ip.loadUnionType(ty.toIntern()).field_types, + .struct_type => types: { + assertHasLayout(ty, zcu); + break :types ip.loadStructType(ty.toIntern()).field_types; + }, + .union_type => types: { + assertHasLayout(ty, zcu); + break :types ip.loadUnionType(ty.toIntern()).field_types; + }, .tuple_type => |tuple| tuple.types, else => unreachable, }; @@ -2335,11 +2335,13 @@ pub fn resolvedFieldAlignment(ty: Type, index: usize, zcu: *const Zcu) Alignment return switch (ip.indexToKey(ty.toIntern())) { .tuple_type => |tuple| Type.fromInterned(tuple.types.get(ip)[index]).abiAlignment(zcu), .struct_type => { + assertHasLayout(ty, zcu); const struct_obj = ip.loadStructType(ty.toIntern()); const field_ty: Type = .fromInterned(struct_obj.field_types.get(ip)[index]); return field_ty.defaultStructFieldAlignment(struct_obj.layout, zcu); }, .union_type => { + assertHasLayout(ty, zcu); const union_obj = ip.loadUnionType(ty.toIntern()); const field_ty: Type = .fromInterned(union_obj.field_types.get(ip)[index]); return field_ty.abiAlignment(zcu); @@ -2353,12 +2355,14 @@ pub fn explicitFieldAlignment(ty: Type, index: usize, zcu: *const Zcu) Alignment return switch (ip.indexToKey(ty.toIntern())) { .tuple_type => .none, .struct_type => { + assertHasLayout(ty, zcu); const struct_obj = ip.loadStructType(ty.toIntern()); assert(struct_obj.layout != .@"packed"); if (struct_obj.field_aligns.len == 0) return .none; return struct_obj.field_aligns.get(ip)[index]; }, .union_type => { + assertHasLayout(ty, zcu); const union_obj = ip.loadUnionType(ty.toIntern()); assert(union_obj.layout != .@"packed"); if (union_obj.field_aligns.len == 0) return .none; @@ -2413,7 +2417,6 @@ pub fn structFieldValueComptime(ty: Type, pt: Zcu.PerThread, index: usize) !?Val .struct_type => { const 
struct_type = ip.loadStructType(ty.toIntern()); if (struct_type.field_is_comptime_bits.get(ip, index)) { - assertHasInits(ty, zcu); return .fromInterned(struct_type.field_defaults.get(ip)[index]); } else { return Type.fromInterned(struct_type.field_types.get(ip)[index]).onePossibleValue(pt); @@ -2433,11 +2436,14 @@ pub fn structFieldValueComptime(ty: Type, pt: Zcu.PerThread, index: usize) !?Val pub fn structFieldIsComptime(ty: Type, index: usize, zcu: *const Zcu) bool { const ip = &zcu.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .struct_type => ip.loadStructType(ty.toIntern()).field_is_comptime_bits.get(ip, index), - .tuple_type => |tuple| tuple.values.get(ip)[index] != .none, + switch (ip.indexToKey(ty.toIntern())) { + .struct_type => { + assertHasLayout(ty, zcu); + return ip.loadStructType(ty.toIntern()).field_is_comptime_bits.get(ip, index); + }, + .tuple_type => |tuple| return tuple.values.get(ip)[index] != .none, else => unreachable, - }; + } } pub const FieldOffset = struct { @@ -2850,8 +2856,79 @@ pub fn isNullFromType(ty: Type, zcu: *const Zcu) ?bool { return null; } -/// Returns true if `ty` is allowed in packed types. -pub fn packable(ty: Type, zcu: *const Zcu) bool { +pub const UnpackableReason = union(enum) { + comptime_only, + pointer, + enum_inferred_int_tag: Type, + non_packed_struct: Type, + non_packed_union: Type, + other, +}; + +/// Returns `null` iff `ty` is allowed in packed types. 
+pub fn unpackable(ty: Type, zcu: *const Zcu) ?UnpackableReason { + return switch (ty.zigTypeTag(zcu)) { + .void, + .bool, + .float, + .int, + => null, + + .type, + .comptime_float, + .comptime_int, + .enum_literal, + .undefined, + .null, + => .comptime_only, + + .noreturn, + .@"opaque", + .error_union, + .error_set, + .frame, + .@"anyframe", + .@"fn", + .array, + .vector, + => .other, + + .optional => if (ty.isPtrLikeOptional(zcu)) + .pointer + else + .other, + + .pointer => .pointer, + + .@"enum" => switch (zcu.intern_pool.loadEnumType(ty.toIntern()).int_tag_mode) { + .explicit => null, + .auto => .{ .enum_inferred_int_tag = ty }, + }, + + .@"struct" => switch (ty.containerLayout(zcu)) { + .@"packed" => null, + .auto, .@"extern" => .{ .non_packed_struct = ty }, + }, + .@"union" => switch (ty.containerLayout(zcu)) { + .@"packed" => null, + .auto, .@"extern" => .{ .non_packed_union = ty }, + }, + }; +} + +pub const ExternPosition = enum { + ret_ty, + param_ty, + union_field, + struct_field, + element, + other, +}; + +/// Returns true if `ty` is allowed in extern types. +/// Does not require `ty` to be resolved in any way. +/// Keep in sync with `Sema.explainWhyTypeIsNotExtern`. 
+pub fn validateExtern(ty: Type, position: ExternPosition, zcu: *const Zcu) bool { return switch (ty.zigTypeTag(zcu)) { .type, .comptime_float, @@ -2862,22 +2939,83 @@ pub fn packable(ty: Type, zcu: *const Zcu) bool { .error_union, .error_set, .frame, - .noreturn, - .@"opaque", - .@"anyframe", - .@"fn", - .array, => false, - .optional => return ty.isPtrLikeOptional(zcu), - .void, + + .void => switch (position) { + .ret_ty, + .union_field, + .struct_field, + .element, + => true, + .param_ty, + .other, + => false, + }, + + .noreturn => position == .ret_ty, + + .@"opaque", .bool, .float, - .int, - .vector, + .@"anyframe", => true, - .@"enum" => zcu.intern_pool.loadEnumType(ty.toIntern()).int_tag_is_explicit, - .pointer => !ty.isSlice(zcu), - .@"struct", .@"union" => ty.containerLayout(zcu) == .@"packed", + + .pointer => { + if (ty.isSlice(zcu)) return false; + const child_ty = ty.childType(zcu); + if (child_ty.zigTypeTag(zcu) == .@"fn") { + return ty.isConstPtr(zcu) and child_ty.validateExtern(.other, zcu); + } + return true; + }, + .int => switch (ty.intInfo(zcu).bits) { + 0, 8, 16, 32, 64, 128 => true, + else => false, + }, + .@"fn" => { + if (position != .other) return false; + // For now we want to authorize PTX kernel to use zig objects, even if we end up exposing the ABI. + // The goal is to experiment with more integrated CPU/GPU code. 
+ if (ty.fnCallingConvention(zcu) == .nvptx_kernel) { + return true; + } + return !target_util.fnCallConvAllowsZigTypes(ty.fnCallingConvention(zcu)); + }, + .@"enum" => { + const enum_obj = zcu.intern_pool.loadEnumType(ty.toIntern()); + return switch (enum_obj.int_tag_mode) { + .auto => false, + .explicit => Type.fromInterned(enum_obj.int_tag_type).validateExtern(position, zcu), + }; + }, + .@"struct" => { + const struct_obj = zcu.intern_pool.loadStructType(ty.toIntern()); + return switch (struct_obj.layout) { + .auto => false, + .@"extern" => true, + .@"packed" => switch (struct_obj.packed_backing_mode) { + .auto => false, + .explicit => Type.fromInterned(struct_obj.packed_backing_int_type).validateExtern(position, zcu), + }, + }; + }, + .@"union" => { + const union_obj = zcu.intern_pool.loadUnionType(ty.toIntern()); + return switch (union_obj.layout) { + .auto => false, + .@"extern" => true, + .@"packed" => switch (union_obj.packed_backing_mode) { + .auto => false, + .explicit => Type.fromInterned(union_obj.packed_backing_int_type).validateExtern(position, zcu), + }, + }; + }, + .array => { + if (position == .ret_ty or position == .param_ty) return false; + return ty.childType(zcu).validateExtern(.element, zcu); + }, + .vector => ty.childType(zcu).validateExtern(.element, zcu), + .optional => ty.isPtrLikeOptional(zcu), }; } @@ -2889,7 +3027,6 @@ pub fn assertHasLayout(ty: Type, zcu: *const Zcu) void { .anyframe_type, .simple_type, .opaque_type, - .enum_type, .error_set_type, .inferred_error_set_type, => {}, @@ -2906,12 +3043,11 @@ pub fn assertHasLayout(ty: Type, zcu: *const Zcu) void { .tuple_type => |tuple| for (tuple.types.get(&zcu.intern_pool)) |field_ty| { assertHasLayout(.fromInterned(field_ty), zcu); }, - .struct_type, .union_type => { + .struct_type, .union_type, .enum_type => { const unit: InternPool.AnalUnit = .wrap(.{ .type_layout = ty.toIntern() }); assert(!zcu.outdated.contains(unit)); assert(!zcu.potentially_outdated.contains(unit)); }, - else => 
unreachable, // assertion failure; not a struct or union // values, not types .simple_value, @@ -2930,23 +3066,13 @@ pub fn assertHasLayout(ty: Type, zcu: *const Zcu) void { .opt, .aggregate, .un, + .undef, // memoization, not types .memoized_call, => unreachable, } } -/// Asserts that `ty` is an enum or struct type whose field values/defaults are resolved. -pub fn assertHasInits(ty: Type, zcu: *const Zcu) void { - switch (zcu.intern_pool.indexToKey(ty.toIntern())) { - .struct_type, .enum_type => {}, - else => unreachable, - } - const unit: InternPool.AnalUnit = .wrap(.{ .type_inits = ty.toIntern() }); - assert(!zcu.outdated.contains(unit)); - assert(!zcu.potentially_outdated.contains(unit)); -} - /// Recursively walks the type and marks for each subtype how many times it has been seen fn collectSubtypes(ty: Type, pt: Zcu.PerThread, visited: *std.AutoArrayHashMapUnmanaged(Type, u16)) error{OutOfMemory}!void { const zcu = pt.zcu; @@ -3116,6 +3242,7 @@ pub const Comparison = struct { }; }; +pub const @"u0": Type = .{ .ip_index = .u0_type }; pub const @"u1": Type = .{ .ip_index = .u1_type }; pub const @"u8": Type = .{ .ip_index = .u8_type }; pub const @"u16": Type = .{ .ip_index = .u16_type }; diff --git a/src/Value.zig b/src/Value.zig index 5986eee6d6..ca9ef96046 100644 --- a/src/Value.zig +++ b/src/Value.zig @@ -2207,7 +2207,7 @@ pub fn pointerDerivationAdvanced(ptr_val: Value, arena: Allocator, pt: Zcu.PerTh const ptr_ty_info = Type.fromInterned(ptr.ty).ptrInfo(zcu); const need_child: Type = .fromInterned(ptr_ty_info.child); - if (need_child.comptimeOnly(zcu)) { + if (need_child.comptimeOnly(zcu) or need_child.zigTypeTag(zcu) == .@"opaque") { // No refinement can happen - this pointer is presumably invalid. // Just offset it. 
const parent = try arena.create(PointerDeriveStep); @@ -2595,8 +2595,8 @@ pub fn uninterpret(val: anytype, ty: Type, pt: Zcu.PerThread) error{ OutOfMemory pub fn doPointersOverlap(ptr_val_a: Value, ptr_val_b: Value, elem_count: u64, zcu: *const Zcu) bool { const ip = &zcu.intern_pool; - const a_elem_ty = ptr_val_a.typeOf(zcu).indexablePtrElem(zcu); - const b_elem_ty = ptr_val_b.typeOf(zcu).indexablePtrElem(zcu); + const a_elem_ty = ptr_val_a.typeOf(zcu).indexableElem(zcu); + const b_elem_ty = ptr_val_b.typeOf(zcu).indexableElem(zcu); const a_ptr = ip.indexToKey(ptr_val_a.toIntern()).ptr; const b_ptr = ip.indexToKey(ptr_val_b.toIntern()).ptr; @@ -2682,3 +2682,58 @@ pub fn eqlScalarNum(lhs: Value, rhs: Value, zcu: *Zcu) bool { const rhs_bigint = rhs.toBigInt(&rhs_bigint_space, zcu); return lhs_bigint.eql(rhs_bigint); } + +/// Asserts the value is an integer, and the destination type is ComptimeInt or Int. +/// Vectors are also accepted. Vector results are reduced with AND. +/// +/// If provided, `vector_index` reports the first element that failed the range check. 
+pub fn intFitsInType( + val: Value, + ty: Type, + vector_index: ?*usize, + zcu: *const Zcu, +) bool { + if (ty.toIntern() == .comptime_int_type) return true; + const info = ty.intInfo(zcu); + switch (val.toIntern()) { + .zero_usize, .zero_u8 => return true, + else => switch (zcu.intern_pool.indexToKey(val.toIntern())) { + .undef => return true, + .variable, .@"extern", .func, .ptr => { + const target = zcu.getTarget(); + const ptr_bits = target.ptrBitWidth(); + return switch (info.signedness) { + .signed => info.bits > ptr_bits, + .unsigned => info.bits >= ptr_bits, + }; + }, + .int => |int| { + var buffer: InternPool.Key.Int.Storage.BigIntSpace = undefined; + const big_int = int.storage.toBigInt(&buffer); + return big_int.fitsInTwosComp(info.signedness, info.bits); + }, + .aggregate => |aggregate| { + assert(ty.zigTypeTag(zcu) == .vector); + return switch (aggregate.storage) { + .bytes => |bytes| for (bytes.toSlice(ty.vectorLen(zcu), &zcu.intern_pool), 0..) |byte, i| { + if (byte == 0) continue; + const actual_needed_bits = std.math.log2(byte) + 1 + @intFromBool(info.signedness == .signed); + if (info.bits >= actual_needed_bits) continue; + if (vector_index) |vi| vi.* = i; + break false; + } else true, + .elems, .repeated_elem => for (switch (aggregate.storage) { + .bytes => unreachable, + .elems => |elems| elems, + .repeated_elem => |elem| @as(*const [1]InternPool.Index, &elem), + }, 0..) 
|elem, i| { + if (Value.fromInterned(elem).intFitsInType(ty.scalarType(zcu), null, zcu)) continue; + if (vector_index) |vi| vi.* = i; + break false; + } else true, + }; + }, + else => unreachable, + }, + } +} diff --git a/src/Zcu.zig b/src/Zcu.zig index e1760fecb9..5aef6a11d1 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -1912,40 +1912,6 @@ pub const SrcLoc = struct { const full = tree.fullPtrType(parent_node).?; return tree.nodeToSpan(full.ast.bit_range_end.unwrap().?); }, - .node_offset_container_tag => |node_off| { - const tree = try src_loc.file_scope.getTree(zcu); - const parent_node = node_off.toAbsolute(src_loc.base_node); - - switch (tree.nodeTag(parent_node)) { - .container_decl_arg, .container_decl_arg_trailing => { - const full = tree.containerDeclArg(parent_node); - const arg_node = full.ast.arg.unwrap().?; - return tree.nodeToSpan(arg_node); - }, - .tagged_union_enum_tag, .tagged_union_enum_tag_trailing => { - const full = tree.taggedUnionEnumTag(parent_node); - const arg_node = full.ast.arg.unwrap().?; - - return tree.tokensToSpan( - tree.firstToken(arg_node) - 2, - tree.lastToken(arg_node) + 1, - tree.nodeMainToken(arg_node), - ); - }, - else => unreachable, - } - }, - .node_offset_field_default => |node_off| { - const tree = try src_loc.file_scope.getTree(zcu); - const parent_node = node_off.toAbsolute(src_loc.base_node); - - const full: Ast.full.ContainerField = switch (tree.nodeTag(parent_node)) { - .container_field => tree.containerField(parent_node), - .container_field_init => tree.containerFieldInit(parent_node), - else => unreachable, - }; - return tree.nodeToSpan(full.ast.value_expr.unwrap().?); - }, .node_offset_init_ty => |node_off| { const tree = try src_loc.file_scope.getTree(zcu); const parent_node = node_off.toAbsolute(src_loc.base_node); @@ -2021,6 +1987,14 @@ pub const SrcLoc = struct { } return tree.nodeToSpan(node); }, + .container_arg => { + const tree = try src_loc.file_scope.getTree(zcu); + const node = src_loc.base_node; + var 
buf: [2]Ast.Node.Index = undefined; + const container_decl = tree.fullContainerDecl(&buf, node) orelse return tree.nodeToSpan(node); + const arg_node = container_decl.ast.arg.unwrap() orelse return tree.nodeToSpan(node); + return tree.nodeToSpan(arg_node); + }, .container_field_name, .container_field_value, .container_field_type, @@ -2262,7 +2236,11 @@ pub const SrcLoc = struct { var param_it = full.iterate(tree); for (0..param_idx) |_| assert(param_it.next() != null); const param = param_it.next().?; - return tree.nodeToSpan(param.type_expr.?); + if (param.anytype_ellipsis3) |tok| { + return tree.tokenToSpan(tok); + } else { + return tree.nodeToSpan(param.type_expr.?); + } }, } } @@ -2484,10 +2462,6 @@ pub const LazySrcLoc = struct { node_offset_ptr_bitoffset: Ast.Node.Offset, /// The source location points to the host size of a pointer. node_offset_ptr_hostsize: Ast.Node.Offset, - /// The source location points to the tag type of an union or an enum. - node_offset_container_tag: Ast.Node.Offset, - /// The source location points to the default value of a field. - node_offset_field_default: Ast.Node.Offset, /// The source location points to the type of an array or struct initializer. node_offset_init_ty: Ast.Node.Offset, /// The source location points to the LHS of an assignment (or assign-op, e.g. `+=`). @@ -2532,6 +2506,11 @@ pub const LazySrcLoc = struct { fn_proto_param_type: FnProtoParam, array_cat_lhs: ArrayCat, array_cat_rhs: ArrayCat, + /// The source location points to the backing or tag type expression of + /// the container type declaration at the base node. + /// + /// For 'union(enum(T))', this points to 'T', not 'enum(T)'. + container_arg, /// The source location points to the name of the field at the given index /// of the container type declaration at the base node. 
container_field_name: u32, @@ -3149,7 +3128,7 @@ pub fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void { .nav_val => |nav| try zcu.markPoDependeeUpToDate(.{ .nav_val = nav }), .nav_ty => |nav| try zcu.markPoDependeeUpToDate(.{ .nav_ty = nav }), .type_layout => |ty| try zcu.markPoDependeeUpToDate(.{ .type_layout = ty }), - .type_inits => |ty| try zcu.markPoDependeeUpToDate(.{ .type_inits = ty }), + .struct_defaults => |ty| try zcu.markPoDependeeUpToDate(.{ .struct_defaults = ty }), .func => |func| try zcu.markPoDependeeUpToDate(.{ .func_ies = func }), .memoized_state => |stage| try zcu.markPoDependeeUpToDate(.{ .memoized_state = stage }), } @@ -3165,7 +3144,7 @@ fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUni .nav_val => |nav| .{ .nav_val = nav }, .nav_ty => |nav| .{ .nav_ty = nav }, .type_layout => |ty| .{ .type_layout = ty }, - .type_inits => |ty| .{ .type_inits = ty }, + .struct_defaults => |ty| .{ .struct_defaults = ty }, .func => |func_index| .{ .func_ies = func_index }, .memoized_state => |stage| .{ .memoized_state = stage }, }; @@ -3195,88 +3174,44 @@ fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUni } } +/// Selects an outdated `AnalUnit` to analyze next. Called from the main semantic analysis loop when +/// there is no work immediately queued. The unit is chosen such that it is unlikely to require any +/// recursive analysis (all of its previously-marked dependencies are already up-to-date), because +/// recursive analysis can cause over-analysis on incremental updates. pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit { if (!zcu.comp.config.incremental) return null; - if (zcu.outdated.count() == 0) { - // Any units in `potentially_outdated` must just be stuck in loops with one another: none of those - // units have had any outdated dependencies so far, and all of their remaining PO deps are triggered - // by other units in `potentially_outdated`. 
So, we can safety assume those units up-to-date. - zcu.potentially_outdated.clearRetainingCapacity(); - log.debug("findOutdatedToAnalyze: no outdated depender", .{}); - return null; - } - - // Our goal is to find an outdated AnalUnit which itself has no outdated or - // PO dependencies. Most of the time, such an AnalUnit will exist - we track - // them in the `outdated_ready` set for efficiency. However, this is not - // necessarily the case, since the Decl dependency graph may contain loops - // via mutually recursive definitions: - // pub const A = struct { b: *B }; - // pub const B = struct { b: *A }; - // In this case, we must defer to more complex logic below. - if (zcu.outdated_ready.count() > 0) { const unit = zcu.outdated_ready.keys()[0]; - log.debug("findOutdatedToAnalyze: trivial {f}", .{zcu.fmtAnalUnit(unit)}); + log.debug("findOutdatedToAnalyze: {f}", .{zcu.fmtAnalUnit(unit)}); return unit; } - // There is no single AnalUnit which is ready for re-analysis. Instead, we must assume that some - // AnalUnit with PO dependencies is outdated -- e.g. in the above example we arbitrarily pick one of - // A or B. We should definitely not select a function, since a function can't be responsible for the - // loop (IES dependencies can't have loops). We should also, of course, not select a `comptime` - // declaration, since you can't depend on those! + // Usually, getting here means that everything is up-to-date, so there is no more work to do. We + // will see that `zcu.outdated` and `zcu.potentially_outdated` are both empty. + // + // However, if a previous update had a dependency loop compile error, there is a cycle in the + // dependency graph (which is usually acyclic), which can cause a scenario where no unit appears + // to be ready, because they're all waiting for the next in the loop to be up-to-date. In that + // case, we usually have to just bite the bullet and analyze one of them. 
An exception is if + // `zcu.outdated` is empty but `zcu.potentially_outdated` is non-empty: in that case, the only + // possible situation is a cycle where everything is actually up-to-date, so we can clear out + // `zcu.potentially_outdated` and we are done. - // The choice of this unit could have a big impact on how much total analysis we perform, since - // if analysis concludes any dependencies on its result are up-to-date, then other PO AnalUnit - // may be resolved as up-to-date. To hopefully avoid doing too much work, let's find a unit - // which the most things depend on - the idea is that this will resolve a lot of loops (but this - // is only a heuristic). - - log.debug("findOutdatedToAnalyze: no trivial ready, using heuristic; {d} outdated, {d} PO", .{ - zcu.outdated.count(), - zcu.potentially_outdated.count(), - }); - - const ip = &zcu.intern_pool; - - var chosen_unit: ?AnalUnit = null; - var chosen_unit_dependers: u32 = undefined; - - // MLUGG TODO: i'm 99% sure this is now impossible. check!!! - inline for (.{ zcu.outdated.keys(), zcu.potentially_outdated.keys() }) |outdated_units| { - for (outdated_units) |unit| { - var n: u32 = 0; - var it = ip.dependencyIterator(switch (unit.unwrap()) { - .func => continue, // a `func` definitely can't be causing the loop so it is a bad choice - .@"comptime" => continue, // a `comptime` block can't even be depended on so it is a terrible choice - .type_layout => |ty| .{ .type_layout = ty }, - .type_inits => |ty| .{ .type_inits = ty }, - .nav_val => |nav| .{ .nav_val = nav }, - .nav_ty => |nav| .{ .nav_ty = nav }, - .memoized_state => { - // If we've hit a loop and some `.memoized_state` is outdated, we should make that choice eagerly. - // In general, it's good to resolve this early on, since -- for instance -- almost every function - // references the panic handler. 
- return unit; - }, - }); - while (it.next()) |_| n += 1; - - if (chosen_unit == null or n > chosen_unit_dependers) { - chosen_unit = unit; - chosen_unit_dependers = n; - } - } + if (zcu.outdated.count() == 0) { + // Everything is up-to-date. There could be lingering entries in `zcu.potentially_outdated` + // from a dependency loop on a previous update. + zcu.potentially_outdated.clearRetainingCapacity(); + log.debug("findOutdatedToAnalyze: all up-to-date", .{}); + return null; } - log.debug("findOutdatedToAnalyze: heuristic returned '{f}' ({d} dependers)", .{ - zcu.fmtAnalUnit(chosen_unit.?), - chosen_unit_dependers, + const unit = zcu.outdated.keys()[0]; + log.debug("findOutdatedToAnalyze: dependency loop affecting {d} units, selected {f}", .{ + zcu.outdated.count(), + zcu.fmtAnalUnit(unit), }); - - return chosen_unit.?; + return unit; } /// During an incremental update, before semantic analysis, call this to flush all values from @@ -3356,12 +3291,59 @@ pub fn mapOldZirToNew( } while (match_stack.pop()) |match_item| { - // First, a check: if the number of captures of this type has changed, we can't map it, because - // we wouldn't know how to correlate type information with the last update. - // Synchronizes with logic in `Zcu.PerThread.recreateStructType` etc. - if (old_zir.typeCapturesLen(match_item.old_inst) != new_zir.typeCapturesLen(match_item.new_inst)) { - // Don't map this type or anything within it. - continue; + // There are some properties of type declarations which cannot change across incremental + // updates. If they have, we need to ignore this mapping. These properties are essentially + // everything passed into `InternPool.getDeclaredStructType` (likewise for unions, enums, + // and opaques). 
+ const old_tag = old_zir.instructions.items(.data)[@intFromEnum(match_item.old_inst)].extended.opcode; + const new_tag = new_zir.instructions.items(.data)[@intFromEnum(match_item.new_inst)].extended.opcode; + if (old_tag != new_tag) continue; + switch (old_tag) { + .struct_decl => { + const old = old_zir.getStructDecl(match_item.old_inst); + const new = new_zir.getStructDecl(match_item.new_inst); + if (old.captures.len != new.captures.len) continue; + if (old.field_names.len != new.field_names.len) continue; + if (old.layout != new.layout) continue; + const old_any_field_aligns = old.field_align_body_lens != null; + const old_any_field_defaults = old.field_default_body_lens != null; + const old_any_comptime_fields = old.field_comptime_bits != null; + const old_explicit_backing_int = old.backing_int_type_body != null; + const new_any_field_aligns = new.field_align_body_lens != null; + const new_any_field_defaults = new.field_default_body_lens != null; + const new_any_comptime_fields = new.field_comptime_bits != null; + const new_explicit_backing_int = new.backing_int_type_body != null; + if (old_any_field_aligns != new_any_field_aligns) continue; + if (old_any_field_defaults != new_any_field_defaults) continue; + if (old_any_comptime_fields != new_any_comptime_fields) continue; + if (old_explicit_backing_int != new_explicit_backing_int) continue; + }, + .union_decl => { + const old = old_zir.getUnionDecl(match_item.old_inst); + const new = new_zir.getUnionDecl(match_item.new_inst); + if (old.captures.len != new.captures.len) continue; + if (old.field_names.len != new.field_names.len) continue; + if (old.kind != new.kind) continue; + const old_any_field_aligns = old.field_align_body_lens != null; + const new_any_field_aligns = new.field_align_body_lens != null; + if (old_any_field_aligns != new_any_field_aligns) continue; + }, + .enum_decl => { + const old = old_zir.getEnumDecl(match_item.old_inst); + const new = new_zir.getEnumDecl(match_item.new_inst); + if 
(old.captures.len != new.captures.len) continue; + if (old.field_names.len != new.field_names.len) continue; + if (old.nonexhaustive != new.nonexhaustive) continue; + const old_explicit_tag_type = old.tag_type_body != null; + const new_explicit_tag_type = new.tag_type_body != null; + if (old_explicit_tag_type != new_explicit_tag_type) continue; + }, + .opaque_decl => { + const old = old_zir.getOpaqueDecl(match_item.old_inst); + const new = new_zir.getOpaqueDecl(match_item.new_inst); + if (old.captures.len != new.captures.len) continue; + }, + else => unreachable, } // Match the namespace declaration itself @@ -4068,7 +4050,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoArrayHashMapUnmanaged(AnalUnit, ?R } if (has_inits) { // this should only be referenced by the type - const unit: AnalUnit = .wrap(.{ .type_inits = ty }); + const unit: AnalUnit = .wrap(.{ .struct_defaults = ty }); try units.putNoClobber(gpa, unit, referencer); } @@ -4184,7 +4166,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoArrayHashMapUnmanaged(AnalUnit, ?R const other: AnalUnit = .wrap(switch (unit.unwrap()) { .nav_val => |n| .{ .nav_ty = n }, .nav_ty => |n| .{ .nav_val = n }, - .@"comptime", .type_layout, .type_inits, .func, .memoized_state => break :queue_paired, + .@"comptime", .type_layout, .struct_defaults, .func, .memoized_state => break :queue_paired, }); const gop = try units.getOrPut(gpa, other); if (gop.found_existing) break :queue_paired; @@ -4305,6 +4287,16 @@ pub fn navFileScope(zcu: *Zcu, nav: InternPool.Nav.Index) *File { return zcu.fileByIndex(zcu.navFileScopeIndex(nav)); } +pub fn navAlignment(zcu: *Zcu, nav_index: InternPool.Nav.Index) InternPool.Alignment { + const ty: Type, const alignment = switch (zcu.intern_pool.getNav(nav_index).status) { + .unresolved => unreachable, + .type_resolved => |r| .{ .fromInterned(r.type), r.alignment }, + .fully_resolved => |r| .{ Value.fromInterned(r.val).typeOf(zcu), r.alignment }, + }; + if (alignment != .none) return alignment; + 
return ty.abiAlignment(zcu); +} + pub fn fmtAnalUnit(zcu: *Zcu, unit: AnalUnit) std.fmt.Alt(FormatAnalUnit, formatAnalUnit) { return .{ .data = .{ .unit = unit, .zcu = zcu } }; } @@ -4331,7 +4323,7 @@ fn formatAnalUnit(data: FormatAnalUnit, writer: *Io.Writer) Io.Writer.Error!void } }, .nav_val, .nav_ty => |nav, tag| return writer.print("{t}('{f}' [{}])", .{ tag, ip.getNav(nav).fqn.fmt(ip), @intFromEnum(nav) }), - .type_layout, .type_inits => |ty, tag| return writer.print("{t}('{f}' [{}])", .{ tag, Type.fromInterned(ty).containerTypeName(ip).fmt(ip), @intFromEnum(ty) }), + .type_layout, .struct_defaults => |ty, tag| return writer.print("{t}('{f}' [{}])", .{ tag, Type.fromInterned(ty).containerTypeName(ip).fmt(ip), @intFromEnum(ty) }), .func => |func| { const nav = zcu.funcInfo(func).owner_nav; return writer.print("func('{f}' [{}])", .{ ip.getNav(nav).fqn.fmt(ip), @intFromEnum(func) }); @@ -4357,7 +4349,7 @@ fn formatDependee(data: FormatDependee, writer: *Io.Writer) Io.Writer.Error!void const fqn = ip.getNav(nav).fqn; return writer.print("{t}('{f}')", .{ tag, fqn.fmt(ip) }); }, - .type_layout, .type_inits => |ip_index, tag| { + .type_layout, .struct_defaults => |ip_index, tag| { const name = Type.fromInterned(ip_index).containerTypeName(ip); return writer.print("{t}('{f}')", .{ tag, name.fmt(ip) }); }, diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index e3c0c21244..d937367c4a 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -695,20 +695,46 @@ pub fn ensureFileAnalyzed(pt: Zcu.PerThread, file_index: Zcu.File.Index) (Alloca .file = file_index, .inst = .main_struct_inst, }); - const file_root_type = try Sema.analyzeStructDecl( - pt, - file_index, - &file.zir.?, - .none, - tracked_inst, - &struct_decl, - null, - &.{}, - .{ .exact = .{ - .name = try file.internFullyQualifiedName(pt), - .nav = .none, - } }, - ); + const wip: InternPool.WipContainerType = switch (try ip.getDeclaredStructType(gpa, io, pt.tid, .{ + .zir_index = tracked_inst, + 
.captures = &.{}, + .fields_len = @intCast(struct_decl.field_names.len), + .layout = struct_decl.layout, + .any_comptime_fields = struct_decl.field_comptime_bits != null, + .any_field_defaults = struct_decl.field_default_body_lens != null, + .any_field_aligns = struct_decl.field_align_body_lens != null, + .packed_backing_mode = if (struct_decl.backing_int_type_body != null) .explicit else .auto, + })) { + .existing => unreachable, // it would have been set as `zcu.fileRootType` already + .wip => |wip| wip, + }; + errdefer wip.cancel(ip, pt.tid); + + wip.setName(ip, try file.internFullyQualifiedName(pt), .none); + const new_namespace_index: InternPool.NamespaceIndex = try pt.createNamespace(.{ + .parent = .none, + .owner_type = wip.index, + .file_scope = file_index, + .generation = zcu.generation, + }); + errdefer pt.destroyNamespace(new_namespace_index); + try pt.scanNamespace(new_namespace_index, struct_decl.decls); + // MLUGG TODO: we could potentially revert this language change if we wanted? 
don't mind + try zcu.comp.queueJob(.{ .analyze_unit = .wrap(.{ .type_layout = wip.index }) }); + try zcu.comp.queueJob(.{ .analyze_unit = .wrap(.{ .struct_defaults = wip.index }) }); + + if (zcu.comp.debugIncremental()) try zcu.incremental_debug_state.newType(zcu, wip.index); + + try zcu.outdated.ensureUnusedCapacity(gpa, 2); + try zcu.outdated_ready.ensureUnusedCapacity(gpa, 2); + errdefer comptime unreachable; // because we don't remove the `outdated` entries + zcu.outdated.putAssumeCapacityNoClobber(.wrap(.{ .type_layout = wip.index }), 0); + zcu.outdated.putAssumeCapacityNoClobber(.wrap(.{ .struct_defaults = wip.index }), 0); + zcu.outdated_ready.putAssumeCapacityNoClobber(.wrap(.{ .type_layout = wip.index }), {}); + zcu.outdated_ready.putAssumeCapacityNoClobber(.wrap(.{ .struct_defaults = wip.index }), {}); + + const file_root_type: Type = .fromInterned(wip.finish(ip, new_namespace_index)); + zcu.setFileRootType(file_index, file_root_type.toIntern()); if (zcu.comp.time_report) |*tr| tr.stats.n_imported_files += 1; } @@ -1048,11 +1074,6 @@ pub fn ensureTypeLayoutUpToDate(pt: Zcu.PerThread, ty: Type) Zcu.SemaError!void assert(!zcu.analysis_in_progress.contains(anal_unit)); - // Determine whether or not this type is outdated. For this kind of `AnalUnit`, that's - // the only indicator as to whether or not analysis is required; when a struct/union is - // first created, it's marked as outdated. - // MLUGG TODO: make that actually true, it's a good strategy here! 
- const was_outdated = zcu.outdated.swapRemove(anal_unit) or zcu.potentially_outdated.swapRemove(anal_unit); @@ -1113,17 +1134,11 @@ pub fn ensureTypeLayoutUpToDate(pt: Zcu.PerThread, ty: Type) Zcu.SemaError!void }; defer sema.deinit(); - const result = switch (ty.containerLayout(zcu)) { - .auto, .@"extern" => switch (ty.zigTypeTag(zcu)) { - .@"struct" => Sema.type_resolution.resolveStructLayout(&sema, ty), - .@"union" => Sema.type_resolution.resolveUnionLayout(&sema, ty), - else => unreachable, - }, - .@"packed" => switch (ty.zigTypeTag(zcu)) { - .@"struct" => Sema.type_resolution.resolvePackedStructLayout(&sema, ty), - .@"union" => Sema.type_resolution.resolvePackedUnionLayout(&sema, ty), - else => unreachable, - }, + const result = switch (ty.zigTypeTag(zcu)) { + .@"enum" => Sema.type_resolution.resolveEnumLayout(&sema, ty), + .@"struct" => Sema.type_resolution.resolveStructLayout(&sema, ty), + .@"union" => Sema.type_resolution.resolveUnionLayout(&sema, ty), + else => unreachable, }; result catch |err| switch (err) { error.AnalysisFail => { @@ -1145,36 +1160,31 @@ pub fn ensureTypeLayoutUpToDate(pt: Zcu.PerThread, ty: Type) Zcu.SemaError!void sema.flushExports() catch |err| switch (err) { error.OutOfMemory => |e| return e, }; - - codegen_type: { - if (zcu.comp.config.use_llvm) break :codegen_type; - if (file.mod.?.strip) break :codegen_type; - zcu.comp.link_prog_node.increaseEstimatedTotalItems(1); - try zcu.comp.queueJob(.{ .link_type = ty.toIntern() }); - } } -/// Ensures that the default/tag values of the given `struct` or `enum` type are fully up-to-date, -/// performing re-analysis if necessary. Asserts that `ty` is a struct (not a tuple!) or an enum. -/// Returns `error.AnalysisFail` if an analysis error is encountered during resolution; the caller -/// is free to ignore this, since the error is already registered. 
-pub fn ensureTypeInitsUpToDate(pt: Zcu.PerThread, ty: Type) Zcu.SemaError!void { +/// Ensures that the default values of the given "declared" (not reified) `struct` type are fully +/// up-to-date, performing re-analysis if necessary. Asserts that `ty` is a struct (not tuple) type. +/// Returns `error.AnalysisFail` if an analysis error is encountered while resolving the default +/// field values; the caller is free to ignore this, since the error is already registered. +pub fn ensureStructDefaultsUpToDate(pt: Zcu.PerThread, ty: Type) Zcu.SemaError!void { const tracy = trace(@src()); defer tracy.end(); const zcu = pt.zcu; const gpa = zcu.gpa; - const anal_unit: AnalUnit = .wrap(.{ .type_inits = ty.toIntern() }); + assert(ty.zigTypeTag(zcu) == .@"struct"); + assert(!ty.isTuple(zcu)); - log.debug("ensureTypeInitsUpToDate {f}", .{zcu.fmtAnalUnit(anal_unit)}); + const anal_unit: AnalUnit = .wrap(.{ .struct_defaults = ty.toIntern() }); + + log.debug("ensureStructDefaultsUpToDate {f}", .{zcu.fmtAnalUnit(anal_unit)}); assert(!zcu.analysis_in_progress.contains(anal_unit)); // Determine whether or not this type is outdated. For this kind of `AnalUnit`, that's // the only indicator as to whether or not analysis is required; when a struct/enum is // first created, it's marked as outdated. - // MLUGG TODO: make that actually true, it's a good strategy here! const was_outdated = zcu.outdated.swapRemove(anal_unit) or zcu.potentially_outdated.swapRemove(anal_unit); @@ -1194,7 +1204,7 @@ pub fn ensureTypeInitsUpToDate(pt: Zcu.PerThread, ty: Type) Zcu.SemaError!void { } // For types, we already know that we have to invalidate all dependees. // TODO: we actually *could* detect whether everything was the same. should we bother? - try zcu.markDependeeOutdated(.marked_po, .{ .type_inits = ty.toIntern() }); + try zcu.markDependeeOutdated(.marked_po, .{ .struct_defaults = ty.toIntern() }); } else { // We can trust the current information about this unit. 
if (zcu.failed_analysis.contains(anal_unit)) return error.AnalysisFail; @@ -1236,12 +1246,7 @@ pub fn ensureTypeInitsUpToDate(pt: Zcu.PerThread, ty: Type) Zcu.SemaError!void { }; defer sema.deinit(); - const result = switch (ty.zigTypeTag(zcu)) { - .@"struct" => Sema.type_resolution.resolveStructDefaults(&sema, ty), - .@"enum" => Sema.type_resolution.resolveEnumValues(&sema, ty), - else => unreachable, - }; - result catch |err| switch (err) { + Sema.type_resolution.resolveStructDefaults(&sema, ty) catch |err| switch (err) { error.AnalysisFail => { if (!zcu.failed_analysis.contains(anal_unit)) { // If this unit caused the error, it would have an entry in `failed_analysis`. @@ -1270,20 +1275,6 @@ pub fn ensureNavValUpToDate(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu const tracy = trace(@src()); defer tracy.end(); - // TODO: document this elsewhere mlugg! - // For my own benefit, here's how a namespace update for a normal (non-file-root) type works: - // `const S = struct { ... };` - // We are adding or removing a declaration within this `struct`. - // * `S` registers a dependency on `.{ .src_hash = (declaration of S) }` - // * Any change to the `struct` body -- including changing a declaration -- invalidates this - // * `S` is re-analyzed, but notes: - // * there is an existing struct instance (at this `TrackedInst` with these captures) - // * the struct's resolution is up-to-date (because nothing about the fields changed) - // * so, it uses the same `struct` - // * but this doesn't stop it from updating the namespace! 
- // * we basically do `scanDecls`, updating the namespace as needed - // * so everyone lived happily ever after - const zcu = pt.zcu; const gpa = zcu.gpa; const ip = &zcu.intern_pool; @@ -3033,7 +3024,7 @@ fn analyzeFuncBodyInner(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.Sem const zir = file.zir.?; try zcu.analysis_in_progress.putNoClobber(gpa, anal_unit, {}); - errdefer _ = zcu.analysis_in_progress.swapRemove(anal_unit); + defer assert(zcu.analysis_in_progress.swapRemove(anal_unit)); func.setAnalyzed(ip, io); if (func.analysisUnordered(ip).inferred_error_set) { @@ -3231,9 +3222,6 @@ fn analyzeFuncBodyInner(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.Sem func.setResolvedErrorSet(ip, io, ies.resolved); } - // MLUGG TODO: i think this can go away and the assert move to the defer? - assert(zcu.analysis_in_progress.swapRemove(anal_unit)); - try sema.flushExports(); defer { @@ -3835,7 +3823,6 @@ pub fn enumValue(pt: Zcu.PerThread, ty: Type, tag_int: InternPool.Index) Allocat /// declaration order. pub fn enumValueFieldIndex(pt: Zcu.PerThread, ty: Type, field_index: u32) Allocator.Error!Value { const ip = &pt.zcu.intern_pool; - ty.assertHasInits(pt.zcu); const enum_type = ip.loadEnumType(ty.toIntern()); assert(field_index < enum_type.field_names.len); @@ -3859,7 +3846,9 @@ pub fn enumValueFieldIndex(pt: Zcu.PerThread, ty: Type, field_index: u32) Alloca pub fn undefValue(pt: Zcu.PerThread, ty: Type) Allocator.Error!Value { if (std.debug.runtime_safety) { - assert(try ty.onePossibleValue(pt) == null); + if (try ty.onePossibleValue(pt)) |opv| { + assert(opv.isUndef(pt.zcu)); + } } return .fromInterned(try pt.intern(.{ .undef = ty.toIntern() })); } @@ -3941,7 +3930,10 @@ pub fn aggregateValue(pt: Zcu.PerThread, ty: Type, elems: []const InternPool.Ind for (elems) |elem| { if (!Value.fromInterned(elem).isUndef(pt.zcu)) break; } else if (elems.len > 0) { - return pt.undefValue(ty); // all-undef + // All undef, so return an undef struct. 
However, don't use `undefValue`, because its + // non-OPV assertion can loop on `[1]@TypeOf(undefined)`: that type has an OPV of + // `.{undefined}`, which here we normalize to `undefined`. + return .fromInterned(try pt.intern(.{ .undef = ty.toIntern() })); } return .fromInterned(try pt.intern(.{ .aggregate = .{ .ty = ty.toIntern(), @@ -4096,19 +4088,6 @@ pub fn getExtern(pt: Zcu.PerThread, key: InternPool.Key.Extern) Allocator.Error! return result.index; } -// TODO: this shouldn't need a `PerThread`! Fix the signature of `Type.abiAlignment`. -// MLUGG TODO: that's done, move it! -pub fn navAlignment(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) InternPool.Alignment { - const zcu = pt.zcu; - const ty: Type, const alignment = switch (zcu.intern_pool.getNav(nav_index).status) { - .unresolved => unreachable, - .type_resolved => |r| .{ .fromInterned(r.type), r.alignment }, - .fully_resolved => |r| .{ Value.fromInterned(r.val).typeOf(zcu), r.alignment }, - }; - if (alignment != .none) return alignment; - return ty.abiAlignment(zcu); -} - /// Given a namespace, re-scan its declarations from the type definition if they have not /// yet been re-scanned on this update. /// If the type declaration instruction has been lost, returns `error.AnalysisFail`. 
@@ -4393,7 +4372,7 @@ pub fn resolveTypeForCodegen(pt: Zcu.PerThread, ty: Type) Zcu.SemaError!void { .@"struct" => switch (ip.indexToKey(ty.toIntern())) { .struct_type => { try pt.ensureTypeLayoutUpToDate(ty); - try pt.ensureTypeInitsUpToDate(ty); + try pt.ensureStructDefaultsUpToDate(ty); }, .tuple_type => |tuple| for (0..tuple.types.len) |i| { const field_is_comptime = tuple.values.get(ip)[i] != .none; @@ -4405,7 +4384,7 @@ pub fn resolveTypeForCodegen(pt: Zcu.PerThread, ty: Type) Zcu.SemaError!void { }, .@"union" => try pt.ensureTypeLayoutUpToDate(ty), - .@"enum" => try pt.ensureTypeInitsUpToDate(ty), + .@"enum" => try pt.ensureTypeLayoutUpToDate(ty), } } pub fn resolveValueTypesForCodegen(pt: Zcu.PerThread, val: Value) Zcu.SemaError!void { diff --git a/src/codegen.zig b/src/codegen.zig index 67575beb3f..e9acab66e4 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -347,7 +347,6 @@ pub fn generateSymbol( .void => unreachable, // non-runtime value .null => unreachable, // non-runtime value .@"unreachable" => unreachable, // non-runtime value - .empty_tuple => return, .false, .true => try w.writeByte(switch (simple_value) { .false => 0, .true => 1, @@ -1065,20 +1064,20 @@ pub fn lowerValue(pt: Zcu.PerThread, val: Value, target: *const std.Target) Allo const elem_ty = ty.childType(zcu); const ptr = ip.indexToKey(val.toIntern()).ptr; if (ptr.base_addr == .int) return .{ .immediate = ptr.byte_offset }; - switch (ptr.base_addr) { + if (ptr.byte_offset == 0) switch (ptr.base_addr) { .int => unreachable, // handled above - .nav => |nav| if (elem_ty.isFnOrHasRuntimeBits(zcu)) { + .nav => |nav| if (elem_ty.isRuntimeFnOrHasRuntimeBits(zcu)) { return .{ .lea_nav = nav }; } else { // Create the 0xaa bit pattern... 
const undef_ptr_bits: u64 = @intCast((@as(u66, 1) << @intCast(target.ptrBitWidth() + 1)) / 3); // ...but align the pointer - const alignment = pt.navAlignment(nav); + const alignment = zcu.navAlignment(nav); return .{ .immediate = alignment.forward(undef_ptr_bits) }; }, - .uav => |uav| if (elem_ty.isFnOrHasRuntimeBits(zcu)) { + .uav => |uav| if (elem_ty.isRuntimeFnOrHasRuntimeBits(zcu)) { return .{ .lea_uav = uav }; } else { // Create the 0xaa bit pattern... @@ -1089,7 +1088,7 @@ pub fn lowerValue(pt: Zcu.PerThread, val: Value, target: *const std.Target) Allo }, else => {}, - } + }; }, }, .int => { diff --git a/src/codegen/aarch64/Select.zig b/src/codegen/aarch64/Select.zig index 0e6387949a..7449464905 100644 --- a/src/codegen/aarch64/Select.zig +++ b/src/codegen/aarch64/Select.zig @@ -6594,7 +6594,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory, if (try isel.hasRepeatedByteRepr(.fromInterned(fill_val))) |fill_byte| break :fill_byte .{ .constant = fill_byte }; } - switch (dst_ty.indexablePtrElem(zcu).abiSize(zcu)) { + switch (dst_ty.indexableElem(zcu).abiSize(zcu)) { 0 => unreachable, 1 => break :fill_byte .{ .value = bin_op.rhs }, 2, 4, 8 => |size| { @@ -7217,7 +7217,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory, const ptr_ra = try ptr_vi.value.defReg(isel) orelse break :unused; const ty_nav = air.data(air.inst_index).ty_nav; - if (ZigType.fromInterned(ip.getNav(ty_nav.nav).typeOf(ip)).isFnOrHasRuntimeBits(zcu)) switch (true) { + if (ZigType.fromInterned(ip.getNav(ty_nav.nav).typeOf(ip)).isRuntimeFnOrHasRuntimeBits(zcu)) switch (true) { false => { try isel.nav_relocs.append(gpa, .{ .nav = ty_nav.nav, @@ -7240,7 +7240,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory, }); try isel.emit(.adrp(ptr_ra.x(), 0)); }, - } else try isel.movImmediate(ptr_ra.x(), isel.pt.navAlignment(ty_nav.nav).forward(0xaaaaaaaaaaaaaaaa)); + } else try 
isel.movImmediate(ptr_ra.x(), zcu.navAlignment(ty_nav.nav).forward(0xaaaaaaaaaaaaaaaa)); } if (air.next()) |next_air_tag| continue :air_tag next_air_tag; }, @@ -10738,7 +10738,7 @@ pub const Value = struct { } }), }), .simple_value => |simple_value| switch (simple_value) { - .undefined, .void, .null, .empty_tuple, .@"unreachable" => unreachable, + .undefined, .void, .null, .@"unreachable" => unreachable, .true => continue :constant_key .{ .int = .{ .ty = .bool_type, .storage = .{ .u64 = 1 }, @@ -10931,7 +10931,7 @@ pub const Value = struct { .ptr => |ptr| { assert(offset == 0 and size == 8); break :free switch (ptr.base_addr) { - .nav => |nav| if (ZigType.fromInterned(ip.getNav(nav).typeOf(ip)).isFnOrHasRuntimeBits(zcu)) switch (true) { + .nav => |nav| if (ZigType.fromInterned(ip.getNav(nav).typeOf(ip)).isRuntimeFnOrHasRuntimeBits(zcu)) switch (true) { false => { try isel.nav_relocs.append(zcu.gpa, .{ .nav = nav, @@ -10965,9 +10965,9 @@ pub const Value = struct { }, } else continue :constant_key .{ .int = .{ .ty = .usize_type, - .storage = .{ .u64 = isel.pt.navAlignment(nav).forward(0xaaaaaaaaaaaaaaaa) }, + .storage = .{ .u64 = zcu.navAlignment(nav).forward(0xaaaaaaaaaaaaaaaa) }, } }, - .uav => |uav| if (ZigType.fromInterned(ip.typeOf(uav.val)).isFnOrHasRuntimeBits(zcu)) switch (true) { + .uav => |uav| if (ZigType.fromInterned(ip.typeOf(uav.val)).isRuntimeFnOrHasRuntimeBits(zcu)) switch (true) { false => { try isel.uav_relocs.append(zcu.gpa, .{ .uav = uav, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 831a64779b..5e9f21e1aa 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -789,7 +789,7 @@ pub const DeclGen = struct { // Render an undefined pointer if we have a pointer to a zero-bit or comptime type. 
const ptr_ty: Type = .fromInterned(uav.orig_ty); - if (ptr_ty.isPtrAtRuntime(zcu) and !uav_ty.isFnOrHasRuntimeBits(zcu)) { + if (ptr_ty.isPtrAtRuntime(zcu) and !uav_ty.isRuntimeFnOrHasRuntimeBits(zcu)) { return dg.writeCValue(w, .{ .undef = ptr_ty }); } @@ -862,7 +862,7 @@ pub const DeclGen = struct { // Render an undefined pointer if we have a pointer to a zero-bit or comptime type. const nav_ty: Type = .fromInterned(ip.getNav(owner_nav).typeOf(ip)); const ptr_ty = try pt.navPtrType(owner_nav); - if (!nav_ty.isFnOrHasRuntimeBits(zcu)) { + if (!nav_ty.isRuntimeFnOrHasRuntimeBits(zcu)) { return dg.writeCValue(w, .{ .undef = ptr_ty }); } @@ -1043,7 +1043,6 @@ pub const DeclGen = struct { .undefined => unreachable, .void => unreachable, .null => unreachable, - .empty_tuple => unreachable, .@"unreachable" => unreachable, .false => try w.writeAll("false"), @@ -3077,7 +3076,7 @@ pub fn genDecl(o: *Object) Error!void { const nav = ip.getNav(o.dg.pass.nav); const nav_ty: Type = .fromInterned(nav.typeOf(ip)); - if (!nav_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return; + if (!nav_ty.hasRuntimeBits(zcu)) return; switch (ip.indexToKey(nav.status.fully_resolved.val)) { .@"extern" => |@"extern"| { if (!ip.isFunctionType(nav_ty.toIntern())) return o.dg.renderFwdDecl(o.dg.pass.nav, .{ @@ -3676,7 +3675,7 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { const inst_ty = f.typeOfIndex(inst); const ptr_ty = f.typeOf(bin_op.lhs); - const elem_has_bits = ptr_ty.indexablePtrElem(zcu).hasRuntimeBitsIgnoreComptime(zcu); + const elem_has_bits = ptr_ty.indexableElem(zcu).hasRuntimeBitsIgnoreComptime(zcu); const ptr = try f.resolveInst(bin_op.lhs); const index = try f.resolveInst(bin_op.rhs); @@ -3792,7 +3791,7 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { const zcu = pt.zcu; const inst_ty = f.typeOfIndex(inst); const elem_ty = inst_ty.childType(zcu); - if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return .{ .undef = inst_ty }; + if 
(!elem_ty.hasRuntimeBits(zcu)) return .{ .undef = inst_ty }; const local = try f.allocLocalValue(.{ .ctype = try f.ctypeFromType(elem_ty, .complete), @@ -3829,7 +3828,7 @@ fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue { const zcu = pt.zcu; const inst_ty = f.typeOfIndex(inst); const elem_ty = inst_ty.childType(zcu); - if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return .{ .undef = inst_ty }; + if (!elem_ty.hasRuntimeBits(zcu)) return .{ .undef = inst_ty }; const local = try f.allocLocalValue(.{ .ctype = try f.ctypeFromType(elem_ty, .complete), @@ -4502,7 +4501,7 @@ fn airPtrAddSub(f: *Function, inst: Air.Inst.Index, operator: u8) !CValue { const inst_ty = f.typeOfIndex(inst); const inst_scalar_ty = inst_ty.scalarType(zcu); - const elem_ty = inst_scalar_ty.indexablePtrElem(zcu); + const elem_ty = inst_scalar_ty.indexableElem(zcu); if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) return f.moveCValue(inst, inst_ty, lhs); const inst_scalar_ctype = try f.ctypeFromType(inst_scalar_ty, .complete); @@ -7037,7 +7036,7 @@ fn airMemcpy(f: *Function, inst: Air.Inst.Index, function_paren: []const u8) !CV try w.writeAll(", "); try writeArrayLen(f, dest_ptr, dest_ty); try w.writeAll(" * sizeof("); - try f.renderType(w, dest_ty.indexablePtrElem(zcu)); + try f.renderType(w, dest_ty.indexableElem(zcu)); try w.writeAll("));"); try f.object.newline(); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 1327b7b2e1..ad115504ba 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3725,7 +3725,6 @@ pub const Object = struct { .undefined => unreachable, // non-runtime value .void => unreachable, // non-runtime value .null => unreachable, // non-runtime value - .empty_tuple => unreachable, // non-runtime value .@"unreachable" => unreachable, // non-runtime value .false => .false, @@ -4604,7 +4603,7 @@ pub const NavGen = struct { _ = try o.resolveLlvmFunction(pt, owner_nav); } else { const variable_index = try o.resolveGlobalNav(pt, nav_index); - 
variable_index.setAlignment(pt.navAlignment(nav_index).toLlvm(), &o.builder); + variable_index.setAlignment(zcu.navAlignment(nav_index).toLlvm(), &o.builder); if (resolved.@"linksection".toSlice(ip)) |section| variable_index.setSection(try o.builder.string(section), &o.builder); if (is_const) variable_index.setMutability(.constant, &o.builder); @@ -5953,7 +5952,7 @@ pub const FuncGen = struct { return .none; } - const have_block_result = inst_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu); + const have_block_result = inst_ty.hasRuntimeBits(zcu); var breaks: BreakList = if (have_block_result) .{ .list = .{} } else .{ .len = 0 }; defer if (have_block_result) breaks.list.deinit(self.gpa); @@ -6000,7 +5999,7 @@ pub const FuncGen = struct { // Add the values to the lists only if the break provides a value. const operand_ty = self.typeOf(branch.operand); - if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) { + if (operand_ty.hasRuntimeBits(zcu)) { const val = try self.resolveInst(branch.operand); // For the phi node, we need the basic blocks and the values of the @@ -9581,7 +9580,7 @@ pub const FuncGen = struct { const zcu = pt.zcu; const ptr_ty = self.typeOfIndex(inst); const pointee_type = ptr_ty.childType(zcu); - if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) + if (!pointee_type.hasRuntimeBits(zcu)) return (try o.lowerPtrToVoid(pt, ptr_ty)).toValue(); const pointee_llvm_ty = try o.lowerType(pt, pointee_type); @@ -9595,7 +9594,7 @@ pub const FuncGen = struct { const zcu = pt.zcu; const ptr_ty = self.typeOfIndex(inst); const ret_ty = ptr_ty.childType(zcu); - if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) + if (!ret_ty.hasRuntimeBits(zcu)) return (try o.lowerPtrToVoid(pt, ptr_ty)).toValue(); if (self.ret_ptr != .none) return self.ret_ptr; const ret_llvm_ty = try o.lowerType(pt, ret_ty); @@ -9897,7 +9896,7 @@ pub const FuncGen = struct { const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const ptr_ty = self.typeOf(bin_op.lhs); 
const operand_ty = ptr_ty.childType(zcu); - if (!operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return .none; + if (!operand_ty.hasRuntimeBits(zcu)) return .none; const ptr = try self.resolveInst(bin_op.lhs); var element = try self.resolveInst(bin_op.rhs); const llvm_abi_ty = try o.getAtomicAbiType(pt, operand_ty, false); @@ -11478,7 +11477,7 @@ pub const FuncGen = struct { const zcu = pt.zcu; const info = ptr_ty.ptrInfo(zcu); const elem_ty = Type.fromInterned(info.child); - if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) { + if (!elem_ty.hasRuntimeBits(zcu)) { return; } const ptr_alignment = ptr_ty.ptrAlignment(zcu).toLlvm(); diff --git a/src/codegen/riscv64/CodeGen.zig b/src/codegen/riscv64/CodeGen.zig index dd4ca3f88b..72174554ad 100644 --- a/src/codegen/riscv64/CodeGen.zig +++ b/src/codegen/riscv64/CodeGen.zig @@ -2673,7 +2673,7 @@ fn genBinOp( defer func.register_manager.unlockReg(tmp_lock); // RISC-V has no immediate mul, so we copy the size to a temporary register - const elem_size = lhs_ty.indexablePtrElem(zcu).abiSize(zcu); + const elem_size = lhs_ty.indexableElem(zcu).abiSize(zcu); const elem_size_reg = try func.copyToTmpRegister(Type.u64, .{ .immediate = elem_size }); try func.genBinOp( @@ -3913,7 +3913,7 @@ fn airPtrElemVal(func: *Func, inst: Air.Inst.Index) !void { const base_ptr_ty = func.typeOf(bin_op.lhs); const result: MCValue = if (!is_volatile and func.liveness.isUnused(inst)) .unreach else result: { - const elem_ty = base_ptr_ty.indexablePtrElem(zcu); + const elem_ty = base_ptr_ty.indexableElem(zcu); if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none; const base_ptr_mcv = try func.resolveInst(bin_op.lhs); const base_ptr_lock: ?RegisterLock = switch (base_ptr_mcv) { diff --git a/src/codegen/spirv/CodeGen.zig b/src/codegen/spirv/CodeGen.zig index e6850df250..5217002f22 100644 --- a/src/codegen/spirv/CodeGen.zig +++ b/src/codegen/spirv/CodeGen.zig @@ -821,7 +821,6 @@ fn constant(cg: *CodeGen, ty: Type, val: Value, repr: 
Repr) Error!Id { .undefined, .void, .null, - .empty_tuple, .@"unreachable", => unreachable, // non-runtime values @@ -1150,7 +1149,7 @@ fn constantUavRef( } // const is_fn_body = decl_ty.zigTypeTag(zcu) == .@"fn"; - if (!uav_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) { + if (!uav_ty.hasRuntimeBits(zcu)) { // Pointer to nothing - return undefined return cg.module.constUndef(ty_id); } @@ -1196,7 +1195,7 @@ fn constantNavRef(cg: *CodeGen, ty: Type, nav_index: InternPool.Nav.Index) !Id { }, } - if (!nav_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) { + if (!nav_ty.hasRuntimeBits(zcu)) { // Pointer to nothing - return undefined. return cg.module.constUndef(ty_id); } @@ -4381,7 +4380,7 @@ fn airSliceElemVal(cg: *CodeGen, inst: Air.Inst.Index) !?Id { fn ptrElemPtr(cg: *CodeGen, ptr_ty: Type, ptr_id: Id, index_id: Id) !Id { const zcu = cg.module.zcu; // Construct new pointer type for the resulting pointer - const elem_ty = ptr_ty.indexablePtrElem(zcu); + const elem_ty = ptr_ty.indexableElem(zcu); const elem_ty_id = try cg.resolveType(elem_ty, .indirect); const elem_ptr_ty_id = try cg.module.ptrType(elem_ty_id, cg.module.storageClass(ptr_ty.ptrAddressSpace(zcu))); if (ptr_ty.isSinglePointer(zcu)) { @@ -5028,7 +5027,7 @@ fn lowerBlock(cg: *CodeGen, inst: Air.Inst.Index, body: []const Air.Inst.Index) const gpa = cg.module.gpa; const zcu = cg.module.zcu; const ty = cg.typeOfIndex(inst); - const have_block_result = ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu); + const have_block_result = ty.hasRuntimeBits(zcu); const cf = switch (cg.control_flow) { .structured => |*cf| cf, @@ -5166,7 +5165,7 @@ fn airBr(cg: *CodeGen, inst: Air.Inst.Index) !void { switch (cg.control_flow) { .structured => |*cf| { - if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) { + if (operand_ty.hasRuntimeBits(zcu)) { const operand_id = try cg.resolve(br.operand); const block_result_var_id = cf.block_results.get(br.block_inst).?; try cg.store(operand_ty, block_result_var_id, operand_id, .{}); @@ -5177,7 
+5176,7 @@ fn airBr(cg: *CodeGen, inst: Air.Inst.Index) !void { }, .unstructured => |cf| { const block = cf.blocks.get(br.block_inst).?; - if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) { + if (operand_ty.hasRuntimeBits(zcu)) { const operand_id = try cg.resolve(br.operand); // block_label should not be undefined here, lest there // is a br or br_void in the function's body. diff --git a/src/codegen/wasm/CodeGen.zig b/src/codegen/wasm/CodeGen.zig index 90de3461ca..955e9b51d3 100644 --- a/src/codegen/wasm/CodeGen.zig +++ b/src/codegen/wasm/CodeGen.zig @@ -2099,7 +2099,7 @@ fn airRetPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void { const child_type = cg.typeOfIndex(inst).childType(zcu); const result = result: { - if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) { + if (!child_type.hasRuntimeBits(zcu)) { break :result try cg.allocStack(Type.usize); // create pointer to void } @@ -3161,7 +3161,6 @@ fn lowerConstant(cg: *CodeGen, val: Value, ty: Type) InnerError!WValue { .undefined, .void, .null, - .empty_tuple, .@"unreachable", => unreachable, // non-runtime values .false, .true => return .{ .imm32 = switch (simple_value) { diff --git a/src/codegen/x86_64/CodeGen.zig b/src/codegen/x86_64/CodeGen.zig index 872404b715..a9898398d7 100644 --- a/src/codegen/x86_64/CodeGen.zig +++ b/src/codegen/x86_64/CodeGen.zig @@ -104121,7 +104121,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void { }, .slice_elem_val, .ptr_elem_val => { const bin_op = air_datas[@intFromEnum(inst)].bin_op; - const res_ty = cg.typeOf(bin_op.lhs).indexablePtrElem(zcu); + const res_ty = cg.typeOf(bin_op.lhs).indexableElem(zcu); var ops = try cg.tempsFromOperands(inst, .{ bin_op.lhs, bin_op.rhs }); try ops[0].toSlicePtr(cg); var res: [1]Temp = undefined; @@ -188179,8 +188179,8 @@ const Select = struct { .signed => false, .unsigned => size.bitSize(cg.target) >= int_info.bits, } else false, - .elem_size_is => |size| size == ty.indexablePtrElem(zcu).abiSize(zcu), 
- .po2_elem_size => std.math.isPowerOfTwo(ty.indexablePtrElem(zcu).abiSize(zcu)), + .elem_size_is => |size| size == ty.indexableElem(zcu).abiSize(zcu), + .po2_elem_size => std.math.isPowerOfTwo(ty.indexableElem(zcu).abiSize(zcu)), }; } }; @@ -189941,9 +189941,9 @@ const Select = struct { op.flags.base.ref.typeOf(s).scalarType(s.cg.pt.zcu).abiSize(s.cg.pt.zcu), @divExact(op.flags.base.size.bitSize(s.cg.target), 8), )), - .elem_size => @intCast(op.flags.base.ref.typeOf(s).scalarType(s.cg.pt.zcu).abiSize(s.cg.pt.zcu)), - .src0_elem_size => @intCast(Select.Operand.Ref.src0.typeOf(s).scalarType(s.cg.pt.zcu).abiSize(s.cg.pt.zcu)), - .dst0_elem_size => @intCast(Select.Operand.Ref.dst0.typeOf(s).scalarType(s.cg.pt.zcu).abiSize(s.cg.pt.zcu)), + .elem_size => @intCast(op.flags.base.ref.typeOf(s).indexableElem(s.cg.pt.zcu).abiSize(s.cg.pt.zcu)), + .src0_elem_size => @intCast(Select.Operand.Ref.src0.typeOf(s).indexableElem(s.cg.pt.zcu).abiSize(s.cg.pt.zcu)), + .dst0_elem_size => @intCast(Select.Operand.Ref.dst0.typeOf(s).indexableElem(s.cg.pt.zcu).abiSize(s.cg.pt.zcu)), .src0_elem_size_mul_src1 => @intCast(Select.Operand.Ref.src0.typeOf(s).indexableElem(s.cg.pt.zcu).abiSize(s.cg.pt.zcu) * Select.Operand.Ref.src1.valueOf(s).immediate), .vector_index => switch (op.flags.base.ref.typeOf(s).ptrInfo(s.cg.pt.zcu).flags.vector_index) { @@ -189953,7 +189953,7 @@ const Select = struct { .src1 => @intCast(Select.Operand.Ref.src1.valueOf(s).immediate), .src1_sub_bit_size => @as(SignedImm, @intCast(Select.Operand.Ref.src1.valueOf(s).immediate)) - @as(SignedImm, @intCast(s.cg.nonBoolScalarBitSize(op.flags.base.ref.typeOf(s)))), - .log2_src0_elem_size => @intCast(std.math.log2(Select.Operand.Ref.src0.typeOf(s).scalarType(s.cg.pt.zcu).abiSize(s.cg.pt.zcu))), + .log2_src0_elem_size => @intCast(std.math.log2(Select.Operand.Ref.src0.typeOf(s).indexableElem(s.cg.pt.zcu).abiSize(s.cg.pt.zcu))), .elem_mask => @as(u8, std.math.maxInt(u8)) >> @intCast( 8 - 
((s.cg.unalignedSize(op.flags.base.ref.typeOf(s)) - 1) % @divExact(op.flags.base.size.bitSize(s.cg.target), 8) + 1 >> diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 03b757f5b4..7440711574 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1552,7 +1552,7 @@ fn updateNavInner(coff: *Coff, pt: Zcu.PerThread, nav_index: InternPool.Nav.Inde const sec_si = try coff.navSection(zcu, nav.status.fully_resolved); try coff.nodes.ensureUnusedCapacity(gpa, 1); const ni = try coff.mf.addLastChildNode(gpa, sec_si.node(coff), .{ - .alignment = pt.navAlignment(nav_index).toStdMem(), + .alignment = zcu.navAlignment(nav_index).toStdMem(), .moved = true, }); coff.nodes.appendAssumeCapacity(.{ .nav = nmi }); diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 588b4e3fc3..96db16681c 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -1479,7 +1479,7 @@ fn updateTlv( log.debug("updateTlv {f}({d})", .{ nav.fqn.fmt(ip), nav_index }); - const required_alignment = pt.navAlignment(nav_index); + const required_alignment = zcu.navAlignment(nav_index); const sym = self.symbol(sym_index); const esym = &self.symtab.items(.elf_sym)[sym.esym_index]; diff --git a/src/link/Elf2.zig b/src/link/Elf2.zig index 81e6c23af8..bab805af76 100644 --- a/src/link/Elf2.zig +++ b/src/link/Elf2.zig @@ -2906,7 +2906,7 @@ fn updateNavInner(elf: *Elf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) try elf.nodes.ensureUnusedCapacity(gpa, 1); const sec_si = elf.navSection(ip, nav.status.fully_resolved); const ni = try elf.mf.addLastChildNode(gpa, sec_si.node(elf), .{ - .alignment = pt.navAlignment(nav_index).toStdMem(), + .alignment = zcu.navAlignment(nav_index).toStdMem(), .moved = true, }); elf.nodes.appendAssumeCapacity(.{ .nav = nmi }); diff --git a/src/link/MachO/ZigObject.zig b/src/link/MachO/ZigObject.zig index 49555c2746..fc3ac0fca8 100644 --- a/src/link/MachO/ZigObject.zig +++ b/src/link/MachO/ZigObject.zig @@ -925,7 +925,7 @@ pub fn updateNav( 
const sect_index = try self.getNavOutputSection(macho_file, zcu, nav_index, code); if (isThreadlocal(macho_file, nav_index)) - try self.updateTlv(macho_file, pt, nav_index, sym_index, sect_index, code) + try self.updateTlv(macho_file, zcu, nav_index, sym_index, sect_index, code) else try self.updateNavCode(macho_file, pt, nav_index, sym_index, sect_index, code); @@ -1030,13 +1030,13 @@ fn updateNavCode( fn updateTlv( self: *ZigObject, macho_file: *MachO, - pt: Zcu.PerThread, + zcu: *Zcu, nav_index: InternPool.Nav.Index, sym_index: Symbol.Index, sect_index: u8, code: []const u8, ) !void { - const ip = &pt.zcu.intern_pool; + const ip = &zcu.intern_pool; const nav = ip.getNav(nav_index); log.debug("updateTlv {f} (0x{x})", .{ nav.fqn.fmt(ip), nav_index }); @@ -1045,7 +1045,7 @@ fn updateTlv( const init_sym_index = try self.createTlvInitializer( macho_file, nav.fqn.toSlice(ip), - pt.navAlignment(nav_index), + zcu.navAlignment(nav_index), sect_index, code, ); diff --git a/src/print_value.zig b/src/print_value.zig index e58288a16a..5b29bb04d6 100644 --- a/src/print_value.zig +++ b/src/print_value.zig @@ -72,8 +72,13 @@ pub fn print( .undef => try writer.writeAll("undefined"), .simple_value => |simple_value| switch (simple_value) { .void => try writer.writeAll("{}"), - .empty_tuple => try writer.writeAll(".{}"), - else => try writer.writeAll(@tagName(simple_value)), + + .undefined, + .null, + .true, + .false, + .@"unreachable", + => try writer.writeAll(@tagName(simple_value)), }, .variable => try writer.writeAll("(variable)"), .@"extern" => |e| try writer.print("(extern '{f}')", .{e.name.fmt(ip)}), @@ -248,17 +253,26 @@ fn printAggregate( const len = ty.arrayLen(zcu); if (is_ref) try writer.writeByte('&'); - try writer.writeAll(".{ "); - - const max_len = @min(len, max_aggregate_items); - for (0..max_len) |i| { - if (i != 0) try writer.writeAll(", "); - try print(try val.fieldValue(pt, i), writer, level - 1, pt, opt_sema); + switch (len) { + 0 => try 
writer.writeAll(".{}"), + 1 => { + try writer.writeAll(".{"); + try print(try val.fieldValue(pt, 0), writer, level - 1, pt, opt_sema); + try writer.writeByte('}'); + }, + else => { + try writer.writeAll(".{ "); + const max_len = @min(len, max_aggregate_items); + for (0..max_len) |i| { + if (i != 0) try writer.writeAll(", "); + try print(try val.fieldValue(pt, i), writer, level - 1, pt, opt_sema); + } + if (len > max_aggregate_items) { + try writer.writeAll(", ..."); + } + try writer.writeAll(" }"); + }, } - if (len > max_aggregate_items) { - try writer.writeAll(", ..."); - } - return writer.writeAll(" }"); } fn printPtr( diff --git a/src/print_zir.zig b/src/print_zir.zig index cd7d18351c..34c816fad6 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -1439,10 +1439,10 @@ const Writer = struct { try stream.print("{s}, ", .{@tagName(struct_decl.name_strategy)}); - if (struct_decl.backing_int_type != .none) { + if (struct_decl.backing_int_type_body) |backing_int_type_body| { assert(struct_decl.layout == .@"packed"); try stream.writeAll("packed("); - try self.writeInstRef(stream, struct_decl.backing_int_type); + try self.writeBracedDecl(stream, backing_int_type_body); try stream.writeAll("), "); } else { try stream.print("{s}, ", .{@tagName(struct_decl.layout)}); @@ -1507,18 +1507,18 @@ const Writer = struct { .@"packed" => try stream.writeAll("packed, "), .packed_explicit => { try stream.writeAll("packed("); - try self.writeInstRef(stream, union_decl.arg_type); + try self.writeBracedDecl(stream, union_decl.arg_type_body.?); try stream.writeAll("), "); }, .tagged_explicit => { - try stream.writeAll("auto("); - try self.writeInstRef(stream, union_decl.arg_type); + try stream.writeAll("tagged("); + try self.writeBracedDecl(stream, union_decl.arg_type_body.?); try stream.writeAll("), "); }, - .tagged_enum => try stream.writeAll("auto(enum)"), + .tagged_enum => try stream.writeAll("tagged(enum), "), .tagged_enum_explicit => { - try stream.writeAll("auto(enum("); - 
try self.writeInstRef(stream, union_decl.arg_type); + try stream.writeAll("tagged(enum("); + try self.writeBracedDecl(stream, union_decl.arg_type_body.?); try stream.writeAll(")), "); }, } @@ -1577,7 +1577,11 @@ const Writer = struct { try stream.print("{s}, ", .{@tagName(enum_decl.name_strategy)}); try self.writeFlag(stream, "nonexhaustive, ", enum_decl.nonexhaustive); - try self.writeInstRef(stream, enum_decl.tag_type); + if (enum_decl.tag_type_body) |tag_type_body| { + try stream.writeAll("tag("); + try self.writeBracedDecl(stream, tag_type_body); + try stream.writeAll("), "); + } try self.writeCaptures(stream, enum_decl.captures, enum_decl.capture_names); try stream.writeAll(", "); @@ -1585,9 +1589,9 @@ const Writer = struct { try stream.writeAll(", "); if (enum_decl.field_names.len == 0) { - try stream.writeAll(", {}) "); + try stream.writeAll("{}) "); } else { - try stream.writeAll(", {\n"); + try stream.writeAll("{\n"); self.indent += 2; var it = enum_decl.iterateFields();