Merge pull request #25154 from ziglang/no-decl-val-3

rework byval ZIR instructions; forbid runtime vector indexes
This commit is contained in:
Andrew Kelley 2025-09-21 01:49:28 -07:00 committed by GitHub
commit 010d9a63f2
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
58 changed files with 937 additions and 1608 deletions

View file

@ -8,7 +8,7 @@ const S = packed struct {
test "overaligned pointer to packed struct" {
var foo: S align(4) = .{ .a = 1, .b = 2 };
const ptr: *align(4) S = &foo;
const ptr_to_b: *u32 = &ptr.b;
const ptr_to_b = &ptr.b;
try expect(ptr_to_b.* == 2);
}

View file

@ -1370,19 +1370,12 @@ pub fn printValue(
},
.array => {
if (!is_any) @compileError("cannot format array without a specifier (i.e. {s} or {any})");
if (max_depth == 0) return w.writeAll("{ ... }");
try w.writeAll("{ ");
for (value, 0..) |elem, i| {
try w.printValue(fmt, options, elem, max_depth - 1);
if (i < value.len - 1) {
try w.writeAll(", ");
}
}
try w.writeAll(" }");
return printArray(w, fmt, options, &value, max_depth);
},
.vector => {
.vector => |vector| {
if (!is_any and fmt.len != 0) invalidFmtError(fmt, value);
return printVector(w, fmt, options, value, max_depth);
const array: [vector.len]vector.child = value;
return printArray(w, fmt, options, &array, max_depth);
},
.@"fn" => @compileError("unable to format function body type, use '*const " ++ @typeName(T) ++ "' for a function pointer type"),
.type => {
@ -1436,12 +1429,25 @@ pub fn printVector(
value: anytype,
max_depth: usize,
) Error!void {
const len = @typeInfo(@TypeOf(value)).vector.len;
const vector = @typeInfo(@TypeOf(value)).vector;
const array: [vector.len]vector.child = value;
return printArray(w, fmt, options, &array, max_depth);
}
pub fn printArray(
w: *Writer,
comptime fmt: []const u8,
options: std.fmt.Options,
ptr_to_array: anytype,
max_depth: usize,
) Error!void {
if (max_depth == 0) return w.writeAll("{ ... }");
try w.writeAll("{ ");
inline for (0..len) |i| {
try w.printValue(fmt, options, value[i], max_depth - 1);
if (i < len - 1) try w.writeAll(", ");
for (ptr_to_array, 0..) |elem, i| {
try w.printValue(fmt, options, elem, max_depth - 1);
if (i < ptr_to_array.len - 1) {
try w.writeAll(", ");
}
}
try w.writeAll(" }");
}

View file

@ -1248,7 +1248,9 @@ fn computeRedraw(serialized_buffer: *Serialized.Buffer) struct { []u8, usize } {
i += progress_pulsing.len;
} else {
const percent = completed_items * 100 / estimated_total;
i += (std.fmt.bufPrint(buf[i..], @"progress_normal {d}", .{percent}) catch &.{}).len;
if (std.fmt.bufPrint(buf[i..], @"progress_normal {d}", .{percent})) |b| {
i += b.len;
} else |_| {}
}
},
.success => {
@ -1265,7 +1267,9 @@ fn computeRedraw(serialized_buffer: *Serialized.Buffer) struct { []u8, usize } {
i += progress_pulsing_error.len;
} else {
const percent = completed_items * 100 / estimated_total;
i += (std.fmt.bufPrint(buf[i..], @"progress_error {d}", .{percent}) catch &.{}).len;
if (std.fmt.bufPrint(buf[i..], @"progress_error {d}", .{percent})) |b| {
i += b.len;
} else |_| {}
}
},
}
@ -1364,12 +1368,18 @@ fn computeNode(
if (!is_empty_root) {
if (name.len != 0 or estimated_total > 0) {
if (estimated_total > 0) {
i += (std.fmt.bufPrint(buf[i..], "[{d}/{d}] ", .{ completed_items, estimated_total }) catch &.{}).len;
if (std.fmt.bufPrint(buf[i..], "[{d}/{d}] ", .{ completed_items, estimated_total })) |b| {
i += b.len;
} else |_| {}
} else if (completed_items != 0) {
i += (std.fmt.bufPrint(buf[i..], "[{d}] ", .{completed_items}) catch &.{}).len;
if (std.fmt.bufPrint(buf[i..], "[{d}] ", .{completed_items})) |b| {
i += b.len;
} else |_| {}
}
if (name.len != 0) {
i += (std.fmt.bufPrint(buf[i..], "{s}", .{name}) catch &.{}).len;
if (std.fmt.bufPrint(buf[i..], "{s}", .{name})) |b| {
i += b.len;
} else |_| {}
}
}

View file

@ -1187,7 +1187,7 @@ pub const Cpu = struct {
pub const Index = std.math.Log2Int(std.meta.Int(.unsigned, usize_count * @bitSizeOf(usize)));
pub const ShiftInt = std.math.Log2Int(usize);
pub const empty = Set{ .ints = [1]usize{0} ** usize_count };
pub const empty: Set = .{ .ints = @splat(0) };
pub fn isEmpty(set: Set) bool {
return for (set.ints) |x| {

View file

@ -1661,6 +1661,11 @@ test "Thread.getCurrentId" {
test "thread local storage" {
if (builtin.single_threaded) return error.SkipZigTest;
if (builtin.cpu.arch == .thumbeb) {
// https://github.com/ziglang/zig/issues/24061
return error.SkipZigTest;
}
const thread1 = try Thread.spawn(.{}, testTls, .{});
const thread2 = try Thread.spawn(.{}, testTls, .{});
try testTls();

View file

@ -215,8 +215,8 @@ fn ChaChaVecImpl(comptime rounds_nb: usize, comptime degree: comptime_int) type
}
}
fn hashToBytes(comptime dm: usize, out: *[64 * dm]u8, x: BlockVec) void {
for (0..dm) |d| {
fn hashToBytes(comptime dm: usize, out: *[64 * dm]u8, x: *const BlockVec) void {
inline for (0..dm) |d| {
for (0..4) |i| {
mem.writeInt(u32, out[64 * d + 16 * i + 0 ..][0..4], x[i][0 + 4 * d], .little);
mem.writeInt(u32, out[64 * d + 16 * i + 4 ..][0..4], x[i][1 + 4 * d], .little);
@ -242,7 +242,7 @@ fn ChaChaVecImpl(comptime rounds_nb: usize, comptime degree: comptime_int) type
while (degree >= d and i + 64 * d <= in.len) : (i += 64 * d) {
chacha20Core(x[0..], ctx);
contextFeedback(&x, ctx);
hashToBytes(d, buf[0 .. 64 * d], x);
hashToBytes(d, buf[0 .. 64 * d], &x);
var xout = out[i..];
const xin = in[i..];
@ -266,7 +266,7 @@ fn ChaChaVecImpl(comptime rounds_nb: usize, comptime degree: comptime_int) type
if (i < in.len) {
chacha20Core(x[0..], ctx);
contextFeedback(&x, ctx);
hashToBytes(1, buf[0..64], x);
hashToBytes(1, buf[0..64], &x);
var xout = out[i..];
const xin = in[i..];
@ -284,7 +284,7 @@ fn ChaChaVecImpl(comptime rounds_nb: usize, comptime degree: comptime_int) type
while (degree >= d and i + 64 * d <= out.len) : (i += 64 * d) {
chacha20Core(x[0..], ctx);
contextFeedback(&x, ctx);
hashToBytes(d, out[i..][0 .. 64 * d], x);
hashToBytes(d, out[i..][0 .. 64 * d], &x);
inline for (0..d) |d_| {
if (count64) {
const next = @addWithOverflow(ctx[3][4 * d_], d);
@ -301,7 +301,7 @@ fn ChaChaVecImpl(comptime rounds_nb: usize, comptime degree: comptime_int) type
contextFeedback(&x, ctx);
var buf: [64]u8 = undefined;
hashToBytes(1, buf[0..], x);
hashToBytes(1, buf[0..], &x);
@memcpy(out[i..], buf[0 .. out.len - i]);
}
}
@ -394,7 +394,7 @@ fn ChaChaNonVecImpl(comptime rounds_nb: usize) type {
}
}
fn hashToBytes(out: *[64]u8, x: BlockVec) void {
fn hashToBytes(out: *[64]u8, x: *const BlockVec) void {
for (0..4) |i| {
mem.writeInt(u32, out[16 * i + 0 ..][0..4], x[i * 4 + 0], .little);
mem.writeInt(u32, out[16 * i + 4 ..][0..4], x[i * 4 + 1], .little);
@ -417,7 +417,7 @@ fn ChaChaNonVecImpl(comptime rounds_nb: usize) type {
while (i + 64 <= in.len) : (i += 64) {
chacha20Core(x[0..], ctx);
contextFeedback(&x, ctx);
hashToBytes(buf[0..], x);
hashToBytes(buf[0..], &x);
var xout = out[i..];
const xin = in[i..];
@ -438,7 +438,7 @@ fn ChaChaNonVecImpl(comptime rounds_nb: usize) type {
if (i < in.len) {
chacha20Core(x[0..], ctx);
contextFeedback(&x, ctx);
hashToBytes(buf[0..], x);
hashToBytes(buf[0..], &x);
var xout = out[i..];
const xin = in[i..];
@ -455,7 +455,7 @@ fn ChaChaNonVecImpl(comptime rounds_nb: usize) type {
while (i + 64 <= out.len) : (i += 64) {
chacha20Core(x[0..], ctx);
contextFeedback(&x, ctx);
hashToBytes(out[i..][0..64], x);
hashToBytes(out[i..][0..64], &x);
if (count64) {
const next = @addWithOverflow(ctx[12], 1);
ctx[12] = next[0];
@ -469,7 +469,7 @@ fn ChaChaNonVecImpl(comptime rounds_nb: usize) type {
contextFeedback(&x, ctx);
var buf: [64]u8 = undefined;
hashToBytes(buf[0..], x);
hashToBytes(buf[0..], &x);
@memcpy(out[i..], buf[0 .. out.len - i]);
}
}

View file

@ -389,7 +389,7 @@ pub fn innerParse(
switch (try source.peekNextTokenType()) {
.array_begin => {
// Typical array.
return internalParseArray(T, arrayInfo.child, arrayInfo.len, allocator, source, options);
return internalParseArray(T, arrayInfo.child, allocator, source, options);
},
.string => {
if (arrayInfo.child != u8) return error.UnexpectedToken;
@ -440,10 +440,11 @@ pub fn innerParse(
}
},
.vector => |vecInfo| {
.vector => |vector_info| {
switch (try source.peekNextTokenType()) {
.array_begin => {
return internalParseArray(T, vecInfo.child, vecInfo.len, allocator, source, options);
const A = [vector_info.len]vector_info.child;
return try internalParseArray(A, vector_info.child, allocator, source, options);
},
else => return error.UnexpectedToken,
}
@ -519,7 +520,6 @@ pub fn innerParse(
fn internalParseArray(
comptime T: type,
comptime Child: type,
comptime len: comptime_int,
allocator: Allocator,
source: anytype,
options: ParseOptions,
@ -527,9 +527,8 @@ fn internalParseArray(
assert(.array_begin == try source.next());
var r: T = undefined;
var i: usize = 0;
while (i < len) : (i += 1) {
r[i] = try innerParse(Child, allocator, source, options);
for (&r) |*elem| {
elem.* = try innerParse(Child, allocator, source, options);
}
if (.array_end != try source.next()) return error.UnexpectedToken;
@ -569,12 +568,12 @@ pub fn innerParseFromValue(
if (@round(f) != f) return error.InvalidNumber;
if (f > @as(@TypeOf(f), @floatFromInt(std.math.maxInt(T)))) return error.Overflow;
if (f < @as(@TypeOf(f), @floatFromInt(std.math.minInt(T)))) return error.Overflow;
return @as(T, @intFromFloat(f));
return @intFromFloat(f);
},
.integer => |i| {
if (i > std.math.maxInt(T)) return error.Overflow;
if (i < std.math.minInt(T)) return error.Overflow;
return @as(T, @intCast(i));
return @intCast(i);
},
.number_string, .string => |s| {
return sliceToInt(T, s);

View file

@ -742,13 +742,7 @@ pub fn eql(a: anytype, b: @TypeOf(a)) bool {
if (!eql(e, b[i])) return false;
return true;
},
.vector => |info| {
var i: usize = 0;
while (i < info.len) : (i += 1) {
if (!eql(a[i], b[i])) return false;
}
return true;
},
.vector => return @reduce(.And, a == b),
.pointer => |info| {
return switch (info.size) {
.one, .many, .c => a == b,

View file

@ -135,15 +135,9 @@ fn expectEqualInner(comptime T: type, expected: T, actual: T) !void {
.array => |array| try expectEqualSlices(array.child, &expected, &actual),
.vector => |info| {
var i: usize = 0;
while (i < info.len) : (i += 1) {
if (!std.meta.eql(expected[i], actual[i])) {
print("index {d} incorrect. expected {any}, found {any}\n", .{
i, expected[i], actual[i],
});
return error.TestExpectedEqual;
}
}
const expect_array: [info.len]info.child = expected;
const actual_array: [info.len]info.child = actual;
try expectEqualSlices(info.child, &expect_array, &actual_array);
},
.@"struct" => |structType| {
@ -828,8 +822,7 @@ fn expectEqualDeepInner(comptime T: type, expected: T, actual: T) error{TestExpe
print("Vector len not the same, expected {d}, found {d}\n", .{ info.len, @typeInfo(@TypeOf(actual)).vector.len });
return error.TestExpectedEqual;
}
var i: usize = 0;
while (i < info.len) : (i += 1) {
inline for (0..info.len) |i| {
expectEqualDeep(expected[i], actual[i]) catch |e| {
print("index {d} incorrect. expected {any}, found {any}\n", .{
i, expected[i], actual[i],

View file

@ -2728,12 +2728,12 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.elem_ptr,
.elem_val,
.elem_ptr_node,
.elem_val_node,
.elem_ptr_load,
.elem_val_imm,
.field_ptr,
.field_val,
.field_ptr_load,
.field_ptr_named,
.field_val_named,
.field_ptr_named_load,
.func,
.func_inferred,
.func_fancy,
@ -6160,7 +6160,7 @@ fn fieldAccess(
switch (ri.rl) {
.ref, .ref_coerced_ty => return addFieldAccess(.field_ptr, gz, scope, .{ .rl = .ref }, node),
else => {
const access = try addFieldAccess(.field_val, gz, scope, .{ .rl = .none }, node);
const access = try addFieldAccess(.field_ptr_load, gz, scope, .{ .rl = .ref }, node);
return rvalue(gz, ri, access, node);
},
}
@ -6210,14 +6210,14 @@ fn arrayAccess(
},
else => {
const lhs_node, const rhs_node = tree.nodeData(node).node_and_node;
const lhs = try expr(gz, scope, .{ .rl = .none }, lhs_node);
const lhs = try expr(gz, scope, .{ .rl = .ref }, lhs_node);
const cursor = maybeAdvanceSourceCursorToMainToken(gz, node);
const rhs = try expr(gz, scope, .{ .rl = .{ .coerced_ty = .usize_type } }, rhs_node);
try emitDbgStmt(gz, cursor);
return rvalue(gz, ri, try gz.addPlNode(.elem_val_node, node, Zir.Inst.Bin{ .lhs = lhs, .rhs = rhs }), node);
return rvalue(gz, ri, try gz.addPlNode(.elem_ptr_load, node, Zir.Inst.Bin{ .lhs = lhs, .rhs = rhs }), node);
},
}
}
@ -9286,17 +9286,21 @@ fn builtinCall(
return rvalue(gz, ri, result, node);
},
.field => {
if (ri.rl == .ref or ri.rl == .ref_coerced_ty) {
return gz.addPlNode(.field_ptr_named, node, Zir.Inst.FieldNamed{
.lhs = try expr(gz, scope, .{ .rl = .ref }, params[0]),
.field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[1], .field_name),
});
switch (ri.rl) {
.ref, .ref_coerced_ty => {
return gz.addPlNode(.field_ptr_named, node, Zir.Inst.FieldNamed{
.lhs = try expr(gz, scope, .{ .rl = .ref }, params[0]),
.field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[1], .field_name),
});
},
else => {
const result = try gz.addPlNode(.field_ptr_named_load, node, Zir.Inst.FieldNamed{
.lhs = try expr(gz, scope, .{ .rl = .ref }, params[0]),
.field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[1], .field_name),
});
return rvalue(gz, ri, result, node);
},
}
const result = try gz.addPlNode(.field_val_named, node, Zir.Inst.FieldNamed{
.lhs = try expr(gz, scope, .{ .rl = .none }, params[0]),
.field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, params[1], .field_name),
});
return rvalue(gz, ri, result, node);
},
.FieldType => {
const ty_inst = try typeExpr(gz, scope, params[0]);

View file

@ -420,6 +420,7 @@ pub const Inst = struct {
/// is the local's value.
dbg_var_val,
/// Uses a name to identify a Decl and takes a pointer to it.
///
/// Uses the `str_tok` union field.
decl_ref,
/// Uses a name to identify a Decl and uses it as a value.
@ -440,12 +441,17 @@ pub const Inst = struct {
/// Payload is `Bin`.
/// No OOB safety check is emitted.
elem_ptr,
/// Given an array, slice, or pointer, returns the element at the provided index.
/// Given a pointer to an array, slice, or pointer, loads the element
/// at the provided index.
///
/// Uses the `pl_node` union field. AST node is a[b] syntax. Payload is `Bin`.
elem_val_node,
/// Same as `elem_val_node` but used only for for loop.
/// Uses the `pl_node` union field. AST node is the condition of a for loop.
/// Payload is `Bin`.
elem_ptr_load,
/// Given an array, slice, or pointer, returns the element at the
/// provided index.
///
/// Uses the `pl_node` union field. AST node is the condition of a for
/// loop. Payload is `Bin`.
///
/// No OOB safety check is emitted.
elem_val,
/// Same as `elem_val` but takes the index as an immediate value.
@ -472,19 +478,26 @@ pub const Inst = struct {
/// to the named field. The field name is stored in string_bytes. Used by a.b syntax.
/// Uses `pl_node` field. The AST node is the a.b syntax. Payload is Field.
field_ptr,
/// Given a struct or object that contains virtual fields, returns the named field.
/// Given a pointer to a struct or object that contains virtual fields, loads from the
/// named field.
///
/// The field name is stored in string_bytes. Used by a.b syntax.
///
/// This instruction also accepts a pointer.
///
/// Uses `pl_node` field. The AST node is the a.b syntax. Payload is Field.
field_val,
field_ptr_load,
/// Given a pointer to a struct or object that contains virtual fields, returns a pointer
/// to the named field. The field name is a comptime instruction. Used by @field.
/// Uses `pl_node` field. The AST node is the builtin call. Payload is FieldNamed.
field_ptr_named,
/// Given a struct or object that contains virtual fields, returns the named field.
/// Given a pointer to a struct or object that contains virtual fields,
/// loads from the named field.
///
/// The field name is a comptime instruction. Used by @field.
///
/// Uses `pl_node` field. The AST node is the builtin call. Payload is FieldNamed.
field_val_named,
field_ptr_named_load,
/// Returns a function type, or a function instance, depending on whether
/// the body_len is 0. Calling convention is auto.
/// Uses the `pl_node` union field. `payload_index` points to a `Func`.
@ -1138,16 +1151,16 @@ pub const Inst = struct {
.elem_ptr,
.elem_val,
.elem_ptr_node,
.elem_val_node,
.elem_ptr_load,
.elem_val_imm,
.ensure_result_used,
.ensure_result_non_error,
.ensure_err_union_payload_void,
.@"export",
.field_ptr,
.field_val,
.field_ptr_load,
.field_ptr_named,
.field_val_named,
.field_ptr_named_load,
.func,
.func_inferred,
.func_fancy,
@ -1432,12 +1445,12 @@ pub const Inst = struct {
.elem_ptr,
.elem_val,
.elem_ptr_node,
.elem_val_node,
.elem_ptr_load,
.elem_val_imm,
.field_ptr,
.field_val,
.field_ptr_load,
.field_ptr_named,
.field_val_named,
.field_ptr_named_load,
.func,
.func_inferred,
.func_fancy,
@ -1679,7 +1692,7 @@ pub const Inst = struct {
.elem_ptr = .pl_node,
.elem_ptr_node = .pl_node,
.elem_val = .pl_node,
.elem_val_node = .pl_node,
.elem_ptr_load = .pl_node,
.elem_val_imm = .elem_val_imm,
.ensure_result_used = .un_node,
.ensure_result_non_error = .un_node,
@ -1688,9 +1701,9 @@ pub const Inst = struct {
.error_value = .str_tok,
.@"export" = .pl_node,
.field_ptr = .pl_node,
.field_val = .pl_node,
.field_ptr_load = .pl_node,
.field_ptr_named = .pl_node,
.field_val_named = .pl_node,
.field_ptr_named_load = .pl_node,
.func = .pl_node,
.func_inferred = .pl_node,
.func_fancy = .pl_node,
@ -4215,7 +4228,7 @@ fn findTrackableInner(
.div,
.elem_ptr_node,
.elem_ptr,
.elem_val_node,
.elem_ptr_load,
.elem_val,
.elem_val_imm,
.ensure_result_used,
@ -4225,9 +4238,9 @@ fn findTrackableInner(
.error_value,
.@"export",
.field_ptr,
.field_val,
.field_ptr_load,
.field_ptr_named,
.field_val_named,
.field_ptr_named_load,
.import,
.int,
.int_big,

View file

@ -157,13 +157,11 @@ pub fn valueArbitraryDepth(self: *Serializer, val: anytype, options: ValueOption
}
},
.array => {
var container = try self.beginTuple(
.{ .whitespace_style = .{ .fields = val.len } },
);
for (val) |item_val| {
try container.fieldArbitraryDepth(item_val, options);
}
try container.end();
try valueArbitraryDepthArray(self, @TypeOf(val), &val, options);
},
.vector => |vector| {
const array: [vector.len]vector.child = val;
try valueArbitraryDepthArray(self, @TypeOf(array), &array, options);
},
.@"struct" => |@"struct"| if (@"struct".is_tuple) {
var container = try self.beginTuple(
@ -231,20 +229,21 @@ pub fn valueArbitraryDepth(self: *Serializer, val: anytype, options: ValueOption
} else {
try self.writer.writeAll("null");
},
.vector => |vector| {
var container = try self.beginTuple(
.{ .whitespace_style = .{ .fields = vector.len } },
);
for (0..vector.len) |i| {
try container.fieldArbitraryDepth(val[i], options);
}
try container.end();
},
else => comptime unreachable,
}
}
fn valueArbitraryDepthArray(s: *Serializer, comptime A: type, array: *const A, options: ValueOptions) Error!void {
var container = try s.beginTuple(
.{ .whitespace_style = .{ .fields = array.len } },
);
for (array) |elem| {
try container.fieldArbitraryDepth(elem, options);
}
try container.end();
}
/// Serialize an integer.
pub fn int(self: *Serializer, val: anytype) Error!void {
try self.writer.printInt(val, 10, .lower, .{});

View file

@ -430,8 +430,12 @@ pub fn free(gpa: Allocator, value: anytype) void {
.many, .c => comptime unreachable,
}
},
.array => for (value) |item| {
free(gpa, item);
.array => {
freeArray(gpa, @TypeOf(value), &value);
},
.vector => |vector| {
const array: [vector.len]vector.child = value;
freeArray(gpa, @TypeOf(array), &array);
},
.@"struct" => |@"struct"| inline for (@"struct".fields) |field| {
free(gpa, @field(value, field.name));
@ -446,12 +450,15 @@ pub fn free(gpa: Allocator, value: anytype) void {
.optional => if (value) |some| {
free(gpa, some);
},
.vector => |vector| for (0..vector.len) |i| free(gpa, value[i]),
.void => {},
else => comptime unreachable,
}
}
fn freeArray(gpa: Allocator, comptime A: type, array: *const A) void {
for (array) |elem| free(gpa, elem);
}
fn requiresAllocator(T: type) bool {
_ = valid_types;
return switch (@typeInfo(T)) {
@ -521,12 +528,15 @@ const Parser = struct {
else => comptime unreachable,
},
.array => return self.parseArray(T, node),
.vector => |vector| {
const A = [vector.len]vector.child;
return try self.parseArray(A, node);
},
.@"struct" => |@"struct"| if (@"struct".is_tuple)
return self.parseTuple(T, node)
else
return self.parseStruct(T, node),
.@"union" => return self.parseUnion(T, node),
.vector => return self.parseVector(T, node),
else => comptime unreachable,
}
@ -786,6 +796,7 @@ const Parser = struct {
elem.* = try self.parseExpr(array_info.child, nodes.at(@intCast(i)));
}
if (array_info.sentinel()) |s| result[result.len] = s;
return result;
}
@ -998,37 +1009,6 @@ const Parser = struct {
}
}
fn parseVector(
self: *@This(),
T: type,
node: Zoir.Node.Index,
) !T {
const vector_info = @typeInfo(T).vector;
const nodes: Zoir.Node.Index.Range = switch (node.get(self.zoir)) {
.array_literal => |nodes| nodes,
.empty_literal => .{ .start = node, .len = 0 },
else => return error.WrongType,
};
var result: T = undefined;
if (nodes.len != vector_info.len) {
return self.failNodeFmt(
node,
"expected {} vector elements; found {}",
.{ vector_info.len, nodes.len },
);
}
for (0..vector_info.len) |i| {
errdefer for (0..i) |j| free(self.gpa, result[j]);
result[i] = try self.parseExpr(vector_info.child, nodes.at(@intCast(i)));
}
return result;
}
fn failTokenFmt(
self: @This(),
token: Ast.TokenIndex,
@ -3209,7 +3189,7 @@ test "std.zon vector" {
fromSlice(@Vector(2, f32), gpa, ".{0.5}", &diag, .{}),
);
try std.testing.expectFmt(
"1:2: error: expected 2 vector elements; found 1\n",
"1:2: error: expected 2 array elements; found 1\n",
"{f}",
.{diag},
);
@ -3224,7 +3204,7 @@ test "std.zon vector" {
fromSlice(@Vector(2, f32), gpa, ".{0.5, 1.5, 2.5}", &diag, .{}),
);
try std.testing.expectFmt(
"1:2: error: expected 2 vector elements; found 3\n",
"1:13: error: index 2 outside of array of length 2\n",
"{f}",
.{diag},
);

View file

@ -166,19 +166,25 @@ pub const Inst = struct {
mod,
/// Same as `mod` with optimized float mode.
mod_optimized,
/// Add an offset to a pointer, returning a new pointer.
/// The offset is in element type units, not bytes.
/// Wrapping is illegal behavior.
/// The lhs is the pointer, rhs is the offset. Result type is the same as lhs.
/// The pointer may be a slice.
/// Uses the `ty_pl` field. Payload is `Bin`.
/// Add an offset, in element type units, to a pointer, returning a new
/// pointer. Element type may not be zero bits.
///
/// Wrapping is illegal behavior. If the newly computed address is
/// outside the provenance of the operand, the result is undefined.
///
/// Uses the `ty_pl` field. Payload is `Bin`. The lhs is the pointer,
/// rhs is the offset. Result type is the same as lhs. The operand may
/// be a slice.
ptr_add,
/// Subtract an offset from a pointer, returning a new pointer.
/// The offset is in element type units, not bytes.
/// Wrapping is illegal behavior.
/// The lhs is the pointer, rhs is the offset. Result type is the same as lhs.
/// The pointer may be a slice.
/// Uses the `ty_pl` field. Payload is `Bin`.
/// Subtract an offset, in element type units, from a pointer,
/// returning a new pointer. Element type may not be zero bits.
///
/// Wrapping is illegal behavior. If the newly computed address is
/// outside the provenance of the operand, the result is undefined.
///
/// Uses the `ty_pl` field. Payload is `Bin`. The lhs is the pointer,
/// rhs is the offset. Result type is the same as lhs. The operand may
/// be a slice.
ptr_sub,
/// Given two operands which can be floats, integers, or vectors, returns the
/// greater of the operands. For vectors it operates element-wise.

View file

@ -2682,12 +2682,10 @@ const Block = struct {
},
.@"packed" => switch (agg_ty.zigTypeTag(zcu)) {
else => unreachable,
.@"struct" => switch (agg_ty.packedStructFieldPtrInfo(agg_ptr_ty, @intCast(field_index), pt)) {
.bit_ptr => |packed_offset| {
field_ptr_info.packed_offset = packed_offset;
break :field_ptr_align agg_ptr_align;
},
.byte_ptr => |ptr_info| ptr_info.alignment,
.@"struct" => {
const packed_offset = agg_ty.packedStructFieldPtrInfo(agg_ptr_ty, @intCast(field_index), pt);
field_ptr_info.packed_offset = packed_offset;
break :field_ptr_align agg_ptr_align;
},
.@"union" => {
field_ptr_info.packed_offset = .{

View file

@ -207,501 +207,6 @@ pub fn operandDies(l: Liveness, inst: Air.Inst.Index, operand: OperandInt) bool
return (l.tomb_bits[usize_index] & mask) != 0;
}
const OperandCategory = enum {
/// The operand lives on, but this instruction cannot possibly mutate memory.
none,
/// The operand lives on and this instruction can mutate memory.
write,
/// The operand dies at this instruction.
tomb,
/// The operand lives on, and this instruction is noreturn.
noret,
/// This instruction is too complicated for analysis, no information is available.
complex,
};
/// Given an instruction that we are examining, and an operand that we are looking for,
/// returns a classification.
pub fn categorizeOperand(
l: Liveness,
air: Air,
zcu: *Zcu,
inst: Air.Inst.Index,
operand: Air.Inst.Index,
ip: *const InternPool,
) OperandCategory {
const air_tags = air.instructions.items(.tag);
const air_datas = air.instructions.items(.data);
const operand_ref = operand.toRef();
switch (air_tags[@intFromEnum(inst)]) {
.add,
.add_safe,
.add_wrap,
.add_sat,
.add_optimized,
.sub,
.sub_safe,
.sub_wrap,
.sub_sat,
.sub_optimized,
.mul,
.mul_safe,
.mul_wrap,
.mul_sat,
.mul_optimized,
.div_float,
.div_trunc,
.div_floor,
.div_exact,
.rem,
.mod,
.bit_and,
.bit_or,
.xor,
.cmp_lt,
.cmp_lte,
.cmp_eq,
.cmp_gte,
.cmp_gt,
.cmp_neq,
.bool_and,
.bool_or,
.array_elem_val,
.slice_elem_val,
.ptr_elem_val,
.shl,
.shl_exact,
.shl_sat,
.shr,
.shr_exact,
.min,
.max,
.div_float_optimized,
.div_trunc_optimized,
.div_floor_optimized,
.div_exact_optimized,
.rem_optimized,
.mod_optimized,
.neg_optimized,
.cmp_lt_optimized,
.cmp_lte_optimized,
.cmp_eq_optimized,
.cmp_gte_optimized,
.cmp_gt_optimized,
.cmp_neq_optimized,
=> {
const o = air_datas[@intFromEnum(inst)].bin_op;
if (o.lhs == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
if (o.rhs == operand_ref) return matchOperandSmallIndex(l, inst, 1, .none);
return .none;
},
.store,
.store_safe,
.atomic_store_unordered,
.atomic_store_monotonic,
.atomic_store_release,
.atomic_store_seq_cst,
.set_union_tag,
.memset,
.memset_safe,
.memcpy,
.memmove,
=> {
const o = air_datas[@intFromEnum(inst)].bin_op;
if (o.lhs == operand_ref) return matchOperandSmallIndex(l, inst, 0, .write);
if (o.rhs == operand_ref) return matchOperandSmallIndex(l, inst, 1, .write);
return .write;
},
.vector_store_elem => {
const o = air_datas[@intFromEnum(inst)].vector_store_elem;
const extra = air.extraData(Air.Bin, o.payload).data;
if (o.vector_ptr == operand_ref) return matchOperandSmallIndex(l, inst, 0, .write);
if (extra.lhs == operand_ref) return matchOperandSmallIndex(l, inst, 1, .none);
if (extra.rhs == operand_ref) return matchOperandSmallIndex(l, inst, 2, .none);
return .write;
},
.arg,
.alloc,
.inferred_alloc,
.inferred_alloc_comptime,
.ret_ptr,
.trap,
.breakpoint,
.repeat,
.switch_dispatch,
.dbg_stmt,
.dbg_empty_stmt,
.unreach,
.ret_addr,
.frame_addr,
.wasm_memory_size,
.err_return_trace,
.save_err_return_trace_index,
.runtime_nav_ptr,
.c_va_start,
.work_item_id,
.work_group_size,
.work_group_id,
=> return .none,
.not,
.bitcast,
.load,
.fpext,
.fptrunc,
.intcast,
.intcast_safe,
.trunc,
.optional_payload,
.optional_payload_ptr,
.wrap_optional,
.unwrap_errunion_payload,
.unwrap_errunion_err,
.unwrap_errunion_payload_ptr,
.unwrap_errunion_err_ptr,
.wrap_errunion_payload,
.wrap_errunion_err,
.slice_ptr,
.slice_len,
.ptr_slice_len_ptr,
.ptr_slice_ptr_ptr,
.struct_field_ptr_index_0,
.struct_field_ptr_index_1,
.struct_field_ptr_index_2,
.struct_field_ptr_index_3,
.array_to_slice,
.int_from_float,
.int_from_float_optimized,
.int_from_float_safe,
.int_from_float_optimized_safe,
.float_from_int,
.get_union_tag,
.clz,
.ctz,
.popcount,
.byte_swap,
.bit_reverse,
.splat,
.error_set_has_value,
.addrspace_cast,
.c_va_arg,
.c_va_copy,
.abs,
=> {
const o = air_datas[@intFromEnum(inst)].ty_op;
if (o.operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
return .none;
},
.optional_payload_ptr_set,
.errunion_payload_ptr_set,
=> {
const o = air_datas[@intFromEnum(inst)].ty_op;
if (o.operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .write);
return .write;
},
.is_null,
.is_non_null,
.is_null_ptr,
.is_non_null_ptr,
.is_err,
.is_non_err,
.is_err_ptr,
.is_non_err_ptr,
.is_named_enum_value,
.tag_name,
.error_name,
.sqrt,
.sin,
.cos,
.tan,
.exp,
.exp2,
.log,
.log2,
.log10,
.floor,
.ceil,
.round,
.trunc_float,
.neg,
.cmp_lt_errors_len,
.c_va_end,
=> {
const o = air_datas[@intFromEnum(inst)].un_op;
if (o == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
return .none;
},
.ret,
.ret_safe,
.ret_load,
=> {
const o = air_datas[@intFromEnum(inst)].un_op;
if (o == operand_ref) return matchOperandSmallIndex(l, inst, 0, .noret);
return .noret;
},
.set_err_return_trace => {
const o = air_datas[@intFromEnum(inst)].un_op;
if (o == operand_ref) return matchOperandSmallIndex(l, inst, 0, .write);
return .write;
},
.add_with_overflow,
.sub_with_overflow,
.mul_with_overflow,
.shl_with_overflow,
.ptr_add,
.ptr_sub,
.ptr_elem_ptr,
.slice_elem_ptr,
.slice,
=> {
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
const extra = air.extraData(Air.Bin, ty_pl.payload).data;
if (extra.lhs == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
if (extra.rhs == operand_ref) return matchOperandSmallIndex(l, inst, 1, .none);
return .none;
},
.dbg_var_ptr,
.dbg_var_val,
.dbg_arg_inline,
=> {
const o = air_datas[@intFromEnum(inst)].pl_op.operand;
if (o == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
return .none;
},
.prefetch => {
const prefetch = air_datas[@intFromEnum(inst)].prefetch;
if (prefetch.ptr == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
return .none;
},
.call, .call_always_tail, .call_never_tail, .call_never_inline => {
const inst_data = air_datas[@intFromEnum(inst)].pl_op;
const callee = inst_data.operand;
const extra = air.extraData(Air.Call, inst_data.payload);
const args = @as([]const Air.Inst.Ref, @ptrCast(air.extra.items[extra.end..][0..extra.data.args_len]));
if (args.len + 1 <= bpi - 1) {
if (callee == operand_ref) return matchOperandSmallIndex(l, inst, 0, .write);
for (args, 0..) |arg, i| {
if (arg == operand_ref) return matchOperandSmallIndex(l, inst, @as(OperandInt, @intCast(i + 1)), .write);
}
return .write;
}
var bt = l.iterateBigTomb(inst);
if (bt.feed()) {
if (callee == operand_ref) return .tomb;
} else {
if (callee == operand_ref) return .write;
}
for (args) |arg| {
if (bt.feed()) {
if (arg == operand_ref) return .tomb;
} else {
if (arg == operand_ref) return .write;
}
}
return .write;
},
.select => {
const pl_op = air_datas[@intFromEnum(inst)].pl_op;
const extra = air.extraData(Air.Bin, pl_op.payload).data;
if (pl_op.operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
if (extra.lhs == operand_ref) return matchOperandSmallIndex(l, inst, 1, .none);
if (extra.rhs == operand_ref) return matchOperandSmallIndex(l, inst, 2, .none);
return .none;
},
.shuffle_one => {
const unwrapped = air.unwrapShuffleOne(zcu, inst);
if (unwrapped.operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
return .none;
},
.shuffle_two => {
const unwrapped = air.unwrapShuffleTwo(zcu, inst);
if (unwrapped.operand_a == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
if (unwrapped.operand_b == operand_ref) return matchOperandSmallIndex(l, inst, 1, .none);
return .none;
},
.reduce, .reduce_optimized => {
const reduce = air_datas[@intFromEnum(inst)].reduce;
if (reduce.operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
return .none;
},
.cmp_vector, .cmp_vector_optimized => {
const extra = air.extraData(Air.VectorCmp, air_datas[@intFromEnum(inst)].ty_pl.payload).data;
if (extra.lhs == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
if (extra.rhs == operand_ref) return matchOperandSmallIndex(l, inst, 1, .none);
return .none;
},
.aggregate_init => {
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
const aggregate_ty = ty_pl.ty.toType();
const len = @as(usize, @intCast(aggregate_ty.arrayLenIp(ip)));
const elements = @as([]const Air.Inst.Ref, @ptrCast(air.extra.items[ty_pl.payload..][0..len]));
if (elements.len <= bpi - 1) {
for (elements, 0..) |elem, i| {
if (elem == operand_ref) return matchOperandSmallIndex(l, inst, @as(OperandInt, @intCast(i)), .none);
}
return .none;
}
var bt = l.iterateBigTomb(inst);
for (elements) |elem| {
if (bt.feed()) {
if (elem == operand_ref) return .tomb;
} else {
if (elem == operand_ref) return .write;
}
}
return .write;
},
.union_init => {
const extra = air.extraData(Air.UnionInit, air_datas[@intFromEnum(inst)].ty_pl.payload).data;
if (extra.init == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
return .none;
},
.struct_field_ptr, .struct_field_val => {
const extra = air.extraData(Air.StructField, air_datas[@intFromEnum(inst)].ty_pl.payload).data;
if (extra.struct_operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
return .none;
},
.field_parent_ptr => {
const extra = air.extraData(Air.FieldParentPtr, air_datas[@intFromEnum(inst)].ty_pl.payload).data;
if (extra.field_ptr == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
return .none;
},
.cmpxchg_strong, .cmpxchg_weak => {
const extra = air.extraData(Air.Cmpxchg, air_datas[@intFromEnum(inst)].ty_pl.payload).data;
if (extra.ptr == operand_ref) return matchOperandSmallIndex(l, inst, 0, .write);
if (extra.expected_value == operand_ref) return matchOperandSmallIndex(l, inst, 1, .write);
if (extra.new_value == operand_ref) return matchOperandSmallIndex(l, inst, 2, .write);
return .write;
},
.mul_add => {
const pl_op = air_datas[@intFromEnum(inst)].pl_op;
const extra = air.extraData(Air.Bin, pl_op.payload).data;
if (extra.lhs == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
if (extra.rhs == operand_ref) return matchOperandSmallIndex(l, inst, 1, .none);
if (pl_op.operand == operand_ref) return matchOperandSmallIndex(l, inst, 2, .none);
return .none;
},
.atomic_load => {
const ptr = air_datas[@intFromEnum(inst)].atomic_load.ptr;
if (ptr == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
return .none;
},
.atomic_rmw => {
const pl_op = air_datas[@intFromEnum(inst)].pl_op;
const extra = air.extraData(Air.AtomicRmw, pl_op.payload).data;
if (pl_op.operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .write);
if (extra.operand == operand_ref) return matchOperandSmallIndex(l, inst, 1, .write);
return .write;
},
.br => {
    // A `br` forwards its operand to the target block and never returns
    // control here, so the default category for this case is `.noret`.
    const br = air_datas[@intFromEnum(inst)].br;
    // Fix: query the death bit against the `br` instruction itself (`inst`),
    // not against the operand's own instruction index. Every sibling case in
    // this switch passes `inst`; passing `operand` type-checks (both are
    // `Air.Inst.Index`) but asks whether the operand dies at the wrong
    // instruction.
    if (br.operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .noret);
    return .noret;
},
.assembly => {
return .complex;
},
.block, .dbg_inline_block => |tag| {
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
const body: []const Air.Inst.Index = @ptrCast(switch (tag) {
inline .block, .dbg_inline_block => |comptime_tag| body: {
const extra = air.extraData(switch (comptime_tag) {
.block => Air.Block,
.dbg_inline_block => Air.DbgInlineBlock,
else => unreachable,
}, ty_pl.payload);
break :body air.extra.items[extra.end..][0..extra.data.body_len];
},
else => unreachable,
});
if (body.len == 1 and air_tags[@intFromEnum(body[0])] == .cond_br) {
// Peephole optimization for "panic-like" conditionals, which have
// one empty branch and another which calls a `noreturn` function.
// This allows us to infer that safety checks do not modify memory,
// as far as control flow successors are concerned.
const inst_data = air_datas[@intFromEnum(body[0])].pl_op;
const cond_extra = air.extraData(Air.CondBr, inst_data.payload);
if (inst_data.operand == operand_ref and operandDies(l, body[0], 0))
return .tomb;
if (cond_extra.data.then_body_len > 2 or cond_extra.data.else_body_len > 2)
return .complex;
const then_body: []const Air.Inst.Index = @ptrCast(air.extra.items[cond_extra.end..][0..cond_extra.data.then_body_len]);
const else_body: []const Air.Inst.Index = @ptrCast(air.extra.items[cond_extra.end + cond_extra.data.then_body_len ..][0..cond_extra.data.else_body_len]);
if (then_body.len > 1 and air_tags[@intFromEnum(then_body[1])] != .unreach)
return .complex;
if (else_body.len > 1 and air_tags[@intFromEnum(else_body[1])] != .unreach)
return .complex;
var operand_live: bool = true;
for (&[_]Air.Inst.Index{ then_body[0], else_body[0] }) |cond_inst| {
if (l.categorizeOperand(air, zcu, cond_inst, operand, ip) == .tomb)
operand_live = false;
switch (air_tags[@intFromEnum(cond_inst)]) {
.br => { // Breaks immediately back to block
const br = air_datas[@intFromEnum(cond_inst)].br;
if (br.block_inst != inst)
return .complex;
},
.call => {}, // Calls a noreturn function
else => return .complex,
}
}
return if (operand_live) .none else .tomb;
}
return .complex;
},
.@"try",
.try_cold,
.try_ptr,
.try_ptr_cold,
.loop,
.cond_br,
.switch_br,
.loop_switch_br,
=> return .complex,
.wasm_memory_grow => {
const pl_op = air_datas[@intFromEnum(inst)].pl_op;
if (pl_op.operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none);
return .none;
},
}
}
/// Categorize one small-index operand of `inst`: if the liveness data says the
/// operand dies at `inst`, it is a tomb; otherwise the caller-supplied
/// `default` category stands.
fn matchOperandSmallIndex(
    l: Liveness,
    inst: Air.Inst.Index,
    operand: OperandInt,
    default: OperandCategory,
) OperandCategory {
    return if (operandDies(l, inst, operand)) .tomb else default;
}
/// Higher level API.
pub const CondBrSlices = struct {
then_deaths: []const Air.Inst.Index,

View file

@ -1193,7 +1193,7 @@ fn analyzeBodyInner(
.elem_ptr => try sema.zirElemPtr(block, inst),
.elem_ptr_node => try sema.zirElemPtrNode(block, inst),
.elem_val => try sema.zirElemVal(block, inst),
.elem_val_node => try sema.zirElemValNode(block, inst),
.elem_ptr_load => try sema.zirElemPtrLoad(block, inst),
.elem_val_imm => try sema.zirElemValImm(block, inst),
.elem_type => try sema.zirElemType(block, inst),
.indexable_ptr_elem_type => try sema.zirIndexablePtrElemType(block, inst),
@ -1211,8 +1211,8 @@ fn analyzeBodyInner(
.error_value => try sema.zirErrorValue(block, inst),
.field_ptr => try sema.zirFieldPtr(block, inst),
.field_ptr_named => try sema.zirFieldPtrNamed(block, inst),
.field_val => try sema.zirFieldVal(block, inst),
.field_val_named => try sema.zirFieldValNamed(block, inst),
.field_ptr_load => try sema.zirFieldPtrLoad(block, inst),
.field_ptr_named_load => try sema.zirFieldPtrNamedLoad(block, inst),
.func => try sema.zirFunc(block, inst, false),
.func_inferred => try sema.zirFunc(block, inst, true),
.func_fancy => try sema.zirFuncFancy(block, inst),
@ -3756,9 +3756,9 @@ fn zirAllocExtended(
const pt = sema.pt;
const gpa = sema.gpa;
const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand);
const var_src = block.nodeOffset(extra.data.src_node);
const ty_src = block.src(.{ .node_offset_var_decl_ty = extra.data.src_node });
const align_src = block.src(.{ .node_offset_var_decl_align = extra.data.src_node });
const init_src = block.src(.{ .node_offset_var_decl_init = extra.data.src_node });
const small: Zir.Inst.AllocExtended.Small = @bitCast(extended.small);
var extra_index: usize = extra.end;
@ -3777,7 +3777,7 @@ fn zirAllocExtended(
if (block.isComptime() or small.is_comptime) {
if (small.has_type) {
return sema.analyzeComptimeAlloc(block, init_src, var_ty, alignment);
return sema.analyzeComptimeAlloc(block, var_src, var_ty, alignment);
} else {
try sema.air_instructions.append(gpa, .{
.tag = .inferred_alloc_comptime,
@ -3792,7 +3792,7 @@ fn zirAllocExtended(
}
if (small.has_type and try var_ty.comptimeOnlySema(pt)) {
return sema.analyzeComptimeAlloc(block, init_src, var_ty, alignment);
return sema.analyzeComptimeAlloc(block, var_src, var_ty, alignment);
}
if (small.has_type) {
@ -3802,8 +3802,8 @@ fn zirAllocExtended(
const target = pt.zcu.getTarget();
try var_ty.resolveLayout(pt);
if (sema.func_is_naked and try var_ty.hasRuntimeBitsSema(pt)) {
const var_src = block.src(.{ .node_offset_store_ptr = extra.data.src_node });
return sema.fail(block, var_src, "local variable in naked function", .{});
const store_src = block.src(.{ .node_offset_store_ptr = extra.data.src_node });
return sema.fail(block, store_src, "local variable in naked function", .{});
}
const ptr_type = try sema.pt.ptrTypeSema(.{
.child = var_ty.toIntern(),
@ -3842,9 +3842,9 @@ fn zirAllocComptime(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const ty_src = block.src(.{ .node_offset_var_decl_ty = inst_data.src_node });
const init_src = block.src(.{ .node_offset_var_decl_init = inst_data.src_node });
const var_src = block.nodeOffset(inst_data.src_node);
const var_ty = try sema.resolveType(block, ty_src, inst_data.operand);
return sema.analyzeComptimeAlloc(block, init_src, var_ty, .none);
return sema.analyzeComptimeAlloc(block, var_src, var_ty, .none);
}
fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@ -4254,11 +4254,11 @@ fn zirAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.I
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const ty_src = block.src(.{ .node_offset_var_decl_ty = inst_data.src_node });
const init_src = block.src(.{ .node_offset_var_decl_init = inst_data.src_node });
const var_src = block.nodeOffset(inst_data.src_node);
const var_ty = try sema.resolveType(block, ty_src, inst_data.operand);
if (block.isComptime() or try var_ty.comptimeOnlySema(pt)) {
return sema.analyzeComptimeAlloc(block, init_src, var_ty, .none);
return sema.analyzeComptimeAlloc(block, var_src, var_ty, .none);
}
if (sema.func_is_naked and try var_ty.hasRuntimeBitsSema(pt)) {
const mut_src = block.src(.{ .node_offset_store_ptr = inst_data.src_node });
@ -4284,14 +4284,14 @@ fn zirAllocMut(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node;
const ty_src = block.src(.{ .node_offset_var_decl_ty = inst_data.src_node });
const init_src = block.src(.{ .node_offset_var_decl_init = inst_data.src_node });
const var_src = block.nodeOffset(inst_data.src_node);
const var_ty = try sema.resolveType(block, ty_src, inst_data.operand);
if (block.isComptime()) {
return sema.analyzeComptimeAlloc(block, init_src, var_ty, .none);
return sema.analyzeComptimeAlloc(block, var_src, var_ty, .none);
}
if (sema.func_is_naked and try var_ty.hasRuntimeBitsSema(pt)) {
const var_src = block.src(.{ .node_offset_store_ptr = inst_data.src_node });
return sema.fail(block, var_src, "local variable in naked function", .{});
const store_src = block.src(.{ .node_offset_store_ptr = inst_data.src_node });
return sema.fail(block, store_src, "local variable in naked function", .{});
}
try sema.validateVarType(block, ty_src, var_ty, false);
const target = pt.zcu.getTarget();
@ -9711,7 +9711,7 @@ fn zirIntFromPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
return block.addBitCast(dest_ty, operand);
}
fn zirFieldVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
fn zirFieldPtrLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@ -9727,8 +9727,8 @@ fn zirFieldVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
sema.code.nullTerminatedString(extra.field_name_start),
.no_embedded_nulls,
);
const object = try sema.resolveInst(extra.lhs);
return sema.fieldVal(block, src, object, field_name, field_name_src);
const object_ptr = try sema.resolveInst(extra.lhs);
return fieldPtrLoad(sema, block, src, object_ptr, field_name, field_name_src);
}
fn zirFieldPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@ -9779,7 +9779,7 @@ fn zirStructInitFieldPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compi
}
}
fn zirFieldValNamed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
fn zirFieldPtrNamedLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@ -9787,9 +9787,9 @@ fn zirFieldValNamed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErr
const src = block.nodeOffset(inst_data.src_node);
const field_name_src = block.builtinCallArgSrc(inst_data.src_node, 1);
const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data;
const object = try sema.resolveInst(extra.lhs);
const object_ptr = try sema.resolveInst(extra.lhs);
const field_name = try sema.resolveConstStringIntern(block, field_name_src, extra.field_name, .{ .simple = .field_name });
return sema.fieldVal(block, src, object, field_name, field_name_src);
return fieldPtrLoad(sema, block, src, object_ptr, field_name, field_name_src);
}
fn zirFieldPtrNamed(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@ -10102,7 +10102,7 @@ fn zirElemVal(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
return sema.elemVal(block, src, array, elem_index, src, false);
}
fn zirElemValNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
fn zirElemPtrLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
@ -10110,10 +10110,18 @@ fn zirElemValNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
const src = block.nodeOffset(inst_data.src_node);
const elem_index_src = block.src(.{ .node_offset_array_access_index = inst_data.src_node });
const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
const array = try sema.resolveInst(extra.lhs);
const array_ptr = try sema.resolveInst(extra.lhs);
const uncoerced_elem_index = try sema.resolveInst(extra.rhs);
if (try sema.resolveDefinedValue(block, src, array_ptr)) |array_ptr_val| {
const array_ptr_ty = sema.typeOf(array_ptr);
if (try sema.pointerDeref(block, src, array_ptr_val, array_ptr_ty)) |array_val| {
const array: Air.Inst.Ref = .fromValue(array_val);
return elemVal(sema, block, src, array, uncoerced_elem_index, elem_index_src, true);
}
}
const elem_index = try sema.coerce(block, .usize, uncoerced_elem_index, elem_index_src);
return sema.elemVal(block, src, array, elem_index, elem_index_src, true);
const elem_ptr = try elemPtr(sema, block, src, array_ptr, elem_index, elem_index_src, false, true);
return analyzeLoad(sema, block, src, elem_ptr, elem_index_src);
}
fn zirElemValImm(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@ -13612,7 +13620,6 @@ fn maybeErrorUnwrap(
.str,
.as_node,
.panic,
.field_val,
=> {},
else => return false,
}
@ -13631,7 +13638,6 @@ fn maybeErrorUnwrap(
},
.str => try sema.zirStr(inst),
.as_node => try sema.zirAsNode(block, inst),
.field_val => try sema.zirFieldVal(block, inst),
.@"unreachable" => {
try safetyPanicUnwrapError(sema, block, operand_src, operand);
return true;
@ -15996,7 +16002,6 @@ fn splat(sema: *Sema, ty: Type, val: Value) !Value {
fn analyzeArithmetic(
sema: *Sema,
block: *Block,
/// TODO performance investigation: make this comptime?
zir_tag: Zir.Inst.Tag,
lhs: Air.Inst.Ref,
rhs: Air.Inst.Ref,
@ -16195,6 +16200,11 @@ fn analyzePtrArithmetic(
const ptr_info = ptr_ty.ptrInfo(zcu);
assert(ptr_info.flags.size == .many or ptr_info.flags.size == .c);
if ((try sema.typeHasOnePossibleValue(.fromInterned(ptr_info.child))) != null) {
// Offset will be multiplied by zero, so result is the same as the base pointer.
return ptr;
}
const new_ptr_ty = t: {
// Calculate the new pointer alignment.
// This code is duplicated in `Type.elemPtrType`.
@ -26673,6 +26683,33 @@ fn emitBackwardBranch(sema: *Sema, block: *Block, src: LazySrcLoc) !void {
}
}
/// Lower a "load field through pointer" operation: given a pointer to an
/// aggregate (`object_ptr`) and a field name, produce the field's value.
///
/// Strategy, in order:
/// 1. If the pointee type has exactly one possible value, materialize that
///    value directly and take the field from it — no load is needed.
/// 2. If `object_ptr` is comptime-known and can be dereferenced at comptime,
///    take the field from the dereferenced value.
/// 3. Otherwise fall back to computing a field pointer and emitting a load.
fn fieldPtrLoad(
sema: *Sema,
block: *Block,
src: LazySrcLoc,
object_ptr: Air.Inst.Ref,
field_name: InternPool.NullTerminatedString,
field_name_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
const pt = sema.pt;
const zcu = pt.zcu;
const object_ptr_ty = sema.typeOf(object_ptr);
const pointee_ty = object_ptr_ty.childType(zcu);
// Case 1: one-possible-value pointee — the object's value is known without
// ever dereferencing the pointer.
if (try typeHasOnePossibleValue(sema, pointee_ty)) |opv| {
const object: Air.Inst.Ref = .fromValue(opv);
return fieldVal(sema, block, src, object, field_name, field_name_src);
}
// Case 2: comptime-known, dereferenceable pointer — load at comptime and
// take the field from the resulting value.
if (try sema.resolveDefinedValue(block, src, object_ptr)) |object_ptr_val| {
if (try sema.pointerDeref(block, src, object_ptr_val, object_ptr_ty)) |object_val| {
const object: Air.Inst.Ref = .fromValue(object_val);
return fieldVal(sema, block, src, object, field_name, field_name_src);
}
}
// Case 3: runtime path — form the field pointer, then load through it.
const field_ptr = try sema.fieldPtr(block, src, object_ptr, field_name, field_name_src, false);
return analyzeLoad(sema, block, src, field_ptr, field_name_src);
}
fn fieldVal(
sema: *Sema,
block: *Block,
@ -26892,7 +26929,7 @@ fn fieldPtr(
const ptr_info = object_ty.ptrInfo(zcu);
const new_ptr_ty = try pt.ptrTypeSema(.{
.child = Type.fromInterned(ptr_info.child).childType(zcu).toIntern(),
.sentinel = if (object_ty.sentinel(zcu)) |s| s.toIntern() else .none,
.sentinel = if (inner_ty.sentinel(zcu)) |s| s.toIntern() else .none,
.flags = .{
.size = .many,
.alignment = ptr_info.flags.alignment,
@ -27420,15 +27457,9 @@ fn structFieldPtrByIndex(
if (struct_type.layout == .@"packed") {
assert(!field_is_comptime);
switch (struct_ty.packedStructFieldPtrInfo(struct_ptr_ty, field_index, pt)) {
.bit_ptr => |packed_offset| {
ptr_ty_data.flags.alignment = parent_align;
ptr_ty_data.packed_offset = packed_offset;
},
.byte_ptr => |ptr_info| {
ptr_ty_data.flags.alignment = ptr_info.alignment;
},
}
const packed_offset = struct_ty.packedStructFieldPtrInfo(struct_ptr_ty, field_index, pt);
ptr_ty_data.flags.alignment = parent_align;
ptr_ty_data.packed_offset = packed_offset;
} else if (struct_type.layout == .@"extern") {
assert(!field_is_comptime);
// For extern structs, field alignment might be bigger than type's
@ -27972,6 +28003,7 @@ fn elemVal(
}
}
/// Called when the index or indexable is runtime known.
fn validateRuntimeElemAccess(
sema: *Sema,
block: *Block,
@ -28236,6 +28268,10 @@ fn elemPtrArray(
try sema.validateRuntimeValue(block, array_ptr_src, array_ptr);
}
if (offset == null and array_ty.zigTypeTag(zcu) == .vector) {
return sema.fail(block, elem_index_src, "vector index not comptime known", .{});
}
// Runtime check is only needed if unable to comptime check.
if (oob_safety and block.wantSafety() and offset == null) {
const len_inst = try pt.intRef(.usize, array_len);
@ -30634,6 +30670,19 @@ fn checkPtrAttributes(sema: *Sema, dest_ty: Type, inst_ty: Type, in_memory_resul
} };
return false;
}
if (inst_info.packed_offset.host_size != dest_info.packed_offset.host_size or
inst_info.packed_offset.bit_offset != dest_info.packed_offset.bit_offset)
{
in_memory_result.* = .{ .ptr_bit_range = .{
.actual_host = inst_info.packed_offset.host_size,
.wanted_host = dest_info.packed_offset.host_size,
.actual_offset = inst_info.packed_offset.bit_offset,
.wanted_offset = dest_info.packed_offset.bit_offset,
} };
return false;
}
return true;
}
@ -31425,19 +31474,6 @@ fn analyzeLoad(
}
}
if (ptr_ty.ptrInfo(zcu).flags.vector_index == .runtime) {
const ptr_inst = ptr.toIndex().?;
const air_tags = sema.air_instructions.items(.tag);
if (air_tags[@intFromEnum(ptr_inst)] == .ptr_elem_ptr) {
const ty_pl = sema.air_instructions.items(.data)[@intFromEnum(ptr_inst)].ty_pl;
const bin_op = sema.getTmpAir().extraData(Air.Bin, ty_pl.payload).data;
return block.addBinOp(.ptr_elem_val, bin_op.lhs, bin_op.rhs);
}
return sema.fail(block, ptr_src, "unable to determine vector element index of type '{f}'", .{
ptr_ty.fmt(pt),
});
}
return block.addTyOp(.load, elem_ty, ptr);
}
@ -34954,7 +34990,7 @@ fn resolveInferredErrorSet(
const resolved_ty = func.resolvedErrorSetUnordered(ip);
if (resolved_ty != .none) return resolved_ty;
if (zcu.analysis_in_progress.contains(AnalUnit.wrap(.{ .func = func_index }))) {
if (zcu.analysis_in_progress.contains(.wrap(.{ .func = func_index }))) {
return sema.fail(block, src, "unable to resolve inferred error set", .{});
}

View file

@ -3514,22 +3514,17 @@ pub fn arrayBase(ty: Type, zcu: *const Zcu) struct { Type, u64 } {
return .{ cur_ty, cur_len };
}
pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx: u32, pt: Zcu.PerThread) union(enum) {
/// The result is a bit-pointer with the same value and a new packed offset.
bit_ptr: InternPool.Key.PtrType.PackedOffset,
/// The result is a standard pointer.
byte_ptr: struct {
/// The byte offset of the field pointer from the parent pointer value.
offset: u64,
/// The alignment of the field pointer type.
alignment: InternPool.Alignment,
},
} {
/// Returns a bit-pointer with the same value and a new packed offset.
pub fn packedStructFieldPtrInfo(
struct_ty: Type,
parent_ptr_ty: Type,
field_idx: u32,
pt: Zcu.PerThread,
) InternPool.Key.PtrType.PackedOffset {
comptime assert(Type.packed_struct_layout_version == 2);
const zcu = pt.zcu;
const parent_ptr_info = parent_ptr_ty.ptrInfo(zcu);
const field_ty = struct_ty.fieldType(field_idx, zcu);
var bit_offset: u16 = 0;
var running_bits: u16 = 0;
@ -3552,28 +3547,10 @@ pub fn packedStructFieldPtrInfo(struct_ty: Type, parent_ptr_ty: Type, field_idx:
bit_offset,
};
// If the field happens to be byte-aligned, simplify the pointer type.
// We can only do this if the pointee's bit size matches its ABI byte size,
// so that loads and stores do not interfere with surrounding packed bits.
//
// TODO: we do not attempt this with big-endian targets yet because of nested
// structs and floats. I need to double-check the desired behavior for big endian
// targets before adding the necessary complications to this code. This will not
// cause miscompilations; it only means the field pointer uses bit masking when it
// might not be strictly necessary.
if (res_bit_offset % 8 == 0 and field_ty.bitSize(zcu) == field_ty.abiSize(zcu) * 8 and zcu.getTarget().cpu.arch.endian() == .little) {
const byte_offset = res_bit_offset / 8;
const new_align = Alignment.fromLog2Units(@ctz(byte_offset | parent_ptr_ty.ptrAlignment(zcu).toByteUnits().?));
return .{ .byte_ptr = .{
.offset = byte_offset,
.alignment = new_align,
} };
}
return .{ .bit_ptr = .{
return .{
.host_size = res_host_size,
.bit_offset = res_bit_offset,
} };
};
}
pub fn resolveLayout(ty: Type, pt: Zcu.PerThread) SemaError!void {

View file

@ -2149,15 +2149,18 @@ pub fn makeBool(x: bool) Value {
return if (x) .true else .false;
}
/// `parent_ptr` must be a single-pointer to some optional.
/// `parent_ptr` must be a single-pointer or C pointer to some optional.
///
/// Returns a pointer to the payload of the optional.
///
/// May perform type resolution.
pub fn ptrOptPayload(parent_ptr: Value, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
const parent_ptr_ty = parent_ptr.typeOf(zcu);
const opt_ty = parent_ptr_ty.childType(zcu);
const ptr_size = parent_ptr_ty.ptrSize(zcu);
assert(parent_ptr_ty.ptrSize(zcu) == .one);
assert(ptr_size == .one or ptr_size == .c);
assert(opt_ty.zigTypeTag(zcu) == .optional);
const result_ty = try pt.ptrTypeSema(info: {
@ -2212,9 +2215,12 @@ pub fn ptrEuPayload(parent_ptr: Value, pt: Zcu.PerThread) !Value {
} }));
}
/// `parent_ptr` must be a single-pointer to a struct, union, or slice.
/// `parent_ptr` must be a single-pointer or c pointer to a struct, union, or slice.
///
/// Returns a pointer to the aggregate field at the specified index.
///
/// For slices, uses `slice_ptr_index` and `slice_len_index`.
///
/// May perform type resolution.
pub fn ptrField(parent_ptr: Value, field_idx: u32, pt: Zcu.PerThread) !Value {
const zcu = pt.zcu;
@ -2222,7 +2228,7 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, pt: Zcu.PerThread) !Value {
const aggregate_ty = parent_ptr_ty.childType(zcu);
const parent_ptr_info = parent_ptr_ty.ptrInfo(zcu);
assert(parent_ptr_info.flags.size == .one);
assert(parent_ptr_info.flags.size == .one or parent_ptr_info.flags.size == .c);
// Exiting this `switch` indicates that the `field` pointer representation should be used.
// `field_align` may be `.none` to represent the natural alignment of `field_ty`, but is not necessarily.
@ -2249,32 +2255,18 @@ pub fn ptrField(parent_ptr: Value, field_idx: u32, pt: Zcu.PerThread) !Value {
});
return parent_ptr.getOffsetPtr(byte_off, result_ty, pt);
},
.@"packed" => switch (aggregate_ty.packedStructFieldPtrInfo(parent_ptr_ty, field_idx, pt)) {
.bit_ptr => |packed_offset| {
const result_ty = try pt.ptrType(info: {
var new = parent_ptr_info;
new.packed_offset = packed_offset;
new.child = field_ty.toIntern();
if (new.flags.alignment == .none) {
new.flags.alignment = try aggregate_ty.abiAlignmentSema(pt);
}
break :info new;
});
return pt.getCoerced(parent_ptr, result_ty);
},
.byte_ptr => |ptr_info| {
const result_ty = try pt.ptrTypeSema(info: {
var new = parent_ptr_info;
new.child = field_ty.toIntern();
new.packed_offset = .{
.host_size = 0,
.bit_offset = 0,
};
new.flags.alignment = ptr_info.alignment;
break :info new;
});
return parent_ptr.getOffsetPtr(ptr_info.offset, result_ty, pt);
},
.@"packed" => {
const packed_offset = aggregate_ty.packedStructFieldPtrInfo(parent_ptr_ty, field_idx, pt);
const result_ty = try pt.ptrType(info: {
var new = parent_ptr_info;
new.packed_offset = packed_offset;
new.child = field_ty.toIntern();
if (new.flags.alignment == .none) {
new.flags.alignment = try aggregate_ty.abiAlignmentSema(pt);
}
break :info new;
});
return pt.getCoerced(parent_ptr, result_ty);
},
}
},

View file

@ -700,7 +700,7 @@ fn analyzeMemoizedState(pt: Zcu.PerThread, stage: InternPool.MemoizedStateStage)
const unit: AnalUnit = .wrap(.{ .memoized_state = stage });
try zcu.analysis_in_progress.put(gpa, unit, {});
try zcu.analysis_in_progress.putNoClobber(gpa, unit, {});
defer assert(zcu.analysis_in_progress.swapRemove(unit));
// Before we begin, collect:
@ -864,7 +864,7 @@ fn analyzeComptimeUnit(pt: Zcu.PerThread, cu_id: InternPool.ComptimeUnit.Id) Zcu
const file = zcu.fileByIndex(inst_resolved.file);
const zir = file.zir.?;
try zcu.analysis_in_progress.put(gpa, anal_unit, {});
try zcu.analysis_in_progress.putNoClobber(gpa, anal_unit, {});
defer assert(zcu.analysis_in_progress.swapRemove(anal_unit));
var analysis_arena: std.heap.ArenaAllocator = .init(gpa);
@ -958,6 +958,8 @@ pub fn ensureNavValUpToDate(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu
log.debug("ensureNavValUpToDate {f}", .{zcu.fmtAnalUnit(anal_unit)});
assert(!zcu.analysis_in_progress.contains(anal_unit));
// Determine whether or not this `Nav`'s value is outdated. This also includes checking if the
// status is `.unresolved`, which indicates that the value is outdated because it has *never*
// been analyzed so far.
@ -1090,10 +1092,19 @@ fn analyzeNavVal(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu.CompileErr
const inst_resolved = old_nav.analysis.?.zir_index.resolveFull(ip) orelse return error.AnalysisFail;
const file = zcu.fileByIndex(inst_resolved.file);
const zir = file.zir.?;
const zir_decl = zir.getDeclaration(inst_resolved.inst);
try zcu.analysis_in_progress.put(gpa, anal_unit, {});
try zcu.analysis_in_progress.putNoClobber(gpa, anal_unit, {});
errdefer _ = zcu.analysis_in_progress.swapRemove(anal_unit);
// If there's no type body, we are also resolving the type here.
if (zir_decl.type_body == null) {
try zcu.analysis_in_progress.putNoClobber(gpa, .wrap(.{ .nav_ty = nav_id }), {});
}
errdefer if (zir_decl.type_body == null) {
_ = zcu.analysis_in_progress.swapRemove(.wrap(.{ .nav_ty = nav_id }));
};
var analysis_arena: std.heap.ArenaAllocator = .init(gpa);
defer analysis_arena.deinit();
@ -1133,8 +1144,6 @@ fn analyzeNavVal(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu.CompileErr
};
defer block.instructions.deinit(gpa);
const zir_decl = zir.getDeclaration(inst_resolved.inst);
const ty_src = block.src(.{ .node_offset_var_decl_ty = .zero });
const init_src = block.src(.{ .node_offset_var_decl_init = .zero });
const align_src = block.src(.{ .node_offset_var_decl_align = .zero });
@ -1305,6 +1314,9 @@ fn analyzeNavVal(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu.CompileErr
// Mark the unit as completed before evaluating the export!
assert(zcu.analysis_in_progress.swapRemove(anal_unit));
if (zir_decl.type_body == null) {
assert(zcu.analysis_in_progress.swapRemove(.wrap(.{ .nav_ty = nav_id })));
}
if (zir_decl.linkage == .@"export") {
const export_src = block.src(.{ .token_offset = @enumFromInt(@intFromBool(zir_decl.is_pub)) });
@ -1347,6 +1359,8 @@ pub fn ensureNavTypeUpToDate(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zc
log.debug("ensureNavTypeUpToDate {f}", .{zcu.fmtAnalUnit(anal_unit)});
assert(!zcu.analysis_in_progress.contains(anal_unit));
const type_resolved_by_value: bool = from_val: {
const analysis = nav.analysis orelse break :from_val false;
const inst_resolved = analysis.zir_index.resolveFull(ip) orelse break :from_val false;
@ -1463,8 +1477,8 @@ fn analyzeNavType(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu.CompileEr
const file = zcu.fileByIndex(inst_resolved.file);
const zir = file.zir.?;
try zcu.analysis_in_progress.put(gpa, anal_unit, {});
defer _ = zcu.analysis_in_progress.swapRemove(anal_unit);
try zcu.analysis_in_progress.putNoClobber(gpa, anal_unit, {});
defer assert(zcu.analysis_in_progress.swapRemove(anal_unit));
const zir_decl = zir.getDeclaration(inst_resolved.inst);
const type_body = zir_decl.type_body.?;
@ -1587,6 +1601,8 @@ pub fn ensureFuncBodyUpToDate(pt: Zcu.PerThread, func_index: InternPool.Index) Z
log.debug("ensureFuncBodyUpToDate {f}", .{zcu.fmtAnalUnit(anal_unit)});
assert(!zcu.analysis_in_progress.contains(anal_unit));
const func = zcu.funcInfo(func_index);
assert(func.ty == func.uncoerced_ty); // analyze the body of the original function, not a coerced one
@ -2781,7 +2797,7 @@ fn analyzeFnBodyInner(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaE
const file = zcu.fileByIndex(inst_info.file);
const zir = file.zir.?;
try zcu.analysis_in_progress.put(gpa, anal_unit, {});
try zcu.analysis_in_progress.putNoClobber(gpa, anal_unit, {});
errdefer _ = zcu.analysis_in_progress.swapRemove(anal_unit);
func.setAnalyzed(ip);

View file

@ -2291,7 +2291,7 @@ fn genBodyBlock(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
}
fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
@setEvalBranchQuota(29_400);
@setEvalBranchQuota(29_500);
const pt = cg.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
@ -86774,52 +86774,313 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
const is_non_err = try cg.tempInit(.bool, .{ .eflags = .e });
try is_non_err.finish(inst, &.{un_op}, &ops, cg);
},
.load => fallback: {
.load => {
const ty_op = air_datas[@intFromEnum(inst)].ty_op;
const val_ty = ty_op.ty.toType();
const ptr_ty = cg.typeOf(ty_op.operand);
const ptr_info = ptr_ty.ptrInfo(zcu);
if (ptr_info.packed_offset.host_size > 0 and
(ptr_info.flags.vector_index == .none or val_ty.toIntern() == .bool_type))
break :fallback try cg.airLoad(inst);
var ops = try cg.tempsFromOperands(inst, .{ty_op.operand});
const res = try ops[0].load(val_ty, .{
.disp = switch (ptr_info.flags.vector_index) {
.none => 0,
.runtime => unreachable,
else => |vector_index| @intCast(val_ty.abiSize(zcu) * @intFromEnum(vector_index)),
var res: [1]Temp = undefined;
cg.select(&res, &.{val_ty}, &ops, comptime &.{ .{
.src_constraints = .{ .{ .ptr_bool_vec_elem = .byte }, .any, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .none, .none } },
},
}, cg);
try res.finish(inst, &.{ty_op.operand}, &ops, cg);
.extra_temps = .{
.{ .type = .u8, .kind = .{ .mut_rc = .{ .ref = .src0, .rc = .general_purpose } } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.dst_temps = .{ .{ .cc = .c }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .movzx, .tmp0d, .lea(.src0b), ._, ._ },
.{ ._, ._, .bt, .tmp0d, .ua(.src0, .add_vector_index), ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .ptr_bool_vec_elem = .word }, .any, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .none, .none } },
},
.dst_temps = .{ .{ .cc = .c }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .bt, .lea(.src0w), .ua(.src0, .add_vector_index), ._, ._ },
} },
}, .{
.src_constraints = .{ .ptr_any_bool_vec_elem, .any, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .none, .none } },
},
.dst_temps = .{ .{ .cc = .c }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .bt, .leaa(.src0d, .add_vector_index_div_8_down_4), .ua(.src0, .add_vector_index_rem_32), ._, ._ },
} },
} }) catch |err| switch (err) {
error.SelectFailed => res[0] = try ops[0].load(val_ty, .{
.disp = switch (cg.typeOf(ty_op.operand).ptrInfo(zcu).flags.vector_index) {
.none => 0,
.runtime => unreachable,
else => |vector_index| @intCast(val_ty.abiSize(zcu) * @intFromEnum(vector_index)),
},
}, cg),
else => |e| return e,
};
try res[0].finish(inst, &.{ty_op.operand}, &ops, cg);
},
.ret => try cg.airRet(inst, false),
.ret_safe => try cg.airRet(inst, true),
.ret_load => try cg.airRetLoad(inst),
.store, .store_safe => |air_tag| fallback: {
.store, .store_safe => |air_tag| {
const bin_op = air_datas[@intFromEnum(inst)].bin_op;
const ptr_ty = cg.typeOf(bin_op.lhs);
const ptr_info = ptr_ty.ptrInfo(zcu);
const val_ty = cg.typeOf(bin_op.rhs);
if (ptr_info.packed_offset.host_size > 0 and
(ptr_info.flags.vector_index == .none or val_ty.toIntern() == .bool_type))
break :fallback try cg.airStore(inst, switch (air_tag) {
else => unreachable,
.store => false,
.store_safe => true,
});
var ops = try cg.tempsFromOperands(inst, .{ bin_op.lhs, bin_op.rhs });
try ops[0].store(&ops[1], .{
.disp = switch (ptr_info.flags.vector_index) {
.none => 0,
.runtime => unreachable,
else => |vector_index| @intCast(val_ty.abiSize(zcu) * @intFromEnum(vector_index)),
cg.select(&.{}, &.{}, &ops, comptime &.{ .{
.src_constraints = .{ .{ .ptr_bool_vec_elem = .byte }, .bool, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .{ .imm = 0 }, .none } },
},
.safe = switch (air_tag) {
else => unreachable,
.store => false,
.store_safe => true,
.extra_temps = .{
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
}, cg);
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .movzx, .tmp0d, .lea(.src0b), ._, ._ },
.{ ._, ._r, .bt, .tmp0d, .ua(.src0, .add_vector_index), ._, ._ },
.{ ._, ._, .mov, .lea(.src0b), .tmp0b, ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .ptr_bool_vec_elem = .byte }, .bool, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .{ .imm = 1 }, .none } },
},
.extra_temps = .{
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .movzx, .tmp0d, .lea(.src0b), ._, ._ },
.{ ._, ._s, .bt, .tmp0d, .ua(.src0, .add_vector_index), ._, ._ },
.{ ._, ._, .mov, .lea(.src0b), .tmp0b, ._, ._ },
} },
}, .{
.required_features = .{ .cmov, null, null, null },
.src_constraints = .{ .{ .ptr_bool_vec_elem = .byte }, .bool, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .to_gpr, .none } },
},
.extra_temps = .{
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .movzx, .tmp0d, .lea(.src0b), ._, ._ },
.{ ._, ._, .mov, .tmp1d, .tmp0d, ._, ._ },
.{ ._, ._r, .bt, .tmp1d, .ua(.src0, .add_vector_index), ._, ._ },
.{ ._, ._s, .bt, .tmp0d, .ua(.src0, .add_vector_index), ._, ._ },
.{ ._, ._, .@"test", .src1b, .si(1), ._, ._ },
.{ ._, ._z, .cmov, .tmp0d, .tmp1d, ._, ._ },
.{ ._, ._, .mov, .lea(.src0b), .tmp0b, ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .ptr_bool_vec_elem = .byte }, .bool, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .to_gpr, .none } },
},
.extra_temps = .{
.{ .type = .u8, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .movzx, .tmp0d, .lea(.src0b), ._, ._ },
.{ ._, ._, .@"test", .src1b, .si(1), ._, ._ },
.{ ._, ._nz, .j, .@"0f", ._, ._, ._ },
.{ ._, ._r, .bt, .tmp0d, .ua(.src0, .add_vector_index), ._, ._ },
.{ ._, ._mp, .j, .@"1f", ._, ._, ._ },
.{ .@"0:", ._s, .bt, .tmp0d, .ua(.src0, .add_vector_index), ._, ._ },
.{ .@"1:", ._, .mov, .lea(.src0b), .tmp0b, ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .ptr_bool_vec_elem = .word }, .bool, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .{ .imm = 0 }, .none } },
},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._r, .bt, .lea(.src0w), .ua(.src0, .add_vector_index), ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .ptr_bool_vec_elem = .word }, .bool, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .{ .imm = 1 }, .none } },
},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._s, .bt, .lea(.src0w), .ua(.src0, .add_vector_index), ._, ._ },
} },
}, .{
.required_features = .{ .cmov, null, null, null },
.src_constraints = .{ .{ .ptr_bool_vec_elem = .word }, .bool, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .to_gpr, .none } },
},
.extra_temps = .{
.{ .type = .u16, .kind = .{ .rc = .general_purpose } },
.{ .type = .u16, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .movzx, .tmp0d, .lea(.src0w), ._, ._ },
.{ ._, ._, .mov, .tmp1d, .tmp0d, ._, ._ },
.{ ._, ._r, .bt, .tmp1d, .ua(.src0, .add_vector_index), ._, ._ },
.{ ._, ._s, .bt, .tmp0d, .ua(.src0, .add_vector_index), ._, ._ },
.{ ._, ._, .@"test", .src1b, .si(1), ._, ._ },
.{ ._, ._z, .cmov, .tmp0d, .tmp1d, ._, ._ },
.{ ._, ._, .mov, .lea(.src0w), .tmp0w, ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .ptr_bool_vec_elem = .word }, .bool, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .to_gpr, .none } },
},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .@"test", .src1b, .si(1), ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ ._, ._r, .bt, .lea(.src0w), .ua(.src0, .add_vector_index), ._, ._ },
.{ ._, ._mp, .j, .@"0f", ._, ._, ._ },
.{ .@"1:", ._s, .bt, .lea(.src0w), .ua(.src0, .add_vector_index), ._, ._ },
} },
}, .{
.src_constraints = .{ .ptr_any_bool_vec_elem, .bool, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .{ .imm = 0 }, .none } },
},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._r, .bt, .leaa(.src0d, .add_vector_index_div_8_down_4), .ua(.src0, .add_vector_index_rem_32), ._, ._ },
} },
}, .{
.src_constraints = .{ .ptr_any_bool_vec_elem, .bool, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .{ .imm = 1 }, .none } },
},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._s, .bt, .leaa(.src0d, .add_vector_index_div_8_down_4), .ua(.src0, .add_vector_index_rem_32), ._, ._ },
} },
}, .{
.required_features = .{ .cmov, null, null, null },
.src_constraints = .{ .ptr_any_bool_vec_elem, .bool, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .to_gpr, .none } },
},
.extra_temps = .{
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.{ .type = .u32, .kind = .{ .rc = .general_purpose } },
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
.unused,
},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .mov, .tmp0d, .leaa(.src0d, .add_vector_index_div_8_down_4), ._, ._ },
.{ ._, ._, .mov, .tmp1d, .tmp0d, ._, ._ },
.{ ._, ._r, .bt, .tmp1d, .ua(.src0, .add_vector_index_rem_32), ._, ._ },
.{ ._, ._s, .bt, .tmp0d, .ua(.src0, .add_vector_index_rem_32), ._, ._ },
.{ ._, ._, .@"test", .src1b, .si(1), ._, ._ },
.{ ._, ._z, .cmov, .tmp0d, .tmp1d, ._, ._ },
.{ ._, ._, .mov, .leaa(.src0d, .add_vector_index_div_8_down_4), .tmp0d, ._, ._ },
} },
}, .{
.src_constraints = .{ .ptr_any_bool_vec_elem, .bool, .any },
.patterns = &.{
.{ .src = .{ .to_gpr, .to_gpr, .none } },
},
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .@"test", .src1b, .si(1), ._, ._ },
.{ ._, ._nz, .j, .@"1f", ._, ._, ._ },
.{ ._, ._r, .bt, .leaa(.src0d, .add_vector_index_div_8_down_4), .ua(.src0, .add_vector_index_rem_32), ._, ._ },
.{ ._, ._mp, .j, .@"0f", ._, ._, ._ },
.{ .@"1:", ._s, .bt, .leaa(.src0d, .add_vector_index_div_8_down_4), .ua(.src0, .add_vector_index_rem_32), ._, ._ },
} },
} }) catch |err| switch (err) {
error.SelectFailed => try ops[0].store(&ops[1], .{
.disp = switch (cg.typeOf(bin_op.lhs).ptrInfo(zcu).flags.vector_index) {
.none => 0,
.runtime => unreachable,
else => |vector_index| @intCast(cg.typeOf(bin_op.rhs).abiSize(zcu) * @intFromEnum(vector_index)),
},
.safe = switch (air_tag) {
else => unreachable,
.store => false,
.store_safe => true,
},
}, cg),
else => |e| return e,
};
for (ops) |op| try op.die(cg);
},
.unreach => {},
@ -100863,7 +101124,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.dst_temps = .{ .{ .cc = .c }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .bt, .src0d, .ua(.none, .add_src1_rem_32), ._, ._ },
.{ ._, ._, .bt, .src0d, .ua(.none, .add_src1), ._, ._ },
} },
}, .{
.src_constraints = .{ .{ .bool_vec = .dword }, .any, .any },
@ -100884,7 +101145,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.dst_temps = .{ .{ .cc = .c }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .bt, .src0q, .ua(.none, .add_src1_rem_64), ._, ._ },
.{ ._, ._, .bt, .src0q, .ua(.none, .add_src1), ._, ._ },
} },
}, .{
.required_features = .{ .@"64bit", null, null, null },
@ -174481,114 +174742,6 @@ fn reuseOperandAdvanced(
return true;
}
fn packedLoad(self: *CodeGen, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const ptr_info = ptr_ty.ptrInfo(zcu);
const val_ty: Type = .fromInterned(ptr_info.child);
if (!val_ty.hasRuntimeBitsIgnoreComptime(zcu)) return;
const val_abi_size: u32 = @intCast(val_ty.abiSize(zcu));
const val_bit_size: u32 = @intCast(val_ty.bitSize(zcu));
const ptr_bit_off = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) {
.none => 0,
.runtime => unreachable,
else => |vector_index| @intFromEnum(vector_index) * val_bit_size,
};
if (ptr_bit_off % 8 == 0) {
{
const mat_ptr_mcv: MCValue = switch (ptr_mcv) {
.immediate, .register, .register_offset, .lea_frame => ptr_mcv,
else => .{ .register = try self.copyToTmpRegister(ptr_ty, ptr_mcv) },
};
const mat_ptr_lock = switch (mat_ptr_mcv) {
.register => |mat_ptr_reg| self.register_manager.lockReg(mat_ptr_reg),
else => null,
};
defer if (mat_ptr_lock) |lock| self.register_manager.unlockReg(lock);
try self.load(dst_mcv, ptr_ty, mat_ptr_mcv.offset(@intCast(@divExact(ptr_bit_off, 8))));
}
if (val_abi_size * 8 > val_bit_size) {
if (dst_mcv.isRegister()) {
try self.truncateRegister(val_ty, dst_mcv.getReg().?);
} else {
const tmp_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
defer self.register_manager.unlockReg(tmp_lock);
const hi_mcv = dst_mcv.address().offset(@intCast(val_bit_size / 64 * 8)).deref();
try self.genSetReg(tmp_reg, .usize, hi_mcv, .{});
try self.truncateRegister(val_ty, tmp_reg);
try self.genCopy(.usize, hi_mcv, .{ .register = tmp_reg }, .{});
}
}
return;
}
if (val_abi_size > 8) return self.fail("TODO implement packed load of {f}", .{val_ty.fmt(pt)});
const limb_abi_size: u31 = @min(val_abi_size, 8);
const limb_abi_bits = limb_abi_size * 8;
const val_byte_off: i32 = @intCast(ptr_bit_off / limb_abi_bits * limb_abi_size);
const val_bit_off = ptr_bit_off % limb_abi_bits;
const val_extra_bits = self.regExtraBits(val_ty);
const ptr_reg = try self.copyToTmpRegister(ptr_ty, ptr_mcv);
const ptr_lock = self.register_manager.lockRegAssumeUnused(ptr_reg);
defer self.register_manager.unlockReg(ptr_lock);
const dst_reg = switch (dst_mcv) {
.register => |reg| reg,
else => try self.register_manager.allocReg(null, abi.RegisterClass.gp),
};
const dst_lock = self.register_manager.lockReg(dst_reg);
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
const load_abi_size =
if (val_bit_off < val_extra_bits) val_abi_size else val_abi_size * 2;
if (load_abi_size <= 8) {
const load_reg = registerAlias(dst_reg, load_abi_size);
try self.asmRegisterMemory(.{ ._, .mov }, load_reg, .{
.base = .{ .reg = ptr_reg },
.mod = .{ .rm = .{
.size = .fromSize(load_abi_size),
.disp = val_byte_off,
} },
});
try self.spillEflagsIfOccupied();
try self.asmRegisterImmediate(.{ ._r, .sh }, load_reg, .u(val_bit_off));
} else {
const tmp_reg =
registerAlias(try self.register_manager.allocReg(null, abi.RegisterClass.gp), val_abi_size);
const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
defer self.register_manager.unlockReg(tmp_lock);
const dst_alias = registerAlias(dst_reg, val_abi_size);
try self.asmRegisterMemory(.{ ._, .mov }, dst_alias, .{
.base = .{ .reg = ptr_reg },
.mod = .{ .rm = .{
.size = .fromSize(val_abi_size),
.disp = val_byte_off,
} },
});
try self.asmRegisterMemory(.{ ._, .mov }, tmp_reg, .{
.base = .{ .reg = ptr_reg },
.mod = .{ .rm = .{
.size = .fromSize(val_abi_size),
.disp = val_byte_off + limb_abi_size,
} },
});
try self.spillEflagsIfOccupied();
try self.asmRegisterRegisterImmediate(.{ ._rd, .sh }, dst_alias, tmp_reg, .u(val_bit_off));
}
if (val_extra_bits > 0) try self.truncateRegister(val_ty, dst_reg);
try self.genCopy(val_ty, dst_mcv, .{ .register = dst_reg }, .{});
}
fn load(self: *CodeGen, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
@ -174636,174 +174789,6 @@ fn load(self: *CodeGen, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerE
}
}
fn airLoad(self: *CodeGen, inst: Air.Inst.Index) !void {
const pt = self.pt;
const zcu = pt.zcu;
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const elem_ty = self.typeOfIndex(inst);
const result: MCValue = result: {
if (!elem_ty.hasRuntimeBitsIgnoreComptime(zcu)) break :result .none;
try self.spillRegisters(&.{ .rdi, .rsi, .rcx });
const reg_locks = self.register_manager.lockRegsAssumeUnused(3, .{ .rdi, .rsi, .rcx });
defer for (reg_locks) |lock| self.register_manager.unlockReg(lock);
const ptr_ty = self.typeOf(ty_op.operand);
const elem_size = elem_ty.abiSize(zcu);
const elem_rs = self.regSetForType(elem_ty);
const ptr_rs = self.regSetForType(ptr_ty);
const ptr_mcv = try self.resolveInst(ty_op.operand);
const dst_mcv = if (elem_size <= 8 and std.math.isPowerOfTwo(elem_size) and
elem_rs.supersetOf(ptr_rs) and self.reuseOperand(inst, ty_op.operand, 0, ptr_mcv))
// The MCValue that holds the pointer can be re-used as the value.
ptr_mcv
else
try self.allocRegOrMem(inst, true);
const ptr_info = ptr_ty.ptrInfo(zcu);
if (ptr_info.flags.vector_index != .none or ptr_info.packed_offset.host_size > 0) {
try self.packedLoad(dst_mcv, ptr_ty, ptr_mcv);
} else {
try self.load(dst_mcv, ptr_ty, ptr_mcv);
}
if (elem_ty.isAbiInt(zcu) and elem_size * 8 > elem_ty.bitSize(zcu)) {
const high_mcv: MCValue = switch (dst_mcv) {
.register => |dst_reg| .{ .register = dst_reg },
.register_pair => |dst_regs| .{ .register = dst_regs[1] },
else => dst_mcv.address().offset(@intCast((elem_size - 1) / 8 * 8)).deref(),
};
const high_reg = if (high_mcv.isRegister())
high_mcv.getReg().?
else
try self.copyToTmpRegister(.usize, high_mcv);
const high_lock = self.register_manager.lockReg(high_reg);
defer if (high_lock) |lock| self.register_manager.unlockReg(lock);
try self.truncateRegister(elem_ty, high_reg);
if (!high_mcv.isRegister()) try self.genCopy(
if (elem_size <= 8) elem_ty else .usize,
high_mcv,
.{ .register = high_reg },
.{},
);
}
break :result dst_mcv;
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn packedStore(self: *CodeGen, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) InnerError!void {
const pt = self.pt;
const zcu = pt.zcu;
const ptr_info = ptr_ty.ptrInfo(zcu);
const src_ty: Type = .fromInterned(ptr_info.child);
if (!src_ty.hasRuntimeBitsIgnoreComptime(zcu)) return;
const limb_abi_size: u16 = @min(ptr_info.packed_offset.host_size, 8);
const limb_abi_bits = limb_abi_size * 8;
const limb_ty = try pt.intType(.unsigned, limb_abi_bits);
const src_bit_size = src_ty.bitSize(zcu);
const ptr_bit_off = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) {
.none => 0,
.runtime => unreachable,
else => |vector_index| @intFromEnum(vector_index) * src_bit_size,
};
const src_byte_off: i32 = @intCast(ptr_bit_off / limb_abi_bits * limb_abi_size);
const src_bit_off = ptr_bit_off % limb_abi_bits;
const ptr_reg = try self.copyToTmpRegister(ptr_ty, ptr_mcv);
const ptr_lock = self.register_manager.lockRegAssumeUnused(ptr_reg);
defer self.register_manager.unlockReg(ptr_lock);
const mat_src_mcv: MCValue = mat_src_mcv: switch (src_mcv) {
.register => if (src_bit_size > 64) {
const frame_index = try self.allocFrameIndex(.initSpill(src_ty, self.pt.zcu));
try self.genSetMem(.{ .frame = frame_index }, 0, src_ty, src_mcv, .{});
break :mat_src_mcv .{ .load_frame = .{ .index = frame_index } };
} else src_mcv,
else => src_mcv,
};
var limb_i: u16 = 0;
while (limb_i * limb_abi_bits < src_bit_off + src_bit_size) : (limb_i += 1) {
const part_bit_off = if (limb_i == 0) src_bit_off else 0;
const part_bit_size =
@min(src_bit_off + src_bit_size - limb_i * limb_abi_bits, limb_abi_bits) - part_bit_off;
const limb_mem: Memory = .{
.base = .{ .reg = ptr_reg },
.mod = .{ .rm = .{
.size = .fromSize(limb_abi_size),
.disp = src_byte_off + limb_i * limb_abi_size,
} },
};
const part_mask = (@as(u64, std.math.maxInt(u64)) >> @intCast(64 - part_bit_size)) <<
@intCast(part_bit_off);
const part_mask_not = part_mask ^ (@as(u64, std.math.maxInt(u64)) >> @intCast(64 - limb_abi_bits));
if (limb_abi_size <= 4) {
try self.asmMemoryImmediate(.{ ._, .@"and" }, limb_mem, .u(part_mask_not));
} else if (std.math.cast(i32, @as(i64, @bitCast(part_mask_not)))) |small| {
try self.asmMemoryImmediate(.{ ._, .@"and" }, limb_mem, .s(small));
} else {
const part_mask_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
try self.asmRegisterImmediate(.{ ._, .mov }, part_mask_reg, .u(part_mask_not));
try self.asmMemoryRegister(.{ ._, .@"and" }, limb_mem, part_mask_reg);
}
if (src_bit_size <= 64) {
const tmp_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
const tmp_mcv = MCValue{ .register = tmp_reg };
const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
defer self.register_manager.unlockReg(tmp_lock);
try self.genSetReg(tmp_reg, limb_ty, mat_src_mcv, .{});
switch (limb_i) {
0 => try self.genShiftBinOpMir(
.{ ._l, .sh },
limb_ty,
tmp_mcv,
.u8,
.{ .immediate = src_bit_off },
),
1 => try self.genShiftBinOpMir(
.{ ._r, .sh },
limb_ty,
tmp_mcv,
.u8,
.{ .immediate = limb_abi_bits - src_bit_off },
),
else => unreachable,
}
try self.genBinOpMir(.{ ._, .@"and" }, limb_ty, tmp_mcv, .{ .immediate = part_mask });
try self.asmMemoryRegister(
.{ ._, .@"or" },
limb_mem,
registerAlias(tmp_reg, limb_abi_size),
);
} else if (src_bit_size <= 128 and src_bit_off == 0) {
const tmp_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
const tmp_mcv = MCValue{ .register = tmp_reg };
const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
defer self.register_manager.unlockReg(tmp_lock);
try self.genSetReg(tmp_reg, limb_ty, switch (limb_i) {
0 => mat_src_mcv,
else => mat_src_mcv.address().offset(limb_i * limb_abi_size).deref(),
}, .{});
try self.genBinOpMir(.{ ._, .@"and" }, limb_ty, tmp_mcv, .{ .immediate = part_mask });
try self.asmMemoryRegister(
.{ ._, .@"or" },
limb_mem,
registerAlias(tmp_reg, limb_abi_size),
);
} else return self.fail("TODO: implement packed store of {f}", .{src_ty.fmt(pt)});
}
}
fn store(
self: *CodeGen,
ptr_ty: Type,
@ -174857,35 +174842,6 @@ fn store(
}
}
fn airStore(self: *CodeGen, inst: Air.Inst.Index, safety: bool) !void {
const pt = self.pt;
const zcu = pt.zcu;
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
result: {
if (!safety and (try self.resolveInst(bin_op.rhs)) == .undef) break :result;
try self.spillRegisters(&.{ .rdi, .rsi, .rcx });
const reg_locks = self.register_manager.lockRegsAssumeUnused(3, .{ .rdi, .rsi, .rcx });
defer for (reg_locks) |lock| self.register_manager.unlockReg(lock);
const ptr_ty = self.typeOf(bin_op.lhs);
const ptr_info = ptr_ty.ptrInfo(zcu);
const is_packed = ptr_info.flags.vector_index != .none or ptr_info.packed_offset.host_size > 0;
if (is_packed) try self.spillEflagsIfOccupied();
const src_mcv = try self.resolveInst(bin_op.rhs);
const ptr_mcv = try self.resolveInst(bin_op.lhs);
if (is_packed) {
try self.packedStore(ptr_ty, ptr_mcv, src_mcv);
} else {
try self.store(ptr_ty, ptr_mcv, src_mcv, .{ .safety = safety });
}
}
return self.finishAir(inst, .none, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn genUnOp(self: *CodeGen, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: Air.Inst.Ref) !MCValue {
const pt = self.pt;
const zcu = pt.zcu;
@ -187019,15 +186975,21 @@ const Temp = struct {
},
.struct_type => {
assert(src_regs.len - part_index == std.math.divCeil(u32, src_abi_size, 8) catch unreachable);
break :part_ty .u64;
break :part_ty switch (src_abi_size) {
0, 3, 5...7 => unreachable,
1 => .u8,
2 => .u16,
4 => .u32,
else => .u64,
};
},
.tuple_type => |tuple_type| {
assert(tuple_type.types.len == src_regs.len);
break :part_ty .fromInterned(tuple_type.types.get(ip)[part_index]);
},
};
const part_size: u31 = @intCast(part_ty.abiSize(zcu));
const src_rc = src_reg.class();
const part_bit_size = switch (src_rc) {
else => 8 * part_size,
.x87 => part_ty.bitSize(zcu),
};
if (src_rc == .x87 or std.math.isPowerOfTwo(part_size)) {
// hack around linker relocation bugs
switch (ptr.tracking(cg).short) {
@ -187036,7 +186998,15 @@ const Temp = struct {
}
const strat = try cg.moveStrategy(part_ty, src_rc, false);
try strat.write(cg, try ptr.tracking(cg).short.deref().mem(cg, .{
.size = .fromBitSize(part_bit_size),
.size = switch (src_rc) {
else => .fromBitSize(8 * part_size),
.x87 => switch (abi.classifySystemV(src_ty, zcu, cg.target, .other)[part_index]) {
else => unreachable,
.float => .dword,
.float_combine, .sse => .qword,
.x87 => .tbyte,
},
},
.disp = part_disp,
}), registerAlias(src_reg, part_size));
} else {
@ -192157,6 +192127,8 @@ const Select = struct {
exact_bool_vec: u16,
ptr_any_bool_vec,
ptr_bool_vec: Memory.Size,
ptr_any_bool_vec_elem,
ptr_bool_vec_elem: Memory.Size,
remainder_bool_vec: OfIsSizes,
exact_remainder_bool_vec: struct { of: Memory.Size, is: u16 },
signed_int_vec: Memory.Size,
@ -192259,6 +192231,22 @@ const Select = struct {
.vector_type => |vector_type| vector_type.child == .bool_type and size.bitSize(cg.target) >= vector_type.len,
else => false,
},
.ptr_any_bool_vec_elem => {
const ptr_info = ty.ptrInfo(zcu);
return switch (ptr_info.flags.vector_index) {
.none => false,
.runtime => unreachable,
else => ptr_info.child == .bool_type,
};
},
.ptr_bool_vec_elem => |size| {
const ptr_info = ty.ptrInfo(zcu);
return switch (ptr_info.flags.vector_index) {
.none => false,
.runtime => unreachable,
else => ptr_info.child == .bool_type and size.bitSize(cg.target) >= ptr_info.packed_offset.host_size,
};
},
.remainder_bool_vec => |of_is| ty.isVector(zcu) and ty.scalarType(zcu).toIntern() == .bool_type and
of_is.is.bitSize(cg.target) >= (ty.vectorLen(zcu) - 1) % of_is.of.bitSize(cg.target) + 1,
.exact_remainder_bool_vec => |of_is| ty.isVector(zcu) and ty.scalarType(zcu).toIntern() == .bool_type and
@ -193252,7 +193240,7 @@ const Select = struct {
ref: Ref,
scale: Memory.Scale = .@"1",
} = .{ .ref = .none },
unused: u3 = 0,
unused: u2 = 0,
},
imm: i32 = 0,
@ -193265,9 +193253,9 @@ const Select = struct {
lea,
mem,
};
const Adjust = packed struct(u10) {
const Adjust = packed struct(u11) {
sign: enum(u1) { neg, pos },
lhs: enum(u5) {
lhs: enum(u6) {
none,
ptr_size,
ptr_bit_size,
@ -193289,6 +193277,7 @@ const Select = struct {
src0_elem_size,
dst0_elem_size,
src0_elem_size_mul_src1,
vector_index,
src1,
src1_sub_bit_size,
log2_src0_elem_size,
@ -193359,9 +193348,13 @@ const Select = struct {
const sub_src0_elem_size: Adjust = .{ .sign = .neg, .lhs = .src0_elem_size, .op = .mul, .rhs = .@"1" };
const add_src0_elem_size_mul_src1: Adjust = .{ .sign = .pos, .lhs = .src0_elem_size_mul_src1, .op = .mul, .rhs = .@"1" };
const sub_src0_elem_size_mul_src1: Adjust = .{ .sign = .neg, .lhs = .src0_elem_size_mul_src1, .op = .mul, .rhs = .@"1" };
const add_vector_index: Adjust = .{ .sign = .pos, .lhs = .vector_index, .op = .mul, .rhs = .@"1" };
const add_vector_index_rem_32: Adjust = .{ .sign = .pos, .lhs = .vector_index, .op = .rem_8_mul, .rhs = .@"4" };
const add_vector_index_div_8_down_4: Adjust = .{ .sign = .pos, .lhs = .vector_index, .op = .div_8_down, .rhs = .@"4" };
const add_dst0_elem_size: Adjust = .{ .sign = .pos, .lhs = .dst0_elem_size, .op = .mul, .rhs = .@"1" };
const sub_dst0_elem_size: Adjust = .{ .sign = .neg, .lhs = .dst0_elem_size, .op = .mul, .rhs = .@"1" };
const add_src1_div_8_down_4: Adjust = .{ .sign = .pos, .lhs = .src1, .op = .div_8_down, .rhs = .@"4" };
const add_src1: Adjust = .{ .sign = .pos, .lhs = .src1, .op = .mul, .rhs = .@"1" };
const add_src1_rem_32: Adjust = .{ .sign = .pos, .lhs = .src1, .op = .rem_8_mul, .rhs = .@"4" };
const add_src1_rem_64: Adjust = .{ .sign = .pos, .lhs = .src1, .op = .rem_8_mul, .rhs = .@"8" };
const add_src1_sub_bit_size: Adjust = .{ .sign = .pos, .lhs = .src1_sub_bit_size, .op = .mul, .rhs = .@"1" };
@ -194244,6 +194237,10 @@ const Select = struct {
.dst0_elem_size => @intCast(Select.Operand.Ref.dst0.typeOf(s).elemType2(s.cg.pt.zcu).abiSize(s.cg.pt.zcu)),
.src0_elem_size_mul_src1 => @intCast(Select.Operand.Ref.src0.typeOf(s).elemType2(s.cg.pt.zcu).abiSize(s.cg.pt.zcu) *
Select.Operand.Ref.src1.valueOf(s).immediate),
.vector_index => switch (op.flags.base.ref.typeOf(s).ptrInfo(s.cg.pt.zcu).flags.vector_index) {
.none, .runtime => unreachable,
else => |vector_index| @intFromEnum(vector_index),
},
.src1 => @intCast(Select.Operand.Ref.src1.valueOf(s).immediate),
.src1_sub_bit_size => @as(SignedImm, @intCast(Select.Operand.Ref.src1.valueOf(s).immediate)) -
@as(SignedImm, @intCast(s.cg.nonBoolScalarBitSize(op.flags.base.ref.typeOf(s)))),

View file

@ -5821,29 +5821,21 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
},
.unwrap_errunion_err_ptr => {
if (isel.live_values.fetchRemove(air.inst_index)) |error_ptr_vi| unused: {
defer error_ptr_vi.value.deref(isel);
if (isel.live_values.fetchRemove(air.inst_index)) |error_vi| {
defer error_vi.value.deref(isel);
const ty_op = air.data(air.inst_index).ty_op;
switch (codegen.errUnionErrorOffset(
isel.air.typeOf(ty_op.operand, ip).childType(zcu).errorUnionPayload(zcu),
zcu,
)) {
0 => try error_ptr_vi.value.move(isel, ty_op.operand),
else => |error_offset| {
const error_ptr_ra = try error_ptr_vi.value.defReg(isel) orelse break :unused;
const error_union_ptr_vi = try isel.use(ty_op.operand);
const error_union_ptr_mat = try error_union_ptr_vi.matReg(isel);
const lo12: u12 = @truncate(error_offset >> 0);
const hi12: u12 = @intCast(error_offset >> 12);
if (hi12 > 0) try isel.emit(.add(
error_ptr_ra.x(),
if (lo12 > 0) error_ptr_ra.x() else error_union_ptr_mat.ra.x(),
.{ .shifted_immediate = .{ .immediate = hi12, .lsl = .@"12" } },
));
if (lo12 > 0) try isel.emit(.add(error_ptr_ra.x(), error_union_ptr_mat.ra.x(), .{ .immediate = lo12 }));
try error_union_ptr_mat.finish(isel);
},
}
const error_union_ptr_ty = isel.air.typeOf(ty_op.operand, ip);
const error_union_ptr_info = error_union_ptr_ty.ptrInfo(zcu);
const error_union_ptr_vi = try isel.use(ty_op.operand);
const error_union_ptr_mat = try error_union_ptr_vi.matReg(isel);
_ = try error_vi.value.load(isel, ty_op.ty.toType(), error_union_ptr_mat.ra, .{
.offset = codegen.errUnionErrorOffset(
ZigType.fromInterned(error_union_ptr_info.child).errorUnionPayload(zcu),
zcu,
),
.@"volatile" = error_union_ptr_info.flags.is_volatile,
});
try error_union_ptr_mat.finish(isel);
}
if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
},
@ -6147,6 +6139,26 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
}
if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
},
.ptr_slice_len_ptr => {
if (isel.live_values.fetchRemove(air.inst_index)) |dst_vi| unused: {
defer dst_vi.value.deref(isel);
const ty_op = air.data(air.inst_index).ty_op;
const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
const src_vi = try isel.use(ty_op.operand);
const src_mat = try src_vi.matReg(isel);
try isel.emit(.add(dst_ra.x(), src_mat.ra.x(), .{ .immediate = 8 }));
try src_mat.finish(isel);
}
if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
},
.ptr_slice_ptr_ptr => {
if (isel.live_values.fetchRemove(air.inst_index)) |dst_vi| {
defer dst_vi.value.deref(isel);
const ty_op = air.data(air.inst_index).ty_op;
try dst_vi.value.move(isel, ty_op.operand);
}
if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
},
.array_elem_val => {
if (isel.live_values.fetchRemove(air.inst_index)) |elem_vi| unused: {
defer elem_vi.value.deref(isel);
@ -8011,6 +8023,7 @@ pub fn layout(
while (save_index < saves.len) {
if (save_index + 2 <= saves.len and saves[save_index + 1].needs_restore and
saves[save_index + 0].class == saves[save_index + 1].class and
saves[save_index + 0].size == saves[save_index + 1].size and
saves[save_index + 0].offset + saves[save_index + 0].size == saves[save_index + 1].offset)
{
try isel.emit(.ldp(
@ -8317,7 +8330,7 @@ fn elemPtr(
}),
2 => {
const shift: u6 = @intCast(@ctz(elem_size));
const temp_ra = temp_ra: switch (op) {
const temp_ra, const free_temp_ra = temp_ra: switch (op) {
.add => switch (base_ra) {
else => {
const temp_ra = try isel.allocIntReg();
@ -8326,7 +8339,7 @@ fn elemPtr(
.register = temp_ra.x(),
.shift = .{ .lsl = shift },
} }));
break :temp_ra temp_ra;
break :temp_ra .{ temp_ra, true };
},
.zr => {
if (shift > 0) try isel.emit(.ubfm(elem_ptr_ra.x(), elem_ptr_ra.x(), .{
@ -8334,7 +8347,7 @@ fn elemPtr(
.immr = -%shift,
.imms = ~shift,
}));
break :temp_ra elem_ptr_ra;
break :temp_ra .{ elem_ptr_ra, false };
},
},
.sub => {
@ -8344,10 +8357,10 @@ fn elemPtr(
.register = temp_ra.x(),
.shift = .{ .lsl = shift },
} }));
break :temp_ra temp_ra;
break :temp_ra .{ temp_ra, true };
},
};
defer if (temp_ra != elem_ptr_ra) isel.freeReg(temp_ra);
defer if (free_temp_ra) isel.freeReg(temp_ra);
try isel.emit(.add(temp_ra.x(), index_mat.ra.x(), .{ .shifted_register = .{
.register = index_mat.ra.x(),
.shift = .{ .lsl = @intCast(63 - @clz(elem_size) - shift) },
@ -9276,7 +9289,14 @@ pub const Value = struct {
part_offset -= part_size;
var wrapped_res_part_it = res_vi.field(ty, part_offset, part_size);
const wrapped_res_part_vi = try wrapped_res_part_it.only(isel);
const wrapped_res_part_ra = try wrapped_res_part_vi.?.defReg(isel) orelse if (need_carry) .zr else continue;
const wrapped_res_part_ra = wrapped_res_part_ra: {
const overflow_ra_lock: RegLock = switch (opts.overflow) {
.ra => |ra| isel.lockReg(ra),
else => .empty,
};
defer overflow_ra_lock.unlock(isel);
break :wrapped_res_part_ra try wrapped_res_part_vi.?.defReg(isel) orelse if (need_carry) .zr else continue;
};
const unwrapped_res_part_ra = unwrapped_res_part_ra: {
if (!need_wrap) break :unwrapped_res_part_ra wrapped_res_part_ra;
if (int_info.bits % 32 == 0) {

View file

@ -4980,8 +4980,8 @@ pub const FuncGen = struct {
.breakpoint => try self.airBreakpoint(inst),
.ret_addr => try self.airRetAddr(inst),
.frame_addr => try self.airFrameAddress(inst),
.@"try" => try self.airTry(body[i..], false),
.try_cold => try self.airTry(body[i..], true),
.@"try" => try self.airTry(inst, false),
.try_cold => try self.airTry(inst, true),
.try_ptr => try self.airTryPtr(inst, false),
.try_ptr_cold => try self.airTryPtr(inst, true),
.intcast => try self.airIntCast(inst, false),
@ -4989,7 +4989,7 @@ pub const FuncGen = struct {
.trunc => try self.airTrunc(inst),
.fptrunc => try self.airFptrunc(inst),
.fpext => try self.airFpext(inst),
.load => try self.airLoad(body[i..]),
.load => try self.airLoad(inst),
.not => try self.airNot(inst),
.store => try self.airStore(inst, false),
.store_safe => try self.airStore(inst, true),
@ -5045,7 +5045,7 @@ pub const FuncGen = struct {
.atomic_store_seq_cst => try self.airAtomicStore(inst, .seq_cst),
.struct_field_ptr => try self.airStructFieldPtr(inst),
.struct_field_val => try self.airStructFieldVal(body[i..]),
.struct_field_val => try self.airStructFieldVal(inst),
.struct_field_ptr_index_0 => try self.airStructFieldPtrIndex(inst, 0),
.struct_field_ptr_index_1 => try self.airStructFieldPtrIndex(inst, 1),
@ -5054,18 +5054,18 @@ pub const FuncGen = struct {
.field_parent_ptr => try self.airFieldParentPtr(inst),
.array_elem_val => try self.airArrayElemVal(body[i..]),
.slice_elem_val => try self.airSliceElemVal(body[i..]),
.array_elem_val => try self.airArrayElemVal(inst),
.slice_elem_val => try self.airSliceElemVal(inst),
.slice_elem_ptr => try self.airSliceElemPtr(inst),
.ptr_elem_val => try self.airPtrElemVal(body[i..]),
.ptr_elem_val => try self.airPtrElemVal(inst),
.ptr_elem_ptr => try self.airPtrElemPtr(inst),
.optional_payload => try self.airOptionalPayload(body[i..]),
.optional_payload => try self.airOptionalPayload(inst),
.optional_payload_ptr => try self.airOptionalPayloadPtr(inst),
.optional_payload_ptr_set => try self.airOptionalPayloadPtrSet(inst),
.unwrap_errunion_payload => try self.airErrUnionPayload(body[i..], false),
.unwrap_errunion_payload_ptr => try self.airErrUnionPayload(body[i..], true),
.unwrap_errunion_payload => try self.airErrUnionPayload(inst, false),
.unwrap_errunion_payload_ptr => try self.airErrUnionPayload(inst, true),
.unwrap_errunion_err => try self.airErrUnionErr(inst, false),
.unwrap_errunion_err_ptr => try self.airErrUnionErr(inst, true),
.errunion_payload_ptr_set => try self.airErrUnionPayloadPtrSet(inst),
@ -6266,19 +6266,14 @@ pub const FuncGen = struct {
// No need to reset the insert cursor since this instruction is noreturn.
}
fn airTry(self: *FuncGen, body_tail: []const Air.Inst.Index, err_cold: bool) !Builder.Value {
const pt = self.ng.pt;
const zcu = pt.zcu;
const inst = body_tail[0];
fn airTry(self: *FuncGen, inst: Air.Inst.Index, err_cold: bool) !Builder.Value {
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const err_union = try self.resolveInst(pl_op.operand);
const extra = self.air.extraData(Air.Try, pl_op.payload);
const body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]);
const err_union_ty = self.typeOf(pl_op.operand);
const payload_ty = self.typeOfIndex(inst);
const can_elide_load = if (isByRef(payload_ty, zcu)) self.canElideLoad(body_tail) else false;
const is_unused = self.liveness.isUnused(inst);
return lowerTry(self, err_union, body, err_union_ty, false, can_elide_load, is_unused, err_cold);
return lowerTry(self, err_union, body, err_union_ty, false, false, is_unused, err_cold);
}
fn airTryPtr(self: *FuncGen, inst: Air.Inst.Index, err_cold: bool) !Builder.Value {
@ -6824,11 +6819,10 @@ pub const FuncGen = struct {
return self.wip.gepStruct(slice_llvm_ty, slice_ptr, index, "");
}
fn airSliceElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
fn airSliceElemVal(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = self.ng.pt;
const zcu = pt.zcu;
const inst = body_tail[0];
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const slice_ty = self.typeOf(bin_op.lhs);
const slice = try self.resolveInst(bin_op.lhs);
@ -6838,9 +6832,6 @@ pub const FuncGen = struct {
const base_ptr = try self.wip.extractValue(slice, &.{0}, "");
const ptr = try self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, &.{index}, "");
if (isByRef(elem_ty, zcu)) {
if (self.canElideLoad(body_tail))
return ptr;
self.maybeMarkAllowZeroAccess(slice_ty.ptrInfo(zcu));
const slice_align = (slice_ty.ptrAlignment(zcu).min(elem_ty.abiAlignment(zcu))).toLlvm();
@ -6867,11 +6858,10 @@ pub const FuncGen = struct {
return self.wip.gep(.inbounds, llvm_elem_ty, base_ptr, &.{index}, "");
}
fn airArrayElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
fn airArrayElemVal(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = self.ng.pt;
const zcu = pt.zcu;
const inst = body_tail[0];
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const array_ty = self.typeOf(bin_op.lhs);
@ -6884,9 +6874,7 @@ pub const FuncGen = struct {
try o.builder.intValue(try o.lowerType(pt, Type.usize), 0), rhs,
};
if (isByRef(elem_ty, zcu)) {
const elem_ptr =
try self.wip.gep(.inbounds, array_llvm_ty, array_llvm_val, &indices, "");
if (canElideLoad(self, body_tail)) return elem_ptr;
const elem_ptr = try self.wip.gep(.inbounds, array_llvm_ty, array_llvm_val, &indices, "");
const elem_alignment = elem_ty.abiAlignment(zcu).toLlvm();
return self.loadByRef(elem_ptr, elem_ty, elem_alignment, .normal);
} else {
@ -6900,11 +6888,10 @@ pub const FuncGen = struct {
return self.wip.extractElement(array_llvm_val, rhs, "");
}
fn airPtrElemVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
fn airPtrElemVal(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = self.ng.pt;
const zcu = pt.zcu;
const inst = body_tail[0];
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
const ptr_ty = self.typeOf(bin_op.lhs);
const elem_ty = ptr_ty.childType(zcu);
@ -6918,10 +6905,7 @@ pub const FuncGen = struct {
else
&.{rhs}, "");
if (isByRef(elem_ty, zcu)) {
if (self.canElideLoad(body_tail)) return ptr;
self.maybeMarkAllowZeroAccess(ptr_ty.ptrInfo(zcu));
const ptr_align = (ptr_ty.ptrAlignment(zcu).min(elem_ty.abiAlignment(zcu))).toLlvm();
return self.loadByRef(ptr, elem_ty, ptr_align, if (ptr_ty.isVolatilePtr(zcu)) .@"volatile" else .normal);
}
@ -6974,11 +6958,10 @@ pub const FuncGen = struct {
return self.fieldPtr(inst, struct_ptr, struct_ptr_ty, field_index);
}
fn airStructFieldVal(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
fn airStructFieldVal(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = self.ng.pt;
const zcu = pt.zcu;
const inst = body_tail[0];
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
const struct_ty = self.typeOf(struct_field.struct_operand);
@ -7052,9 +7035,6 @@ pub const FuncGen = struct {
.flags = .{ .alignment = alignment },
});
if (isByRef(field_ty, zcu)) {
if (canElideLoad(self, body_tail))
return field_ptr;
assert(alignment != .none);
const field_alignment = alignment.toLlvm();
return self.loadByRef(field_ptr, field_ty, field_alignment, .normal);
@ -7070,7 +7050,6 @@ pub const FuncGen = struct {
try self.wip.gepStruct(union_llvm_ty, struct_llvm_val, payload_index, "");
const payload_alignment = layout.payload_align.toLlvm();
if (isByRef(field_ty, zcu)) {
if (canElideLoad(self, body_tail)) return field_ptr;
return self.loadByRef(field_ptr, field_ty, payload_alignment, .normal);
} else {
return self.loadTruncate(.normal, field_ty, field_ptr, payload_alignment);
@ -7829,11 +7808,10 @@ pub const FuncGen = struct {
return self.wip.gepStruct(optional_llvm_ty, operand, 0, "");
}
fn airOptionalPayload(self: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
fn airOptionalPayload(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = self.ng.pt;
const zcu = pt.zcu;
const inst = body_tail[0];
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const optional_ty = self.typeOf(ty_op.operand);
@ -7846,19 +7824,13 @@ pub const FuncGen = struct {
}
const opt_llvm_ty = try o.lowerType(pt, optional_ty);
const can_elide_load = if (isByRef(payload_ty, zcu)) self.canElideLoad(body_tail) else false;
return self.optPayloadHandle(opt_llvm_ty, operand, optional_ty, can_elide_load);
return self.optPayloadHandle(opt_llvm_ty, operand, optional_ty, false);
}
fn airErrUnionPayload(
self: *FuncGen,
body_tail: []const Air.Inst.Index,
operand_is_ptr: bool,
) !Builder.Value {
fn airErrUnionPayload(self: *FuncGen, inst: Air.Inst.Index, operand_is_ptr: bool) !Builder.Value {
const o = self.ng.object;
const pt = self.ng.pt;
const zcu = pt.zcu;
const inst = body_tail[0];
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand = try self.resolveInst(ty_op.operand);
const operand_ty = self.typeOf(ty_op.operand);
@ -7877,7 +7849,6 @@ pub const FuncGen = struct {
const payload_alignment = payload_ty.abiAlignment(zcu).toLlvm();
const payload_ptr = try self.wip.gepStruct(err_union_llvm_ty, operand, offset, "");
if (isByRef(payload_ty, zcu)) {
if (self.canElideLoad(body_tail)) return payload_ptr;
return self.loadByRef(payload_ptr, payload_ty, payload_alignment, .normal);
}
const payload_llvm_ty = err_union_llvm_ty.structFields(&o.builder)[offset];
@ -9740,45 +9711,14 @@ pub const FuncGen = struct {
return .none;
}
/// As an optimization, we want to avoid unnecessary copies of isByRef=true
/// types. Here, we scan forward in the current block, looking to see if
/// this load dies before any side effects occur. In such case, we can
/// safely return the operand without making a copy.
///
/// The first instruction of `body_tail` is the one whose copy we want to elide.
fn canElideLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) bool {
const zcu = fg.ng.pt.zcu;
const ip = &zcu.intern_pool;
for (body_tail[1..]) |body_inst| {
switch (fg.liveness.categorizeOperand(fg.air, zcu, body_inst, body_tail[0], ip)) {
.none => continue,
.write, .noret, .complex => return false,
.tomb => return true,
}
}
// The only way to get here is to hit the end of a loop instruction
// (implicit repeat).
return false;
}
fn airLoad(fg: *FuncGen, body_tail: []const Air.Inst.Index) !Builder.Value {
fn airLoad(fg: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const pt = fg.ng.pt;
const zcu = pt.zcu;
const inst = body_tail[0];
const ty_op = fg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const ptr_ty = fg.typeOf(ty_op.operand);
const ptr_info = ptr_ty.ptrInfo(zcu);
const ptr = try fg.resolveInst(ty_op.operand);
elide: {
if (ptr_info.flags.alignment != .none) break :elide;
if (!isByRef(Type.fromInterned(ptr_info.child), zcu)) break :elide;
if (!canElideLoad(fg, body_tail)) break :elide;
return ptr;
}
fg.maybeMarkAllowZeroAccess(ptr_info);
return fg.load(ptr, ptr_ty);
}

View file

@ -406,7 +406,7 @@ const Writer = struct {
.memset,
.memmove,
.elem_ptr_node,
.elem_val_node,
.elem_ptr_load,
.elem_ptr,
.elem_val,
.array_type,
@ -450,14 +450,14 @@ const Writer = struct {
.switch_block_err_union => try self.writeSwitchBlockErrUnion(stream, inst),
.field_val,
.field_ptr_load,
.field_ptr,
.decl_literal,
.decl_literal_no_coerce,
=> try self.writePlNodeField(stream, inst),
.field_ptr_named,
.field_val_named,
.field_ptr_named_load,
=> try self.writePlNodeFieldNamed(stream, inst),
.as_node, .as_shift_operand => try self.writeAs(stream, inst),

View file

@ -392,7 +392,6 @@ test "read 128-bit field from default aligned struct in global memory" {
}
test "struct field explicit alignment" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // flaky

View file

@ -1125,3 +1125,24 @@ test "splat with an error union or optional result type" {
_ = try S.doTest(@Vector(4, u32));
_ = try S.doTest([4]u32);
}
test "resist alias of explicit copy of array passed as arg" {
const S = struct {
const Thing = [1]u32;
fn destroy_and_replace(box_b: *Thing, a: Thing, box_a: *Thing) void {
box_a.* = undefined;
box_b.* = a;
}
};
var buf_a: S.Thing = .{1234};
var buf_b: S.Thing = .{5678};
const box_a = &buf_a;
const box_b = &buf_b;
const a = box_a.*; // explicit copy
S.destroy_and_replace(box_b, a, box_a);
try expect(buf_b[0] == 1234);
}

View file

@ -511,7 +511,6 @@ test "@bitCast of packed struct of bools all false" {
}
test "@bitCast of packed struct containing pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO

View file

@ -951,7 +951,6 @@ test "returning an error union containing a type with no runtime bits" {
}
test "try used in recursive function with inferred error set" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

View file

@ -1032,6 +1032,7 @@ test "@fieldParentPtr packed struct first zero-bit field" {
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const C = packed struct {
a: u0 = 0,
@ -1137,6 +1138,7 @@ test "@fieldParentPtr packed struct middle zero-bit field" {
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const C = packed struct {
a: f32 = 3.14,
@ -1242,6 +1244,7 @@ test "@fieldParentPtr packed struct last zero-bit field" {
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const C = packed struct {
a: f32 = 3.14,

View file

@ -1741,7 +1741,6 @@ test "comptime calls are only memoized when float arguments are bit-for-bit equa
test "result location forwarded through unary float builtins" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

View file

@ -14,7 +14,6 @@ test "store to global array" {
var vpos = @Vector(2, f32){ 0.0, 0.0 };
test "store to global vector" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;

View file

@ -139,11 +139,7 @@ fn expectVectorsEqual(a: anytype, b: anytype) !void {
const len_a = @typeInfo(@TypeOf(a)).vector.len;
const len_b = @typeInfo(@TypeOf(b)).vector.len;
try expect(len_a == len_b);
var i: usize = 0;
while (i < len_a) : (i += 1) {
try expect(a[i] == b[i]);
}
try expect(@reduce(.And, a == b));
}
test "@ctz" {

View file

@ -122,7 +122,6 @@ test "memset with large array element, runtime known" {
}
test "memset with large array element, comptime known" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

View file

@ -3,7 +3,6 @@ const builtin = @import("builtin");
const assert = std.debug.assert;
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const native_endian = builtin.cpu.arch.endian();
test "flags in packed structs" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -163,26 +162,24 @@ test "correct sizeOf and offsets in packed structs" {
try expectEqual(22, @bitOffsetOf(PStruct, "u10_b"));
try expectEqual(4, @sizeOf(PStruct));
if (native_endian == .little) {
const s1 = @as(PStruct, @bitCast(@as(u32, 0x12345678)));
try expectEqual(false, s1.bool_a);
try expectEqual(false, s1.bool_b);
try expectEqual(false, s1.bool_c);
try expectEqual(true, s1.bool_d);
try expectEqual(true, s1.bool_e);
try expectEqual(true, s1.bool_f);
try expectEqual(1, s1.u1_a);
try expectEqual(false, s1.bool_g);
try expectEqual(0, s1.u1_b);
try expectEqual(3, s1.u3_a);
try expectEqual(0b1101000101, s1.u10_a);
try expectEqual(0b0001001000, s1.u10_b);
const s1 = @as(PStruct, @bitCast(@as(u32, 0x12345678)));
try expectEqual(false, s1.bool_a);
try expectEqual(false, s1.bool_b);
try expectEqual(false, s1.bool_c);
try expectEqual(true, s1.bool_d);
try expectEqual(true, s1.bool_e);
try expectEqual(true, s1.bool_f);
try expectEqual(1, s1.u1_a);
try expectEqual(false, s1.bool_g);
try expectEqual(0, s1.u1_b);
try expectEqual(3, s1.u3_a);
try expectEqual(0b1101000101, s1.u10_a);
try expectEqual(0b0001001000, s1.u10_b);
const s2 = @as(packed struct { x: u1, y: u7, z: u24 }, @bitCast(@as(u32, 0xd5c71ff4)));
try expectEqual(0, s2.x);
try expectEqual(0b1111010, s2.y);
try expectEqual(0xd5c71f, s2.z);
}
const s2 = @as(packed struct { x: u1, y: u7, z: u24 }, @bitCast(@as(u32, 0xd5c71ff4)));
try expectEqual(0, s2.x);
try expectEqual(0b1111010, s2.y);
try expectEqual(0xd5c71f, s2.z);
}
test "nested packed structs" {
@ -202,15 +199,13 @@ test "nested packed structs" {
try expectEqual(3, @offsetOf(S3, "y"));
try expectEqual(24, @bitOffsetOf(S3, "y"));
if (native_endian == .little) {
const s3 = @as(S3Padded, @bitCast(@as(u64, 0xe952d5c71ff4))).s3;
try expectEqual(0xf4, s3.x.a);
try expectEqual(0x1f, s3.x.b);
try expectEqual(0xc7, s3.x.c);
try expectEqual(0xd5, s3.y.d);
try expectEqual(0x52, s3.y.e);
try expectEqual(0xe9, s3.y.f);
}
const s3 = @as(S3Padded, @bitCast(@as(u64, 0xe952d5c71ff4))).s3;
try expectEqual(0xf4, s3.x.a);
try expectEqual(0x1f, s3.x.b);
try expectEqual(0xc7, s3.x.c);
try expectEqual(0xd5, s3.y.d);
try expectEqual(0x52, s3.y.e);
try expectEqual(0xe9, s3.y.f);
const S4 = packed struct { a: i32, b: i8 };
const S5 = packed struct { a: i32, b: i8, c: S4 };
@ -230,10 +225,10 @@ test "nested packed structs" {
}
test "regular in irregular packed struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const Irregular = packed struct {
bar: Regular = Regular{},
@ -253,7 +248,6 @@ test "nested packed struct unaligned" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (native_endian != .little) return error.SkipZigTest; // Byte aligned packed struct field pointers have not been implemented yet
const S1 = packed struct {
a: u4,
@ -321,10 +315,10 @@ test "nested packed struct unaligned" {
}
test "byte-aligned field pointer offsets" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const S = struct {
const A = packed struct {
@ -346,21 +340,12 @@ test "byte-aligned field pointer offsets" {
.c = 3,
.d = 4,
};
switch (comptime builtin.cpu.arch.endian()) {
.little => {
comptime assert(@TypeOf(&a.a) == *align(4) u8);
comptime assert(@TypeOf(&a.b) == *u8);
comptime assert(@TypeOf(&a.c) == *align(2) u8);
comptime assert(@TypeOf(&a.d) == *u8);
},
.big => {
// TODO re-evaluate packed struct endianness
comptime assert(@TypeOf(&a.a) == *align(4:0:4) u8);
comptime assert(@TypeOf(&a.b) == *align(4:8:4) u8);
comptime assert(@TypeOf(&a.c) == *align(4:16:4) u8);
comptime assert(@TypeOf(&a.d) == *align(4:24:4) u8);
},
}
comptime assert(@TypeOf(&a.a) == *align(4:0:4) u8);
comptime assert(@TypeOf(&a.b) == *align(4:8:4) u8);
comptime assert(@TypeOf(&a.c) == *align(4:16:4) u8);
comptime assert(@TypeOf(&a.d) == *align(4:24:4) u8);
try expect(a.a == 1);
try expect(a.b == 2);
try expect(a.c == 3);
@ -394,16 +379,10 @@ test "byte-aligned field pointer offsets" {
.a = 1,
.b = 2,
};
switch (comptime builtin.cpu.arch.endian()) {
.little => {
comptime assert(@TypeOf(&b.a) == *align(4) u16);
comptime assert(@TypeOf(&b.b) == *u16);
},
.big => {
comptime assert(@TypeOf(&b.a) == *align(4:0:4) u16);
comptime assert(@TypeOf(&b.b) == *align(4:16:4) u16);
},
}
comptime assert(@TypeOf(&b.a) == *align(4:0:4) u16);
comptime assert(@TypeOf(&b.b) == *align(4:16:4) u16);
try expect(b.a == 1);
try expect(b.b == 2);
@ -428,7 +407,6 @@ test "nested packed struct field pointers" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // ubsan unaligned pointer access
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (native_endian != .little) return error.SkipZigTest; // Byte aligned packed struct field pointers have not been implemented yet
const S2 = packed struct {
base: u8,
@ -485,7 +463,6 @@ test "@intFromPtr on a packed struct field" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (native_endian != .little) return error.SkipZigTest;
const S = struct {
const P = packed struct {
@ -500,14 +477,13 @@ test "@intFromPtr on a packed struct field" {
.z = 0,
};
};
try expect(@intFromPtr(&S.p0.z) - @intFromPtr(&S.p0.x) == 2);
try expect(@intFromPtr(&S.p0.z) - @intFromPtr(&S.p0.x) == 0);
}
test "@intFromPtr on a packed struct field unaligned and nested" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (native_endian != .little) return error.SkipZigTest; // Byte aligned packed struct field pointers have not been implemented yet
const S1 = packed struct {
a: u4,
@ -567,16 +543,16 @@ test "@intFromPtr on a packed struct field unaligned and nested" {
else => {},
}
try expect(@intFromPtr(&S2.s.base) - @intFromPtr(&S2.s) == 0);
try expect(@intFromPtr(&S2.s.p0.a) - @intFromPtr(&S2.s) == 1);
try expect(@intFromPtr(&S2.s.p0.b) - @intFromPtr(&S2.s) == 1);
try expect(@intFromPtr(&S2.s.p0.c) - @intFromPtr(&S2.s) == 2);
try expect(@intFromPtr(&S2.s.p0.a) - @intFromPtr(&S2.s) == 0);
try expect(@intFromPtr(&S2.s.p0.b) - @intFromPtr(&S2.s) == 0);
try expect(@intFromPtr(&S2.s.p0.c) - @intFromPtr(&S2.s) == 0);
try expect(@intFromPtr(&S2.s.bit0) - @intFromPtr(&S2.s) == 0);
try expect(@intFromPtr(&S2.s.p1.a) - @intFromPtr(&S2.s) == 0);
try expect(@intFromPtr(&S2.s.p2.a) - @intFromPtr(&S2.s) == 0);
try expect(@intFromPtr(&S2.s.p2.b) - @intFromPtr(&S2.s) == 5);
try expect(@intFromPtr(&S2.s.p3.a) - @intFromPtr(&S2.s) == 6);
try expect(@intFromPtr(&S2.s.p3.b) - @intFromPtr(&S2.s) == 6);
try expect(@intFromPtr(&S2.s.p3.c) - @intFromPtr(&S2.s) == 7);
try expect(@intFromPtr(&S2.s.p2.b) - @intFromPtr(&S2.s) == 0);
try expect(@intFromPtr(&S2.s.p3.a) - @intFromPtr(&S2.s) == 0);
try expect(@intFromPtr(&S2.s.p3.b) - @intFromPtr(&S2.s) == 0);
try expect(@intFromPtr(&S2.s.p3.c) - @intFromPtr(&S2.s) == 0);
const S3 = packed struct {
pad: u8,
@ -599,7 +575,7 @@ test "@intFromPtr on a packed struct field unaligned and nested" {
comptime assert(@TypeOf(&S3.v0.s.v) == *align(4:10:4) u3);
comptime assert(@TypeOf(&S3.v0.s.s.v) == *align(4:13:4) u2);
comptime assert(@TypeOf(&S3.v0.s.s.s.bit0) == *align(4:15:4) u1);
comptime assert(@TypeOf(&S3.v0.s.s.s.byte) == *align(2) u8);
comptime assert(@TypeOf(&S3.v0.s.s.s.byte) == *align(4:16:4) u8);
comptime assert(@TypeOf(&S3.v0.s.s.s.bit1) == *align(4:24:4) u1);
try expect(@intFromPtr(&S3.v0.v) - @intFromPtr(&S3.v0) == 0);
try expect(@intFromPtr(&S3.v0.s) - @intFromPtr(&S3.v0) == 0);
@ -608,7 +584,7 @@ test "@intFromPtr on a packed struct field unaligned and nested" {
try expect(@intFromPtr(&S3.v0.s.s.v) - @intFromPtr(&S3.v0) == 0);
try expect(@intFromPtr(&S3.v0.s.s.s) - @intFromPtr(&S3.v0) == 0);
try expect(@intFromPtr(&S3.v0.s.s.s.bit0) - @intFromPtr(&S3.v0) == 0);
try expect(@intFromPtr(&S3.v0.s.s.s.byte) - @intFromPtr(&S3.v0) == 2);
try expect(@intFromPtr(&S3.v0.s.s.s.byte) - @intFromPtr(&S3.v0) == 0);
try expect(@intFromPtr(&S3.v0.s.s.s.bit1) - @intFromPtr(&S3.v0) == 0);
}
@ -653,13 +629,13 @@ test "optional pointer in packed struct" {
}
test "nested packed struct field access test" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO packed structs larger than 64 bits
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const Vec2 = packed struct {
x: f32,
@ -774,9 +750,9 @@ test "nested packed struct field access test" {
}
test "nested packed struct at non-zero offset" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const Pair = packed struct(u24) {
a: u16 = 0,
@ -871,7 +847,6 @@ test "nested packed struct at non-zero offset 2" {
}
test "runtime init of unnamed packed struct type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@ -916,21 +891,13 @@ test "overaligned pointer to packed struct" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const S = packed struct { a: u32, b: u32 };
var foo: S align(4) = .{ .a = 123, .b = 456 };
const ptr: *align(4) S = &foo;
switch (comptime builtin.cpu.arch.endian()) {
.little => {
const ptr_to_b: *u32 = &ptr.b;
try expect(ptr_to_b.* == 456);
},
.big => {
// Byte aligned packed struct field pointers have not been implemented yet.
const ptr_to_a: *align(4:0:8) u32 = &ptr.a;
try expect(ptr_to_a.* == 123);
},
}
const ptr_to_a: *align(4:0:8) u32 = &ptr.a;
try expect(ptr_to_a.* == 123);
}
test "packed struct initialized in bitcast" {
@ -1345,11 +1312,11 @@ test "assign packed struct initialized with RLS to packed struct literal field"
}
test "byte-aligned packed relocation" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const S = struct {
var global: u8 align(2) = 0;

View file

@ -419,7 +419,6 @@ test "pointer sentinel with enums" {
}
test "pointer sentinel with optional element" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@ -779,3 +778,11 @@ test "pointers to elements of many-ptr to zero-bit type" {
try expect(a == b);
}
test "comptime C pointer to optional pointer" {
const opt: ?*u8 = @ptrFromInt(0x1000);
const outer_ptr: [*c]const ?*u8 = &opt;
const inner_ptr = &outer_ptr.*.?;
comptime assert(@TypeOf(inner_ptr) == [*c]const *u8);
comptime assert(@intFromPtr(inner_ptr.*) == 0x1000);
}

View file

@ -376,10 +376,10 @@ const APackedStruct = packed struct {
};
test "packed struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
var foo = APackedStruct{
.x = 1,
@ -744,11 +744,11 @@ const S0 = struct {
var g_foo: S0 = S0.init();
test "packed struct with fp fields" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const S = packed struct {
data0: f32,
@ -1924,7 +1924,6 @@ test "runtime value in nested initializer passed as pointer to function" {
}
test "struct field default value is a call" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -2154,3 +2153,26 @@ test "align 1 struct parameter dereferenced and returned" {
.little => try expect(s.a == 0x05040302),
}
}
test "avoid unused field function body compile error" {
const Case = struct {
const This = @This();
const S = struct {
a: usize = 1,
b: fn () void = This.functionThatDoesNotCompile,
};
const s: S = .{};
fn entry() usize {
return s.a;
}
pub fn functionThatDoesNotCompile() void {
@compileError("told you so");
}
};
try expect(Case.entry() == 1);
}

View file

@ -300,7 +300,6 @@ test "switch on error union catch capture" {
}
test "switch on error union if else capture" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

View file

@ -122,7 +122,6 @@ test "'return try' through conditional" {
}
test "try ptr propagation const" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
@ -155,7 +154,6 @@ test "try ptr propagation const" {
}
test "try ptr propagation mutate" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;

View file

@ -384,7 +384,6 @@ test "tuple initialized with a runtime known value" {
}
test "tuple of struct concatenation and coercion to array" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

View file

@ -408,7 +408,6 @@ test "Type.Enum" {
}
test "Type.Union" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;

View file

@ -1335,9 +1335,9 @@ test "union field ptr - zero sized field" {
}
test "packed union in packed struct" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const S = packed struct {
nested: packed union {
@ -1420,7 +1420,6 @@ test "union reassignment can use previous value" {
}
test "reinterpreting enum value inside packed union" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const U = packed union {
@ -1612,7 +1611,6 @@ test "memset extern union" {
}
test "memset packed union" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const U = packed union {

View file

@ -186,7 +186,6 @@ test "coerce reference to var arg" {
}
test "variadic functions" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO

View file

@ -394,7 +394,6 @@ test "vector @splat" {
}
test "load vector elements via comptime index" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -415,7 +414,6 @@ test "load vector elements via comptime index" {
}
test "store vector elements via comptime index" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -441,49 +439,6 @@ test "store vector elements via comptime index" {
try comptime S.doTheTest();
}
test "load vector elements via runtime index" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() !void {
var v: @Vector(4, i32) = [_]i32{ 1, 2, 3, undefined };
_ = &v;
var i: u32 = 0;
try expect(v[i] == 1);
i += 1;
try expect(v[i] == 2);
i += 1;
try expect(v[i] == 3);
}
};
try S.doTheTest();
try comptime S.doTheTest();
}
test "store vector elements via runtime index" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() !void {
var v: @Vector(4, i32) = [_]i32{ 1, 5, 3, undefined };
var i: u32 = 2;
v[i] = 1;
try expect(v[1] == 5);
try expect(v[2] == 1);
i += 1;
v[i] = -364;
try expect(-364 == v[3]);
}
};
try S.doTheTest();
try comptime S.doTheTest();
}
test "initialize vector which is a struct field" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -567,20 +522,20 @@ test "vector division operators" {
};
if (!is_signed_int) {
const d0 = x / y;
for (@as([4]T, d0), 0..) |v, i| {
inline for (@as([4]T, d0), 0..) |v, i| {
try expect(x[i] / y[i] == v);
}
}
const d1 = @divExact(x, y);
for (@as([4]T, d1), 0..) |v, i| {
inline for (@as([4]T, d1), 0..) |v, i| {
try expect(@divExact(x[i], y[i]) == v);
}
const d2 = @divFloor(x, y);
for (@as([4]T, d2), 0..) |v, i| {
inline for (@as([4]T, d2), 0..) |v, i| {
try expect(@divFloor(x[i], y[i]) == v);
}
const d3 = @divTrunc(x, y);
for (@as([4]T, d3), 0..) |v, i| {
inline for (@as([4]T, d3), 0..) |v, i| {
try expect(@divTrunc(x[i], y[i]) == v);
}
}
@ -592,16 +547,16 @@ test "vector division operators" {
};
if (!is_signed_int and @typeInfo(T) != .float) {
const r0 = x % y;
for (@as([4]T, r0), 0..) |v, i| {
inline for (@as([4]T, r0), 0..) |v, i| {
try expect(x[i] % y[i] == v);
}
}
const r1 = @mod(x, y);
for (@as([4]T, r1), 0..) |v, i| {
inline for (@as([4]T, r1), 0..) |v, i| {
try expect(@mod(x[i], y[i]) == v);
}
const r2 = @rem(x, y);
for (@as([4]T, r2), 0..) |v, i| {
inline for (@as([4]T, r2), 0..) |v, i| {
try expect(@rem(x[i], y[i]) == v);
}
}
@ -651,10 +606,15 @@ test "vector bitwise not operator" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.cpu.arch == .aarch64_be) {
// https://github.com/ziglang/zig/issues/24061
return error.SkipZigTest;
}
const S = struct {
fn doTheTestNot(comptime T: type, x: @Vector(4, T)) !void {
const y = ~x;
for (@as([4]T, y), 0..) |v, i| {
inline for (@as([4]T, y), 0..) |v, i| {
try expect(~x[i] == v);
}
}
@ -685,10 +645,15 @@ test "vector boolean not operator" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.cpu.arch == .aarch64_be) {
// https://github.com/ziglang/zig/issues/24061
return error.SkipZigTest;
}
const S = struct {
fn doTheTestNot(comptime T: type, x: @Vector(4, T)) !void {
const y = !x;
for (@as([4]T, y), 0..) |v, i| {
inline for (@as([4]T, y), 0..) |v, i| {
try expect(!x[i] == v);
}
}
@ -1359,7 +1324,6 @@ test "alignment of vectors" {
}
test "loading the second vector from a slice of vectors" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -1450,6 +1414,7 @@ test "zero multiplicand" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const zeros = @Vector(2, u32){ 0.0, 0.0 };
var ones = @Vector(2, u32){ 1.0, 1.0 };
@ -1530,8 +1495,7 @@ test "store packed vector element" {
var v = @Vector(4, u1){ 1, 1, 1, 1 };
try expectEqual(@Vector(4, u1){ 1, 1, 1, 1 }, v);
var index: usize = 0;
_ = &index;
const index: usize = 0;
v[index] = 0;
try expectEqual(@Vector(4, u1){ 0, 1, 1, 1 }, v);
}

View file

@ -52,22 +52,15 @@ fn accessVector(comptime init: anytype) !void {
var vector: Vector = undefined;
vector = init;
inline for (0..@typeInfo(Vector).vector.len) |ct_index| {
var rt_index: usize = undefined;
rt_index = ct_index;
if (&vector[rt_index] != &vector[ct_index]) return error.Unexpected;
if (vector[rt_index] != init[ct_index]) return error.Unexpected;
if (&vector[ct_index] != &vector[ct_index]) return error.Unexpected;
if (vector[ct_index] != init[ct_index]) return error.Unexpected;
vector[rt_index] = rt_vals[0];
if (vector[rt_index] != ct_vals[0]) return error.Unexpected;
vector[ct_index] = rt_vals[0];
if (vector[ct_index] != ct_vals[0]) return error.Unexpected;
vector[rt_index] = ct_vals[1];
if (vector[rt_index] != ct_vals[1]) return error.Unexpected;
vector[ct_index] = ct_vals[1];
if (vector[ct_index] != ct_vals[1]) return error.Unexpected;
vector[ct_index] = ct_vals[0];
if (vector[rt_index] != ct_vals[0]) return error.Unexpected;
if (vector[ct_index] != ct_vals[0]) return error.Unexpected;
vector[ct_index] = rt_vals[1];
if (vector[rt_index] != ct_vals[1]) return error.Unexpected;
if (vector[ct_index] != ct_vals[1]) return error.Unexpected;
}
}

View file

@ -75,31 +75,31 @@ export fn bax() void {
//
// :5:19: error: runtime value contains reference to comptime var
// :5:19: note: comptime var pointers are not available at runtime
// :4:27: note: 'runtime_value' points to comptime var declared here
// :4:14: note: 'runtime_value' points to comptime var declared here
// :12:40: error: runtime value contains reference to comptime var
// :12:40: note: comptime var pointers are not available at runtime
// :11:27: note: 'runtime_value' points to comptime var declared here
// :11:14: note: 'runtime_value' points to comptime var declared here
// :19:50: error: runtime value contains reference to comptime var
// :19:50: note: comptime var pointers are not available at runtime
// :18:27: note: 'runtime_value' points to comptime var declared here
// :18:14: note: 'runtime_value' points to comptime var declared here
// :28:9: error: runtime value contains reference to comptime var
// :28:9: note: comptime var pointers are not available at runtime
// :27:27: note: 'runtime_value' points to comptime var declared here
// :27:14: note: 'runtime_value' points to comptime var declared here
// :36:9: error: runtime value contains reference to comptime var
// :36:9: note: comptime var pointers are not available at runtime
// :35:27: note: 'runtime_value' points to comptime var declared here
// :35:14: note: 'runtime_value' points to comptime var declared here
// :41:12: error: runtime value contains reference to comptime var
// :41:12: note: comptime var pointers are not available at runtime
// :40:27: note: 'runtime_value' points to comptime var declared here
// :40:14: note: 'runtime_value' points to comptime var declared here
// :46:39: error: runtime value contains reference to comptime var
// :46:39: note: comptime var pointers are not available at runtime
// :45:27: note: 'runtime_value' points to comptime var declared here
// :45:14: note: 'runtime_value' points to comptime var declared here
// :55:18: error: runtime value contains reference to comptime var
// :55:18: note: comptime var pointers are not available at runtime
// :51:30: note: 'runtime_value' points to comptime var declared here
// :51:14: note: 'runtime_value' points to comptime var declared here
// :63:18: error: runtime value contains reference to comptime var
// :63:18: note: comptime var pointers are not available at runtime
// :59:27: note: 'runtime_value' points to comptime var declared here
// :59:14: note: 'runtime_value' points to comptime var declared here
// :71:19: error: runtime value contains reference to comptime var
// :71:19: note: comptime var pointers are not available at runtime
// :67:30: note: 'runtime_value' points to comptime var declared here
// :67:14: note: 'runtime_value' points to comptime var declared here

View file

@ -47,19 +47,19 @@ export var h: *[1]u32 = h: {
// error
//
// :1:27: error: global variable contains reference to comptime var
// :2:18: note: 'a' points to comptime var declared here
// :2:5: note: 'a' points to comptime var declared here
// :6:30: error: global variable contains reference to comptime var
// :7:18: note: 'b[0]' points to comptime var declared here
// :7:5: note: 'b[0]' points to comptime var declared here
// :11:30: error: global variable contains reference to comptime var
// :12:18: note: 'c' points to comptime var declared here
// :12:5: note: 'c' points to comptime var declared here
// :16:33: error: global variable contains reference to comptime var
// :17:18: note: 'd' points to comptime var declared here
// :17:5: note: 'd' points to comptime var declared here
// :22:24: error: global variable contains reference to comptime var
// :23:18: note: 'e.ptr' points to comptime var declared here
// :23:5: note: 'e.ptr' points to comptime var declared here
// :28:33: error: global variable contains reference to comptime var
// :29:18: note: 'f' points to comptime var declared here
// :29:5: note: 'f' points to comptime var declared here
// :34:40: error: global variable contains reference to comptime var
// :34:40: note: 'g' points to 'v0[0]', where
// :36:24: note: 'v0[1]' points to comptime var declared here
// :36:5: note: 'v0[1]' points to comptime var declared here
// :42:28: error: global variable contains reference to comptime var
// :43:22: note: 'h' points to comptime var declared here
// :43:5: note: 'h' points to comptime var declared here

View file

@ -21,5 +21,6 @@ comptime {
// error
//
// :7:16: error: captured value contains reference to comptime var
// :16:30: note: 'wrapper.ptr' points to comptime var declared here
// :7:16: note: 'wrapper' points to '@as(*const tmp.Wrapper, @ptrCast(&v0)).*', where
// :16:5: note: 'v0.ptr' points to comptime var declared here
// :17:29: note: called at comptime here

View file

@ -1,6 +1,8 @@
pub const Foo = enum(c_int) {
A = Foo.B,
C = D,
pub const B = 0;
};
export fn entry() void {
const s: Foo = Foo.E;

View file

@ -0,0 +1,13 @@
pub const Foo = enum(c_int) {
A = Foo.B,
C = D,
};
export fn entry() void {
const s: Foo = Foo.E;
_ = s;
}
const D = 1;
// error
//
// :1:5: error: dependency loop detected

View file

@ -9,4 +9,4 @@ export fn foo() void {
//
// :3:10: error: runtime value contains reference to comptime var
// :3:10: note: comptime var pointers are not available at runtime
// :2:34: note: 'runtime_value' points to comptime var declared here
// :2:14: note: 'runtime_value' points to comptime var declared here

View file

@ -1,19 +0,0 @@
const Foo = packed struct {
a: u8,
b: u32,
};
export fn entry() void {
var foo = Foo{ .a = 1, .b = 10 };
bar(&foo.b);
}
fn bar(x: *u32) void {
x.* += 1;
}
// error
//
// :8:9: error: expected type '*u32', found '*align(1) u32'
// :8:9: note: pointer alignment '1' cannot cast into pointer alignment '4'
// :11:11: note: parameter type declared here

View file

@ -1,19 +0,0 @@
const Foo = packed struct {
a: u8,
b: u32,
};
export fn entry() void {
var foo = Foo{ .a = 1, .b = 10 };
foo.b += 1;
bar(@as(*[1]u32, &foo.b)[0..]);
}
fn bar(x: []u32) void {
x[0] += 1;
}
// error
//
// :9:22: error: expected type '*[1]u32', found '*align(1) u32'
// :9:22: note: pointer alignment '1' cannot cast into pointer alignment '4'

View file

@ -12,4 +12,4 @@ fn loadv(ptr: anytype) i31 {
// error
//
// :10:15: error: unable to determine vector element index of type '*align(16:0:4:?) i31'
// :5:22: error: vector index not comptime known

View file

@ -12,4 +12,4 @@ fn storev(ptr: anytype, val: i31) void {
// error
//
// :10:8: error: unable to determine vector element index of type '*align(16:0:4:?) i31'
// :6:15: error: vector index not comptime known

View file

@ -6,5 +6,5 @@ pub fn main() void {}
// run
// backend=llvm
// target=arm-linux,armeb-linux,thumb-linux,thumbeb-linux,aarch64-linux,aarch64_be-linux,loongarch64-linux,mips-linux,mipsel-linux,mips64-linux,mips64el-linux,powerpc-linux,powerpcle-linux,powerpc64-linux,powerpc64le-linux,riscv32-linux,riscv64-linux,s390x-linux,x86-linux,x86_64-linux
// target=arm-linux,armeb-linux,thumb-linux,thumbeb-linux,aarch64-linux,aarch64_be-linux,loongarch64-linux,mips-linux,mipsel-linux,mips64-linux,mips64el-linux,powerpc-linux,powerpcle-linux,powerpc64-linux,powerpc64le-linux,riscv32-linux,riscv64-linux,x86-linux,x86_64-linux
// pie=true

View file

@ -1316,110 +1316,6 @@ pub fn addCases(cases: *tests.RunTranslatedCContext) void {
\\}
, "");
cases.add("basic vector expressions",
\\#include <stdlib.h>
\\#include <stdint.h>
\\typedef int16_t __v8hi __attribute__((__vector_size__(16)));
\\int main(int argc, char**argv) {
\\ __v8hi uninitialized;
\\ __v8hi empty_init = {};
\\ for (int i = 0; i < 8; i++) {
\\ if (empty_init[i] != 0) abort();
\\ }
\\ __v8hi partial_init = {0, 1, 2, 3};
\\
\\ __v8hi a = {0, 1, 2, 3, 4, 5, 6, 7};
\\ __v8hi b = (__v8hi) {100, 200, 300, 400, 500, 600, 700, 800};
\\
\\ __v8hi sum = a + b;
\\ for (int i = 0; i < 8; i++) {
\\ if (sum[i] != a[i] + b[i]) abort();
\\ }
\\ return 0;
\\}
, "");
cases.add("__builtin_shufflevector",
\\#include <stdlib.h>
\\#include <stdint.h>
\\typedef int16_t __v4hi __attribute__((__vector_size__(8)));
\\typedef int16_t __v8hi __attribute__((__vector_size__(16)));
\\int main(int argc, char**argv) {
\\ __v8hi v8_a = {0, 1, 2, 3, 4, 5, 6, 7};
\\ __v8hi v8_b = {100, 200, 300, 400, 500, 600, 700, 800};
\\ __v8hi shuffled = __builtin_shufflevector(v8_a, v8_b, 0, 1, 2, 3, 8, 9, 10, 11);
\\ for (int i = 0; i < 8; i++) {
\\ if (i < 4) {
\\ if (shuffled[i] != v8_a[i]) abort();
\\ } else {
\\ if (shuffled[i] != v8_b[i - 4]) abort();
\\ }
\\ }
\\ shuffled = __builtin_shufflevector(
\\ (__v8hi) {-1, -1, -1, -1, -1, -1, -1, -1},
\\ (__v8hi) {42, 42, 42, 42, 42, 42, 42, 42},
\\ 0, 1, 2, 3, 8, 9, 10, 11
\\ );
\\ for (int i = 0; i < 8; i++) {
\\ if (i < 4) {
\\ if (shuffled[i] != -1) abort();
\\ } else {
\\ if (shuffled[i] != 42) abort();
\\ }
\\ }
\\ __v4hi shuffled_to_fewer_elements = __builtin_shufflevector(v8_a, v8_b, 0, 1, 8, 9);
\\ for (int i = 0; i < 4; i++) {
\\ if (i < 2) {
\\ if (shuffled_to_fewer_elements[i] != v8_a[i]) abort();
\\ } else {
\\ if (shuffled_to_fewer_elements[i] != v8_b[i - 2]) abort();
\\ }
\\ }
\\ __v4hi v4_a = {0, 1, 2, 3};
\\ __v4hi v4_b = {100, 200, 300, 400};
\\ __v8hi shuffled_to_more_elements = __builtin_shufflevector(v4_a, v4_b, 0, 1, 2, 3, 4, 5, 6, 7);
\\ for (int i = 0; i < 4; i++) {
\\ if (shuffled_to_more_elements[i] != v4_a[i]) abort();
\\ if (shuffled_to_more_elements[i + 4] != v4_b[i]) abort();
\\ }
\\ return 0;
\\}
, "");
cases.add("__builtin_convertvector",
\\#include <stdlib.h>
\\#include <stdint.h>
\\typedef int16_t __v8hi __attribute__((__vector_size__(16)));
\\typedef uint16_t __v8hu __attribute__((__vector_size__(16)));
\\int main(int argc, char**argv) {
\\ __v8hi signed_vector = { 1, 2, 3, 4, -1, -2, -3,-4};
\\ __v8hu unsigned_vector = __builtin_convertvector(signed_vector, __v8hu);
\\
\\ for (int i = 0; i < 8; i++) {
\\ if (unsigned_vector[i] != (uint16_t)signed_vector[i]) abort();
\\ }
\\ return 0;
\\}
, "");
cases.add("vector casting",
\\#include <stdlib.h>
\\#include <stdint.h>
\\typedef int8_t __v8qi __attribute__((__vector_size__(8)));
\\typedef uint8_t __v8qu __attribute__((__vector_size__(8)));
\\int main(int argc, char**argv) {
\\ __v8qi signed_vector = { 1, 2, 3, 4, -1, -2, -3,-4};
\\
\\ uint64_t big_int = (uint64_t) signed_vector;
\\ if (big_int != 0x01020304FFFEFDFCULL && big_int != 0xFCFDFEFF04030201ULL) abort();
\\ __v8qu unsigned_vector = (__v8qu) big_int;
\\ for (int i = 0; i < 8; i++) {
\\ if (unsigned_vector[i] != (uint8_t)signed_vector[i] && unsigned_vector[i] != (uint8_t)signed_vector[7 - i]) abort();
\\ }
\\ return 0;
\\}
, "");
cases.add("break from switch statement. Issue #8387",
\\#include <stdlib.h>
\\int switcher(int x) {

View file

@ -37,7 +37,7 @@ noinline fn frame0(expected: *[4]usize, unwound: *[4]usize) void {
}
// No-OS entrypoint
export fn _start() callconv(.c) noreturn {
export fn _start() callconv(.withStackAlign(.c, 1)) noreturn {
var expected: [4]usize = undefined;
var unwound: [4]usize = undefined;
frame0(&expected, &unwound);

View file

@ -1344,27 +1344,33 @@ const test_targets = blk: {
// SPIR-V Targets
.{
.target = std.Target.Query.parse(.{
.arch_os_abi = "spirv64-vulkan",
.cpu_features = "vulkan_v1_2+float16+float64",
}) catch unreachable,
.use_llvm = false,
.use_lld = false,
.skip_modules = &.{ "c-import", "zigc", "std" },
},
// Disabled due to no active maintainer (feel free to fix the failures
// and then re-enable at any time). The failures occur due to changing AIR
// from the frontend, and backend being incomplete.
//.{
// .target = std.Target.Query.parse(.{
// .arch_os_abi = "spirv64-vulkan",
// .cpu_features = "vulkan_v1_2+float16+float64",
// }) catch unreachable,
// .use_llvm = false,
// .use_lld = false,
// .skip_modules = &.{ "c-import", "zigc", "std" },
//},
// WASI Targets
.{
.target = .{
.cpu_arch = .wasm32,
.os_tag = .wasi,
.abi = .none,
},
.use_llvm = false,
.use_lld = false,
},
// Disabled due to no active maintainer (feel free to fix the failures
// and then re-enable at any time). The failures occur due to backend
// miscompilation of different AIR from the frontend.
//.{
// .target = .{
// .cpu_arch = .wasm32,
// .os_tag = .wasi,
// .abi = .none,
// },
// .use_llvm = false,
// .use_lld = false,
//},
.{
.target = .{
.cpu_arch = .wasm32,