Air: add "unwrap" functions for loading extra data

This commit is contained in:
Mathieu Suen 2026-01-20 15:15:57 +01:00 committed by Matthew Lugg
parent d84a638e8b
commit 36b65ab59e
No known key found for this signature in database
GPG key ID: 3F5B7DCCBF4AF02E
14 changed files with 864 additions and 893 deletions

View file

@ -281,16 +281,21 @@ pub const Inst = struct {
/// also supports enums and pointers.
/// Uses the `ty_op` field.
bitcast,
/// Uses the `ty_pl` field with payload `Block`. A block runs its body which always ends
/// with a `noreturn` instruction, so the only way to proceed to the code after the `block`
/// is to encounter a `br` that targets this `block`. If the `block` type is `noreturn`,
/// A block runs its body which always ends with a `noreturn` instruction,
/// so the only way to proceed to the code after the `block` is to encounter a `br`
/// that targets this `block`. If the `block` type is `noreturn`,
/// then there do not exist any `br` instructions targeting this `block`.
/// Uses the `ty_pl` field with payload `Block`.
///
/// See `unwrapBlock` for a way to load this tag's data.
block,
/// A labeled block of code that loops forever. The body must be `noreturn`: loops
/// occur through an explicit `repeat` instruction pointing back to this one.
/// Result type is always `noreturn`; no instructions in a block follow this one.
/// There is always at least one `repeat` instruction referencing the loop.
/// Uses the `ty_pl` field. Payload is `Block`.
///
/// See `unwrapBlock` for a way to load this tag's data.
loop,
/// Sends control flow back to the beginning of a parent `loop` body.
/// Uses the `repeat` field.
@ -319,6 +324,8 @@ pub const Inst = struct {
/// Result type is the return type of the function being called.
/// Uses the `pl_op` field with the `Call` payload. operand is the callee.
/// Triggers `resolveTypeLayout` on the return type of the callee.
///
/// See `unwrapCall` for a way to load this tag's data.
call,
/// Same as `call` except with the `always_tail` attribute.
call_always_tail,
@ -436,14 +443,20 @@ pub const Inst = struct {
/// Conditional branch.
/// Result type is always noreturn; no instructions in a block follow this one.
/// Uses the `pl_op` field. Operand is the condition. Payload is `CondBr`.
///
/// See `unwrapCondBr` for a way to load this tag's data.
cond_br,
/// Switch branch.
/// Result type is always noreturn; no instructions in a block follow this one.
/// Uses the `pl_op` field. Operand is the condition. Payload is `SwitchBr`.
///
/// See `unwrapSwitch` for a way to load this tag's data.
switch_br,
/// Switch branch which can dispatch back to itself with a different operand.
/// Result type is always noreturn; no instructions in a block follow this one.
/// Uses the `pl_op` field. Operand is the condition. Payload is `SwitchBr`.
///
/// See `unwrapSwitch` for a way to load this tag's data.
loop_switch_br,
/// Dispatches back to a branch of a parent `loop_switch_br`.
/// Result type is always noreturn; no instructions in a block follow this one.
@ -458,6 +471,8 @@ pub const Inst = struct {
/// payload value, as if `unwrap_errunion_payload` was executed on the operand.
/// The error branch is considered to have a branch hint of `.unlikely`.
/// Uses the `pl_op` field. Payload is `Try`.
///
/// See `unwrapTry` for a way to load this tag's data.
@"try",
/// Same as `try` except the error branch hint is `.cold`.
try_cold,
@ -465,6 +480,8 @@ pub const Inst = struct {
/// result is a pointer to the payload. Result is as if `unwrap_errunion_payload_ptr`
/// was executed on the operand.
/// Uses the `ty_pl` field. Payload is `TryPtr`.
///
/// See `unwrapTryPtr` for a way to load this tag's data.
try_ptr,
/// Same as `try_ptr` except the error branch hint is `.cold`.
try_ptr_cold,
@ -476,6 +493,8 @@ pub const Inst = struct {
dbg_empty_stmt,
/// A block that represents an inlined function call.
/// Uses the `ty_pl` field. Payload is `DbgInlineBlock`.
///
/// See `unwrapDbgBlock` for a way to load this tag's data.
dbg_inline_block,
/// Marks the beginning of a local variable. The operand is a pointer pointing
/// to the storage for the variable. The local may be a const or a var.
@ -715,7 +734,7 @@ pub const Inst = struct {
/// Uses the `ty_pl` field, where the payload index points to:
/// 1. mask_elem: ShuffleOneMask // for each `mask_len`, which comes from `ty_pl.ty`
/// 2. operand: Ref // guaranteed not to be an interned value
/// See `unwrapShuffleOne`.
/// See `unwrapShuffleOne` for a way to load this tag's data.
shuffle_one,
/// Constructs a vector by selecting elements from two vectors based on a mask. Each mask
/// element is either an index into one of the vectors, or "undef".
@ -723,7 +742,7 @@ pub const Inst = struct {
/// 1. mask_elem: ShuffleOneMask // for each `mask_len`, which comes from `ty_pl.ty`
/// 2. operand_a: Ref // guaranteed not to be an interned value
/// 3. operand_b: Ref // guaranteed not to be an interned value
/// See `unwrapShuffleTwo`.
/// See `unwrapShuffleTwo` for a way to load this tag's data.
shuffle_two,
/// Constructs a vector element-wise from `a` or `b` based on `pred`.
/// Uses the `pl_op` field with `pred` as operand, and payload `Bin`.
@ -944,6 +963,8 @@ pub const Inst = struct {
/// The calling convention is given by `func.@"callconv"(target)`.
/// The return type (and hence the result type of this instruction) is `func.returnType()`.
/// The parameter types are the types of the arguments given in `Air.Call`.
///
/// See `unwrapCompilerRtCall` for a way to load this tag's data.
legalize_compiler_rt_call,
pub fn fromCmpOp(op: std.math.CompareOperator, optimized: bool) Tag {
@ -1445,18 +1466,18 @@ pub const ShuffleTwoMask = enum(u32) {
/// Trailing:
/// 0. `Inst.Ref` for every outputs_len
/// 1. `Inst.Ref` for every inputs_len
/// 2. for every outputs_len
/// - constraint: memory at this position is reinterpreted as a null
/// terminated string.
/// - name: memory at this position is reinterpreted as a null
/// terminated string. pad to the next u32 after the null byte.
/// 3. for every inputs_len
/// - constraint: memory at this position is reinterpreted as a null
/// terminated string.
/// - name: memory at this position is reinterpreted as a null
/// terminated string. pad to the next u32 after the null byte.
/// 4. A number of u32 elements follow according to the equation `(source_len + 3) / 4`.
/// 2. A number of u32 elements follow according to the equation `(source_len + 3) / 4`.
/// Memory starting at this position is reinterpreted as the source bytes.
/// 3. for every outputs_len
/// - constraint: memory at this position is reinterpreted as a null
/// terminated string.
/// - name: memory at this position is reinterpreted as a null
/// terminated string. pad to the next u32 after the null byte.
/// 4. for every inputs_len
/// - constraint: memory at this position is reinterpreted as a null
/// terminated string.
/// - name: memory at this position is reinterpreted as a null
/// terminated string. pad to the next u32 after the null byte.
pub const Asm = struct {
/// Length of the assembly source in bytes.
source_len: u32,
@ -2157,11 +2178,229 @@ pub fn unwrapSwitch(air: *const Air, switch_inst: Inst.Index) UnwrappedSwitch {
};
}
pub fn unwrapShuffleOne(air: *const Air, zcu: *const Zcu, inst_index: Inst.Index) struct {
/// The decoded payload of a `dbg_inline_block` instruction.
pub const UnwrappedDbgInlineBlock = struct {
/// The function that was inlined to produce this block.
func: InternPool.Index,
/// The block body. Always ends with a `noreturn` instruction.
body: []const Inst.Index,
/// The result type of the block.
ty: Type,
};
/// Loads the `DbgInlineBlock` payload of a `dbg_inline_block` instruction.
/// Asserts that `inst_index` refers to a `dbg_inline_block` instruction.
pub fn unwrapDbgBlock(air: *const Air, inst_index: Inst.Index) UnwrappedDbgInlineBlock {
const data = air.instructions.items(.data)[@intFromEnum(inst_index)];
const tag = air.instructions.items(.tag)[@intFromEnum(inst_index)];
assert(tag == .dbg_inline_block);
const payload = data.ty_pl.payload;
const extra = air.extraData(Air.DbgInlineBlock, payload);
return .{
.func = extra.data.func,
.ty = data.ty_pl.ty.toType(),
// The body trails the fixed-size extra data; reinterpret the u32s as indices.
.body = @ptrCast(air.extra.items[extra.end..][0..extra.data.body_len]),
};
}
/// The decoded payload of a `block` or `loop` instruction.
pub const UnwrappedBlock = struct {
/// The block body. Always ends with a `noreturn` instruction.
body: []const Inst.Index,
/// The result type of the block (`noreturn` for `loop`).
ty: Type,
};
/// Loads the `Block` payload of a `block` or `loop` instruction.
/// Asserts that `inst_index` refers to one of those two tags.
pub fn unwrapBlock(air: *const Air, inst_index: Inst.Index) UnwrappedBlock {
const data = air.instructions.items(.data)[@intFromEnum(inst_index)];
const tag = air.instructions.items(.tag)[@intFromEnum(inst_index)];
const payload = switch (tag) {
.block, .loop => data.ty_pl.payload,
else => unreachable,
};
const extra = air.extraData(Air.Block, payload);
return .{
.ty = data.ty_pl.ty.toType(),
// The body trails the fixed-size extra data; reinterpret the u32s as indices.
.body = @ptrCast(air.extra.items[extra.end..][0..extra.data.body_len]),
};
}
/// The decoded payload of a `call` (or tail/inline variant) instruction.
pub const UnwrappedCall = struct {
/// The callee being invoked.
callee: Inst.Ref,
/// The call arguments, in order.
args: []const Air.Inst.Ref,
};
/// Loads the `Call` payload of a `call`, `call_always_tail`, `call_never_tail`,
/// or `call_never_inline` instruction. Asserts the tag is one of those four.
pub fn unwrapCall(air: *const Air, inst_index: Inst.Index) UnwrappedCall {
const data = air.instructions.items(.data)[@intFromEnum(inst_index)];
const tag = air.instructions.items(.tag)[@intFromEnum(inst_index)];
const payload = switch (tag) {
.call, .call_always_tail, .call_never_tail, .call_never_inline => data.pl_op.payload,
else => unreachable,
};
const extra = air.extraData(Air.Call, payload);
return .{
// The callee is the `pl_op` operand; only the args live in `extra`.
.callee = data.pl_op.operand,
.args = @ptrCast(air.extra.items[extra.end..][0..extra.data.args_len]),
};
}
/// The decoded payload of a `legalize_compiler_rt_call` instruction.
pub const UnwrappedCompilerRtCall = struct {
/// Which compiler-rt function is being called.
func: CompilerRtFunc,
/// The call arguments, in order.
args: []const Air.Inst.Ref,
};
/// Loads the `Call` payload of a `legalize_compiler_rt_call` instruction.
/// Asserts that `inst_index` refers to a `legalize_compiler_rt_call`.
pub fn unwrapCompilerRtCall(air: *const Air, inst_index: Inst.Index) UnwrappedCompilerRtCall {
const data = air.instructions.items(.data)[@intFromEnum(inst_index)];
const tag = air.instructions.items(.tag)[@intFromEnum(inst_index)];
assert(tag == .legalize_compiler_rt_call);
const payload = data.legalize_compiler_rt_call.payload;
const extra = air.extraData(Air.Call, payload);
return .{
.func = data.legalize_compiler_rt_call.func,
.args = @ptrCast(air.extra.items[extra.end..][0..extra.data.args_len]),
};
}
/// The decoded payload of a `cond_br` instruction.
pub const UnwrappedCondBr = struct {
/// The branch condition.
condition: Inst.Ref,
/// Body executed when the condition is true.
then_body: []const Inst.Index,
/// Body executed when the condition is false.
else_body: []const Inst.Index,
/// Branch hints for the two arms.
branch_hints: CondBr.BranchHints,
};
/// Loads the `CondBr` payload of a `cond_br` instruction.
/// Asserts that `inst_index` refers to a `cond_br`.
pub fn unwrapCondBr(air: *const Air, inst_index: Inst.Index) UnwrappedCondBr {
const data = air.instructions.items(.data)[@intFromEnum(inst_index)];
const tag = air.instructions.items(.tag)[@intFromEnum(inst_index)];
assert(tag == .cond_br);
const payload = data.pl_op.payload;
const extra = air.extraData(Air.CondBr, payload);
return .{
.condition = data.pl_op.operand,
// The then body immediately follows the fixed extra data; the else body
// follows the then body, hence the `then_body_len` offset below.
.then_body = @ptrCast(air.extra.items[extra.end..][0..extra.data.then_body_len]),
.else_body = @ptrCast(air.extra.items[extra.end + extra.data.then_body_len ..][0..extra.data.else_body_len]),
.branch_hints = extra.data.branch_hints,
};
}
/// The decoded payload of a `try` or `try_cold` instruction.
pub const UnwrappedTry = struct {
/// The error union being unwrapped.
error_union: Inst.Ref,
/// Body executed when the operand is an error ("then" is just the rest of
/// the enclosing block).
else_body: []const Inst.Index,
};
/// Loads the `Try` payload of a `try` or `try_cold` instruction.
/// Asserts that `inst_index` refers to one of those two tags.
pub fn unwrapTry(air: *const Air, inst_index: Inst.Index) UnwrappedTry {
const data = air.instructions.items(.data)[@intFromEnum(inst_index)];
const tag = air.instructions.items(.tag)[@intFromEnum(inst_index)];
assert(tag == .@"try" or tag == .try_cold);
const payload = data.pl_op.payload;
const extra = air.extraData(Air.Try, payload);
return .{
.error_union = data.pl_op.operand,
.else_body = @ptrCast(air.extra.items[extra.end..][0..extra.data.body_len]),
};
}
/// The decoded payload of a `try_ptr` or `try_ptr_cold` instruction.
pub const UnwrappedTryPtr = struct {
/// The result type: a pointer to the error union's payload.
error_union_payload_ptr_ty: Inst.Ref,
/// Pointer to the error union being unwrapped.
error_union_ptr: Inst.Ref,
/// Body executed when the pointed-to value is an error.
else_body: []const Inst.Index,
};
/// Loads the `TryPtr` payload of a `try_ptr` or `try_ptr_cold` instruction.
/// Asserts that `inst_index` refers to one of those two tags.
pub fn unwrapTryPtr(air: *const Air, inst_index: Inst.Index) UnwrappedTryPtr {
const data = air.instructions.items(.data)[@intFromEnum(inst_index)];
const tag = air.instructions.items(.tag)[@intFromEnum(inst_index)];
assert(tag == .try_ptr or tag == .try_ptr_cold);
const payload = data.ty_pl.payload;
const extra = air.extraData(Air.TryPtr, payload);
return .{
// The operand pointer lives in `extra`; the result type is `ty_pl.ty`.
.error_union_ptr = extra.data.ptr,
.error_union_payload_ptr_ty = data.ty_pl.ty,
.else_body = @ptrCast(air.extra.items[extra.end..][0..extra.data.body_len]),
};
}
/// The decoded payload of an `assembly` instruction. Constraint/name strings
/// remain in their packed u32 representation; use `iterateOutputs` and
/// `iterateInputs` to decode them.
pub const UnwrappedAsm = struct {
/// Output operands; an entry of `.none` means "returned by value".
outputs: []const Air.Inst.Ref,
/// Input operands.
inputs: []const Air.Inst.Ref,
/// The assembly source text, viewed directly into `air.extra`.
/// NOTE(review): this is a mutable `[:0]u8` even though it is produced from
/// a `*const Air` — confirm non-const is intended.
source: [:0]u8,
/// Packed null-terminated constraint/name string pairs for the inputs.
input_constraint_names: []const u32,
/// Packed null-terminated constraint/name string pairs for the outputs.
output_constraint_names: []const u32,
/// The clobber set, as an interned aggregate.
clobbers: InternPool.Index,
/// Whether the assembly has the `volatile` attribute.
is_volatile: bool,
/// Walks operands in lockstep with their packed constraint/name strings.
const AsmIterator = struct {
/// Index of the next operand to yield.
current: u32,
operands: []const Air.Inst.Ref,
/// Remaining packed string data; consumed as the iterator advances.
constraint_names: []const u32,
/// Returns the next (constraint, operand, name, index) tuple, or `null`
/// once all operands have been yielded.
pub fn next(self: *AsmIterator) ?struct { constraint: []const u8, operand: Inst.Ref, name: []const u8, index: u32 } {
if (self.current >= self.operands.len) {
return null;
}
defer {
self.current += 1;
}
// Each entry is two consecutive null-terminated strings:
// the constraint, then the name.
const constraint_name = std.mem.sliceAsBytes(self.constraint_names);
const constraint = std.mem.sliceTo(constraint_name, 0);
const name = std.mem.sliceTo(constraint_name[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
const next_offset = std.math.divCeil(usize, constraint.len + 1 + name.len + 1, @sizeOf(u32)) catch unreachable;
self.constraint_names = self.constraint_names[next_offset..];
return .{
.constraint = constraint,
.operand = self.operands[self.current],
.name = name,
.index = self.current,
};
}
};
/// Iterates the input operands together with their constraints and names.
pub fn iterateInputs(self: *const UnwrappedAsm) AsmIterator {
return .{
.current = 0,
.operands = self.inputs,
.constraint_names = self.input_constraint_names,
};
}
/// Iterates the output operands together with their constraints and names.
pub fn iterateOutputs(self: *const UnwrappedAsm) AsmIterator {
return .{
.current = 0,
.operands = self.outputs,
.constraint_names = self.output_constraint_names,
};
}
};
/// Loads the `Asm` payload of an `assembly` instruction.
/// Asserts that `inst_index` refers to an `assembly` instruction.
pub fn unwrapAsm(air: *const Air, inst_index: Inst.Index) UnwrappedAsm {
const data = air.instructions.items(.data)[@intFromEnum(inst_index)];
const tag = air.instructions.items(.tag)[@intFromEnum(inst_index)];
assert(tag == .assembly);
const payload = data.ty_pl.payload;
const extra = air.extraData(Air.Asm, payload);
// Trailing layout: outputs, inputs, source bytes, then the packed
// constraint/name strings for outputs followed by inputs.
const source_start = extra.end + extra.data.flags.outputs_len + extra.data.inputs_len;
// NOTE(review): this skips `source_len / 4 + 1` u32s, while the `Asm` doc
// comment says the source occupies `(source_len + 3) / 4` elements; the two
// differ when `source_len` is a multiple of 4. Consistent only if the source
// is always null-terminated (as the `:0` slice below requires) — confirm
// against the encoder.
const output_constraint_name_start = source_start + (extra.data.source_len / 4) + 1;
const output_constraint_name = air.extra.items[output_constraint_name_start..];
const outputs: []Inst.Ref = @ptrCast(air.extra.items[extra.end..][0..extra.data.flags.outputs_len]);
// Get the input names and constraints offset place after the output.
// Draining an iterator over the outputs leaves `it.constraint_names`
// pointing at the start of the input strings.
var it = UnwrappedAsm.AsmIterator{
.current = 0,
.constraint_names = output_constraint_name,
.operands = outputs,
};
while (it.next()) |_| {}
return .{
.clobbers = extra.data.clobbers,
.is_volatile = extra.data.flags.is_volatile,
.inputs = @ptrCast(air.extra.items[extra.end + extra.data.flags.outputs_len ..][0..extra.data.inputs_len]),
.outputs = outputs,
.source = std.mem.sliceAsBytes(air.extra.items[source_start..])[0..extra.data.source_len :0],
.output_constraint_names = output_constraint_name,
.input_constraint_names = it.constraint_names,
};
}
pub const UnwrappedShuffleOne = struct {
result_ty: Type,
operand: Inst.Ref,
mask: []const ShuffleOneMask,
} {
};
pub fn unwrapShuffleOne(air: *const Air, zcu: *const Zcu, inst_index: Inst.Index) UnwrappedShuffleOne {
const inst = air.instructions.get(@intFromEnum(inst_index));
switch (inst.tag) {
.shuffle_one => {},
@ -2177,12 +2416,14 @@ pub fn unwrapShuffleOne(air: *const Air, zcu: *const Zcu, inst_index: Inst.Index
};
}
pub fn unwrapShuffleTwo(air: *const Air, zcu: *const Zcu, inst_index: Inst.Index) struct {
pub const UnwrappedShuffleTwo = struct {
result_ty: Type,
operand_a: Inst.Ref,
operand_b: Inst.Ref,
mask: []const ShuffleTwoMask,
} {
};
pub fn unwrapShuffleTwo(air: *const Air, zcu: *const Zcu, inst_index: Inst.Index) UnwrappedShuffleTwo {
const inst = air.instructions.get(@intFromEnum(inst_index));
switch (inst.tag) {
.shuffle_two => {},

View file

@ -17,6 +17,7 @@ const trace = @import("../tracy.zig").trace;
const Air = @import("../Air.zig");
const InternPool = @import("../InternPool.zig");
const Zcu = @import("../Zcu.zig");
const Type = @import("../Type.zig");
pub const Verify = @import("Liveness/Verify.zig");
@ -609,13 +610,11 @@ fn analyzeInst(
},
.call, .call_always_tail, .call_never_tail, .call_never_inline => {
const inst_data = inst_datas[@intFromEnum(inst)].pl_op;
const callee = inst_data.operand;
const extra = a.air.extraData(Air.Call, inst_data.payload);
const args = @as([]const Air.Inst.Ref, @ptrCast(a.air.extra.items[extra.end..][0..extra.data.args_len]));
const call = a.air.unwrapCall(inst);
const args = call.args;
if (args.len + 1 <= bpi - 1) {
var buf = [1]Air.Inst.Ref{.none} ** (bpi - 1);
buf[0] = callee;
buf[0] = call.callee;
@memcpy(buf[1..][0..args.len], args);
return analyzeOperands(a, pass, data, inst, buf);
}
@ -627,7 +626,7 @@ fn analyzeInst(
i -= 1;
try big.feed(args[i]);
}
try big.feed(callee);
try big.feed(call.callee);
return big.finish();
},
.select => {
@ -708,18 +707,15 @@ fn analyzeInst(
.switch_dispatch => return analyzeInstSwitchDispatch(a, pass, data, inst),
.assembly => {
const extra = a.air.extraData(Air.Asm, inst_datas[@intFromEnum(inst)].ty_pl.payload);
const outputs_len = extra.data.flags.outputs_len;
var extra_i: usize = extra.end;
const outputs = @as([]const Air.Inst.Ref, @ptrCast(a.air.extra.items[extra_i..][0..outputs_len]));
extra_i += outputs.len;
const inputs = @as([]const Air.Inst.Ref, @ptrCast(a.air.extra.items[extra_i..][0..extra.data.inputs_len]));
extra_i += inputs.len;
const unwrapped_asm = a.air.unwrapAsm(inst);
const outputs = unwrapped_asm.outputs;
const inputs = unwrapped_asm.inputs;
const num_operands = simple: {
var buf = [1]Air.Inst.Ref{.none} ** (bpi - 1);
var buf_index: usize = 0;
for (outputs) |output| {
for (unwrapped_asm.outputs) |output| {
if (output != .none) {
if (buf_index < buf.len) buf[buf_index] = output;
buf_index += 1;
@ -748,15 +744,13 @@ fn analyzeInst(
}
return big.finish();
},
inline .block, .dbg_inline_block => |comptime_tag| {
const ty_pl = inst_datas[@intFromEnum(inst)].ty_pl;
const extra = a.air.extraData(switch (comptime_tag) {
.block => Air.Block,
.dbg_inline_block => Air.DbgInlineBlock,
else => unreachable,
}, ty_pl.payload);
return analyzeInstBlock(a, pass, data, inst, ty_pl.ty, @ptrCast(a.air.extra.items[extra.end..][0..extra.data.body_len]));
.dbg_inline_block => {
const block = a.air.unwrapDbgBlock(inst);
return analyzeInstBlock(a, pass, data, inst, block.ty, block.body);
},
.block => {
const block = a.air.unwrapBlock(inst);
return analyzeInstBlock(a, pass, data, inst, block.ty, block.body);
},
.loop => return analyzeInstLoop(a, pass, data, inst),
@ -778,8 +772,8 @@ fn analyzeInst(
},
.legalize_compiler_rt_call => {
const extra = a.air.extraData(Air.Call, inst_datas[@intFromEnum(inst)].legalize_compiler_rt_call.payload);
const args: []const Air.Inst.Ref = @ptrCast(a.air.extra.items[extra.end..][0..extra.data.args_len]);
const rt_call = a.air.unwrapCompilerRtCall(inst);
const args = rt_call.args;
if (args.len <= bpi - 1) {
var buf: [bpi - 1]Air.Inst.Ref = @splat(.none);
@memcpy(buf[0..args.len], args);
@ -972,7 +966,7 @@ fn analyzeInstBlock(
comptime pass: LivenessPass,
data: *LivenessPassData(pass),
inst: Air.Inst.Index,
ty: Air.Inst.Ref,
ty: Type,
body: []const Air.Inst.Index,
) !void {
const gpa = a.gpa;
@ -1005,7 +999,7 @@ fn analyzeInstBlock(
// If the block is noreturn, block deaths not only aren't useful, they're impossible to
// find: there could be more stuff alive after the block than before it!
if (!a.intern_pool.isNoReturn(ty.toType().toIntern())) {
if (!a.intern_pool.isNoReturn(ty.toIntern())) {
// The block kills the difference in the live sets
const block_scope = data.block_scopes.get(inst).?;
const num_deaths = data.live_set.count() - block_scope.live_set.count();
@ -1139,9 +1133,8 @@ fn analyzeInstLoop(
data: *LivenessPassData(pass),
inst: Air.Inst.Index,
) !void {
const inst_datas = a.air.instructions.items(.data);
const extra = a.air.extraData(Air.Block, inst_datas[@intFromEnum(inst)].ty_pl.payload);
const body: []const Air.Inst.Index = @ptrCast(a.air.extra.items[extra.end..][0..extra.data.body_len]);
const block = a.air.unwrapBlock(inst);
const body = block.body;
const gpa = a.gpa;
try analyzeOperands(a, pass, data, inst, .{ .none, .none, .none });
@ -1187,44 +1180,38 @@ fn analyzeInstCondBr(
inst: Air.Inst.Index,
comptime inst_type: enum { cond_br, @"try", try_ptr },
) !void {
const inst_datas = a.air.instructions.items(.data);
const gpa = a.gpa;
const extra = switch (inst_type) {
.cond_br => a.air.extraData(Air.CondBr, inst_datas[@intFromEnum(inst)].pl_op.payload),
.@"try" => a.air.extraData(Air.Try, inst_datas[@intFromEnum(inst)].pl_op.payload),
.try_ptr => a.air.extraData(Air.TryPtr, inst_datas[@intFromEnum(inst)].ty_pl.payload),
const unwrapped_cond = switch (inst_type) {
.cond_br => a.air.unwrapCondBr(inst),
.@"try" => a.air.unwrapTry(inst),
.try_ptr => a.air.unwrapTryPtr(inst),
};
const condition = switch (inst_type) {
.cond_br, .@"try" => inst_datas[@intFromEnum(inst)].pl_op.operand,
.try_ptr => extra.data.ptr,
.cond_br => unwrapped_cond.condition,
.@"try" => unwrapped_cond.error_union,
.try_ptr => unwrapped_cond.error_union_ptr,
};
const then_body: []const Air.Inst.Index = switch (inst_type) {
.cond_br => @ptrCast(a.air.extra.items[extra.end..][0..extra.data.then_body_len]),
else => &.{}, // we won't use this
const then_body = switch (inst_type) {
.cond_br => unwrapped_cond.then_body,
// The "then body" is just the remainder of this block
else => &.{},
};
const else_body: []const Air.Inst.Index = @ptrCast(switch (inst_type) {
.cond_br => a.air.extra.items[extra.end + then_body.len ..][0..extra.data.else_body_len],
.@"try", .try_ptr => a.air.extra.items[extra.end..][0..extra.data.body_len],
});
const else_body = switch (inst_type) {
.cond_br, .@"try", .try_ptr => unwrapped_cond.else_body,
};
switch (pass) {
.loop_analysis => {
switch (inst_type) {
.cond_br => try analyzeBody(a, pass, data, then_body),
.@"try", .try_ptr => {},
}
try analyzeBody(a, pass, data, then_body);
try analyzeBody(a, pass, data, else_body);
},
.main_analysis => {
switch (inst_type) {
.cond_br => try analyzeBody(a, pass, data, then_body),
.@"try", .try_ptr => {}, // The "then body" is just the remainder of this block
}
try analyzeBody(a, pass, data, then_body);
var then_live = data.live_set.move();
defer then_live.deinit(gpa);

View file

@ -345,37 +345,26 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
try self.verifyInst(inst);
},
.call, .call_always_tail, .call_never_tail, .call_never_inline => {
const pl_op = data[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.Call, pl_op.payload);
const args = @as(
[]const Air.Inst.Ref,
@ptrCast(self.air.extra.items[extra.end..][0..extra.data.args_len]),
);
const call = self.air.unwrapCall(inst);
const args = call.args;
var bt = self.liveness.iterateBigTomb(inst);
try self.verifyOperand(inst, pl_op.operand, bt.feed());
try self.verifyOperand(inst, call.callee, bt.feed());
for (args) |arg| {
try self.verifyOperand(inst, arg, bt.feed());
}
try self.verifyInst(inst);
},
.assembly => {
const ty_pl = data[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Asm, ty_pl.payload);
const outputs_len = extra.data.flags.outputs_len;
var extra_i = extra.end;
const outputs: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[extra_i..][0..outputs_len]);
extra_i += outputs.len;
const inputs: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[extra_i..][0..extra.data.inputs_len]);
extra_i += inputs.len;
const unwrapped_asm = self.air.unwrapAsm(inst);
var bt = self.liveness.iterateBigTomb(inst);
for (outputs) |output| {
for (unwrapped_asm.outputs) |output| {
if (output != .none) {
try self.verifyOperand(inst, output, bt.feed());
}
}
for (inputs) |input| {
for (unwrapped_asm.inputs) |input| {
try self.verifyOperand(inst, input, bt.feed());
}
try self.verifyInst(inst);
@ -383,13 +372,12 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
// control flow
.@"try", .try_cold => {
const pl_op = data[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.Try, pl_op.payload);
const try_body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]);
const unwrapped_try = self.air.unwrapTry(inst);
const try_body = unwrapped_try.else_body;
const cond_br_liveness = self.liveness.getCondBr(inst);
try self.verifyOperand(inst, pl_op.operand, self.liveness.operandDies(inst, 0));
try self.verifyOperand(inst, unwrapped_try.error_union, self.liveness.operandDies(inst, 0));
var live = try self.live.clone(self.gpa);
defer live.deinit(self.gpa);
@ -405,13 +393,12 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
try self.verifyInst(inst);
},
.try_ptr, .try_ptr_cold => {
const ty_pl = data[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.TryPtr, ty_pl.payload);
const try_body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]);
const unwrapped_try = self.air.unwrapTryPtr(inst);
const try_body = unwrapped_try.else_body;
const cond_br_liveness = self.liveness.getCondBr(inst);
try self.verifyOperand(inst, extra.data.ptr, self.liveness.operandDies(inst, 0));
try self.verifyOperand(inst, unwrapped_try.error_union_ptr, self.liveness.operandDies(inst, 0));
var live = try self.live.clone(self.gpa);
defer live.deinit(self.gpa);
@ -458,17 +445,11 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
.block, .dbg_inline_block => |tag| {
const ty_pl = data[@intFromEnum(inst)].ty_pl;
const block_ty = ty_pl.ty.toType();
const block_body: []const Air.Inst.Index = @ptrCast(switch (tag) {
inline .block, .dbg_inline_block => |comptime_tag| body: {
const extra = self.air.extraData(switch (comptime_tag) {
.block => Air.Block,
.dbg_inline_block => Air.DbgInlineBlock,
else => unreachable,
}, ty_pl.payload);
break :body self.air.extra.items[extra.end..][0..extra.data.body_len];
},
const block_body = switch (tag) {
.block => self.air.unwrapBlock(inst).body,
.dbg_inline_block => self.air.unwrapDbgBlock(inst).body,
else => unreachable,
});
};
const block_liveness = self.liveness.getBlock(inst);
var orig_live = try self.live.clone(self.gpa);
@ -501,9 +482,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
try self.verifyInstOperands(inst, .{ .none, .none, .none });
},
.loop => {
const ty_pl = data[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Block, ty_pl.payload);
const loop_body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]);
const block = self.air.unwrapBlock(inst);
// The same stuff should be alive after the loop as before it.
const gop = try self.loops.getOrPut(self.gpa, inst);
@ -514,18 +493,17 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
}
gop.value_ptr.* = try self.live.clone(self.gpa);
try self.verifyBody(loop_body);
try self.verifyBody(block.body);
try self.verifyInstOperands(inst, .{ .none, .none, .none });
},
.cond_br => {
const pl_op = data[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.CondBr, pl_op.payload);
const then_body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.then_body_len]);
const else_body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end + then_body.len ..][0..extra.data.else_body_len]);
const cond_br = self.air.unwrapCondBr(inst);
const then_body = cond_br.then_body;
const else_body = cond_br.else_body;
const cond_br_liveness = self.liveness.getCondBr(inst);
try self.verifyOperand(inst, pl_op.operand, self.liveness.operandDies(inst, 0));
try self.verifyOperand(inst, cond_br.condition, self.liveness.operandDies(inst, 0));
var live = try self.live.clone(self.gpa);
defer live.deinit(self.gpa);
@ -589,8 +567,8 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
try self.verifyInstOperands(inst, .{ pl_op.operand, bin.lhs, bin.rhs });
},
.legalize_compiler_rt_call => {
const extra = self.air.extraData(Air.Call, data[@intFromEnum(inst)].legalize_compiler_rt_call.payload);
const args: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.args_len]);
const rt_call = self.air.unwrapCompilerRtCall(inst);
const args = rt_call.args;
var bt = self.liveness.iterateBigTomb(inst);
for (args) |arg| {
try self.verifyOperand(inst, arg, bt.feed());

View file

@ -395,25 +395,17 @@ const Writer = struct {
fn writeBlock(w: *Writer, s: *std.Io.Writer, tag: Air.Inst.Tag, inst: Air.Inst.Index) Error!void {
const ty_pl = w.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
try w.writeType(s, ty_pl.ty.toType());
const body: []const Air.Inst.Index = @ptrCast(switch (tag) {
inline .block, .dbg_inline_block => |comptime_tag| body: {
const extra = w.air.extraData(switch (comptime_tag) {
.block => Air.Block,
.dbg_inline_block => Air.DbgInlineBlock,
else => unreachable,
}, ty_pl.payload);
switch (comptime_tag) {
.block => {},
.dbg_inline_block => {
try s.writeAll(", ");
try w.writeInstRef(s, Air.internedToRef(extra.data.func), false);
},
else => unreachable,
}
break :body w.air.extra.items[extra.end..][0..extra.data.body_len];
const body = switch (tag) {
.block => w.air.unwrapBlock(inst).body,
.dbg_inline_block => body: {
const dbg_block = w.air.unwrapDbgBlock(inst);
try s.writeAll(", ");
try w.writeInstRef(s, Air.internedToRef(dbg_block.func), false);
break :body dbg_block.body;
},
else => unreachable,
});
};
if (w.skip_body) return s.writeAll(", ...");
const liveness_block: Air.Liveness.BlockSlices = if (w.liveness) |liveness|
liveness.getBlock(inst)
@ -434,16 +426,14 @@ const Writer = struct {
}
fn writeLoop(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const ty_pl = w.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = w.air.extraData(Air.Block, ty_pl.payload);
const body: []const Air.Inst.Index = @ptrCast(w.air.extra.items[extra.end..][0..extra.data.body_len]);
const block = w.air.unwrapBlock(inst);
try w.writeType(s, ty_pl.ty.toType());
try w.writeType(s, block.ty);
if (w.skip_body) return s.writeAll(", ...");
try s.writeAll(", {\n");
const old_indent = w.indent;
w.indent += 2;
try w.writeBody(s, body);
try w.writeBody(s, block.body);
w.indent = old_indent;
try s.splatByteAll(' ', w.indent);
try s.writeAll("}");
@ -532,11 +522,10 @@ const Writer = struct {
}
fn writeLegalizeCompilerRtCall(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const inst_data = w.air.instructions.items(.data)[@intFromEnum(inst)].legalize_compiler_rt_call;
const extra = w.air.extraData(Air.Call, inst_data.payload);
const args: []const Air.Inst.Ref = @ptrCast(w.air.extra.items[extra.end..][0..extra.data.args_len]);
const rt_call = w.air.unwrapCompilerRtCall(inst);
const args = rt_call.args;
try s.print("{t}, [", .{inst_data.func});
try s.print("{t}, [", .{rt_call.func});
for (args, 0..) |arg, i| {
if (i != 0) try s.writeAll(", ");
try w.writeOperand(s, inst, i, arg);
@ -666,11 +655,8 @@ const Writer = struct {
}
fn writeAssembly(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const ty_pl = w.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = w.air.extraData(Air.Asm, ty_pl.payload);
const is_volatile = extra.data.flags.is_volatile;
const outputs_len = extra.data.flags.outputs_len;
var extra_i: usize = extra.end;
const unwrapped_asm = w.air.unwrapAsm(inst);
const is_volatile = unwrapped_asm.is_volatile;
var op_index: usize = 0;
const ret_ty = w.typeOfIndex(inst);
@ -680,49 +666,33 @@ const Writer = struct {
try s.writeAll(", volatile");
}
const outputs = @as([]const Air.Inst.Ref, @ptrCast(w.air.extra.items[extra_i..][0..outputs_len]));
extra_i += outputs.len;
const inputs = @as([]const Air.Inst.Ref, @ptrCast(w.air.extra.items[extra_i..][0..extra.data.inputs_len]));
extra_i += inputs.len;
for (outputs) |output| {
const extra_bytes = std.mem.sliceAsBytes(w.air.extra.items[extra_i..]);
const constraint = std.mem.sliceTo(extra_bytes, 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the strings and their null terminators, we still use the next u32
// for the null terminator.
extra_i += (constraint.len + name.len + (2 + 3)) / 4;
if (output == .none) {
var it = unwrapped_asm.iterateOutputs();
while (it.next()) |out| {
const name = out.name;
const constraint = out.constraint;
if (out.operand == .none) {
try s.print(", [{s}] -> {s}", .{ name, constraint });
} else {
try s.print(", [{s}] out {s} = (", .{ name, constraint });
try w.writeOperand(s, inst, op_index, output);
try w.writeOperand(s, inst, op_index, out.operand);
op_index += 1;
try s.writeByte(')');
}
}
for (inputs) |input| {
const extra_bytes = std.mem.sliceAsBytes(w.air.extra.items[extra_i..]);
const constraint = std.mem.sliceTo(extra_bytes, 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the strings and their null terminators, we still use the next u32
// for the null terminator.
extra_i += (constraint.len + name.len + 1) / 4 + 1;
it = unwrapped_asm.iterateInputs();
while (it.next()) |in| {
const name = in.name;
const constraint = in.constraint;
try s.print(", [{s}] in {s} = (", .{ name, constraint });
try w.writeOperand(s, inst, op_index, input);
try w.writeOperand(s, inst, op_index, in.operand);
op_index += 1;
try s.writeByte(')');
}
const zcu = w.pt.zcu;
const ip = &zcu.intern_pool;
const aggregate = ip.indexToKey(extra.data.clobbers).aggregate;
const aggregate = ip.indexToKey(unwrapped_asm.clobbers).aggregate;
const struct_type: Type = .fromInterned(aggregate.ty);
switch (aggregate.storage) {
.elems => |elems| for (elems, 0..) |elem, i| {
@ -750,7 +720,7 @@ const Writer = struct {
try s.print(", {x}", .{bytes});
},
}
const asm_source = std.mem.sliceAsBytes(w.air.extra.items[extra_i..])[0..extra.data.source_len];
const asm_source = unwrapped_asm.source;
try s.print(", \"{f}\"", .{std.zig.fmtString(asm_source)});
}
@ -767,10 +737,9 @@ const Writer = struct {
}
fn writeCall(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const pl_op = w.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = w.air.extraData(Air.Call, pl_op.payload);
const args = @as([]const Air.Inst.Ref, @ptrCast(w.air.extra.items[extra.end..][0..extra.data.args_len]));
try w.writeOperand(s, inst, 0, pl_op.operand);
const call = w.air.unwrapCall(inst);
const args = call.args;
try w.writeOperand(s, inst, 0, call.callee);
try s.writeAll(", [");
for (args, 0..) |arg, i| {
if (i != 0) try s.writeAll(", ");
@ -792,15 +761,14 @@ const Writer = struct {
}
fn writeTry(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const pl_op = w.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = w.air.extraData(Air.Try, pl_op.payload);
const body: []const Air.Inst.Index = @ptrCast(w.air.extra.items[extra.end..][0..extra.data.body_len]);
const unwrapped_try = w.air.unwrapTry(inst);
const body = unwrapped_try.else_body;
const liveness_condbr: Air.Liveness.CondBrSlices = if (w.liveness) |liveness|
liveness.getCondBr(inst)
else
.{ .then_deaths = &.{}, .else_deaths = &.{} };
try w.writeOperand(s, inst, 0, pl_op.operand);
try w.writeOperand(s, inst, 0, unwrapped_try.error_union);
if (w.skip_body) return s.writeAll(", ...");
try s.writeAll(", {\n");
const old_indent = w.indent;
@ -826,18 +794,17 @@ const Writer = struct {
}
fn writeTryPtr(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const ty_pl = w.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = w.air.extraData(Air.TryPtr, ty_pl.payload);
const body: []const Air.Inst.Index = @ptrCast(w.air.extra.items[extra.end..][0..extra.data.body_len]);
const unwrapped_try = w.air.unwrapTryPtr(inst);
const body = unwrapped_try.else_body;
const liveness_condbr: Air.Liveness.CondBrSlices = if (w.liveness) |liveness|
liveness.getCondBr(inst)
else
.{ .then_deaths = &.{}, .else_deaths = &.{} };
try w.writeOperand(s, inst, 0, extra.data.ptr);
try w.writeOperand(s, inst, 0, unwrapped_try.error_union_ptr);
try s.writeAll(", ");
try w.writeType(s, ty_pl.ty.toType());
try w.writeType(s, unwrapped_try.error_union_payload_ptr_ty.toType());
if (w.skip_body) return s.writeAll(", ...");
try s.writeAll(", {\n");
const old_indent = w.indent;
@ -863,23 +830,22 @@ const Writer = struct {
}
fn writeCondBr(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
const pl_op = w.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = w.air.extraData(Air.CondBr, pl_op.payload);
const then_body: []const Air.Inst.Index = @ptrCast(w.air.extra.items[extra.end..][0..extra.data.then_body_len]);
const else_body: []const Air.Inst.Index = @ptrCast(w.air.extra.items[extra.end + then_body.len ..][0..extra.data.else_body_len]);
const cond_br = w.air.unwrapCondBr(inst);
const then_body = cond_br.then_body;
const else_body = cond_br.else_body;
const liveness_condbr: Air.Liveness.CondBrSlices = if (w.liveness) |liveness|
liveness.getCondBr(inst)
else
.{ .then_deaths = &.{}, .else_deaths = &.{} };
try w.writeOperand(s, inst, 0, pl_op.operand);
try w.writeOperand(s, inst, 0, cond_br.condition);
if (w.skip_body) return s.writeAll(", ...");
try s.writeAll(",");
if (extra.data.branch_hints.true != .none) {
try s.print(" {s}", .{@tagName(extra.data.branch_hints.true)});
if (cond_br.branch_hints.true != .none) {
try s.print(" {s}", .{@tagName(cond_br.branch_hints.true)});
}
if (extra.data.branch_hints.then_cov != .none) {
try s.print(" {s}", .{@tagName(extra.data.branch_hints.then_cov)});
if (cond_br.branch_hints.then_cov != .none) {
try s.print(" {s}", .{@tagName(cond_br.branch_hints.then_cov)});
}
try s.writeAll(" {\n");
const old_indent = w.indent;
@ -897,11 +863,11 @@ const Writer = struct {
try w.writeBody(s, then_body);
try s.splatByteAll(' ', old_indent);
try s.writeAll("},");
if (extra.data.branch_hints.false != .none) {
try s.print(" {s}", .{@tagName(extra.data.branch_hints.false)});
if (cond_br.branch_hints.false != .none) {
try s.print(" {s}", .{@tagName(cond_br.branch_hints.false)});
}
if (extra.data.branch_hints.else_cov != .none) {
try s.print(" {s}", .{@tagName(extra.data.branch_hints.else_cov)});
if (cond_br.branch_hints.else_cov != .none) {
try s.print(" {s}", .{@tagName(cond_br.branch_hints.else_cov)});
}
try s.writeAll(" {\n");

View file

@ -170,21 +170,21 @@ fn checkBody(air: Air, body: []const Air.Inst.Index, zcu: *Zcu) bool {
.block,
.loop,
=> {
const extra = air.extraData(Air.Block, data.ty_pl.payload);
if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
const block = air.unwrapBlock(inst);
if (!checkType(block.ty, zcu)) return false;
if (!checkBody(
air,
@ptrCast(air.extra.items[extra.end..][0..extra.data.body_len]),
block.body,
zcu,
)) return false;
},
.dbg_inline_block => {
const extra = air.extraData(Air.DbgInlineBlock, data.ty_pl.payload);
if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
const block = air.unwrapDbgBlock(inst);
if (!checkType(block.ty, zcu)) return false;
if (!checkBody(
air,
@ptrCast(air.extra.items[extra.end..][0..extra.data.body_len]),
block.body,
zcu,
)) return false;
},
@ -342,9 +342,9 @@ fn checkBody(air: Air, body: []const Air.Inst.Index, zcu: *Zcu) bool {
.call_never_tail,
.call_never_inline,
=> {
const extra = air.extraData(Air.Call, data.pl_op.payload);
const args: []const Air.Inst.Ref = @ptrCast(air.extra.items[extra.end..][0..extra.data.args_len]);
if (!checkRef(data.pl_op.operand, zcu)) return false;
const call = air.unwrapCall(inst);
const args = call.args;
if (!checkRef(call.callee, zcu)) return false;
for (args) |arg| if (!checkRef(arg, zcu)) return false;
},
@ -356,37 +356,37 @@ fn checkBody(air: Air, body: []const Air.Inst.Index, zcu: *Zcu) bool {
},
.@"try", .try_cold => {
const extra = air.extraData(Air.Try, data.pl_op.payload);
if (!checkRef(data.pl_op.operand, zcu)) return false;
const unwrapped_try = air.unwrapTry(inst);
if (!checkRef(unwrapped_try.error_union, zcu)) return false;
if (!checkBody(
air,
@ptrCast(air.extra.items[extra.end..][0..extra.data.body_len]),
unwrapped_try.else_body,
zcu,
)) return false;
},
.try_ptr, .try_ptr_cold => {
const extra = air.extraData(Air.TryPtr, data.ty_pl.payload);
if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
if (!checkRef(extra.data.ptr, zcu)) return false;
const unwrapped_try = air.unwrapTryPtr(inst);
if (!checkType(unwrapped_try.error_union_payload_ptr_ty.toType(), zcu)) return false;
if (!checkRef(unwrapped_try.error_union_ptr, zcu)) return false;
if (!checkBody(
air,
@ptrCast(air.extra.items[extra.end..][0..extra.data.body_len]),
unwrapped_try.else_body,
zcu,
)) return false;
},
.cond_br => {
const extra = air.extraData(Air.CondBr, data.pl_op.payload);
if (!checkRef(data.pl_op.operand, zcu)) return false;
const cond_br = air.unwrapCondBr(inst);
if (!checkRef(cond_br.condition, zcu)) return false;
if (!checkBody(
air,
@ptrCast(air.extra.items[extra.end..][0..extra.data.then_body_len]),
cond_br.then_body,
zcu,
)) return false;
if (!checkBody(
air,
@ptrCast(air.extra.items[extra.end + extra.data.then_body_len ..][0..extra.data.else_body_len]),
cond_br.else_body,
zcu,
)) return false;
},
@ -407,20 +407,20 @@ fn checkBody(air: Air, body: []const Air.Inst.Index, zcu: *Zcu) bool {
},
.assembly => {
const extra = air.extraData(Air.Asm, data.ty_pl.payload);
const unwrapped_asm = air.unwrapAsm(inst);
if (!checkType(data.ty_pl.ty.toType(), zcu)) return false;
// Luckily, we only care about the inputs and outputs, so we don't have to do
// the whole null-terminated string dance.
const outputs_len = extra.data.flags.outputs_len;
const outputs: []const Air.Inst.Ref = @ptrCast(air.extra.items[extra.end..][0..outputs_len]);
const inputs: []const Air.Inst.Ref = @ptrCast(air.extra.items[extra.end + outputs_len ..][0..extra.data.inputs_len]);
const outputs = unwrapped_asm.outputs;
const inputs = unwrapped_asm.inputs;
for (outputs) |output| if (output != .none and !checkRef(output, zcu)) return false;
for (inputs) |input| if (input != .none and !checkRef(input, zcu)) return false;
},
.legalize_compiler_rt_call => {
const extra = air.extraData(Air.Call, data.legalize_compiler_rt_call.payload);
const args: []const Air.Inst.Ref = @ptrCast(air.extra.items[extra.end..][0..extra.data.args_len]);
const rt_call = air.unwrapCompilerRtCall(inst);
const args = rt_call.args;
for (args) |arg| if (!checkRef(arg, zcu)) return false;
},

View file

@ -16226,6 +16226,12 @@ fn zirAsm(
});
sema.appendRefsAssumeCapacity(out_args);
sema.appendRefsAssumeCapacity(args);
{
const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice());
@memcpy(buffer[0..asm_source.len], asm_source);
buffer[asm_source.len] = 0;
sema.air_extra.items.len += asm_source.len / 4 + 1;
}
for (outputs) |o| {
const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice());
@memcpy(buffer[0..o.c.len], o.c);
@ -16242,12 +16248,6 @@ fn zirAsm(
buffer[input.c.len + 1 + input.n.len] = 0;
sema.air_extra.items.len += (input.c.len + input.n.len + (2 + 3)) / 4;
}
{
const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice());
@memcpy(buffer[0..asm_source.len], asm_source);
buffer[asm_source.len] = 0;
sema.air_extra.items.len += asm_source.len / 4 + 1;
}
return asm_air;
}

View file

@ -274,10 +274,9 @@ pub fn analyze(isel: *Select, air_body: []const Air.Inst.Index) !void {
},
.assembly => {
const ty_pl = air_data[@intFromEnum(air_inst_index)].ty_pl;
const extra = isel.air.extraData(Air.Asm, ty_pl.payload);
const operands: []const Air.Inst.Ref = @ptrCast(isel.air.extra.items[extra.end..][0 .. extra.data.flags.outputs_len + extra.data.inputs_len]);
const unwrapped_asm = isel.air.unwrapAsm(air_inst_index);
for (operands) |operand| if (operand != .none) try isel.analyzeUse(operand);
for (unwrapped_asm.outputs) |operand| if (operand != .none) try isel.analyzeUse(operand);
if (ty_pl.ty != .void_type) try isel.def_order.putNoClobber(gpa, air_inst_index, {});
air_body_index += 1;
@ -355,23 +354,23 @@ pub fn analyze(isel: *Select, air_body: []const Air.Inst.Index) !void {
continue :air_tag air_tags[@intFromEnum(air_inst_index)];
},
inline .block, .dbg_inline_block => |air_tag| {
const ty_pl = air_data[@intFromEnum(air_inst_index)].ty_pl;
const extra = isel.air.extraData(switch (air_tag) {
const air_body_block = switch (air_tag) {
else => comptime unreachable,
.block => Air.Block,
.dbg_inline_block => Air.DbgInlineBlock,
}, ty_pl.payload);
const result_ty = ty_pl.ty.toInterned().?;
.block => isel.air.unwrapBlock(air_inst_index),
.dbg_inline_block => isel.air.unwrapDbgBlock(air_inst_index),
};
const result_ty = air_body_block.ty.toIntern();
if (result_ty == .noreturn_type) {
try isel.analyze(@ptrCast(isel.air.extra.items[extra.end..][0..extra.data.body_len]));
try isel.analyze(air_body_block.body);
air_body_index += 1;
break :air_tag;
}
assert(!(try isel.blocks.getOrPut(gpa, air_inst_index)).found_existing);
try isel.analyze(@ptrCast(isel.air.extra.items[extra.end..][0..extra.data.body_len]));
try isel.analyze(air_body_block.body);
const block_entry = isel.blocks.pop().?;
assert(block_entry.key == air_inst_index);
@ -382,8 +381,7 @@ pub fn analyze(isel: *Select, air_body: []const Air.Inst.Index) !void {
continue :air_tag air_tags[@intFromEnum(air_inst_index)];
},
.loop => {
const ty_pl = air_data[@intFromEnum(air_inst_index)].ty_pl;
const extra = isel.air.extraData(Air.Block, ty_pl.payload);
const air_body_block = isel.air.unwrapBlock(air_inst_index);
const initial_dom_start = isel.dom_start;
const initial_dom_len = isel.dom_len;
@ -399,7 +397,7 @@ pub fn analyze(isel: *Select, air_body: []const Air.Inst.Index) !void {
.repeat_list = undefined,
});
try isel.dom.appendNTimes(gpa, 0, std.math.divCeil(usize, isel.dom_len, @bitSizeOf(DomInt)) catch unreachable);
try isel.analyze(@ptrCast(isel.air.extra.items[extra.end..][0..extra.data.body_len]));
try isel.analyze(air_body_block.body);
for (
isel.dom.items[initial_dom_start..].ptr,
isel.dom.items[isel.dom_start..][0 .. std.math.divCeil(usize, initial_dom_len, @bitSizeOf(DomInt)) catch unreachable],
@ -429,18 +427,17 @@ pub fn analyze(isel: *Select, air_body: []const Air.Inst.Index) !void {
.call_never_tail,
.call_never_inline,
=> {
const pl_op = air_data[@intFromEnum(air_inst_index)].pl_op;
const extra = isel.air.extraData(Air.Call, pl_op.payload);
const args: []const Air.Inst.Ref = @ptrCast(isel.air.extra.items[extra.end..][0..extra.data.args_len]);
const air_call = isel.air.unwrapCall(air_inst_index);
const args = air_call.args;
isel.saved_registers.insert(.lr);
const callee_ty = isel.air.typeOf(pl_op.operand, ip);
const callee_ty = isel.air.typeOf(air_call.callee, ip);
const func_info = switch (ip.indexToKey(callee_ty.toIntern())) {
else => unreachable,
.func_type => |func_type| func_type,
.ptr_type => |ptr_type| ip.indexToKey(ptr_type.child).func_type,
};
try isel.analyzeUse(pl_op.operand);
try isel.analyzeUse(air_call.callee);
var param_it: CallAbiIterator = .init;
for (args, 0..) |arg, arg_index| {
const restore_values_len = isel.values.items.len;
@ -549,13 +546,12 @@ pub fn analyze(isel: *Select, air_body: []const Air.Inst.Index) !void {
continue :air_tag air_tags[@intFromEnum(air_inst_index)];
},
.cond_br => {
const pl_op = air_data[@intFromEnum(air_inst_index)].pl_op;
const extra = isel.air.extraData(Air.CondBr, pl_op.payload);
const cond_br = isel.air.unwrapCondBr(air_inst_index);
try isel.analyzeUse(pl_op.operand);
try isel.analyzeUse(cond_br.condition);
try isel.analyze(@ptrCast(isel.air.extra.items[extra.end..][0..extra.data.then_body_len]));
try isel.analyze(@ptrCast(isel.air.extra.items[extra.end + extra.data.then_body_len ..][0..extra.data.else_body_len]));
try isel.analyze(cond_br.then_body);
try isel.analyze(cond_br.else_body);
air_body_index += 1;
},
@ -610,11 +606,10 @@ pub fn analyze(isel: *Select, air_body: []const Air.Inst.Index) !void {
air_body_index += 1;
},
.@"try", .try_cold => {
const pl_op = air_data[@intFromEnum(air_inst_index)].pl_op;
const extra = isel.air.extraData(Air.Try, pl_op.payload);
const unwrapped_try = isel.air.unwrapTry(air_inst_index);
try isel.analyzeUse(pl_op.operand);
try isel.analyze(@ptrCast(isel.air.extra.items[extra.end..][0..extra.data.body_len]));
try isel.analyzeUse(unwrapped_try.error_union);
try isel.analyze(unwrapped_try.else_body);
try isel.def_order.putNoClobber(gpa, air_inst_index, {});
air_body_index += 1;
@ -622,11 +617,10 @@ pub fn analyze(isel: *Select, air_body: []const Air.Inst.Index) !void {
continue :air_tag air_tags[@intFromEnum(air_inst_index)];
},
.try_ptr, .try_ptr_cold => {
const ty_pl = air_data[@intFromEnum(air_inst_index)].ty_pl;
const extra = isel.air.extraData(Air.TryPtr, ty_pl.payload);
const unwrapped_try = isel.air.unwrapTryPtr(air_inst_index);
try isel.analyzeUse(extra.data.ptr);
try isel.analyze(@ptrCast(isel.air.extra.items[extra.end..][0..extra.data.body_len]));
try isel.analyzeUse(unwrapped_try.error_union_ptr);
try isel.analyze(unwrapped_try.else_body);
try isel.def_order.putNoClobber(gpa, air_inst_index, {});
air_body_index += 1;
@ -2698,12 +2692,8 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
.inferred_alloc, .inferred_alloc_comptime => unreachable,
.assembly => {
const ty_pl = air.data(air.inst_index).ty_pl;
const extra = isel.air.extraData(Air.Asm, ty_pl.payload);
var extra_index = extra.end;
const outputs: []const Air.Inst.Ref = @ptrCast(isel.air.extra.items[extra_index..][0..extra.data.flags.outputs_len]);
extra_index += outputs.len;
const inputs: []const Air.Inst.Ref = @ptrCast(isel.air.extra.items[extra_index..][0..extra.data.inputs_len]);
extra_index += inputs.len;
const unwrapped_asm = isel.air.unwrapAsm(air.inst_index);
const inputs = unwrapped_asm.inputs;
var as: codegen.aarch64.Assemble = .{
.source = undefined,
@ -2711,15 +2701,12 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
};
defer as.operands.deinit(gpa);
for (outputs) |output| {
const extra_bytes = std.mem.sliceAsBytes(isel.air.extra.items[extra_index..]);
const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(isel.air.extra.items[extra_index..]), 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
extra_index += (constraint.len + name.len + (2 + 3)) / 4;
var it = unwrapped_asm.iterateOutputs();
while (it.next()) |output| {
const constraint = output.constraint;
const name = output.name;
switch (output) {
switch (output.operand) {
else => return isel.fail("invalid constraint: '{s}'", .{constraint}),
.none => if (std.mem.startsWith(u8, constraint, "={") and std.mem.endsWith(u8, constraint, "}")) {
const output_reg = Register.parse(constraint["={".len .. constraint.len - "}".len]) orelse
@ -2760,54 +2747,51 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
}
const input_mats = try gpa.alloc(Value.Materialize, inputs.len);
var index: u32 = 0;
defer gpa.free(input_mats);
const inputs_extra_index = extra_index;
for (inputs, input_mats) |input, *input_mat| {
const extra_bytes = std.mem.sliceAsBytes(isel.air.extra.items[extra_index..]);
const constraint = std.mem.sliceTo(extra_bytes, 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
extra_index += (constraint.len + name.len + (2 + 3)) / 4;
it = unwrapped_asm.iterateInputs();
while (it.next()) |input| : (index += 1) {
const constraint = input.constraint;
const name = input.name;
if (std.mem.startsWith(u8, constraint, "{") and std.mem.endsWith(u8, constraint, "}")) {
const input_reg = Register.parse(constraint["{".len .. constraint.len - "}".len]) orelse
return isel.fail("invalid constraint: '{s}'", .{constraint});
input_mat.* = .{ .vi = try isel.use(input), .ra = input_reg.alias };
input_mats[index] = .{ .vi = try isel.use(input.operand), .ra = input_reg.alias };
if (!std.mem.eql(u8, name, "_")) {
const operand_gop = try as.operands.getOrPut(gpa, name);
if (operand_gop.found_existing) return isel.fail("duplicate input name: '{s}'", .{name});
const input_ty = isel.air.typeOf(input, ip);
const input_ty = isel.air.typeOf(input.operand, ip);
operand_gop.value_ptr.* = .{ .register = switch (input_ty.abiSize(zcu)) {
0 => unreachable,
1...4 => input_reg.alias.w(),
5...8 => input_reg.alias.x(),
else => return isel.fail("too big input type: '{f}'", .{
isel.fmtType(isel.air.typeOf(input, ip)),
isel.fmtType(isel.air.typeOf(input.operand, ip)),
}),
} };
}
} else if (std.mem.eql(u8, constraint, "r")) {
const input_vi = try isel.use(input);
input_mat.* = try input_vi.matReg(isel);
const input_vi = try isel.use(input.operand);
input_mats[index] = try input_vi.matReg(isel);
if (!std.mem.eql(u8, name, "_")) {
const operand_gop = try as.operands.getOrPut(gpa, name);
if (operand_gop.found_existing) return isel.fail("duplicate input name: '{s}'", .{name});
operand_gop.value_ptr.* = .{ .register = switch (input_vi.size(isel)) {
0 => unreachable,
1...4 => input_mat.ra.w(),
5...8 => input_mat.ra.x(),
1...4 => input_mats[index].ra.w(),
5...8 => input_mats[index].ra.x(),
else => return isel.fail("too big input type: '{f}'", .{
isel.fmtType(isel.air.typeOf(input, ip)),
isel.fmtType(isel.air.typeOf(input.operand, ip)),
}),
} };
}
} else if (std.mem.eql(u8, name, "_")) {
input_mat.vi = try isel.use(input);
input_mats[index].vi = try isel.use(input.operand);
} else return isel.fail("invalid constraint: '{s}'", .{constraint});
}
const clobbers = ip.indexToKey(extra.data.clobbers).aggregate;
const clobbers = ip.indexToKey(unwrapped_asm.clobbers).aggregate;
const clobbers_ty: ZigType = .fromInterned(clobbers.ty);
for (0..clobbers_ty.structFieldCount(zcu)) |field_index| {
switch (switch (clobbers.storage) {
@ -2858,7 +2842,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
}
}
as.source = std.mem.sliceAsBytes(isel.air.extra.items[extra_index..])[0..extra.data.source_len :0];
as.source = unwrapped_asm.source;
const asm_start = isel.instructions.items.len;
while (as.nextInstruction() catch |err| switch (err) {
error.InvalidSyntax => {
@ -2872,21 +2856,18 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
}) |instruction| try isel.emit(instruction);
std.mem.reverse(codegen.aarch64.encoding.Instruction, isel.instructions.items[asm_start..]);
extra_index = inputs_extra_index;
for (input_mats) |input_mat| {
const extra_bytes = std.mem.sliceAsBytes(isel.air.extra.items[extra_index..]);
const constraint = std.mem.sliceTo(extra_bytes, 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
extra_index += (constraint.len + name.len + (2 + 3)) / 4;
it = unwrapped_asm.iterateInputs();
index = 0;
while (it.next()) |input| : (index += 1) {
const constraint = input.constraint;
const name = input.name;
if (std.mem.startsWith(u8, constraint, "{") and std.mem.endsWith(u8, constraint, "}")) {
try input_mat.vi.liveOut(isel, input_mat.ra);
try input_mats[index].vi.liveOut(isel, input_mats[index].ra);
} else if (std.mem.eql(u8, constraint, "r")) {
try input_mat.finish(isel);
try input_mats[index].finish(isel);
} else if (std.mem.eql(u8, name, "_")) {
try input_mat.vi.mat(isel);
try input_mats[index].vi.mat(isel);
} else unreachable;
}
@ -3515,16 +3496,16 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
},
.block => {
const ty_pl = air.data(air.inst_index).ty_pl;
const extra = isel.air.extraData(Air.Block, ty_pl.payload);
try isel.block(air.inst_index, ty_pl.ty.toType(), @ptrCast(
isel.air.extra.items[extra.end..][0..extra.data.body_len],
));
const unwrapped_block = isel.air.unwrapBlock(air.inst_index);
try isel.block(
air.inst_index,
unwrapped_block.ty,
unwrapped_block.body,
);
if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
},
.loop => {
const ty_pl = air.data(air.inst_index).ty_pl;
const extra = isel.air.extraData(Air.Block, ty_pl.payload);
const unwrapped_block = isel.air.unwrapBlock(air.inst_index);
const loops = isel.loops.values();
const loop_index = isel.loops.getIndex(air.inst_index).?;
const loop = &loops[loop_index];
@ -3558,7 +3539,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
loop.live_registers = isel.live_registers;
loop.repeat_list = Loop.empty_list;
try isel.body(@ptrCast(isel.air.extra.items[extra.end..][0..extra.data.body_len]));
try isel.body(unwrapped_block.body);
try isel.merge(&loop.live_registers, .{ .fill_extra = true });
var repeat_label = loop.repeat_list;
@ -3608,10 +3589,9 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
},
.call => {
const pl_op = air.data(air.inst_index).pl_op;
const extra = isel.air.extraData(Air.Call, pl_op.payload);
const args: []const Air.Inst.Ref = @ptrCast(isel.air.extra.items[extra.end..][0..extra.data.args_len]);
const callee_ty = isel.air.typeOf(pl_op.operand, ip);
const air_call = isel.air.unwrapCall(air.inst_index);
const args = air_call.args;
const callee_ty = isel.air.typeOf(air_call.callee, ip);
const func_info = switch (ip.indexToKey(callee_ty.toIntern())) {
else => unreachable,
.func_type => |func_type| func_type,
@ -3649,7 +3629,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
try call.finishReturn(isel);
try call.prepareCallee(isel);
if (pl_op.operand.toInterned()) |ct_callee| {
if (air_call.callee.toInterned()) |ct_callee| {
try isel.nav_relocs.append(gpa, switch (ip.indexToKey(ct_callee)) {
else => unreachable,
inline .@"extern", .func => |func| .{
@ -3666,7 +3646,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
});
try isel.emit(.bl(0));
} else {
const callee_vi = try isel.use(pl_op.operand);
const callee_vi = try isel.use(air_call.callee);
const callee_mat = try callee_vi.matReg(isel);
try isel.emit(.blr(callee_mat.ra.x()));
try callee_mat.finish(isel);
@ -4523,16 +4503,15 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
},
.cond_br => {
const pl_op = air.data(air.inst_index).pl_op;
const extra = isel.air.extraData(Air.CondBr, pl_op.payload);
const cond_br = isel.air.unwrapCondBr(air.inst_index);
try isel.body(@ptrCast(isel.air.extra.items[extra.end + extra.data.then_body_len ..][0..extra.data.else_body_len]));
try isel.body(cond_br.then_body);
const else_label = isel.instructions.items.len;
const else_live_registers = isel.live_registers;
try isel.body(@ptrCast(isel.air.extra.items[extra.end..][0..extra.data.then_body_len]));
try isel.body(cond_br.else_body);
try isel.merge(&else_live_registers, .{});
const cond_vi = try isel.use(pl_op.operand);
const cond_vi = try isel.use(cond_br.condition);
const cond_mat = try cond_vi.matReg(isel);
try isel.emit(.tbz(
cond_mat.ra.x(),
@ -4819,13 +4798,12 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
},
.@"try", .try_cold => {
const pl_op = air.data(air.inst_index).pl_op;
const extra = isel.air.extraData(Air.Try, pl_op.payload);
const error_union_ty = isel.air.typeOf(pl_op.operand, ip);
const unwrapped_try = isel.air.unwrapTry(air.inst_index);
const error_union_ty = isel.air.typeOf(unwrapped_try.error_union, &zcu.intern_pool);
const error_union_info = ip.indexToKey(error_union_ty.toIntern()).error_union_type;
const payload_ty: ZigType = .fromInterned(error_union_info.payload_type);
const error_union_vi = try isel.use(pl_op.operand);
const error_union_vi = try isel.use(unwrapped_try.error_union);
if (isel.live_values.fetchRemove(air.inst_index)) |payload_vi| {
defer payload_vi.value.deref(isel);
@ -4840,7 +4818,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
const cont_label = isel.instructions.items.len;
const cont_live_registers = isel.live_registers;
try isel.body(@ptrCast(isel.air.extra.items[extra.end..][0..extra.data.body_len]));
try isel.body(unwrapped_try.else_body);
try isel.merge(&cont_live_registers, .{});
var error_set_part_it = error_union_vi.field(
@ -4859,18 +4837,17 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
},
.try_ptr, .try_ptr_cold => {
const ty_pl = air.data(air.inst_index).ty_pl;
const extra = isel.air.extraData(Air.TryPtr, ty_pl.payload);
const error_union_ty = isel.air.typeOf(extra.data.ptr, ip).childType(zcu);
const unwrapped_try = isel.air.unwrapTryPtr(air.inst_index);
const error_union_ty = isel.air.typeOf(unwrapped_try.error_union_ptr, ip).childType(zcu);
const error_union_info = ip.indexToKey(error_union_ty.toIntern()).error_union_type;
const payload_ty: ZigType = .fromInterned(error_union_info.payload_type);
const error_union_ptr_vi = try isel.use(extra.data.ptr);
const error_union_ptr_vi = try isel.use(unwrapped_try.error_union_ptr);
const error_union_ptr_mat = try error_union_ptr_vi.matReg(isel);
if (isel.live_values.fetchRemove(air.inst_index)) |payload_ptr_vi| unused: {
defer payload_ptr_vi.value.deref(isel);
switch (codegen.errUnionPayloadOffset(ty_pl.ty.toType().childType(zcu), zcu)) {
0 => try payload_ptr_vi.value.move(isel, extra.data.ptr),
switch (codegen.errUnionPayloadOffset(unwrapped_try.error_union_payload_ptr_ty.toType().childType(zcu), zcu)) {
0 => try payload_ptr_vi.value.move(isel, unwrapped_try.error_union_ptr),
else => |payload_offset| {
const payload_ptr_ra = try payload_ptr_vi.value.defReg(isel) orelse break :unused;
const lo12: u12 = @truncate(payload_offset >> 0);
@ -4887,7 +4864,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
const cont_label = isel.instructions.items.len;
const cont_live_registers = isel.live_registers;
try isel.body(@ptrCast(isel.air.extra.items[extra.end..][0..extra.data.body_len]));
try isel.body(unwrapped_try.else_body);
try isel.merge(&cont_live_registers, .{});
const error_set_ra = try isel.allocIntReg();
@ -4913,11 +4890,8 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory,
if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
},
.dbg_inline_block => {
const ty_pl = air.data(air.inst_index).ty_pl;
const extra = isel.air.extraData(Air.DbgInlineBlock, ty_pl.payload);
try isel.block(air.inst_index, ty_pl.ty.toType(), @ptrCast(
isel.air.extra.items[extra.end..][0..extra.data.body_len],
));
const dbg_block = isel.air.unwrapDbgBlock(air.inst_index);
try isel.block(air.inst_index, dbg_block.ty, dbg_block.body);
if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
},
.dbg_var_ptr, .dbg_var_val, .dbg_arg_inline => {

View file

@ -4622,9 +4622,8 @@ fn airCall(
const gpa = f.object.dg.gpa;
const w = &f.object.code.writer;
const pl_op = f.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = f.air.extraData(Air.Call, pl_op.payload);
const args: []const Air.Inst.Ref = @ptrCast(f.air.extra.items[extra.end..][0..extra.data.args_len]);
const call = f.air.unwrapCall(inst);
const args = call.args;
const resolved_args = try gpa.alloc(CValue, args.len);
defer gpa.free(resolved_args);
@ -4653,15 +4652,15 @@ fn airCall(
}
}
const callee = try f.resolveInst(pl_op.operand);
const callee = try f.resolveInst(call.callee);
{
var bt = iterateBigTomb(f, inst);
try bt.feed(pl_op.operand);
try bt.feed(call.callee);
for (args) |arg| try bt.feed(arg);
}
const callee_ty = f.typeOf(pl_op.operand);
const callee_ty = f.typeOf(call.callee);
const callee_is_ptr = switch (callee_ty.zigTypeTag(zcu)) {
.@"fn" => false,
.pointer => true,
@ -4698,7 +4697,7 @@ fn airCall(
callee: {
known: {
const callee_val = (try f.air.value(pl_op.operand, pt)) orelse break :known;
const callee_val = (try f.air.value(call.callee, pt)) orelse break :known;
const fn_nav, const need_cast = switch (ip.indexToKey(callee_val.toIntern())) {
.@"extern" => |@"extern"| .{ @"extern".owner_nav, false },
.func => |func| .{ func.owner_nav, Type.fromInterned(func.ty).fnCallingConvention(zcu) != .naked and
@ -4796,13 +4795,12 @@ fn airDbgInlineBlock(f: *Function, inst: Air.Inst.Index) !CValue {
const pt = f.object.dg.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = f.air.extraData(Air.DbgInlineBlock, ty_pl.payload);
const owner_nav = ip.getNav(zcu.funcInfo(extra.data.func).owner_nav);
const block = f.air.unwrapDbgBlock(inst);
const owner_nav = ip.getNav(zcu.funcInfo(block.func).owner_nav);
const w = &f.object.code.writer;
try w.print("/* inline:{f} */", .{owner_nav.fqn.fmt(&zcu.intern_pool)});
try f.object.newline();
return lowerBlock(f, inst, @ptrCast(f.air.extra.items[extra.end..][0..extra.data.body_len]));
return lowerBlock(f, inst, block.body);
}
fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue {
@ -4822,9 +4820,8 @@ fn airDbgVar(f: *Function, inst: Air.Inst.Index) !CValue {
}
fn airBlock(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = f.air.extraData(Air.Block, ty_pl.payload);
return lowerBlock(f, inst, @ptrCast(f.air.extra.items[extra.end..][0..extra.data.body_len]));
const block = f.air.unwrapBlock(inst);
return lowerBlock(f, inst, block.body);
}
fn lowerBlock(f: *Function, inst: Air.Inst.Index, body: []const Air.Inst.Index) !CValue {
@ -4873,21 +4870,19 @@ fn lowerBlock(f: *Function, inst: Air.Inst.Index, body: []const Air.Inst.Index)
}
fn airTry(f: *Function, inst: Air.Inst.Index) !CValue {
const pl_op = f.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = f.air.extraData(Air.Try, pl_op.payload);
const body: []const Air.Inst.Index = @ptrCast(f.air.extra.items[extra.end..][0..extra.data.body_len]);
const err_union_ty = f.typeOf(pl_op.operand);
return lowerTry(f, inst, pl_op.operand, body, err_union_ty, false);
const pt = f.object.dg.pt;
const unwrapped_try = f.air.unwrapTry(inst);
const body = unwrapped_try.else_body;
const err_union_ty = f.air.typeOf(unwrapped_try.error_union, &pt.zcu.intern_pool);
return lowerTry(f, inst, unwrapped_try.error_union, body, err_union_ty, false);
}
fn airTryPtr(f: *Function, inst: Air.Inst.Index) !CValue {
const pt = f.object.dg.pt;
const zcu = pt.zcu;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = f.air.extraData(Air.TryPtr, ty_pl.payload);
const body: []const Air.Inst.Index = @ptrCast(f.air.extra.items[extra.end..][0..extra.data.body_len]);
const err_union_ty = f.typeOf(extra.data.ptr).childType(zcu);
return lowerTry(f, inst, extra.data.ptr, body, err_union_ty, true);
const unwrapped_try = f.air.unwrapTryPtr(inst);
const body = unwrapped_try.else_body;
const err_union_ty = f.air.typeOf(unwrapped_try.error_union_ptr, &pt.zcu.intern_pool).childType(pt.zcu);
return lowerTry(f, inst, unwrapped_try.error_union_ptr, body, err_union_ty, true);
}
fn lowerTry(
@ -5216,9 +5211,7 @@ fn airUnreach(o: *Object) !void {
}
fn airLoop(f: *Function, inst: Air.Inst.Index) !void {
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const loop = f.air.extraData(Air.Block, ty_pl.payload);
const body: []const Air.Inst.Index = @ptrCast(f.air.extra.items[loop.end..][0..loop.data.body_len]);
const block = f.air.unwrapBlock(inst);
const w = &f.object.code.writer;
// `repeat` instructions matching this loop will branch to
@ -5227,16 +5220,15 @@ fn airLoop(f: *Function, inst: Air.Inst.Index) !void {
// construct at all!
try w.print("zig_loop_{d}:", .{@intFromEnum(inst)});
try f.object.newline();
try genBodyInner(f, body); // no need to restore state, we're noreturn
try genBodyInner(f, block.body); // no need to restore state, we're noreturn
}
fn airCondBr(f: *Function, inst: Air.Inst.Index) !void {
const pl_op = f.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const cond = try f.resolveInst(pl_op.operand);
try reap(f, inst, &.{pl_op.operand});
const extra = f.air.extraData(Air.CondBr, pl_op.payload);
const then_body: []const Air.Inst.Index = @ptrCast(f.air.extra.items[extra.end..][0..extra.data.then_body_len]);
const else_body: []const Air.Inst.Index = @ptrCast(f.air.extra.items[extra.end + then_body.len ..][0..extra.data.else_body_len]);
const cond_br = f.air.unwrapCondBr(inst);
const cond = try f.resolveInst(cond_br.condition);
try reap(f, inst, &.{cond_br.condition});
const then_body = cond_br.then_body;
const else_body = cond_br.else_body;
const liveness_condbr = f.liveness.getCondBr(inst);
const w = &f.object.code.writer;
@ -5439,16 +5431,11 @@ fn asmInputNeedsLocal(f: *Function, constraint: []const u8, value: CValue) bool
fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
const pt = f.object.dg.pt;
const zcu = pt.zcu;
const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = f.air.extraData(Air.Asm, ty_pl.payload);
const is_volatile = extra.data.flags.is_volatile;
const outputs_len = extra.data.flags.outputs_len;
const unwrapped_asm = f.air.unwrapAsm(inst);
const is_volatile = unwrapped_asm.is_volatile;
const gpa = f.object.dg.gpa;
var extra_i: usize = extra.end;
const outputs: []const Air.Inst.Ref = @ptrCast(f.air.extra.items[extra_i..][0..outputs_len]);
extra_i += outputs.len;
const inputs: []const Air.Inst.Ref = @ptrCast(f.air.extra.items[extra_i..][0..extra.data.inputs_len]);
extra_i += inputs.len;
const outputs = unwrapped_asm.outputs;
const inputs = unwrapped_asm.inputs;
const result = result: {
const w = &f.object.code.writer;
@ -5469,14 +5456,9 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
} else .none;
const locals_begin: LocalIndex = @intCast(f.locals.items.len);
const constraints_extra_begin = extra_i;
for (outputs) |output| {
const extra_bytes = mem.sliceAsBytes(f.air.extra.items[extra_i..]);
const constraint = mem.sliceTo(extra_bytes, 0);
const name = mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
extra_i += (constraint.len + name.len + (2 + 3)) / 4;
var it = unwrapped_asm.iterateOutputs();
while (it.next()) |output| {
const constraint = output.constraint;
if (constraint.len < 2 or constraint[0] != '=' or
(constraint[1] == '{' and constraint[constraint.len - 1] != '}'))
@ -5486,7 +5468,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
const is_reg = constraint[1] == '{';
if (is_reg) {
const output_ty = if (output == .none) inst_ty else f.typeOf(output).childType(zcu);
const output_ty = if (output.operand == .none) inst_ty else f.typeOf(output.operand).childType(zcu);
try w.writeAll("register ");
const output_local = try f.allocLocalValue(.{
.ctype = try f.ctypeFromType(output_ty, .complete),
@ -5505,13 +5487,10 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
try f.object.newline();
}
}
for (inputs) |input| {
const extra_bytes = mem.sliceAsBytes(f.air.extra.items[extra_i..]);
const constraint = mem.sliceTo(extra_bytes, 0);
const name = mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
extra_i += (constraint.len + name.len + (2 + 3)) / 4;
it = unwrapped_asm.iterateInputs();
while (it.next()) |input| {
const constraint = input.constraint;
if (constraint.len < 1 or mem.indexOfScalar(u8, "=+&%", constraint[0]) != null or
(constraint[0] == '{' and constraint[constraint.len - 1] != '}'))
@ -5520,9 +5499,9 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
}
const is_reg = constraint[0] == '{';
const input_val = try f.resolveInst(input);
const input_val = try f.resolveInst(input.operand);
if (asmInputNeedsLocal(f, constraint, input_val)) {
const input_ty = f.typeOf(input);
const input_ty = f.typeOf(input.operand);
if (is_reg) try w.writeAll("register ");
const input_local = try f.allocLocalValue(.{
.ctype = try f.ctypeFromType(input_ty, .complete),
@ -5545,7 +5524,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
}
{
const asm_source = mem.sliceAsBytes(f.air.extra.items[extra_i..])[0..extra.data.source_len];
const asm_source = unwrapped_asm.source;
var stack = std.heap.stackFallback(256, f.object.dg.gpa);
const allocator = stack.get();
@ -5599,18 +5578,15 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
try w.print("({f}", .{fmtStringLiteral(fixed_asm_source[0..dst_i], null)});
}
extra_i = constraints_extra_begin;
var locals_index = locals_begin;
try w.writeByte(':');
for (outputs, 0..) |output, index| {
const extra_bytes = mem.sliceAsBytes(f.air.extra.items[extra_i..]);
const constraint = mem.sliceTo(extra_bytes, 0);
const name = mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
extra_i += (constraint.len + name.len + (2 + 3)) / 4;
if (index > 0) try w.writeByte(',');
it = unwrapped_asm.iterateOutputs();
while (it.next()) |output| {
const constraint = output.constraint;
const name = output.name;
if (output.index > 0) try w.writeByte(',');
try w.writeByte(' ');
if (!mem.eql(u8, name, "_")) try w.print("[{s}]", .{name});
const is_reg = constraint[1] == '{';
@ -5618,28 +5594,26 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
if (is_reg) {
try f.writeCValue(w, .{ .local = locals_index }, .Other);
locals_index += 1;
} else if (output == .none) {
} else if (output.operand == .none) {
try f.writeCValue(w, inst_local, .FunctionArgument);
} else {
try f.writeCValueDeref(w, try f.resolveInst(output));
try f.writeCValueDeref(w, try f.resolveInst(output.operand));
}
try w.writeByte(')');
}
try w.writeByte(':');
for (inputs, 0..) |input, index| {
const extra_bytes = mem.sliceAsBytes(f.air.extra.items[extra_i..]);
const constraint = mem.sliceTo(extra_bytes, 0);
const name = mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
extra_i += (constraint.len + name.len + (2 + 3)) / 4;
if (index > 0) try w.writeByte(',');
it = unwrapped_asm.iterateInputs();
while (it.next()) |input| {
const constraint = input.constraint;
const name = input.name;
if (input.index > 0) try w.writeByte(',');
try w.writeByte(' ');
if (!mem.eql(u8, name, "_")) try w.print("[{s}]", .{name});
const is_reg = constraint[0] == '{';
const input_val = try f.resolveInst(input);
const input_val = try f.resolveInst(input.operand);
try w.print("{f}(", .{fmtStringLiteral(if (is_reg) "r" else constraint, null)});
try f.writeCValue(w, if (asmInputNeedsLocal(f, constraint, input_val)) local: {
const input_local_idx = locals_index;
@ -5650,7 +5624,7 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
}
try w.writeByte(':');
const ip = &zcu.intern_pool;
const aggregate = ip.indexToKey(extra.data.clobbers).aggregate;
const aggregate = ip.indexToKey(unwrapped_asm.clobbers).aggregate;
const struct_type: Type = .fromInterned(aggregate.ty);
switch (aggregate.storage) {
.elems => |elems| for (elems, 0..) |elem, i| switch (elem) {
@ -5697,22 +5671,17 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
try w.writeAll(");");
try f.object.newline();
extra_i = constraints_extra_begin;
locals_index = locals_begin;
for (outputs) |output| {
const extra_bytes = mem.sliceAsBytes(f.air.extra.items[extra_i..]);
const constraint = mem.sliceTo(extra_bytes, 0);
const name = mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
extra_i += (constraint.len + name.len + (2 + 3)) / 4;
it = unwrapped_asm.iterateOutputs();
while (it.next()) |output| {
const constraint = output.constraint;
const is_reg = constraint[1] == '{';
if (is_reg) {
try f.writeCValueDeref(w, if (output == .none)
try f.writeCValueDeref(w, if (output.operand == .none)
.{ .local_ref = inst_local.new_local }
else
try f.resolveInst(output));
try f.resolveInst(output.operand));
try w.writeAll(" = ");
try f.writeCValue(w, .{ .local = locals_index }, .Other);
locals_index += 1;

View file

@ -5258,14 +5258,13 @@ pub const FuncGen = struct {
};
fn airCall(self: *FuncGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !Builder.Value {
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.Call, pl_op.payload);
const args: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.args_len]);
const air_call = self.air.unwrapCall(inst);
const args = air_call.args;
const o = self.ng.object;
const pt = self.ng.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const callee_ty = self.typeOf(pl_op.operand);
const callee_ty = self.typeOf(air_call.callee);
const zig_fn_ty = switch (callee_ty.zigTypeTag(zcu)) {
.@"fn" => callee_ty,
.pointer => callee_ty.childType(zcu),
@ -5273,7 +5272,7 @@ pub const FuncGen = struct {
};
const fn_info = zcu.typeToFunc(zig_fn_ty).?;
const return_type = Type.fromInterned(fn_info.return_type);
const llvm_fn = try self.resolveInst(pl_op.operand);
const llvm_fn = try self.resolveInst(air_call.callee);
const target = zcu.getTarget();
const sret = firstParamSRet(fn_info, zcu, target);
@ -5934,9 +5933,8 @@ pub const FuncGen = struct {
}
fn airBlock(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Block, ty_pl.payload);
return self.lowerBlock(inst, null, @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]));
const block = self.air.unwrapBlock(inst);
return self.lowerBlock(inst, null, block.body);
}
fn lowerBlock(
@ -6216,11 +6214,10 @@ pub const FuncGen = struct {
}
fn airCondBr(self: *FuncGen, inst: Air.Inst.Index) !void {
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const cond = try self.resolveInst(pl_op.operand);
const extra = self.air.extraData(Air.CondBr, pl_op.payload);
const then_body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.then_body_len]);
const else_body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end + then_body.len ..][0..extra.data.else_body_len]);
const cond_br = self.air.unwrapCondBr(inst);
const cond = try self.resolveInst(cond_br.condition);
const then_body = cond_br.then_body;
const else_body = cond_br.else_body;
const Hint = enum {
none,
@ -6230,22 +6227,22 @@ pub const FuncGen = struct {
then_cold,
else_cold,
};
const hint: Hint = switch (extra.data.branch_hints.true) {
.none => switch (extra.data.branch_hints.false) {
const hint: Hint = switch (cond_br.branch_hints.true) {
.none => switch (cond_br.branch_hints.false) {
.none => .none,
.likely => .else_likely,
.unlikely => .then_likely,
.cold => .else_cold,
.unpredictable => .unpredictable,
},
.likely => switch (extra.data.branch_hints.false) {
.likely => switch (cond_br.branch_hints.false) {
.none => .then_likely,
.likely => .unpredictable,
.unlikely => .then_likely,
.cold => .else_cold,
.unpredictable => .unpredictable,
},
.unlikely => switch (extra.data.branch_hints.false) {
.unlikely => switch (cond_br.branch_hints.false) {
.none => .else_likely,
.likely => .else_likely,
.unlikely => .unpredictable,
@ -6267,35 +6264,33 @@ pub const FuncGen = struct {
self.wip.cursor = .{ .block = then_block };
if (hint == .then_cold) _ = try self.wip.callIntrinsicAssumeCold();
try self.genBodyDebugScope(null, then_body, extra.data.branch_hints.then_cov);
try self.genBodyDebugScope(null, then_body, cond_br.branch_hints.then_cov);
self.wip.cursor = .{ .block = else_block };
if (hint == .else_cold) _ = try self.wip.callIntrinsicAssumeCold();
try self.genBodyDebugScope(null, else_body, extra.data.branch_hints.else_cov);
try self.genBodyDebugScope(null, else_body, cond_br.branch_hints.else_cov);
// No need to reset the insert cursor since this instruction is noreturn.
}
fn airTry(self: *FuncGen, inst: Air.Inst.Index, err_cold: bool) !Builder.Value {
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const err_union = try self.resolveInst(pl_op.operand);
const extra = self.air.extraData(Air.Try, pl_op.payload);
const body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]);
const err_union_ty = self.typeOf(pl_op.operand);
const unwrapped_try = self.air.unwrapTry(inst);
const err_union = try self.resolveInst(unwrapped_try.error_union);
const body = unwrapped_try.else_body;
const err_union_ty = self.typeOf(unwrapped_try.error_union);
const is_unused = self.liveness.isUnused(inst);
return lowerTry(self, err_union, body, err_union_ty, false, false, is_unused, err_cold);
}
fn airTryPtr(self: *FuncGen, inst: Air.Inst.Index, err_cold: bool) !Builder.Value {
const zcu = self.ng.pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.TryPtr, ty_pl.payload);
const err_union_ptr = try self.resolveInst(extra.data.ptr);
const body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]);
const err_union_ty = self.typeOf(extra.data.ptr).childType(zcu);
const unwrapped_try = self.air.unwrapTryPtr(inst);
const err_union_ptr = try self.resolveInst(unwrapped_try.error_union_ptr);
const body = unwrapped_try.else_body;
const err_union_ty = self.typeOf(unwrapped_try.error_union_ptr).childType(zcu);
const is_unused = self.liveness.isUnused(inst);
self.maybeMarkAllowZeroAccess(self.typeOf(extra.data.ptr).ptrInfo(zcu));
self.maybeMarkAllowZeroAccess(self.typeOf(unwrapped_try.error_union_ptr).ptrInfo(zcu));
return lowerTry(self, err_union_ptr, body, err_union_ty, true, true, is_unused, err_cold);
}
@ -6627,9 +6622,8 @@ pub const FuncGen = struct {
}
fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const loop = self.air.extraData(Air.Block, ty_pl.payload);
const body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[loop.end..][0..loop.data.body_len]);
const block = self.air.unwrapBlock(inst);
const body = block.body;
const loop_block = try self.wip.block(1, "Loop"); // `airRepeat` will increment incoming each time
_ = try self.wip.br(loop_block);
@ -7137,10 +7131,9 @@ pub const FuncGen = struct {
}
fn airDbgInlineBlock(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.DbgInlineBlock, ty_pl.payload);
const block = self.air.unwrapDbgBlock(inst);
self.arg_inline_index = 0;
return self.lowerBlock(inst, extra.data.func, @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]));
return self.lowerBlock(inst, block.func, block.body);
}
fn airDbgVarPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
@ -7262,17 +7255,12 @@ pub const FuncGen = struct {
// this implementation feeds the inline assembly code directly to LLVM.
const o = self.ng.object;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Asm, ty_pl.payload);
const is_volatile = extra.data.flags.is_volatile;
const outputs_len = extra.data.flags.outputs_len;
const unwrapped_asm = self.air.unwrapAsm(inst);
const is_volatile = unwrapped_asm.is_volatile;
const gpa = self.gpa;
var extra_i: usize = extra.end;
const outputs: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[extra_i..][0..outputs_len]);
extra_i += outputs.len;
const inputs: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[extra_i..][0..extra.data.inputs_len]);
extra_i += inputs.len;
const outputs = unwrapped_asm.outputs;
const inputs = unwrapped_asm.inputs;
var llvm_constraints: std.ArrayList(u8) = .empty;
defer llvm_constraints.deinit(gpa);
@ -7305,14 +7293,10 @@ pub const FuncGen = struct {
var name_map: std.StringArrayHashMapUnmanaged(u16) = .empty;
try name_map.ensureUnusedCapacity(arena, max_param_count);
var rw_extra_i = extra_i;
for (outputs, llvm_ret_indirect, llvm_rw_vals) |output, *is_indirect, *llvm_rw_val| {
const extra_bytes = std.mem.sliceAsBytes(self.air.extra.items[extra_i..]);
const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra.items[extra_i..]), 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
extra_i += (constraint.len + name.len + (2 + 3)) / 4;
var it = unwrapped_asm.iterateOutputs();
while (it.next()) |output| {
const constraint = output.constraint;
const name = output.name;
try llvm_constraints.ensureUnusedCapacity(gpa, constraint.len + 3);
if (total_i != 0) {
@ -7320,15 +7304,15 @@ pub const FuncGen = struct {
}
llvm_constraints.appendAssumeCapacity('=');
if (output != .none) {
const output_inst = try self.resolveInst(output);
const output_ty = self.typeOf(output);
if (output.operand != .none) {
const output_inst = try self.resolveInst(output.operand);
const output_ty = self.typeOf(output.operand);
assert(output_ty.zigTypeTag(zcu) == .pointer);
const elem_llvm_ty = try o.lowerPtrElemTy(pt, output_ty.childType(zcu));
switch (constraint[0]) {
'=' => {},
'+' => llvm_rw_val.* = output_inst,
'+' => llvm_rw_vals[output.index] = output_inst,
else => return self.todo("unsupported output constraint on output type '{c}'", .{
constraint[0],
}),
@ -7337,8 +7321,8 @@ pub const FuncGen = struct {
self.maybeMarkAllowZeroAccess(output_ty.ptrInfo(zcu));
// Pass any non-return outputs indirectly, if the constraint accepts a memory location
is_indirect.* = constraintAllowsMemory(constraint);
if (is_indirect.*) {
llvm_ret_indirect[output.index] = constraintAllowsMemory(constraint);
if (llvm_ret_indirect[output.index]) {
// Pass the result by reference as an indirect output (e.g. "=*m")
llvm_constraints.appendAssumeCapacity('*');
@ -7359,7 +7343,7 @@ pub const FuncGen = struct {
}),
}
is_indirect.* = false;
llvm_ret_indirect[output.index] = false;
const ret_ty = self.typeOfIndex(inst);
llvm_ret_types[llvm_ret_i] = try o.lowerType(pt, ret_ty);
@ -7387,16 +7371,13 @@ pub const FuncGen = struct {
total_i += 1;
}
for (inputs) |input| {
const extra_bytes = std.mem.sliceAsBytes(self.air.extra.items[extra_i..]);
const constraint = std.mem.sliceTo(extra_bytes, 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
extra_i += (constraint.len + name.len + (2 + 3)) / 4;
it = unwrapped_asm.iterateInputs();
while (it.next()) |input| {
const constraint = input.constraint;
const name = input.name;
const arg_llvm_value = try self.resolveInst(input);
const arg_ty = self.typeOf(input);
const arg_llvm_value = try self.resolveInst(input.operand);
const arg_ty = self.typeOf(input.operand);
const is_by_ref = isByRef(arg_ty, zcu);
if (is_by_ref) {
if (constraintAllowsMemory(constraint)) {
@ -7452,27 +7433,23 @@ pub const FuncGen = struct {
total_i += 1;
}
for (outputs, llvm_ret_indirect, llvm_rw_vals, 0..) |output, is_indirect, llvm_rw_val, output_index| {
const extra_bytes = std.mem.sliceAsBytes(self.air.extra.items[rw_extra_i..]);
const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra.items[rw_extra_i..]), 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
rw_extra_i += (constraint.len + name.len + (2 + 3)) / 4;
it = unwrapped_asm.iterateOutputs();
while (it.next()) |output| {
const constraint = output.constraint;
if (constraint[0] != '+') continue;
const rw_ty = self.typeOf(output);
const rw_ty = self.typeOf(output.operand);
const llvm_elem_ty = try o.lowerPtrElemTy(pt, rw_ty.childType(zcu));
if (is_indirect) {
llvm_param_values[llvm_param_i] = llvm_rw_val;
llvm_param_types[llvm_param_i] = llvm_rw_val.typeOfWip(&self.wip);
if (llvm_ret_indirect[output.index]) {
llvm_param_values[llvm_param_i] = llvm_rw_vals[output.index];
llvm_param_types[llvm_param_i] = llvm_rw_vals[output.index].typeOfWip(&self.wip);
} else {
const alignment = rw_ty.abiAlignment(zcu).toLlvm();
const loaded = try self.wip.load(
if (rw_ty.isVolatilePtr(zcu)) .@"volatile" else .normal,
llvm_elem_ty,
llvm_rw_val,
llvm_rw_vals[output.index],
alignment,
"",
);
@ -7480,18 +7457,18 @@ pub const FuncGen = struct {
llvm_param_types[llvm_param_i] = llvm_elem_ty;
}
try llvm_constraints.print(gpa, ",{d}", .{output_index});
try llvm_constraints.print(gpa, ",{d}", .{output.index});
// In the case of indirect inputs, LLVM requires the callsite to have
// an elementtype(<ty>) attribute.
llvm_param_attrs[llvm_param_i] = if (is_indirect) llvm_elem_ty else .none;
llvm_param_attrs[llvm_param_i] = if (llvm_ret_indirect[output.index]) llvm_elem_ty else .none;
llvm_param_i += 1;
total_i += 1;
}
const ip = &zcu.intern_pool;
const aggregate = ip.indexToKey(extra.data.clobbers).aggregate;
const aggregate = ip.indexToKey(unwrapped_asm.clobbers).aggregate;
const struct_type: Type = .fromInterned(aggregate.ty);
if (total_i != 0) try llvm_constraints.append(gpa, ',');
switch (aggregate.storage) {
@ -7539,7 +7516,7 @@ pub const FuncGen = struct {
if (std.mem.endsWith(u8, llvm_constraints.items, ",")) llvm_constraints.items.len -= 1;
const asm_source = std.mem.sliceAsBytes(self.air.extra.items[extra_i..])[0..extra.data.source_len];
const asm_source = unwrapped_asm.source;
// hackety hacks until stage2 has proper inline asm in the frontend.
var rendered_template = std.array_list.Managed(u8).init(gpa);

View file

@ -3627,11 +3627,11 @@ fn airRuntimeNavPtr(func: *Func, inst: Air.Inst.Index) !void {
}
fn airTry(func: *Func, inst: Air.Inst.Index) !void {
const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = func.air.extraData(Air.Try, pl_op.payload);
const body: []const Air.Inst.Index = @ptrCast(func.air.extra.items[extra.end..][0..extra.data.body_len]);
const operand_ty = func.typeOf(pl_op.operand);
const result = try func.genTry(inst, pl_op.operand, body, operand_ty, false);
const zcu = func.pt.zcu;
const unwrapped_try = func.air.unwrapTry(inst);
const body = unwrapped_try.else_body;
const operand_ty = func.air.typeOf(unwrapped_try.error_union, &zcu.intern_pool);
const result = try func.genTry(inst, unwrapped_try.error_union, body, operand_ty, false);
return func.finishAir(inst, result, .{ .none, .none, .none });
}
@ -4801,10 +4801,8 @@ fn airFrameAddress(func: *Func, inst: Air.Inst.Index) !void {
fn airCall(func: *Func, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !void {
if (modifier == .always_tail) return func.fail("TODO implement tail calls for riscv64", .{});
const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const callee = pl_op.operand;
const extra = func.air.extraData(Air.Call, pl_op.payload);
const arg_refs: []const Air.Inst.Ref = @ptrCast(func.air.extra.items[extra.end..][0..extra.data.args_len]);
const call = func.air.unwrapCall(inst);
const arg_refs = call.args;
const expected_num_args = 8;
const ExpectedContents = extern struct {
@ -4822,10 +4820,10 @@ fn airCall(func: *Func, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
defer allocator.free(arg_vals);
for (arg_vals, arg_refs) |*arg_val, arg_ref| arg_val.* = .{ .air_ref = arg_ref };
const call_ret = try func.genCall(.{ .air = callee }, arg_tys, arg_vals);
const call_ret = try func.genCall(.{ .air = call.callee }, arg_tys, arg_vals);
var bt = func.liveness.iterateBigTomb(inst);
try func.feed(&bt, pl_op.operand);
try func.feed(&bt, call.callee);
for (arg_refs) |arg_ref| try func.feed(&bt, arg_ref);
const result = if (func.liveness.isUnused(inst)) .unreach else call_ret;
@ -5218,9 +5216,8 @@ fn airDbgStmt(func: *Func, inst: Air.Inst.Index) !void {
}
fn airDbgInlineBlock(func: *Func, inst: Air.Inst.Index) !void {
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = func.air.extraData(Air.DbgInlineBlock, ty_pl.payload);
try func.lowerBlock(inst, @ptrCast(func.air.extra.items[extra.end..][0..extra.data.body_len]));
const block = func.air.unwrapDbgBlock(inst);
try func.lowerBlock(inst, block.body);
}
fn airDbgVar(func: *Func, inst: Air.Inst.Index) InnerError!void {
@ -5271,19 +5268,18 @@ fn genVarDbgInfo(
}
fn airCondBr(func: *Func, inst: Air.Inst.Index) !void {
const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const cond = try func.resolveInst(pl_op.operand);
const cond_ty = func.typeOf(pl_op.operand);
const extra = func.air.extraData(Air.CondBr, pl_op.payload);
const then_body: []const Air.Inst.Index = @ptrCast(func.air.extra.items[extra.end..][0..extra.data.then_body_len]);
const else_body: []const Air.Inst.Index = @ptrCast(func.air.extra.items[extra.end + then_body.len ..][0..extra.data.else_body_len]);
const cond_br = func.air.unwrapCondBr(inst);
const cond = try func.resolveInst(cond_br.condition);
const cond_ty = func.typeOf(cond_br.condition);
const then_body = cond_br.then_body;
const else_body = cond_br.else_body;
const liveness_cond_br = func.liveness.getCondBr(inst);
// If the condition dies here in this condbr instruction, process
// that death now instead of later as this has an effect on
// whether it needs to be spilled in the branches
if (func.liveness.operandDies(inst, 0)) {
if (pl_op.operand.toIndex()) |op_inst| try func.processDeath(op_inst);
if (cond_br.condition.toIndex()) |op_inst| try func.processDeath(op_inst);
}
func.scope_generation += 1;
@ -5633,10 +5629,7 @@ fn airIsNonErrPtr(func: *Func, inst: Air.Inst.Index) !void {
fn airLoop(func: *Func, inst: Air.Inst.Index) !void {
// A loop is a setup to be able to jump back to the beginning.
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const loop = func.air.extraData(Air.Block, ty_pl.payload);
const body: []const Air.Inst.Index = @ptrCast(func.air.extra.items[loop.end..][0..loop.data.body_len]);
const body = func.air.unwrapBlock(inst);
func.scope_generation += 1;
const state = try func.saveState();
@ -5646,7 +5639,7 @@ fn airLoop(func: *Func, inst: Air.Inst.Index) !void {
});
defer assert(func.loops.remove(inst));
try func.genBody(body);
try func.genBody(body.body);
func.finishAirBookkeeping();
}
@ -5663,9 +5656,8 @@ fn jump(func: *Func, index: Mir.Inst.Index) !Mir.Inst.Index {
}
fn airBlock(func: *Func, inst: Air.Inst.Index) !void {
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = func.air.extraData(Air.Block, ty_pl.payload);
try func.lowerBlock(inst, @ptrCast(func.air.extra.items[extra.end..][0..extra.data.body_len]));
const block = func.air.unwrapBlock(inst);
try func.lowerBlock(inst, block.body);
}
fn lowerBlock(func: *Func, inst: Air.Inst.Index, body: []const Air.Inst.Index) !void {
@ -6053,15 +6045,9 @@ fn airBoolOp(func: *Func, inst: Air.Inst.Index) !void {
}
fn airAsm(func: *Func, inst: Air.Inst.Index) !void {
const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = func.air.extraData(Air.Asm, ty_pl.payload);
const outputs_len = extra.data.flags.outputs_len;
var extra_i: usize = extra.end;
const outputs: []const Air.Inst.Ref =
@ptrCast(func.air.extra.items[extra_i..][0..outputs_len]);
extra_i += outputs.len;
const inputs: []const Air.Inst.Ref = @ptrCast(func.air.extra.items[extra_i..][0..extra.data.inputs_len]);
extra_i += inputs.len;
const unwrapped_asm = func.air.unwrapAsm(inst);
const outputs = unwrapped_asm.outputs;
const inputs = unwrapped_asm.inputs;
var result: MCValue = .none;
var args = std.array_list.Managed(MCValue).init(func.gpa);
@ -6076,19 +6062,15 @@ fn airAsm(func: *Func, inst: Air.Inst.Index) !void {
try arg_map.ensureTotalCapacity(@intCast(outputs.len + inputs.len));
defer arg_map.deinit();
var outputs_extra_i = extra_i;
for (outputs) |output| {
const extra_bytes = mem.sliceAsBytes(func.air.extra.items[extra_i..]);
const constraint = mem.sliceTo(mem.sliceAsBytes(func.air.extra.items[extra_i..]), 0);
const name = mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
extra_i += (constraint.len + name.len + (2 + 3)) / 4;
var it = unwrapped_asm.iterateOutputs();
while (it.next()) |output| {
const constraint = output.constraint;
const name = output.name;
const is_read = switch (constraint[0]) {
'=' => false,
'+' => read: {
if (output == .none) return func.fail(
if (output.operand == .none) return func.fail(
"read-write constraint unsupported for asm result: '{s}'",
.{constraint},
);
@ -6100,7 +6082,7 @@ fn airAsm(func: *Func, inst: Air.Inst.Index) !void {
const rest = constraint[@as(usize, 1) + @intFromBool(is_early_clobber) ..];
const arg_mcv: MCValue = arg_mcv: {
const arg_maybe_reg: ?Register = if (mem.eql(u8, rest, "m"))
if (output != .none) null else return func.fail(
if (output.operand != .none) null else return func.fail(
"memory constraint unsupported for asm result: '{s}'",
.{constraint},
)
@ -6115,7 +6097,7 @@ fn airAsm(func: *Func, inst: Air.Inst.Index) !void {
break :arg_mcv args.items[index];
} else return func.fail("invalid constraint: '{s}'", .{constraint});
break :arg_mcv if (arg_maybe_reg) |reg| .{ .register = reg } else arg: {
const ptr_mcv = try func.resolveInst(output);
const ptr_mcv = try func.resolveInst(output.operand);
switch (ptr_mcv) {
.immediate => |addr| if (math.cast(i32, @as(i64, @bitCast(addr)))) |_|
break :arg ptr_mcv.deref(),
@ -6131,20 +6113,17 @@ fn airAsm(func: *Func, inst: Air.Inst.Index) !void {
if (!mem.eql(u8, name, "_"))
arg_map.putAssumeCapacityNoClobber(name, @intCast(args.items.len));
args.appendAssumeCapacity(arg_mcv);
if (output == .none) result = arg_mcv;
if (is_read) try func.load(arg_mcv, .{ .air_ref = output }, func.typeOf(output));
if (output.operand == .none) result = arg_mcv;
if (is_read) try func.load(arg_mcv, .{ .air_ref = output.operand }, func.typeOf(output.operand));
}
for (inputs) |input| {
const input_bytes = mem.sliceAsBytes(func.air.extra.items[extra_i..]);
const constraint = mem.sliceTo(input_bytes, 0);
const name = mem.sliceTo(input_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
extra_i += (constraint.len + name.len + (2 + 3)) / 4;
it = unwrapped_asm.iterateInputs();
while (it.next()) |input| {
const constraint = input.constraint;
const name = input.name;
const ty = func.typeOf(input);
const input_mcv = try func.resolveInst(input);
const ty = func.typeOf(input.operand);
const input_mcv = try func.resolveInst(input.operand);
const arg_mcv: MCValue = if (mem.eql(u8, constraint, "X"))
input_mcv
else if (mem.startsWith(u8, constraint, "{") and mem.endsWith(u8, constraint, "}")) arg: {
@ -6171,7 +6150,7 @@ fn airAsm(func: *Func, inst: Air.Inst.Index) !void {
const zcu = func.pt.zcu;
const ip = &zcu.intern_pool;
const aggregate = ip.indexToKey(extra.data.clobbers).aggregate;
const aggregate = ip.indexToKey(unwrapped_asm.clobbers).aggregate;
const struct_type: Type = .fromInterned(aggregate.ty);
switch (aggregate.storage) {
.elems => |elems| for (elems, 0..) |elem, i| {
@ -6231,7 +6210,7 @@ fn airAsm(func: *Func, inst: Air.Inst.Index) !void {
labels.deinit(func.gpa);
}
const asm_source = std.mem.sliceAsBytes(func.air.extra.items[extra_i..])[0..extra.data.source_len];
const asm_source = unwrapped_asm.source;
var line_it = mem.tokenizeAny(u8, asm_source, "\n\r;");
next_line: while (line_it.next()) |line| {
var mnem_it = mem.tokenizeAny(u8, line, " \t");
@ -6499,19 +6478,14 @@ fn airAsm(func: *Func, inst: Air.Inst.Index) !void {
while (label_it.next()) |label| if (label.value_ptr.pending_relocs.items.len > 0)
return func.fail("undefined label: '{s}'", .{label.key_ptr.*});
for (outputs, args.items[0..outputs.len]) |output, arg_mcv| {
const extra_bytes = mem.sliceAsBytes(func.air.extra.items[outputs_extra_i..]);
const constraint =
mem.sliceTo(mem.sliceAsBytes(func.air.extra.items[outputs_extra_i..]), 0);
const name = mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
outputs_extra_i += (constraint.len + name.len + (2 + 3)) / 4;
it = unwrapped_asm.iterateOutputs();
while (it.next()) |output| {
const constraint = output.constraint;
if (output == .none) continue;
if (arg_mcv != .register) continue;
if (output.operand == .none) continue;
if (args.items[output.index] != .register) continue;
if (constraint.len == 2 and std.ascii.isDigit(constraint[1])) continue;
try func.store(.{ .air_ref = output }, arg_mcv, func.typeOf(output));
try func.store(.{ .air_ref = output.operand }, args.items[output.index], func.typeOf(output.operand));
}
simple: {

View file

@ -877,15 +877,10 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void {
}
fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Asm, ty_pl.payload);
const is_volatile = extra.data.flags.is_volatile;
const outputs_len = extra.data.flags.outputs_len;
var extra_i: usize = extra.end;
const outputs: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[extra_i .. extra_i + outputs_len]);
extra_i += outputs.len;
const inputs: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[extra_i .. extra_i + extra.data.inputs_len]);
extra_i += inputs.len;
const unwrapped_asm = self.air.unwrapAsm(inst);
const is_volatile = unwrapped_asm.is_volatile;
const outputs = unwrapped_asm.outputs;
const inputs = unwrapped_asm.inputs;
const dead = !is_volatile and self.liveness.isUnused(inst);
const result: MCValue = if (dead) .dead else result: {
@ -893,27 +888,18 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
return self.fail("TODO implement codegen for asm with more than 1 output", .{});
}
const output_constraint: ?[]const u8 = for (outputs) |output| {
if (output != .none) {
var it = unwrapped_asm.iterateOutputs();
const output_constraint: ?[]const u8 = while (it.next()) |output| {
if (output.operand != .none) {
return self.fail("TODO implement codegen for non-expr asm", .{});
}
const extra_bytes = std.mem.sliceAsBytes(self.air.extra.items[extra_i..]);
const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra.items[extra_i..]), 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
extra_i += (constraint.len + name.len + (2 + 3)) / 4;
break constraint;
break output.constraint;
} else null;
for (inputs) |input| {
const input_bytes = std.mem.sliceAsBytes(self.air.extra.items[extra_i..]);
const constraint = std.mem.sliceTo(input_bytes, 0);
const name = std.mem.sliceTo(input_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
extra_i += (constraint.len + name.len + (2 + 3)) / 4;
it = unwrapped_asm.iterateInputs();
while (it.next()) |input| {
const constraint = input.constraint;
if (constraint.len < 3 or constraint[0] != '{' or constraint[constraint.len - 1] != '}') {
return self.fail("unrecognized asm input constraint: '{s}'", .{constraint});
@ -922,15 +908,15 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
const reg = parseRegName(reg_name) orelse
return self.fail("unrecognized register: '{s}'", .{reg_name});
const arg_mcv = try self.resolveInst(input);
const arg_mcv = try self.resolveInst(input.operand);
try self.register_manager.getReg(reg, null);
try self.genSetReg(self.typeOf(input), reg, arg_mcv);
try self.genSetReg(self.typeOf(input.operand), reg, arg_mcv);
}
// TODO honor the clobbers
_ = extra.data.clobbers;
_ = unwrapped_asm.clobbers;
const asm_source = std.mem.sliceAsBytes(self.air.extra.items[extra_i..])[0..extra.data.source_len];
const asm_source = unwrapped_asm.source;
if (mem.eql(u8, asm_source, "ta 0x6d")) {
_ = try self.addInst(.{
@ -1109,9 +1095,8 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
}
fn airBlock(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Block, ty_pl.payload);
try self.lowerBlock(inst, @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]));
const block = self.air.unwrapBlock(inst);
try self.lowerBlock(inst, block.body);
}
fn lowerBlock(self: *Self, inst: Air.Inst.Index, body: []const Air.Inst.Index) !void {
@ -1276,11 +1261,9 @@ fn airByteSwap(self: *Self, inst: Air.Inst.Index) !void {
fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !void {
if (modifier == .always_tail) return self.fail("TODO implement tail calls for {}", .{self.target.cpu.arch});
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const callee = pl_op.operand;
const extra = self.air.extraData(Air.Call, pl_op.payload);
const args: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[extra.end .. extra.end + extra.data.args_len]);
const ty = self.typeOf(callee);
const call = self.air.unwrapCall(inst);
const args = call.args;
const ty = self.typeOf(call.callee);
const pt = self.pt;
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
@ -1327,7 +1310,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// Due to incremental compilation, how function calls are generated depends
// on linking.
if (try self.air.value(callee, pt)) |func_value| switch (ip.indexToKey(func_value.toIntern())) {
if (try self.air.value(call.callee, pt)) |func_value| switch (ip.indexToKey(func_value.toIntern())) {
.func => {
return self.fail("TODO implement calling functions", .{});
},
@ -1339,7 +1322,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
},
} else {
assert(ty.zigTypeTag(zcu) == .pointer);
const mcv = try self.resolveInst(callee);
const mcv = try self.resolveInst(call.callee);
try self.genSetReg(ty, .o7, mcv);
_ = try self.addInst(.{
@ -1365,13 +1348,13 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (args.len + 1 <= Air.Liveness.bpi - 1) {
var buf = [1]Air.Inst.Ref{.none} ** (Air.Liveness.bpi - 1);
buf[0] = callee;
buf[0] = call.callee;
@memcpy(buf[1..][0..args.len], args);
return self.finishAir(inst, result, buf);
}
var bt = try self.iterateBigTomb(inst, 1 + args.len);
bt.feed(callee);
bt.feed(call.callee);
for (args) |arg| {
bt.feed(arg);
}
@ -1451,9 +1434,7 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void {
}
fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Block, ty_pl.payload);
_ = extra;
_ = inst;
return self.fail("TODO implement airCmpxchg for {}", .{
self.target.cpu.arch,
@ -1461,11 +1442,10 @@ fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void {
}
fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const condition = try self.resolveInst(pl_op.operand);
const extra = self.air.extraData(Air.CondBr, pl_op.payload);
const then_body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.then_body_len]);
const else_body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end + then_body.len ..][0..extra.data.else_body_len]);
const cond_br = self.air.unwrapCondBr(inst);
const condition = try self.resolveInst(cond_br.condition);
const then_body = cond_br.then_body;
const else_body = cond_br.else_body;
const liveness_condbr = self.liveness.getCondBr(inst);
// Here we emit a branch to the false section.
@ -1475,7 +1455,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
// that death now instead of later as this has an effect on
// whether it needs to be spilled in the branches
if (self.liveness.operandDies(inst, 0)) {
if (pl_op.operand.toIndex()) |op_index| {
if (cond_br.condition.toIndex()) |op_index| {
self.processDeath(op_index);
}
}
@ -1613,10 +1593,9 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) !void {
}
fn airDbgInlineBlock(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.DbgInlineBlock, ty_pl.payload);
const block = self.air.unwrapDbgBlock(inst);
// TODO emit debug info for function change
try self.lowerBlock(inst, @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]));
try self.lowerBlock(inst, block.body);
}
fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
@ -1780,12 +1759,10 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
fn airLoop(self: *Self, inst: Air.Inst.Index) !void {
// A loop is a setup to be able to jump back to the beginning.
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const loop = self.air.extraData(Air.Block, ty_pl.payload);
const body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[loop.end .. loop.end + loop.data.body_len]);
const block = self.air.unwrapBlock(inst);
const start: u32 = @intCast(self.mir_instructions.len);
try self.genBody(body);
try self.genBody(block.body);
try self.jump(start);
return self.finishAirBookkeeping();
@ -2606,12 +2583,11 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
}
fn airTry(self: *Self, inst: Air.Inst.Index) !void {
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.Try, pl_op.payload);
const body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]);
const unwrapped_try = self.air.unwrapTry(inst);
const body = unwrapped_try.else_body;
const result: MCValue = result: {
const error_union_ty = self.typeOf(pl_op.operand);
const error_union = try self.resolveInst(pl_op.operand);
const error_union_ty = self.air.typeOf(unwrapped_try.error_union, &self.pt.zcu.intern_pool);
const error_union = try self.resolveInst(unwrapped_try.error_union);
const is_err_result = try self.isErr(error_union_ty, error_union);
const reloc = try self.condBr(is_err_result);
@ -2620,7 +2596,7 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void {
try self.performReloc(reloc);
break :result try self.errUnionPayload(error_union, error_union_ty);
};
return self.finishAir(inst, result, .{ pl_op.operand, .none, .none });
return self.finishAir(inst, result, .{ unwrapped_try.error_union, .none, .none });
}
fn airUnaryMath(self: *Self, inst: Air.Inst.Index) !void {

View file

@ -5002,9 +5002,8 @@ fn genStructuredBody(
}
fn airBlock(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
const inst_datas = cg.air.instructions.items(.data);
const extra = cg.air.extraData(Air.Block, inst_datas[@intFromEnum(inst)].ty_pl.payload);
return cg.lowerBlock(inst, @ptrCast(cg.air.extra.items[extra.end..][0..extra.data.body_len]));
const block = cg.air.unwrapBlock(inst);
return cg.lowerBlock(inst, block.body);
}
fn lowerBlock(cg: *CodeGen, inst: Air.Inst.Index, body: []const Air.Inst.Index) !?Id {
@ -5188,11 +5187,10 @@ fn airBr(cg: *CodeGen, inst: Air.Inst.Index) !void {
fn airCondBr(cg: *CodeGen, inst: Air.Inst.Index) !void {
const gpa = cg.module.gpa;
const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const cond_br = cg.air.extraData(Air.CondBr, pl_op.payload);
const then_body: []const Air.Inst.Index = @ptrCast(cg.air.extra.items[cond_br.end..][0..cond_br.data.then_body_len]);
const else_body: []const Air.Inst.Index = @ptrCast(cg.air.extra.items[cond_br.end + then_body.len ..][0..cond_br.data.else_body_len]);
const condition_id = try cg.resolve(pl_op.operand);
const cond_br = cg.air.unwrapCondBr(inst);
const then_body = cond_br.then_body;
const else_body = cond_br.else_body;
const condition_id = try cg.resolve(cond_br.condition);
const then_label = cg.module.allocId();
const else_label = cg.module.allocId();
@ -5251,9 +5249,7 @@ fn airCondBr(cg: *CodeGen, inst: Air.Inst.Index) !void {
fn airLoop(cg: *CodeGen, inst: Air.Inst.Index) !void {
const gpa = cg.module.gpa;
const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const loop = cg.air.extraData(Air.Block, ty_pl.payload);
const body: []const Air.Inst.Index = @ptrCast(cg.air.extra.items[loop.end..][0..loop.data.body_len]);
const block = cg.air.unwrapBlock(inst);
const body_label = cg.module.allocId();
@ -5284,7 +5280,7 @@ fn airLoop(cg: *CodeGen, inst: Air.Inst.Index) !void {
const next_block = try cg.genStructuredBody(.{ .loop = .{
.merge_label = merge_label,
.continue_label = continue_label,
} }, body);
} }, block.body);
try cg.structuredBreak(next_block);
try cg.beginSpvBlock(continue_label);
@ -5294,7 +5290,7 @@ fn airLoop(cg: *CodeGen, inst: Air.Inst.Index) !void {
.unstructured => {
try cg.body.emit(gpa, .OpBranch, .{ .target_label = body_label });
try cg.beginSpvBlock(body_label);
try cg.genBody(body);
try cg.genBody(block.body);
try cg.body.emit(gpa, .OpBranch, .{ .target_label = body_label });
},
@ -5375,12 +5371,11 @@ fn airRetLoad(cg: *CodeGen, inst: Air.Inst.Index) !void {
fn airTry(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
const gpa = cg.module.gpa;
const zcu = cg.module.zcu;
const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const err_union_id = try cg.resolve(pl_op.operand);
const extra = cg.air.extraData(Air.Try, pl_op.payload);
const body: []const Air.Inst.Index = @ptrCast(cg.air.extra.items[extra.end..][0..extra.data.body_len]);
const unwrapped_try = cg.air.unwrapTry(inst);
const body = unwrapped_try.else_body;
const err_union_ty = cg.typeOf(pl_op.operand);
const err_union_id = try cg.resolve(unwrapped_try.error_union);
const err_union_ty = cg.air.typeOf(unwrapped_try.error_union, &zcu.intern_pool);
const payload_ty = cg.typeOfIndex(inst);
const bool_ty_id = try cg.resolveType(.bool, .direct);
@ -5882,12 +5877,11 @@ fn airDbgStmt(cg: *CodeGen, inst: Air.Inst.Index) !void {
fn airDbgInlineBlock(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
const zcu = cg.module.zcu;
const inst_datas = cg.air.instructions.items(.data);
const extra = cg.air.extraData(Air.DbgInlineBlock, inst_datas[@intFromEnum(inst)].ty_pl.payload);
const block = cg.air.unwrapDbgBlock(inst);
const old_base_line = cg.base_line;
defer cg.base_line = old_base_line;
cg.base_line = zcu.navSrcLine(zcu.funcInfo(extra.data.func).owner_nav);
return cg.lowerBlock(inst, @ptrCast(cg.air.extra.items[extra.end..][0..extra.data.body_len]));
cg.base_line = zcu.navSrcLine(zcu.funcInfo(block.func).owner_nav);
return cg.lowerBlock(inst, block.body);
}
fn airDbgVar(cg: *CodeGen, inst: Air.Inst.Index) !void {
@ -5900,52 +5894,34 @@ fn airDbgVar(cg: *CodeGen, inst: Air.Inst.Index) !void {
fn airAssembly(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
const gpa = cg.module.gpa;
const zcu = cg.module.zcu;
const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = cg.air.extraData(Air.Asm, ty_pl.payload);
const unwrapped_asm = cg.air.unwrapAsm(inst);
const is_volatile = extra.data.flags.is_volatile;
const outputs_len = extra.data.flags.outputs_len;
const is_volatile = unwrapped_asm.is_volatile;
const outputs_len = unwrapped_asm.outputs.len;
if (!is_volatile and cg.liveness.isUnused(inst)) return null;
var extra_i: usize = extra.end;
const outputs: []const Air.Inst.Ref = @ptrCast(cg.air.extra.items[extra_i..][0..outputs_len]);
extra_i += outputs.len;
const inputs: []const Air.Inst.Ref = @ptrCast(cg.air.extra.items[extra_i..][0..extra.data.inputs_len]);
extra_i += inputs.len;
if (outputs.len > 1) {
if (outputs_len > 1) {
return cg.todo("implement inline asm with more than 1 output", .{});
}
var ass: Assembler = .{ .cg = cg };
defer ass.deinit();
var output_extra_i = extra_i;
for (outputs) |output| {
if (output != .none) {
var it = unwrapped_asm.iterateOutputs();
while (it.next()) |out| {
if (out.operand != .none) {
return cg.todo("implement inline asm with non-returned output", .{});
}
const extra_bytes = std.mem.sliceAsBytes(cg.air.extra.items[extra_i..]);
const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(cg.air.extra.items[extra_i..]), 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
extra_i += (constraint.len + name.len + (2 + 3)) / 4;
// TODO: Record output and use it somewhere.
}
for (inputs) |input| {
const extra_bytes = std.mem.sliceAsBytes(cg.air.extra.items[extra_i..]);
const constraint = std.mem.sliceTo(extra_bytes, 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
extra_i += (constraint.len + name.len + (2 + 3)) / 4;
it = unwrapped_asm.iterateInputs();
while (it.next()) |in| {
const input_ty = cg.typeOf(in.operand);
const input_ty = cg.typeOf(input);
if (std.mem.eql(u8, constraint, "c")) {
if (std.mem.eql(u8, in.constraint, "c")) {
// constant
const val = (try cg.air.value(input, cg.pt)) orelse {
const val = (try cg.air.value(in.operand, cg.pt)) orelse {
return cg.fail("assembly inputs with 'c' constraint have to be compile-time known", .{});
};
@ -5971,37 +5947,36 @@ fn airAssembly(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
.undef => return cg.fail("assembly input with 'c' constraint cannot be undefined", .{}),
.int => try ass.value_map.put(gpa, name, .{ .constant = @intCast(val.toUnsignedInt(zcu)) }),
.enum_literal => |str| try ass.value_map.put(gpa, name, .{ .string = str.toSlice(ip) }),
.int => try ass.value_map.put(gpa, in.name, .{ .constant = @intCast(val.toUnsignedInt(zcu)) }),
.enum_literal => |str| try ass.value_map.put(gpa, in.name, .{ .string = str.toSlice(ip) }),
else => unreachable, // TODO
}
} else if (std.mem.eql(u8, constraint, "t")) {
} else if (std.mem.eql(u8, in.constraint, "t")) {
// type
if (input_ty.zigTypeTag(zcu) == .type) {
// This assembly input is a type instead of a value.
// That's fine for now, just make sure to resolve it as such.
const val = (try cg.air.value(input, cg.pt)).?;
const val = (try cg.air.value(in.operand, cg.pt)).?;
const ty_id = try cg.resolveType(val.toType(), .direct);
try ass.value_map.put(gpa, name, .{ .ty = ty_id });
try ass.value_map.put(gpa, in.name, .{ .ty = ty_id });
} else {
const ty_id = try cg.resolveType(input_ty, .direct);
try ass.value_map.put(gpa, name, .{ .ty = ty_id });
try ass.value_map.put(gpa, in.name, .{ .ty = ty_id });
}
} else {
if (input_ty.zigTypeTag(zcu) == .type) {
return cg.fail("use the 't' constraint to supply types to SPIR-V inline assembly", .{});
}
const val_id = try cg.resolve(input);
try ass.value_map.put(gpa, name, .{ .value = val_id });
const val_id = try cg.resolve(in.operand);
try ass.value_map.put(gpa, in.name, .{ .value = val_id });
}
}
// TODO: do something with clobbers
_ = extra.data.clobbers;
_ = unwrapped_asm.clobbers;
const asm_source = std.mem.sliceAsBytes(cg.air.extra.items[extra_i..])[0..extra.data.source_len];
const asm_source = unwrapped_asm.source;
ass.assemble(asm_source) catch |err| switch (err) {
error.AssembleFail => {
@ -6033,26 +6008,20 @@ fn airAssembly(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
else => |others| return others,
};
for (outputs) |output| {
_ = output;
const extra_bytes = std.mem.sliceAsBytes(cg.air.extra.items[output_extra_i..]);
const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(cg.air.extra.items[output_extra_i..]), 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
output_extra_i += (constraint.len + name.len + (2 + 3)) / 4;
const result = ass.value_map.get(name) orelse return {
return cg.fail("invalid asm output '{s}'", .{name});
it = unwrapped_asm.iterateOutputs();
while (it.next()) |out| {
const result = ass.value_map.get(out.name) orelse return {
return cg.fail("invalid asm output '{s}'", .{out.name});
};
switch (result) {
.just_declared, .unresolved_forward_reference => unreachable,
.ty => return cg.fail("cannot return spir-v type as value from assembly", .{}),
.value => |ref| return ref,
.constant, .string => return cg.fail("cannot return constant from assembly", .{}),
}
// TODO: Multiple results
// TODO: Check that the output type from assembly is the same as the type actually expected by Zig.
}
return null;
@ -6063,10 +6032,9 @@ fn airCall(cg: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifie
const gpa = cg.module.gpa;
const zcu = cg.module.zcu;
const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = cg.air.extraData(Air.Call, pl_op.payload);
const args: []const Air.Inst.Ref = @ptrCast(cg.air.extra.items[extra.end..][0..extra.data.args_len]);
const callee_ty = cg.typeOf(pl_op.operand);
const air_call = cg.air.unwrapCall(inst);
const args = air_call.args;
const callee_ty = cg.typeOf(air_call.callee);
const zig_fn_ty = switch (callee_ty.zigTypeTag(zcu)) {
.@"fn" => callee_ty,
.pointer => return cg.fail("cannot call function pointers", .{}),
@ -6077,7 +6045,7 @@ fn airCall(cg: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifie
const result_type_id = try cg.resolveFnReturnType(.fromInterned(return_type));
const result_id = cg.module.allocId();
const callee_id = try cg.resolve(pl_op.operand);
const callee_id = try cg.resolve(air_call.callee);
comptime assert(zig_call_abi_ver == 3);

View file

@ -2137,10 +2137,9 @@ fn airRetLoad(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
fn airCall(cg: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) InnerError!void {
if (modifier == .always_tail) return cg.fail("TODO implement tail calls for wasm", .{});
const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = cg.air.extraData(Air.Call, pl_op.payload);
const args: []const Air.Inst.Ref = @ptrCast(cg.air.extra.items[extra.end..][0..extra.data.args_len]);
const ty = cg.typeOf(pl_op.operand);
const call = cg.air.unwrapCall(inst);
const args = call.args;
const ty = cg.typeOf(call.callee);
const pt = cg.pt;
const zcu = pt.zcu;
@ -2155,7 +2154,7 @@ fn airCall(cg: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifie
const first_param_sret = firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), zcu, cg.target);
const callee: ?InternPool.Nav.Index = blk: {
const func_val = (try cg.air.value(pl_op.operand, pt)) orelse break :blk null;
const func_val = (try cg.air.value(call.callee, pt)) orelse break :blk null;
switch (ip.indexToKey(func_val.toIntern())) {
inline .func, .@"extern" => |x| break :blk x.owner_nav,
@ -2189,7 +2188,7 @@ fn airCall(cg: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifie
// in this case we call a function pointer
// so load its value onto the stack
assert(ty.zigTypeTag(zcu) == .pointer);
const operand = try cg.resolveInst(pl_op.operand);
const operand = try cg.resolveInst(call.callee);
try cg.emitWValue(operand);
try cg.mir_func_tys.put(cg.gpa, fn_ty.toIntern(), {});
@ -2233,7 +2232,7 @@ fn airCall(cg: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifie
};
var bt = try cg.iterateBigTomb(inst, 1 + args.len);
bt.feed(pl_op.operand);
bt.feed(call.callee);
for (args) |arg| bt.feed(arg);
return bt.finishAir(result_value);
}
@ -3335,9 +3334,8 @@ fn emitUndefined(cg: *CodeGen, ty: Type) InnerError!WValue {
}
fn airBlock(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = cg.air.extraData(Air.Block, ty_pl.payload);
try cg.lowerBlock(inst, ty_pl.ty.toType(), @ptrCast(cg.air.extra.items[extra.end..][0..extra.data.body_len]));
const block = cg.air.unwrapBlock(inst);
try cg.lowerBlock(inst, block.ty, block.body);
}
fn lowerBlock(cg: *CodeGen, inst: Air.Inst.Index, block_ty: Type, body: []const Air.Inst.Index) InnerError!void {
@ -3381,9 +3379,7 @@ fn endBlock(cg: *CodeGen) !void {
}
fn airLoop(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const loop = cg.air.extraData(Air.Block, ty_pl.payload);
const body: []const Air.Inst.Index = @ptrCast(cg.air.extra.items[loop.end..][0..loop.data.body_len]);
const block = cg.air.unwrapBlock(inst);
// result type of loop is always 'noreturn', meaning we can always
// emit the wasm type 'block_empty'.
@ -3392,18 +3388,17 @@ fn airLoop(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try cg.loops.putNoClobber(cg.gpa, inst, cg.block_depth);
defer assert(cg.loops.remove(inst));
try cg.genBody(body);
try cg.genBody(block.body);
try cg.endBlock();
return cg.finishAir(inst, .none, &.{});
}
fn airCondBr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const condition = try cg.resolveInst(pl_op.operand);
const extra = cg.air.extraData(Air.CondBr, pl_op.payload);
const then_body: []const Air.Inst.Index = @ptrCast(cg.air.extra.items[extra.end..][0..extra.data.then_body_len]);
const else_body: []const Air.Inst.Index = @ptrCast(cg.air.extra.items[extra.end + then_body.len ..][0..extra.data.else_body_len]);
const cond_br = cg.air.unwrapCondBr(inst);
const condition = try cg.resolveInst(cond_br.condition);
const then_body = cond_br.then_body;
const else_body = cond_br.else_body;
const liveness_condbr = cg.liveness.getCondBr(inst);
// result type is always noreturn, so use `block_empty` as type.
@ -6423,10 +6418,9 @@ fn airDbgStmt(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
}
fn airDbgInlineBlock(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = cg.air.extraData(Air.DbgInlineBlock, ty_pl.payload);
const block = cg.air.unwrapDbgBlock(inst);
// TODO
try cg.lowerBlock(inst, ty_pl.ty.toType(), @ptrCast(cg.air.extra.items[extra.end..][0..extra.data.body_len]));
try cg.lowerBlock(inst, block.ty, block.body);
}
fn airDbgVar(
@ -6441,24 +6435,22 @@ fn airDbgVar(
}
fn airTry(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const err_union = try cg.resolveInst(pl_op.operand);
const extra = cg.air.extraData(Air.Try, pl_op.payload);
const body: []const Air.Inst.Index = @ptrCast(cg.air.extra.items[extra.end..][0..extra.data.body_len]);
const err_union_ty = cg.typeOf(pl_op.operand);
const unwrapped_try = cg.air.unwrapTry(inst);
const body = unwrapped_try.else_body;
const err_union = try cg.resolveInst(unwrapped_try.error_union);
const err_union_ty = cg.typeOf(unwrapped_try.error_union);
const result = try lowerTry(cg, inst, err_union, body, err_union_ty, false);
return cg.finishAir(inst, result, &.{pl_op.operand});
return cg.finishAir(inst, result, &.{unwrapped_try.error_union});
}
fn airTryPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const zcu = cg.pt.zcu;
const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = cg.air.extraData(Air.TryPtr, ty_pl.payload);
const err_union_ptr = try cg.resolveInst(extra.data.ptr);
const body: []const Air.Inst.Index = @ptrCast(cg.air.extra.items[extra.end..][0..extra.data.body_len]);
const err_union_ty = cg.typeOf(extra.data.ptr).childType(zcu);
const unwrapped_try = cg.air.unwrapTryPtr(inst);
const err_union_ptr = try cg.resolveInst(unwrapped_try.error_union_ptr);
const body = unwrapped_try.else_body;
const err_union_ty = cg.typeOf(unwrapped_try.error_union_ptr).childType(zcu);
const result = try lowerTry(cg, inst, err_union_ptr, body, err_union_ty, true);
return cg.finishAir(inst, result, &.{extra.data.ptr});
return cg.finishAir(inst, result, &.{unwrapped_try.error_union_ptr});
}
fn lowerTry(

View file

@ -67348,21 +67348,19 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
},
.bitcast => try cg.airBitCast(inst),
.block => {
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
const block = cg.air.extraData(Air.Block, ty_pl.payload);
const block = cg.air.unwrapBlock(inst);
if (!cg.mod.strip) try cg.asmPseudo(.pseudo_dbg_enter_block_none);
try cg.lowerBlock(inst, @ptrCast(cg.air.extra.items[block.end..][0..block.data.body_len]));
try cg.lowerBlock(inst, block.body);
if (!cg.mod.strip) try cg.asmPseudo(.pseudo_dbg_leave_block_none);
},
.loop => {
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
const block = cg.air.extraData(Air.Block, ty_pl.payload);
const block = cg.air.unwrapBlock(inst);
try cg.loops.putNoClobber(cg.gpa, inst, .{
.state = try cg.saveState(),
.target = @intCast(cg.mir_instructions.len),
});
defer assert(cg.loops.remove(inst));
try cg.genBodyBlock(@ptrCast(cg.air.extra.items[block.end..][0..block.data.body_len]));
try cg.genBodyBlock(block.body);
},
.repeat => {
const repeat = air_datas[@intFromEnum(inst)].repeat;
@ -89048,17 +89046,16 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
try cg.asmOpOnly(.{ ._, .nop });
},
.dbg_inline_block => {
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
const dbg_inline_block = cg.air.extraData(Air.DbgInlineBlock, ty_pl.payload);
const dbg_inline_block = cg.air.unwrapDbgBlock(inst);
const old_inline_func = cg.inline_func;
defer cg.inline_func = old_inline_func;
cg.inline_func = dbg_inline_block.data.func;
cg.inline_func = dbg_inline_block.func;
if (!cg.mod.strip) _ = try cg.addInst(.{
.tag = .pseudo,
.ops = .pseudo_dbg_enter_inline_func,
.data = .{ .ip_index = dbg_inline_block.data.func },
.data = .{ .ip_index = dbg_inline_block.func },
});
try cg.lowerBlock(inst, @ptrCast(cg.air.extra.items[dbg_inline_block.end..][0..dbg_inline_block.data.body_len]));
try cg.lowerBlock(inst, dbg_inline_block.body);
if (!cg.mod.strip) _ = try cg.addInst(.{
.tag = .pseudo,
.ops = .pseudo_dbg_leave_inline_func,
@ -175916,10 +175913,8 @@ fn genLocalDebugInfo(cg: *CodeGen, air_tag: Air.Inst.Tag, ty: Type, mcv: MCValue
fn airCall(self: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifier, opts: CopyOptions) !void {
if (modifier == .always_tail) return self.fail("TODO implement tail calls for x86_64", .{});
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.Call, pl_op.payload);
const arg_refs: []const Air.Inst.Ref =
@ptrCast(self.air.extra.items[extra.end..][0..extra.data.args_len]);
const call = self.air.unwrapCall(inst);
const arg_refs = call.args;
const ExpectedContents = extern struct {
tys: [32][@sizeOf(Type)]u8 align(@alignOf(Type)),
@ -175937,10 +175932,10 @@ fn airCall(self: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
defer allocator.free(arg_vals);
for (arg_vals, arg_refs) |*arg_val, arg_ref| arg_val.* = .{ .air_ref = arg_ref };
const ret = try self.genCall(.{ .air = pl_op.operand }, arg_tys, arg_vals, opts);
const ret = try self.genCall(.{ .air = call.callee }, arg_tys, arg_vals, opts);
var bt = self.liveness.iterateBigTomb(inst);
try self.feed(&bt, pl_op.operand);
try self.feed(&bt, call.callee);
for (arg_refs) |arg_ref| try self.feed(&bt, arg_ref);
const result = if (self.liveness.isUnused(inst)) .unreach else ret;
@ -176300,20 +176295,18 @@ fn airRetLoad(self: *CodeGen, inst: Air.Inst.Index) !void {
}
fn airTry(self: *CodeGen, inst: Air.Inst.Index) !void {
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = self.air.extraData(Air.Try, pl_op.payload);
const body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]);
const operand_ty = self.typeOf(pl_op.operand);
const result = try self.genTry(inst, pl_op.operand, body, operand_ty, false);
const unwrapped_try = self.air.unwrapTry(inst);
const body = unwrapped_try.else_body;
const operand_ty = self.typeOf(unwrapped_try.error_union);
const result = try self.genTry(inst, unwrapped_try.error_union, body, operand_ty, false);
return self.finishAir(inst, result, .{ .none, .none, .none });
}
fn airTryPtr(self: *CodeGen, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.TryPtr, ty_pl.payload);
const body: []const Air.Inst.Index = @ptrCast(self.air.extra.items[extra.end..][0..extra.data.body_len]);
const operand_ty = self.typeOf(extra.data.ptr);
const result = try self.genTry(inst, extra.data.ptr, body, operand_ty, true);
const unwrapped_try = self.air.unwrapTryPtr(inst);
const body = unwrapped_try.else_body;
const operand_ty = self.typeOf(unwrapped_try.error_union_ptr);
const result = try self.genTry(inst, unwrapped_try.error_union_ptr, body, operand_ty, true);
return self.finishAir(inst, result, .{ .none, .none, .none });
}
@ -176391,21 +176384,20 @@ fn genCondBrMir(self: *CodeGen, ty: Type, mcv: MCValue) !Mir.Inst.Index {
}
fn airCondBr(self: *CodeGen, inst: Air.Inst.Index) !void {
const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const cond = try self.resolveInst(pl_op.operand);
const cond_ty = self.typeOf(pl_op.operand);
const extra = self.air.extraData(Air.CondBr, pl_op.payload);
const then_body: []const Air.Inst.Index =
@ptrCast(self.air.extra.items[extra.end..][0..extra.data.then_body_len]);
const else_body: []const Air.Inst.Index =
@ptrCast(self.air.extra.items[extra.end + then_body.len ..][0..extra.data.else_body_len]);
const cond_br = self.air.unwrapCondBr(inst);
const then_body = cond_br.then_body;
const else_body = cond_br.else_body;
const cond = try self.resolveInst(cond_br.condition);
const cond_ty = self.typeOf(cond_br.condition);
const liveness_cond_br = self.liveness.getCondBr(inst);
// If the condition dies here in this condbr instruction, process
// that death now instead of later as this has an effect on
// whether it needs to be spilled in the branches
if (self.liveness.operandDies(inst, 0)) {
if (pl_op.operand.toIndex()) |op_inst| try self.processDeath(op_inst, .{});
if (cond_br.condition.toIndex()) |op_inst| try self.processDeath(op_inst, .{});
}
const state = try self.saveState();
@ -177124,14 +177116,10 @@ fn airAsm(self: *CodeGen, inst: Air.Inst.Index) !void {
@setEvalBranchQuota(1_100);
const pt = self.pt;
const zcu = pt.zcu;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.Asm, ty_pl.payload);
const outputs_len = extra.data.flags.outputs_len;
var extra_i: usize = extra.end;
const outputs: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[extra_i..][0..outputs_len]);
extra_i += outputs.len;
const inputs: []const Air.Inst.Ref = @ptrCast(self.air.extra.items[extra_i..][0..extra.data.inputs_len]);
extra_i += inputs.len;
const unwrapped_asm = self.air.unwrapAsm(inst);
const outputs = unwrapped_asm.outputs;
const inputs = unwrapped_asm.inputs;
var result: MCValue = .none;
var args: std.array_list.Managed(MCValue) = .init(self.gpa);
@ -177146,36 +177134,29 @@ fn airAsm(self: *CodeGen, inst: Air.Inst.Index) !void {
try arg_map.ensureTotalCapacity(@intCast(outputs.len + inputs.len));
defer arg_map.deinit();
var outputs_extra_i = extra_i;
for (outputs) |output| {
const extra_bytes = std.mem.sliceAsBytes(self.air.extra.items[extra_i..]);
const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra.items[extra_i..]), 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
extra_i += (constraint.len + name.len + (2 + 3)) / 4;
const maybe_inst = switch (output) {
var it = unwrapped_asm.iterateOutputs();
while (it.next()) |out| {
const maybe_inst = switch (out.operand) {
.none => inst,
else => null,
};
const ty = switch (output) {
const ty = switch (out.operand) {
.none => self.typeOfIndex(inst),
else => self.typeOf(output).childType(zcu),
else => self.typeOf(out.operand).childType(zcu),
};
const is_read = switch (constraint[0]) {
const is_read = switch (out.constraint[0]) {
'=' => false,
'+' => read: {
if (output == .none) return self.fail(
if (out.operand == .none) return self.fail(
"read-write constraint unsupported for asm result: '{s}'",
.{constraint},
.{out.constraint},
);
break :read true;
},
else => return self.fail("invalid constraint: '{s}'", .{constraint}),
else => return self.fail("invalid constraint: '{s}'", .{out.constraint}),
};
const is_early_clobber = constraint[1] == '&';
const rest = constraint[@as(usize, 1) + @intFromBool(is_early_clobber) ..];
const is_early_clobber = out.constraint[1] == '&';
const rest = out.constraint[@as(usize, 1) + @intFromBool(is_early_clobber) ..];
const arg_mcv: MCValue = arg_mcv: {
const arg_maybe_reg: ?Register = if (std.mem.eql(u8, rest, "r") or
std.mem.eql(u8, rest, "f") or std.mem.eql(u8, rest, "x"))
@ -177189,30 +177170,30 @@ fn airAsm(self: *CodeGen, inst: Air.Inst.Index) !void {
@intCast(ty.abiSize(zcu)),
)
else if (std.mem.eql(u8, rest, "m"))
if (output != .none) null else return self.fail(
if (out.operand != .none) null else return self.fail(
"memory constraint unsupported for asm result: '{s}'",
.{constraint},
.{out.constraint},
)
else if (std.mem.eql(u8, rest, "g") or
std.mem.eql(u8, rest, "rm") or std.mem.eql(u8, rest, "mr") or
std.mem.eql(u8, rest, "r,m") or std.mem.eql(u8, rest, "m,r"))
self.register_manager.tryAllocReg(maybe_inst, abi.RegisterClass.gp) orelse
if (output != .none)
if (out.operand != .none)
null
else
return self.fail("ran out of registers lowering inline asm", .{})
else if (std.mem.startsWith(u8, rest, "{") and std.mem.endsWith(u8, rest, "}"))
parseRegName(rest["{".len .. rest.len - "}".len]) orelse
return self.fail("invalid register constraint: '{s}'", .{constraint})
return self.fail("invalid register constraint: '{s}'", .{out.constraint})
else if (rest.len == 1 and std.ascii.isDigit(rest[0])) {
const index = std.fmt.charToDigit(rest[0], 10) catch unreachable;
if (index >= args.items.len) return self.fail("constraint out of bounds: '{s}'", .{
constraint,
out.constraint,
});
break :arg_mcv args.items[index];
} else return self.fail("invalid constraint: '{s}'", .{constraint});
} else return self.fail("invalid constraint: '{s}'", .{out.constraint});
break :arg_mcv if (arg_maybe_reg) |reg| .{ .register = reg } else arg: {
const ptr_mcv = try self.resolveInst(output);
const ptr_mcv = try self.resolveInst(out.operand);
switch (ptr_mcv) {
.immediate => |addr| if (std.math.cast(i32, @as(i64, @bitCast(addr)))) |_|
break :arg ptr_mcv.deref(),
@ -177223,30 +177204,24 @@ fn airAsm(self: *CodeGen, inst: Air.Inst.Index) !void {
};
};
if (arg_mcv.getReg()) |reg| if (RegisterManager.indexOfRegIntoTracked(reg)) |tracked_index| {
try self.register_manager.getRegIndex(tracked_index, if (output == .none) inst else null);
try self.register_manager.getRegIndex(tracked_index, if (out.operand == .none) inst else null);
_ = self.register_manager.lockRegIndexAssumeUnused(tracked_index);
};
if (!std.mem.eql(u8, name, "_"))
arg_map.putAssumeCapacityNoClobber(name, @intCast(args.items.len));
if (!std.mem.eql(u8, out.name, "_"))
arg_map.putAssumeCapacityNoClobber(out.name, @intCast(args.items.len));
args.appendAssumeCapacity(arg_mcv);
if (output == .none) result = arg_mcv;
if (is_read) try self.load(arg_mcv, self.typeOf(output), .{ .air_ref = output });
if (out.operand == .none) result = arg_mcv;
if (is_read) try self.load(arg_mcv, self.typeOf(out.operand), .{ .air_ref = out.operand });
}
for (inputs) |input| {
const input_bytes = std.mem.sliceAsBytes(self.air.extra.items[extra_i..]);
const constraint = std.mem.sliceTo(input_bytes, 0);
const name = std.mem.sliceTo(input_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
extra_i += (constraint.len + name.len + (2 + 3)) / 4;
const ty = self.typeOf(input);
const input_mcv = try self.resolveInst(input);
const arg_mcv: MCValue = if (std.mem.eql(u8, constraint, "r") or
std.mem.eql(u8, constraint, "f") or std.mem.eql(u8, constraint, "x"))
it = unwrapped_asm.iterateInputs();
while (it.next()) |in| {
const ty = self.typeOf(in.operand);
const input_mcv = try self.resolveInst(in.operand);
const arg_mcv: MCValue = if (std.mem.eql(u8, in.constraint, "r") or
std.mem.eql(u8, in.constraint, "f") or std.mem.eql(u8, in.constraint, "x"))
arg: {
const rc = switch (constraint[0]) {
const rc = switch (in.constraint[0]) {
'r' => abi.RegisterClass.gp,
'f' => abi.RegisterClass.x87,
'x' => abi.RegisterClass.sse,
@ -177258,14 +177233,14 @@ fn airAsm(self: *CodeGen, inst: Air.Inst.Index) !void {
const reg = try self.register_manager.allocReg(null, rc);
try self.genSetReg(reg, ty, input_mcv, .{});
break :arg .{ .register = registerAlias(reg, @intCast(ty.abiSize(zcu))) };
} else if (std.mem.eql(u8, constraint, "i") or std.mem.eql(u8, constraint, "n"))
} else if (std.mem.eql(u8, in.constraint, "i") or std.mem.eql(u8, in.constraint, "n"))
switch (input_mcv) {
.immediate => |imm| .{ .immediate = imm },
else => return self.fail("immediate operand requires comptime value: '{s}'", .{
constraint,
in.constraint,
}),
}
else if (std.mem.eql(u8, constraint, "m")) arg: {
else if (std.mem.eql(u8, in.constraint, "m")) arg: {
switch (input_mcv) {
.memory => |addr| if (std.math.cast(i32, @as(i64, @bitCast(addr)))) |_|
break :arg input_mcv,
@ -177284,9 +177259,9 @@ fn airAsm(self: *CodeGen, inst: Air.Inst.Index) !void {
};
try self.genSetReg(addr_reg, .usize, input_mcv.address(), .{});
break :arg .{ .indirect = .{ .reg = addr_reg } };
} else if (std.mem.eql(u8, constraint, "g") or
std.mem.eql(u8, constraint, "rm") or std.mem.eql(u8, constraint, "mr") or
std.mem.eql(u8, constraint, "r,m") or std.mem.eql(u8, constraint, "m,r"))
} else if (std.mem.eql(u8, in.constraint, "g") or
std.mem.eql(u8, in.constraint, "rm") or std.mem.eql(u8, in.constraint, "mr") or
std.mem.eql(u8, in.constraint, "r,m") or std.mem.eql(u8, in.constraint, "m,r"))
arg: {
switch (input_mcv) {
.register, .indirect, .load_frame => break :arg input_mcv,
@ -177297,30 +177272,30 @@ fn airAsm(self: *CodeGen, inst: Air.Inst.Index) !void {
const temp_mcv = try self.allocTempRegOrMem(ty, true);
try self.genCopy(ty, temp_mcv, input_mcv, .{});
break :arg temp_mcv;
} else if (std.mem.eql(u8, constraint, "X"))
} else if (std.mem.eql(u8, in.constraint, "X"))
input_mcv
else if (std.mem.startsWith(u8, constraint, "{") and std.mem.endsWith(u8, constraint, "}")) arg: {
const reg = parseRegName(constraint["{".len .. constraint.len - "}".len]) orelse
return self.fail("invalid register constraint: '{s}'", .{constraint});
else if (std.mem.startsWith(u8, in.constraint, "{") and std.mem.endsWith(u8, in.constraint, "}")) arg: {
const reg = parseRegName(in.constraint["{".len .. in.constraint.len - "}".len]) orelse
return self.fail("invalid register constraint: '{s}'", .{in.constraint});
try self.register_manager.getReg(reg, null);
try self.genSetReg(reg, ty, input_mcv, .{});
break :arg .{ .register = reg };
} else if (constraint.len == 1 and std.ascii.isDigit(constraint[0])) arg: {
const index = std.fmt.charToDigit(constraint[0], 10) catch unreachable;
if (index >= args.items.len) return self.fail("constraint out of bounds: '{s}'", .{constraint});
} else if (in.constraint.len == 1 and std.ascii.isDigit(in.constraint[0])) arg: {
const index = std.fmt.charToDigit(in.constraint[0], 10) catch unreachable;
if (index >= args.items.len) return self.fail("constraint out of bounds: '{s}'", .{in.constraint});
try self.genCopy(ty, args.items[index], input_mcv, .{});
break :arg args.items[index];
} else return self.fail("invalid constraint: '{s}'", .{constraint});
} else return self.fail("invalid constraint: '{s}'", .{in.constraint});
if (arg_mcv.getReg()) |reg| if (RegisterManager.indexOfRegIntoTracked(reg)) |_| {
_ = self.register_manager.lockReg(reg);
};
if (!std.mem.eql(u8, name, "_"))
arg_map.putAssumeCapacityNoClobber(name, @intCast(args.items.len));
if (!std.mem.eql(u8, in.name, "_"))
arg_map.putAssumeCapacityNoClobber(in.name, @intCast(args.items.len));
args.appendAssumeCapacity(arg_mcv);
}
const ip = &zcu.intern_pool;
const aggregate = ip.indexToKey(extra.data.clobbers).aggregate;
const aggregate = ip.indexToKey(unwrapped_asm.clobbers).aggregate;
const struct_type: Type = .fromInterned(aggregate.ty);
switch (aggregate.storage) {
.elems => |elems| for (elems, 0..) |elem, i| switch (elem) {
@ -177390,7 +177365,7 @@ fn airAsm(self: *CodeGen, inst: Air.Inst.Index) !void {
labels.deinit(self.gpa);
}
const asm_source = std.mem.sliceAsBytes(self.air.extra.items[extra_i..])[0..extra.data.source_len];
const asm_source = unwrapped_asm.source;
var line_it = std.mem.tokenizeAny(u8, asm_source, "\n\r;");
next_line: while (line_it.next()) |line| {
var mnem_it = std.mem.tokenizeAny(u8, line, " \t");
@ -177821,19 +177796,13 @@ fn airAsm(self: *CodeGen, inst: Air.Inst.Index) !void {
while (label_it.next()) |label| if (label.value_ptr.pending_relocs.items.len > 0)
return self.fail("undefined label: '{s}'", .{label.key_ptr.*});
for (outputs, args.items[0..outputs.len]) |output, arg_mcv| {
const extra_bytes = std.mem.sliceAsBytes(self.air.extra.items[outputs_extra_i..]);
const constraint =
std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra.items[outputs_extra_i..]), 0);
const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
// This equation accounts for the fact that even if we have exactly 4 bytes
// for the string, we still use the next u32 for the null terminator.
outputs_extra_i += (constraint.len + name.len + (2 + 3)) / 4;
if (output == .none) continue;
it = unwrapped_asm.iterateOutputs();
while (it.next()) |out| {
const arg_mcv = args.items[it.current - 1];
if (out.operand == .none) continue;
if (arg_mcv != .register) continue;
if (constraint.len == 2 and std.ascii.isDigit(constraint[1])) continue;
try self.store(self.typeOf(output), .{ .air_ref = output }, arg_mcv, .{});
if (out.constraint.len == 2 and std.ascii.isDigit(out.constraint[1])) continue;
try self.store(self.typeOf(out.operand), .{ .air_ref = out.operand }, arg_mcv, .{});
}
simple: {