IoUring: update to new Io APIs

Jacob Young 2026-01-06 05:10:33 -05:00
parent b48599c549
commit a28d57292f
19 changed files with 5912 additions and 1210 deletions


@ -143,6 +143,7 @@ int main(int argc, char **argv) {
"pub const skip_non_native = false;\n"
"pub const debug_gpa = false;\n"
"pub const dev = .core;\n"
"pub const io_mode: enum { threaded, evented } = .threaded;\n"
"pub const value_interpret_mode = .direct;\n"
, zig_version);
if (written < 100)


@ -13,6 +13,7 @@ const DevEnv = @import("src/dev.zig").Env;
const zig_version: std.SemanticVersion = .{ .major = 0, .minor = 16, .patch = 0 };
const stack_size = 46 * 1024 * 1024;
const IoMode = enum { threaded, evented };
const ValueInterpretMode = enum { direct, by_name };
pub fn build(b: *std.Build) !void {
@ -188,6 +189,7 @@ pub fn build(b: *std.Build) !void {
const strip = b.option(bool, "strip", "Omit debug information");
const valgrind = b.option(bool, "valgrind", "Enable valgrind integration");
const pie = b.option(bool, "pie", "Produce a Position Independent Executable");
const io_mode = b.option(IoMode, "io-mode", "How the compiler performs IO") orelse .threaded;
const value_interpret_mode = b.option(ValueInterpretMode, "value-interpret-mode", "How the compiler translates between 'std.builtin' types and its internal datastructures") orelse .direct;
const value_tracing = b.option(bool, "value-tracing", "Enable extra state tracking to help troubleshoot bugs in the compiler (using the std.debug.Trace API)") orelse false;
@ -236,6 +238,7 @@ pub fn build(b: *std.Build) !void {
exe_options.addOption(bool, "llvm_has_xtensa", llvm_has_xtensa);
exe_options.addOption(bool, "debug_gpa", debug_gpa);
exe_options.addOption(DevEnv, "dev", b.option(DevEnv, "dev", "Build a compiler with a reduced feature set for development of specific features") orelse if (only_c) .bootstrap else .full);
exe_options.addOption(IoMode, "io_mode", io_mode);
exe_options.addOption(ValueInterpretMode, "value_interpret_mode", value_interpret_mode);
if (link_libc) {
@ -710,6 +713,7 @@ fn addWasiUpdateStep(b: *std.Build, version: [:0]const u8) !void {
exe_options.addOption(u32, "tracy_callstack_depth", 0);
exe_options.addOption(bool, "value_tracing", false);
exe_options.addOption(DevEnv, "dev", .bootstrap);
exe_options.addOption(IoMode, "io_mode", .threaded);
// zig1 chooses to interpret values by name. The tradeoff is as follows:
//
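For reference, a minimal sketch of how code can branch on the new option once `exe_options` exposes it. `@import("build_options")` is the module name used by the InternPool hunk later in this commit; the `is_evented` constant itself is illustrative only:

const build_options = @import("build_options");

// Comptime-known branch on the build option; the same switch shape
// appears in InternPool.zig and crash_report.zig below.
const is_evented = switch (build_options.io_mode) {
    .threaded => false,
    .evented => true,
};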


@ -378,7 +378,9 @@ pub const Operation = union(enum) {
pub const Pending = struct {
node: List.DoubleNode,
tag: Tag,
context: [3]usize,
context: Context align(@max(@alignOf(usize), 4)),
pub const Context = [3]usize;
};
pub const Completion = struct {
@ -426,10 +428,10 @@ pub fn operate(io: Io, operation: Operation) Cancelable!Operation.Result {
pub const Batch = struct {
storage: []Operation.Storage,
unused: Operation.List,
submissions: Operation.List,
submitted: Operation.List,
pending: Operation.List,
completions: Operation.List,
context: ?*anyopaque,
completed: Operation.List,
context: ?*anyopaque align(@max(@alignOf(?*anyopaque), 4)),
/// After calling this, it is safe to unconditionally defer a call to
/// `cancel`. `storage` is a pre-allocated buffer of undefined memory that
@ -448,9 +450,9 @@ pub const Batch = struct {
.head = .fromIndex(0),
.tail = .fromIndex(storage.len - 1),
},
.submissions = .empty,
.submitted = .empty,
.pending = .empty,
.completions = .empty,
.completed = .empty,
.context = null,
};
}
@ -471,20 +473,20 @@ pub const Batch = struct {
const storage = &b.storage[index];
const unused = storage.unused;
switch (unused.prev) {
.none => b.unused.head = .none,
.none => b.unused.head = unused.next,
else => |prev_index| b.storage[prev_index.toIndex()].unused.next = unused.next,
}
switch (unused.next) {
.none => b.unused.tail = .none,
.none => b.unused.tail = unused.prev,
else => |next_index| b.storage[next_index.toIndex()].unused.prev = unused.prev,
}
switch (b.submissions.tail) {
.none => b.submissions.head = .fromIndex(index),
switch (b.submitted.tail) {
.none => b.submitted.head = .fromIndex(index),
else => |tail_index| b.storage[tail_index.toIndex()].submission.node.next = .fromIndex(index),
}
storage.* = .{ .submission = .{ .node = .{ .next = .none }, .operation = operation } };
b.submissions.tail = .fromIndex(index);
b.submitted.tail = .fromIndex(index);
}
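The `unused` list fix above is the classic unlink step for an index-based doubly linked list: when the removed node is the head, the head must advance to the node's successor rather than being cleared. A generic sketch, where `List` and `Node` are illustrative stand-ins for `Operation.List` and the `unused` links, not the real declarations:

fn unlink(list: *List, nodes: []Node, removed: u32) void {
    const node = nodes[removed];
    switch (node.prev) {
        // Removed node was the head: advance to its successor, which may
        // be a real node, so clearing to .none (the old code) was wrong.
        .none => list.head = node.next,
        else => |prev| nodes[prev.toIndex()].next = node.next,
    }
    switch (node.next) {
        // Symmetrically, the tail retreats to the predecessor.
        .none => list.tail = node.prev,
        else => |next| nodes[next.toIndex()].prev = node.prev,
    }
}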
pub const Completion = struct {
@ -501,13 +503,13 @@ pub const Batch = struct {
/// Each completion returned from this function dequeues from the `Batch`.
/// It is not required to dequeue all completions before awaiting again.
pub fn next(b: *Batch) ?Completion {
const index = b.completions.head;
const index = b.completed.head;
if (index == .none) return null;
const storage = &b.storage[index.toIndex()];
const completion = storage.completion;
const next_index = completion.node.next;
b.completions.head = next_index;
if (next_index == .none) b.completions.tail = .none;
b.completed.head = next_index;
if (next_index == .none) b.completed.tail = .none;
const tail_index = b.unused.tail;
switch (tail_index) {
@ -551,7 +553,27 @@ pub const Batch = struct {
/// may have successfully completed regardless of the cancel request and
/// will appear in the iteration.
pub fn cancel(b: *Batch, io: Io) void {
return io.vtable.batchCancel(io.userdata, b);
{ // abort operations that were submitted but not yet started
var tail_index = b.unused.tail;
defer b.unused.tail = tail_index;
var index = b.submitted.head;
errdefer b.submitted.head = index;
while (index != .none) {
const next_index = b.storage[index.toIndex()].submission.node.next;
switch (tail_index) {
.none => b.unused.head = index,
else => b.storage[tail_index.toIndex()].unused.next = index,
}
b.storage[index.toIndex()] = .{ .unused = .{ .prev = tail_index, .next = .none } };
tail_index = index;
index = next_index;
}
b.submitted = .{ .head = .none, .tail = .none };
}
io.vtable.batchCancel(io.userdata, b);
assert(b.submitted.head == .none and b.submitted.tail == .none);
assert(b.pending.head == .none and b.pending.tail == .none);
assert(b.context == null); // that was the last chance to deallocate resources
}
};
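Putting the renamed lists together, a hedged sketch of the lifecycle the doc comments above describe. `init`, `next`, and `cancel` appear in this diff; the await step is elided because only the vtable entry points (`batchAwaitAsync`, `batchAwaitConcurrent`) are visible here, and the exact `init` signature is inferred from the field initialization shown above. Assumes an `io: Io` in scope:

var storage: [8]Io.Operation.Storage = undefined;
var batch: Io.Batch = .init(&storage);
// Per the doc comment on init, deferring cancel unconditionally is safe.
defer batch.cancel(io);
// ... enqueue operations: they accumulate on the `submitted` list ...
// ... await via the Io implementation: results move to `completed` ...
while (batch.next()) |completion| {
    // Each returned completion is dequeued; draining fully before the
    // next await is optional.
    _ = completion;
}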
@ -1117,13 +1139,13 @@ pub fn recancel(io: Io) void {
/// To modify a task's cancel protection state, see `swapCancelProtection`.
///
/// For a description of cancelation and cancelation points, see `Future.cancel`.
pub const CancelProtection = enum {
pub const CancelProtection = enum(u1) {
/// Any call to an `Io` function with `error.Canceled` in its error set is a cancelation point.
///
/// This is the default state, which all tasks are created in.
unblocked,
unblocked = 0,
/// No `Io` function introduces a cancelation point (`error.Canceled` will never be returned).
blocked,
blocked = 1,
};
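A short usage sketch, assuming `swapCancelProtection` takes the new state and returns the previous one (the function is referenced above but its signature is not shown in this hunk):

// Enter a region that must not observe error.Canceled.
const prev = io.swapCancelProtection(.blocked);
defer _ = io.swapCancelProtection(prev);
// Io calls here behave as if no cancel request exists.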
/// Updates the current task's cancel protection state (see `CancelProtection`).
///
@ -1292,8 +1314,7 @@ pub fn futexWake(io: Io, comptime T: type, ptr: *align(@alignOf(u32)) const T, m
/// shared region of code known as the "critical section".
///
/// Mutex is an extern struct so that it may be used as a field inside another
/// extern struct. Having a guaranteed memory layout including mutexes is
/// important for IPC over shared memory (mmap).
/// extern struct.
pub const Mutex = extern struct {
state: std.atomic.Value(State),


@ -477,12 +477,17 @@ pub const Permissions = std.Options.FilePermissions orelse if (is_windows) enum(
/// libc implementations use `0o666` inside `fopen` and then rely on the
/// process-scoped "umask" setting to adjust this number for file creation.
default_file = 0o666,
default_dir = 0o755,
executable_file = 0o777,
/// This is the default mode passed to POSIX operating systems when creating
/// directories. `0o777` is "drwxrwxrwx", which is counter-intuitive at first,
/// since most people would expect "drwxr-xr-x" (`0o755`), which is what
/// `mkdir` typically produces under the default `0o022` umask.
default_dir = 0o777,
_,
pub const has_executable_bit = native_os != .wasi;
pub const executable_file: @This() = .default_dir;
pub fn toMode(self: @This()) std.posix.mode_t {
return @intFromEnum(self);
}
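Illustrative arithmetic only: the enum's integer value is the POSIX mode, so conversion is a plain `@intFromEnum`, and the process umask (commonly `0o022`) is what turns the `0o777` directory default into the familiar `0o755` on disk:

const dir_mode = Io.File.Permissions.default_dir.toMode(); // 0o777
// With umask 0o022 the kernel creates 0o777 & ~0o022 == 0o755.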

File diff suppressed because it is too large


@ -78,7 +78,7 @@ null_file: NullFile = .{},
random_file: RandomFile = .{},
pipe_file: PipeFile = .{},
csprng: Csprng = .{},
csprng: Csprng = .uninitialized,
system_basic_information: SystemBasicInformation = .{},
@ -88,10 +88,12 @@ const SystemBasicInformation = if (!is_windows) struct {} else struct {
};
pub const Csprng = struct {
rng: std.Random.DefaultCsprng = .{
rng: std.Random.DefaultCsprng,
pub const uninitialized: Csprng = .{ .rng = .{
.state = undefined,
.offset = std.math.maxInt(usize),
},
} };
pub const seed_len = std.Random.DefaultCsprng.secret_seed_length;
@ -120,7 +122,7 @@ pub const Argv0 = switch (native_os) {
},
};
const Environ = struct {
pub const Environ = struct {
/// Unmodified data directly from the OS.
process_environ: process.Environ,
/// Protected by `mutex`. Determines whether the other fields have been
@ -157,6 +159,127 @@ const Environ = struct {
HOME: ?[:0]const u8 = null,
},
};
pub fn scan(environ: *Environ, allocator: std.mem.Allocator) void {
if (environ.initialized) return;
environ.initialized = true;
if (is_windows) {
// This value expires with any call that modifies the environment,
// which is outside of this Io implementation's control, so references
// must be short-lived.
const peb = windows.peb();
assert(windows.ntdll.RtlEnterCriticalSection(peb.FastPebLock) == .SUCCESS);
defer assert(windows.ntdll.RtlLeaveCriticalSection(peb.FastPebLock) == .SUCCESS);
const ptr = peb.ProcessParameters.Environment;
var i: usize = 0;
while (ptr[i] != 0) {
// There are some special environment variables that start with =,
// so we need a special case to not treat = as a key/value separator
// if it's the first character.
// https://devblogs.microsoft.com/oldnewthing/20100506-00/?p=14133
const key_start = i;
if (ptr[i] == '=') i += 1;
while (ptr[i] != 0 and ptr[i] != '=') : (i += 1) {}
const key_w = ptr[key_start..i];
const value_start = i + 1;
while (ptr[i] != 0) : (i += 1) {} // skip over '=' and value
const value_w = ptr[value_start..i];
i += 1; // skip over null byte
if (windows.eqlIgnoreCaseWtf16(key_w, &.{ 'N', 'O', '_', 'C', 'O', 'L', 'O', 'R' })) {
environ.exist.NO_COLOR = true;
} else if (windows.eqlIgnoreCaseWtf16(key_w, &.{ 'C', 'L', 'I', 'C', 'O', 'L', 'O', 'R', '_', 'F', 'O', 'R', 'C', 'E' })) {
environ.exist.CLICOLOR_FORCE = true;
} else if (windows.eqlIgnoreCaseWtf16(key_w, &.{ 'Z', 'I', 'G', '_', 'P', 'R', 'O', 'G', 'R', 'E', 'S', 'S' })) {
environ.zig_progress_file = file: {
var value_buf: [std.fmt.count("{d}", .{std.math.maxInt(usize)})]u8 = undefined;
const len = std.unicode.calcWtf8Len(value_w);
if (len > value_buf.len) break :file error.UnrecognizedFormat;
assert(std.unicode.wtf16LeToWtf8(&value_buf, value_w) == len);
break :file .{
.handle = @ptrFromInt(std.fmt.parseInt(usize, value_buf[0..len], 10) catch
break :file error.UnrecognizedFormat),
.flags = .{ .nonblocking = true },
};
};
}
comptime assert(@sizeOf(String) == 0);
}
} else if (native_os == .wasi and !builtin.link_libc) {
var environ_size: usize = undefined;
var environ_buf_size: usize = undefined;
switch (std.os.wasi.environ_sizes_get(&environ_size, &environ_buf_size)) {
.SUCCESS => {},
else => |err| {
environ.err = posix.unexpectedErrno(err);
return;
},
}
if (environ_size == 0) return;
const wasi_environ = allocator.alloc([*:0]u8, environ_size) catch |err| {
environ.err = err;
return;
};
defer allocator.free(wasi_environ);
const wasi_environ_buf = allocator.alloc(u8, environ_buf_size) catch |err| {
environ.err = err;
return;
};
defer allocator.free(wasi_environ_buf);
switch (std.os.wasi.environ_get(wasi_environ.ptr, wasi_environ_buf.ptr)) {
.SUCCESS => {},
else => |err| {
environ.err = posix.unexpectedErrno(err);
return;
},
}
for (wasi_environ) |env| {
const pair = std.mem.sliceTo(env, 0);
var parts = std.mem.splitScalar(u8, pair, '=');
const key = parts.first();
if (std.mem.eql(u8, key, "NO_COLOR")) {
environ.exist.NO_COLOR = true;
} else if (std.mem.eql(u8, key, "CLICOLOR_FORCE")) {
environ.exist.CLICOLOR_FORCE = true;
}
comptime assert(@sizeOf(String) == 0);
}
} else {
for (environ.process_environ.block.slice) |opt_entry| {
const entry = opt_entry.?;
var entry_i: usize = 0;
while (entry[entry_i] != 0 and entry[entry_i] != '=') : (entry_i += 1) {}
const key = entry[0..entry_i];
var end_i: usize = entry_i;
while (entry[end_i] != 0) : (end_i += 1) {}
const value = entry[entry_i + 1 .. end_i :0];
if (std.mem.eql(u8, key, "NO_COLOR")) {
environ.exist.NO_COLOR = true;
} else if (std.mem.eql(u8, key, "CLICOLOR_FORCE")) {
environ.exist.CLICOLOR_FORCE = true;
} else if (std.mem.eql(u8, key, "ZIG_PROGRESS")) {
environ.zig_progress_file = file: {
break :file .{
.handle = std.fmt.parseInt(u31, value, 10) catch
break :file error.UnrecognizedFormat,
.flags = .{ .nonblocking = true },
};
};
} else inline for (@typeInfo(String).@"struct".fields) |field| {
if (std.mem.eql(u8, key, field.name)) @field(environ.string, field.name) = value;
}
}
}
}
};
pub const NullFile = switch (native_os) {
@ -1397,13 +1520,13 @@ pub fn waitForApcOrAlert() void {
_ = windows.ntdll.NtDelayExecution(windows.TRUE, &infinite_timeout);
}
const max_iovecs_len = 8;
const splat_buffer_size = 64;
pub const max_iovecs_len = 8;
pub const splat_buffer_size = 64;
/// Happens to match the maximum number of handles that
/// NtWaitForMultipleObjects accepts. We also use this value for poll() on
/// POSIX systems.
const poll_buffer_len = 64;
const default_PATH = "/usr/local/bin:/bin/:/usr/bin";
pub const default_PATH = "/usr/local/bin:/bin/:/usr/bin";
/// There are multiple kernel bugs being worked around with retries.
const max_windows_kernel_bug_retries = 13;
@ -1588,7 +1711,7 @@ fn worker(t: *Threaded) void {
.cancel_protection = .unblocked,
.futex_waiter = undefined,
.unpark_flag = unpark_flag_init,
.csprng = .{},
.csprng = .uninitialized,
};
Thread.current = &thread;
@ -2563,12 +2686,12 @@ fn operate(userdata: ?*anyopaque, operation: Io.Operation) Io.Cancelable!Io.Oper
fn batchAwaitAsync(userdata: ?*anyopaque, b: *Io.Batch) Io.Cancelable!void {
const t: *Threaded = @ptrCast(@alignCast(userdata));
if (is_windows) {
batchAwaitWindows(b, false) catch |err| switch (err) {
batchDrainSubmittedWindows(b, false) catch |err| switch (err) {
error.ConcurrencyUnavailable => unreachable, // passed concurrency=false
else => |e| return e,
};
const alertable_syscall = try AlertableSyscall.start();
while (b.pending.head != .none and b.completions.head == .none) waitForApcOrAlert();
while (b.pending.head != .none and b.completed.head == .none) waitForApcOrAlert();
alertable_syscall.finish();
return;
}
@ -2576,7 +2699,7 @@ fn batchAwaitAsync(userdata: ?*anyopaque, b: *Io.Batch) Io.Cancelable!void {
var poll_buffer: [poll_buffer_len]posix.pollfd = undefined;
var poll_len: u32 = 0;
{
var index = b.submissions.head;
var index = b.submitted.head;
while (index != .none and poll_len < poll_buffer_len) {
const submission = &b.storage[index.toIndex()].submission;
switch (submission.operation) {
@ -2605,7 +2728,7 @@ fn batchAwaitAsync(userdata: ?*anyopaque, b: *Io.Batch) Io.Cancelable!void {
1 => {},
else => while (true) {
const timeout_ms: i32 = t: {
if (b.completions.head != .none) {
if (b.completed.head != .none) {
// It is legal to call batchWait with already completed
// operations in the ring. In that case, we need to avoid
// blocking in the poll syscall, but we can still take this
@ -2620,7 +2743,7 @@ fn batchAwaitAsync(userdata: ?*anyopaque, b: *Io.Batch) Io.Cancelable!void {
switch (posix.errno(rc)) {
.SUCCESS => {
if (rc == 0) {
if (b.completions.head != .none) {
if (b.completed.head != .none) {
// Since there are already completions available in the
// queue, this is neither a timeout nor a case for
// retrying.
@ -2629,7 +2752,7 @@ fn batchAwaitAsync(userdata: ?*anyopaque, b: *Io.Batch) Io.Cancelable!void {
continue;
}
var prev_index: Io.Operation.OptionalIndex = .none;
var index = b.submissions.head;
var index = b.submitted.head;
for (poll_buffer[0..poll_len]) |poll_entry| {
const storage = &b.storage[index.toIndex()];
const submission = &storage.submission;
@ -2638,17 +2761,17 @@ fn batchAwaitAsync(userdata: ?*anyopaque, b: *Io.Batch) Io.Cancelable!void {
const result = try operate(t, submission.operation);
switch (prev_index) {
.none => b.submissions.head = next_index,
.none => b.submitted.head = next_index,
else => b.storage[prev_index.toIndex()].submission.node.next = next_index,
}
if (next_index == .none) b.submissions.tail = prev_index;
if (next_index == .none) b.submitted.tail = prev_index;
switch (b.completions.tail) {
.none => b.completions.head = index,
switch (b.completed.tail) {
.none => b.completed.head = index,
else => |tail_index| b.storage[tail_index.toIndex()].completion.node.next = index,
}
storage.* = .{ .completion = .{ .node = .{ .next = .none }, .result = result } };
b.completions.tail = index;
b.completed.tail = index;
} else prev_index = index;
index = next_index;
}
@ -2662,10 +2785,10 @@ fn batchAwaitAsync(userdata: ?*anyopaque, b: *Io.Batch) Io.Cancelable!void {
}
}
var tail_index = b.completions.tail;
defer b.completions.tail = tail_index;
var index = b.submissions.head;
errdefer b.submissions.head = index;
var tail_index = b.completed.tail;
defer b.completed.tail = tail_index;
var index = b.submitted.head;
errdefer b.submitted.head = index;
while (index != .none) {
const storage = &b.storage[index.toIndex()];
const submission = &storage.submission;
@ -2673,22 +2796,22 @@ fn batchAwaitAsync(userdata: ?*anyopaque, b: *Io.Batch) Io.Cancelable!void {
const result = try operate(t, submission.operation);
switch (tail_index) {
.none => b.completions.head = index,
.none => b.completed.head = index,
else => b.storage[tail_index.toIndex()].completion.node.next = index,
}
storage.* = .{ .completion = .{ .node = .{ .next = .none }, .result = result } };
tail_index = index;
index = next_index;
}
b.submissions = .{ .head = .none, .tail = .none };
b.submitted = .{ .head = .none, .tail = .none };
}
fn batchAwaitConcurrent(userdata: ?*anyopaque, b: *Io.Batch, timeout: Io.Timeout) Io.Batch.AwaitConcurrentError!void {
const t: *Threaded = @ptrCast(@alignCast(userdata));
if (is_windows) {
const deadline: ?Io.Clock.Timestamp = timeout.toTimestamp(ioBasic(t));
try batchAwaitWindows(b, true);
while (b.pending.head != .none and b.completions.head == .none) {
try batchDrainSubmittedWindows(b, true);
while (b.pending.head != .none and b.completed.head == .none) {
var delay_interval: windows.LARGE_INTEGER = interval: {
const d = deadline orelse break :interval std.math.minInt(windows.LARGE_INTEGER);
break :interval timeoutToWindowsInterval(.{ .deadline = d }).?;
@ -2701,7 +2824,7 @@ fn batchAwaitConcurrent(userdata: ?*anyopaque, b: *Io.Batch, timeout: Io.Timeout
// The thread woke due to the timeout. Although spurious
// timeouts are OK, when no deadline is passed we must not
// return `error.Timeout`.
if (timeout != .none and b.completions.head == .none) return error.Timeout;
if (timeout != .none and b.completed.head == .none) return error.Timeout;
},
else => {},
}
@ -2743,7 +2866,7 @@ fn batchAwaitConcurrent(userdata: ?*anyopaque, b: *Io.Batch, timeout: Io.Timeout
}
} = .{ .gpa = t.allocator, .b = b, .slice = &poll_buffer, .len = 0 };
{
var index = b.submissions.head;
var index = b.submitted.head;
while (index != .none) {
const submission = &b.storage[index.toIndex()].submission;
switch (submission.operation) {
@ -2757,18 +2880,18 @@ fn batchAwaitConcurrent(userdata: ?*anyopaque, b: *Io.Batch, timeout: Io.Timeout
switch (poll_storage.len) {
0 => return,
1 => if (timeout == .none) {
const index = b.submissions.head;
const index = b.submitted.head;
const storage = &b.storage[index.toIndex()];
const result = try operate(t, storage.submission.operation);
b.submissions = .{ .head = .none, .tail = .none };
b.submitted = .{ .head = .none, .tail = .none };
switch (b.completions.tail) {
.none => b.completions.head = index,
switch (b.completed.tail) {
.none => b.completed.head = index,
else => |tail_index| b.storage[tail_index.toIndex()].completion.node.next = index,
}
storage.* = .{ .completion = .{ .node = .{ .next = .none }, .result = result } };
b.completions.tail = index;
b.completed.tail = index;
return;
},
else => {},
@ -2777,7 +2900,7 @@ fn batchAwaitConcurrent(userdata: ?*anyopaque, b: *Io.Batch, timeout: Io.Timeout
const deadline = timeout.toTimestamp(t_io);
while (true) {
const timeout_ms: i32 = t: {
if (b.completions.head != .none) {
if (b.completed.head != .none) {
// It is legal to call batchWait with already completed
// operations in the ring. In that case, we need to avoid
// blocking in the poll syscall, but we can still take this
@ -2794,7 +2917,7 @@ fn batchAwaitConcurrent(userdata: ?*anyopaque, b: *Io.Batch, timeout: Io.Timeout
switch (posix.errno(rc)) {
.SUCCESS => {
if (rc == 0) {
if (b.completions.head != .none) {
if (b.completed.head != .none) {
// Since there are already completions available in the
// queue, this is neither a timeout nor a case for
// retrying.
@ -2806,7 +2929,7 @@ fn batchAwaitConcurrent(userdata: ?*anyopaque, b: *Io.Batch, timeout: Io.Timeout
return error.Timeout;
}
var prev_index: Io.Operation.OptionalIndex = .none;
var index = b.submissions.head;
var index = b.submitted.head;
for (poll_storage.slice[0..poll_storage.len]) |poll_entry| {
const submission = &b.storage[index.toIndex()].submission;
const next_index = submission.node.next;
@ -2814,17 +2937,20 @@ fn batchAwaitConcurrent(userdata: ?*anyopaque, b: *Io.Batch, timeout: Io.Timeout
const result = try operate(t, submission.operation);
switch (prev_index) {
.none => b.submissions.head = next_index,
.none => b.submitted.head = next_index,
else => b.storage[prev_index.toIndex()].submission.node.next = next_index,
}
if (next_index == .none) b.submissions.tail = prev_index;
if (next_index == .none) b.submitted.tail = prev_index;
switch (b.completions.tail) {
.none => b.completions.head = index,
switch (b.completed.tail) {
.none => b.completed.head = index,
else => |tail_index| b.storage[tail_index.toIndex()].completion.node.next = index,
}
b.completions.tail = index;
b.storage[index.toIndex()] = .{ .completion = .{ .node = .{ .next = .none }, .result = result } };
b.completed.tail = index;
b.storage[index.toIndex()] = .{ .completion = .{
.node = .{ .next = .none },
.result = result,
} };
} else prev_index = index;
index = next_index;
}
@ -2841,7 +2967,7 @@ const WindowsBatchPendingOperationContext = extern struct {
file: windows.HANDLE,
iosb: windows.IO_STATUS_BLOCK,
const Erased = [3]usize;
const Erased = Io.Operation.Storage.Pending.Context;
comptime {
assert(@sizeOf(Erased) <= @sizeOf(WindowsBatchPendingOperationContext));
@ -2858,24 +2984,9 @@ const WindowsBatchPendingOperationContext = extern struct {
fn batchCancel(userdata: ?*anyopaque, b: *Io.Batch) void {
const t: *Threaded = @ptrCast(@alignCast(userdata));
{
var tail_index = b.unused.tail;
defer b.unused.tail = tail_index;
var index = b.submissions.head;
errdefer b.submissions.head = index;
while (index != .none) {
const next_index = b.storage[index.toIndex()].submission.node.next;
switch (tail_index) {
.none => b.unused.head = index,
else => b.storage[tail_index.toIndex()].unused.next = index,
}
b.storage[index.toIndex()] = .{ .unused = .{ .prev = tail_index, .next = .none } };
tail_index = index;
index = next_index;
}
b.submissions = .{ .head = .none, .tail = .none };
}
if (is_windows) {
if (b.pending.head == .none) return;
waitForApcOrAlert();
var index = b.pending.head;
while (index != .none) {
const pending = &b.storage[index.toIndex()].pending;
@ -2889,10 +3000,13 @@ fn batchCancel(userdata: ?*anyopaque, b: *Io.Batch) void {
t.allocator.free(@as([*]posix.pollfd, @ptrCast(@alignCast(context)))[0..b.storage.len]);
b.context = null;
}
assert(b.pending.head == .none);
}
fn batchApc(apc_context: ?*anyopaque, iosb: *windows.IO_STATUS_BLOCK, _: windows.ULONG) callconv(.winapi) void {
fn batchApc(
apc_context: ?*anyopaque,
iosb: *windows.IO_STATUS_BLOCK,
_: windows.ULONG,
) callconv(.winapi) void {
const b: *Io.Batch = @ptrCast(@alignCast(apc_context));
const context: *WindowsBatchPendingOperationContext = @fieldParentPtr("iosb", iosb);
const erased_context = context.toErased();
@ -2918,11 +3032,12 @@ fn batchApc(apc_context: ?*anyopaque, iosb: *windows.IO_STATUS_BLOCK, _: windows
b.unused.tail = .fromIndex(index);
},
else => {
switch (b.completions.tail) {
.none => b.completions.head = .fromIndex(index),
else => |tail_index| b.storage[tail_index.toIndex()].completion.node.next = .fromIndex(index),
switch (b.completed.tail) {
.none => b.completed.head = .fromIndex(index),
else => |tail_index| b.storage[tail_index.toIndex()].completion.node.next =
.fromIndex(index),
}
b.completions.tail = .fromIndex(index);
b.completed.tail = .fromIndex(index);
const result: Io.Operation.Result = switch (pending.tag) {
.file_read_streaming => .{ .file_read_streaming = ntReadFileResult(iosb) },
.file_write_streaming => .{ .file_write_streaming = ntWriteFileResult(iosb) },
@ -2934,9 +3049,9 @@ fn batchApc(apc_context: ?*anyopaque, iosb: *windows.IO_STATUS_BLOCK, _: windows
}
/// If `concurrency` is false, `error.ConcurrencyUnavailable` is unreachable.
fn batchAwaitWindows(b: *Io.Batch, concurrency: bool) error{ Canceled, ConcurrencyUnavailable }!void {
var index = b.submissions.head;
errdefer b.submissions.head = index;
fn batchDrainSubmittedWindows(b: *Io.Batch, concurrency: bool) (Io.ConcurrentError || Io.Cancelable)!void {
var index = b.submitted.head;
errdefer b.submitted.head = index;
while (index != .none) {
const storage = &b.storage[index.toIndex()];
const submission = storage.submission;
@ -2952,7 +3067,7 @@ fn batchAwaitWindows(b: *Io.Batch, concurrency: bool) error{ Canceled, Concurren
b.pending.tail = index;
const context: *WindowsBatchPendingOperationContext = .fromErased(&storage.pending.context);
errdefer {
context.iosb.u.Status = .CANCELLED;
context.iosb = .{ .u = .{ .Status = .CANCELLED }, .Information = undefined };
batchApc(b, &context.iosb, 0);
}
switch (submission.operation) {
@ -2960,10 +3075,7 @@ fn batchAwaitWindows(b: *Io.Batch, concurrency: bool) error{ Canceled, Concurren
var data_index: usize = 0;
while (o.data.len - data_index != 0 and o.data[data_index].len == 0) data_index += 1;
if (o.data.len - data_index == 0) {
context.iosb = .{
.u = .{ .Status = .SUCCESS },
.Information = 0,
};
context.iosb = .{ .u = .{ .Status = .SUCCESS }, .Information = 0 };
batchApc(b, &context.iosb, 0);
break :o;
}
@ -3023,10 +3135,7 @@ fn batchAwaitWindows(b: *Io.Batch, concurrency: bool) error{ Canceled, Concurren
.file_write_streaming => |o| o: {
const buffer = windowsWriteBuffer(o.header, o.data, o.splat);
if (buffer.len == 0) {
context.iosb = .{
.u = .{ .Status = .SUCCESS },
.Information = 0,
};
context.iosb = .{ .u = .{ .Status = .SUCCESS }, .Information = 0 };
batchApc(b, &context.iosb, 0);
break :o;
}
@ -3140,7 +3249,7 @@ fn batchAwaitWindows(b: *Io.Batch, concurrency: bool) error{ Canceled, Concurren
}
index = submission.node.next;
}
b.submissions = .{ .head = .none, .tail = .none };
b.submitted = .{ .head = .none, .tail = .none };
}
/// Since Windows only supports writing one contiguous buffer, returns the
@ -3155,7 +3264,7 @@ fn windowsWriteBuffer(header: []const u8, data: []const []const u8, splat: usize
if (splat == 0) return &.{};
break :b data[data.len - 1];
};
return buffer[0..@min(buffer.len, std.math.maxInt(u32))];
return buffer[0..std.math.lossyCast(u32, buffer.len)];
}
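The switch to `lossyCast` expresses the same clamp more directly: for an unsigned narrowing conversion, `std.math.lossyCast` saturates at the target type's maximum instead of truncating bits. A sketch with a hypothetical oversized length:

const buffer_len: u64 = 1 << 33; // hypothetical > 4 GiB buffer length
const clamped_old: u64 = @min(buffer_len, std.math.maxInt(u32));
const clamped_new: u32 = std.math.lossyCast(u32, buffer_len);
// Both evaluate to std.math.maxInt(u32).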
fn submitComplete(ring: []u32, complete_tail: *Io.Batch.RingIndex, op: u32) void {
@ -4677,8 +4786,8 @@ fn atomicFileInit(
dir: Dir,
close_dir_on_deinit: bool,
) Dir.CreateFileAtomicError!File.Atomic {
var random_integer: u64 = undefined;
while (true) {
var random_integer: u64 = undefined;
t_io.random(@ptrCast(&random_integer));
const tmp_sub_path = std.fmt.hex(random_integer);
const file = dir.createFile(t_io, &tmp_sub_path, .{
@ -14317,11 +14426,11 @@ pub fn posixProtocol(protocol: ?net.Protocol) u32 {
return @intFromEnum(protocol orelse return 0);
}
fn recoverableOsBugDetected() void {
pub fn recoverableOsBugDetected() void {
if (is_debug) unreachable;
}
fn clockToPosix(clock: Io.Clock) posix.clockid_t {
pub fn clockToPosix(clock: Io.Clock) posix.clockid_t {
return switch (clock) {
.real => posix.CLOCK.REALTIME,
.awake => switch (native_os) {
@ -14355,7 +14464,7 @@ fn clockToWasi(clock: Io.Clock) std.os.wasi.clockid_t {
};
}
const linux_statx_request: std.os.linux.STATX = .{
pub const linux_statx_request: std.os.linux.STATX = .{
.TYPE = true,
.MODE = true,
.ATIME = true,
@ -14367,7 +14476,7 @@ const linux_statx_request: std.os.linux.STATX = .{
.BLOCKS = true,
};
const linux_statx_check: std.os.linux.STATX = .{
pub const linux_statx_check: std.os.linux.STATX = .{
.TYPE = true,
.MODE = true,
.ATIME = false,
@ -14379,7 +14488,7 @@ const linux_statx_check: std.os.linux.STATX = .{
.BLOCKS = false,
};
fn statFromLinux(stx: *const std.os.linux.Statx) Io.UnexpectedError!File.Stat {
pub fn statFromLinux(stx: *const std.os.linux.Statx) Io.UnexpectedError!File.Stat {
const actual_mask_int: u32 = @bitCast(stx.mask);
const wanted_mask_int: u32 = @bitCast(linux_statx_check);
if ((actual_mask_int | wanted_mask_int) != actual_mask_int) return error.Unexpected;
@ -14470,11 +14579,11 @@ fn statFromWasi(st: *const std.os.wasi.filestat_t) File.Stat {
};
}
fn timestampFromPosix(timespec: *const posix.timespec) Io.Timestamp {
pub fn timestampFromPosix(timespec: *const posix.timespec) Io.Timestamp {
return .{ .nanoseconds = nanosecondsFromPosix(timespec) };
}
fn nanosecondsFromPosix(timespec: *const posix.timespec) i96 {
pub fn nanosecondsFromPosix(timespec: *const posix.timespec) i96 {
return @intCast(@as(i128, timespec.sec) * std.time.ns_per_s + timespec.nsec);
}
@ -14492,7 +14601,7 @@ fn timestampToPosix(nanoseconds: i96) posix.timespec {
};
}
fn setTimestampToPosix(set_ts: File.SetTimestamp) posix.timespec {
pub fn setTimestampToPosix(set_ts: File.SetTimestamp) posix.timespec {
return switch (set_ts) {
.unchanged => .OMIT,
.now => .NOW,
@ -14500,7 +14609,7 @@ fn setTimestampToPosix(set_ts: File.SetTimestamp) posix.timespec {
};
}
fn pathToPosix(file_path: []const u8, buffer: *[posix.PATH_MAX]u8) Dir.PathNameError![:0]u8 {
pub fn pathToPosix(file_path: []const u8, buffer: *[posix.PATH_MAX]u8) Dir.PathNameError![:0]u8 {
if (std.mem.containsAtLeastScalar2(u8, file_path, 0, 1)) return error.BadPathName;
// >= rather than > to make room for the null byte
if (file_path.len >= buffer.len) return error.NameTooLong;
@ -14996,126 +15105,7 @@ const WindowsEnvironStrings = struct {
fn scanEnviron(t: *Threaded) void {
mutexLock(&t.mutex);
defer mutexUnlock(&t.mutex);
if (t.environ.initialized) return;
t.environ.initialized = true;
if (is_windows) {
// This value expires with any call that modifies the environment,
// which is outside of this Io implementation's control, so references
// must be short-lived.
const peb = windows.peb();
assert(windows.ntdll.RtlEnterCriticalSection(peb.FastPebLock) == .SUCCESS);
defer assert(windows.ntdll.RtlLeaveCriticalSection(peb.FastPebLock) == .SUCCESS);
const ptr = peb.ProcessParameters.Environment;
var i: usize = 0;
while (ptr[i] != 0) {
// There are some special environment variables that start with =,
// so we need a special case to not treat = as a key/value separator
// if it's the first character.
// https://devblogs.microsoft.com/oldnewthing/20100506-00/?p=14133
const key_start = i;
if (ptr[i] == '=') i += 1;
while (ptr[i] != 0 and ptr[i] != '=') : (i += 1) {}
const key_w = ptr[key_start..i];
const value_start = i + 1;
while (ptr[i] != 0) : (i += 1) {} // skip over '=' and value
const value_w = ptr[value_start..i];
i += 1; // skip over null byte
if (windows.eqlIgnoreCaseWtf16(key_w, &.{ 'N', 'O', '_', 'C', 'O', 'L', 'O', 'R' })) {
t.environ.exist.NO_COLOR = true;
} else if (windows.eqlIgnoreCaseWtf16(key_w, &.{ 'C', 'L', 'I', 'C', 'O', 'L', 'O', 'R', '_', 'F', 'O', 'R', 'C', 'E' })) {
t.environ.exist.CLICOLOR_FORCE = true;
} else if (windows.eqlIgnoreCaseWtf16(key_w, &.{ 'Z', 'I', 'G', '_', 'P', 'R', 'O', 'G', 'R', 'E', 'S', 'S' })) {
t.environ.zig_progress_file = file: {
var value_buf: [std.fmt.count("{d}", .{std.math.maxInt(usize)})]u8 = undefined;
const len = std.unicode.calcWtf8Len(value_w);
if (len > value_buf.len) break :file error.UnrecognizedFormat;
assert(std.unicode.wtf16LeToWtf8(&value_buf, value_w) == len);
break :file .{
.handle = @ptrFromInt(std.fmt.parseInt(usize, value_buf[0..len], 10) catch
break :file error.UnrecognizedFormat),
.flags = .{ .nonblocking = true },
};
};
}
comptime assert(@sizeOf(Environ.String) == 0);
}
} else if (native_os == .wasi and !builtin.link_libc) {
var environ_count: usize = undefined;
var environ_buf_size: usize = undefined;
switch (std.os.wasi.environ_sizes_get(&environ_count, &environ_buf_size)) {
.SUCCESS => {},
else => |err| {
t.environ.err = posix.unexpectedErrno(err);
return;
},
}
if (environ_count == 0) return;
const environ = t.allocator.alloc([*:0]u8, environ_count) catch |err| {
t.environ.err = err;
return;
};
defer t.allocator.free(environ);
const environ_buf = t.allocator.alloc(u8, environ_buf_size) catch |err| {
t.environ.err = err;
return;
};
defer t.allocator.free(environ_buf);
switch (std.os.wasi.environ_get(environ.ptr, environ_buf.ptr)) {
.SUCCESS => {},
else => |err| {
t.environ.err = posix.unexpectedErrno(err);
return;
},
}
for (environ) |env| {
const pair = std.mem.sliceTo(env, 0);
var parts = std.mem.splitScalar(u8, pair, '=');
const key = parts.first();
if (std.mem.eql(u8, key, "NO_COLOR")) {
t.environ.exist.NO_COLOR = true;
} else if (std.mem.eql(u8, key, "CLICOLOR_FORCE")) {
t.environ.exist.CLICOLOR_FORCE = true;
}
comptime assert(@sizeOf(Environ.String) == 0);
}
} else {
for (t.environ.process_environ.block.slice) |opt_entry| {
const entry = opt_entry.?;
var entry_i: usize = 0;
while (entry[entry_i] != 0 and entry[entry_i] != '=') : (entry_i += 1) {}
const key = entry[0..entry_i];
var end_i: usize = entry_i;
while (entry[end_i] != 0) : (end_i += 1) {}
const value = entry[entry_i + 1 .. end_i :0];
if (std.mem.eql(u8, key, "NO_COLOR")) {
t.environ.exist.NO_COLOR = true;
} else if (std.mem.eql(u8, key, "CLICOLOR_FORCE")) {
t.environ.exist.CLICOLOR_FORCE = true;
} else if (std.mem.eql(u8, key, "ZIG_PROGRESS")) {
t.environ.zig_progress_file = file: {
break :file .{
.handle = std.fmt.parseInt(u31, value, 10) catch
break :file error.UnrecognizedFormat,
.flags = .{ .nonblocking = true },
};
};
} else inline for (@typeInfo(Environ.String).@"struct".fields) |field| {
if (std.mem.eql(u8, key, field.name)) @field(t.environ.string, field.name) = value;
}
}
}
t.environ.scan(t.allocator);
}
fn processReplace(userdata: ?*anyopaque, options: process.ReplaceOptions) process.ReplaceError {
@ -15213,16 +15203,16 @@ fn spawnPosix(t: *Threaded, options: process.SpawnOptions) process.SpawnError!Sp
const any_ignore = (options.stdin == .ignore or options.stdout == .ignore or options.stderr == .ignore);
const dev_null_fd = if (any_ignore) try getDevNullFd(t) else undefined;
const prog_pipe: [2]posix.fd_t = if (options.progress_node.index != .none)
const prog_pipe: [2]posix.fd_t = if (options.progress_node.index != .none) pipe: {
// We use CLOEXEC for the same reason as in `pipe_flags`.
try pipe2(.{ .NONBLOCK = true, .CLOEXEC = true })
else
.{ -1, -1 };
errdefer destroyPipe(prog_pipe);
if (native_os == .linux and prog_pipe[0] != -1) {
_ = posix.system.fcntl(prog_pipe[0], posix.F.SETPIPE_SZ, @as(u32, std.Progress.max_packet_len * 2));
const pipe = try pipe2(.{ .NONBLOCK = true, .CLOEXEC = true });
switch (native_os) {
.linux => _ = posix.system.fcntl(pipe[0], posix.F.SETPIPE_SZ, @as(u32, std.Progress.max_packet_len * 2)),
else => {},
}
break :pipe pipe;
} else .{ -1, -1 };
errdefer destroyPipe(prog_pipe);
var arena_allocator = std.heap.ArenaAllocator.init(t.allocator);
defer arena_allocator.deinit();
@ -17241,16 +17231,7 @@ fn randomMainThread(t: *Threaded, buffer: []u8) void {
randomSecure(t, &seed) catch |err| switch (err) {
error.Canceled => unreachable,
error.EntropyUnavailable => {
@memset(&seed, 0);
const aslr_addr = @intFromPtr(t);
std.mem.writeInt(usize, seed[seed.len - @sizeOf(usize) ..][0..@sizeOf(usize)], aslr_addr, .native);
switch (native_os) {
.windows => fallbackSeedWindows(&seed),
.wasi => if (builtin.link_libc) fallbackSeedPosix(&seed) else fallbackSeedWasi(&seed),
else => fallbackSeedPosix(&seed),
}
},
error.EntropyUnavailable => fallbackSeed(t, &seed),
};
}
t.csprng.rng = .init(seed);
@ -17259,6 +17240,17 @@ fn randomMainThread(t: *Threaded, buffer: []u8) void {
t.csprng.rng.fill(buffer);
}
pub fn fallbackSeed(aslr_addr: ?*anyopaque, seed: *[Csprng.seed_len]u8) void {
@memset(seed, 0);
std.mem.writeInt(usize, seed[seed.len - @sizeOf(usize) ..][0..@sizeOf(usize)], @intFromPtr(aslr_addr), .native);
const fallbackSeedImpl = switch (native_os) {
.windows => fallbackSeedWindows,
.wasi => if (builtin.link_libc) fallbackSeedPosix else fallbackSeedWasi,
else => fallbackSeedPosix,
};
fallbackSeedImpl(seed);
}
fn fallbackSeedPosix(seed: *[Csprng.seed_len]u8) void {
std.mem.writeInt(posix.pid_t, seed[0..@sizeOf(posix.pid_t)], posix.system.getpid(), .native);
const i_1 = @sizeOf(posix.pid_t);


@ -6717,9 +6717,10 @@ pub const IORING_ACCEPT_MULTISHOT = 1 << 0;
/// IORING_OP_MSG_RING command types, stored in sqe->addr
pub const IORING_MSG_RING_COMMAND = enum(u8) {
/// pass sqe->len as 'res' and off as user_data
DATA,
DATA = 0,
/// send a registered fd to another ring
SEND_FD,
SEND_FD = 1,
_,
};
// io_uring_sqe.msg_ring_flags (rw_flags in the Zig struct)
@ -6772,6 +6773,8 @@ pub const IORING_CQE_F_SOCK_NONEMPTY = 1 << 2;
pub const IORING_CQE_F_NOTIF = 1 << 3;
/// If set, the buffer ID set in the completion will get more completions.
pub const IORING_CQE_F_BUF_MORE = 1 << 4;
pub const IORING_CQE_F_SKIP = 1 << 5;
pub const IORING_CQE_F_32 = 1 << 15;
pub const IORING_CQE_BUFFER_SHIFT = 16;
@ -7068,7 +7071,7 @@ pub const IORING_RESTRICTION = enum(u16) {
_,
};
pub const IO_URING_SOCKET_OP = enum(u16) {
pub const IO_URING_SOCKET_OP = enum(u32) {
SIOCIN = 0,
SIOCOUTQ = 1,
GETSOCKOPT = 2,


@ -60,7 +60,7 @@ pub const CurrentPathError = error{
NameTooLong,
/// Not possible on Windows. Always returned on WASI.
CurrentDirUnlinked,
} || Io.UnexpectedError;
} || Io.Cancelable || Io.UnexpectedError;
/// On Windows, the result is encoded as [WTF-8](https://wtf-8.codeberg.page/).
/// On other platforms, the result is an opaque sequence of bytes with no
@ -72,7 +72,7 @@ pub fn currentPath(io: Io, buffer: []u8) CurrentPathError!usize {
pub const CurrentPathAllocError = Allocator.Error || error{
/// Not possible on Windows. Always returned on WASI.
CurrentDirUnlinked,
} || Io.UnexpectedError;
} || Io.Cancelable || Io.UnexpectedError;
/// On Windows, the result is encoded as [WTF-8](https://wtf-8.codeberg.page/).
/// On other platforms, the result is an opaque sequence of bytes with no
@ -355,7 +355,7 @@ pub const SpawnError = error{
/// On Windows, the volume does not contain a recognized file system. File
/// system drivers might not be loaded, or the volume may be corrupt.
UnrecognizedVolume,
} || Io.Dir.PathNameError || Io.Cancelable || Io.UnexpectedError;
} || Io.File.OpenError || Io.Dir.PathNameError || Io.Cancelable || Io.UnexpectedError;
pub const SpawnOptions = struct {
argv: []const []const u8,


@ -1128,10 +1128,10 @@ fn filePermissions(mode: u32, options: PipeOptions) Io.File.Permissions {
test filePermissions {
if (!Io.File.Permissions.has_executable_bit) return error.SkipZigTest;
try testing.expectEqual(.default_file, filePermissions(0o744, .{ .mode_mode = .ignore }));
try testing.expectEqual(.executable_file, filePermissions(0o744, .{}));
try testing.expectEqual(.default_file, filePermissions(0o644, .{}));
try testing.expectEqual(.default_file, filePermissions(0o655, .{}));
try testing.expectEqual(Io.File.Permissions.default_file, filePermissions(0o744, .{ .mode_mode = .ignore }));
try testing.expectEqual(Io.File.Permissions.executable_file, filePermissions(0o744, .{}));
try testing.expectEqual(Io.File.Permissions.default_file, filePermissions(0o644, .{}));
try testing.expectEqual(Io.File.Permissions.default_file, filePermissions(0o655, .{}));
}
test "executable bit" {


@ -4891,11 +4891,7 @@ fn performAllTheWork(
work: while (true) {
for (&comp.work_queues) |*work_queue| if (work_queue.popFront()) |job| {
try processOneJob(
@intFromEnum(Zcu.PerThread.Id.main),
comp,
job,
);
try processOneJob(.main, comp, job);
continue :work;
};
if (comp.zcu) |zcu| {
@ -5160,11 +5156,7 @@ pub fn queueJobs(comp: *Compilation, jobs: []const Job) !void {
for (jobs) |job| try comp.queueJob(job);
}
fn processOneJob(
tid: usize,
comp: *Compilation,
job: Job,
) JobError!void {
fn processOneJob(tid: Zcu.PerThread.Id, comp: *Compilation, job: Job) JobError!void {
switch (job) {
.codegen_func => |func| {
const zcu = comp.zcu.?;
@ -5232,7 +5224,7 @@ fn processOneJob(
const named_frame = tracy.namedFrame("analyze_func");
defer named_frame.end();
const pt: Zcu.PerThread = .activate(comp.zcu.?, @enumFromInt(tid));
const pt: Zcu.PerThread = .activate(comp.zcu.?, tid);
defer pt.deactivate();
pt.ensureFuncBodyUpToDate(func) catch |err| switch (err) {
@ -5245,7 +5237,7 @@ fn processOneJob(
const named_frame = tracy.namedFrame("analyze_comptime_unit");
defer named_frame.end();
const pt: Zcu.PerThread = .activate(comp.zcu.?, @enumFromInt(tid));
const pt: Zcu.PerThread = .activate(comp.zcu.?, tid);
defer pt.deactivate();
const maybe_err: Zcu.SemaError!void = switch (unit.unwrap()) {
@ -5285,7 +5277,7 @@ fn processOneJob(
const named_frame = tracy.namedFrame("resolve_type_fully");
defer named_frame.end();
const pt: Zcu.PerThread = .activate(comp.zcu.?, @enumFromInt(tid));
const pt: Zcu.PerThread = .activate(comp.zcu.?, tid);
defer pt.deactivate();
Type.fromInterned(ty).resolveFully(pt) catch |err| switch (err) {
error.OutOfMemory, error.Canceled => |e| return e,
@ -5296,7 +5288,7 @@ fn processOneJob(
const named_frame = tracy.namedFrame("analyze_mod");
defer named_frame.end();
const pt: Zcu.PerThread = .activate(comp.zcu.?, @enumFromInt(tid));
const pt: Zcu.PerThread = .activate(comp.zcu.?, tid);
defer pt.deactivate();
pt.semaMod(mod) catch |err| switch (err) {
error.OutOfMemory, error.Canceled => |e| return e,
@ -5642,13 +5634,14 @@ fn workerUpdateFile(
prog_node: std.Progress.Node,
group: *Io.Group,
) void {
const tid = Compilation.getTid();
const io = comp.io;
const tid: Zcu.PerThread.Id = .acquire(io);
defer tid.release(io);
const child_prog_node = prog_node.start(fs.path.basename(file.path.sub_path), 0);
defer child_prog_node.end();
const pt: Zcu.PerThread = .activate(comp.zcu.?, @enumFromInt(tid));
const pt: Zcu.PerThread = .activate(comp.zcu.?, tid);
defer pt.deactivate();
pt.updateFile(file_index, file) catch |err| {
pt.reportRetryableFileError(file_index, "unable to load '{s}': {s}", .{ fs.path.basename(file.path.sub_path), @errorName(err) }) catch |oom| switch (oom) {
@ -5708,9 +5701,10 @@ fn workerUpdateBuiltinFile(comp: *Compilation, file: *Zcu.File) void {
}
fn workerUpdateEmbedFile(comp: *Compilation, ef_index: Zcu.EmbedFile.Index, ef: *Zcu.EmbedFile) void {
const tid = Compilation.getTid();
const io = comp.io;
comp.detectEmbedFileUpdate(@enumFromInt(tid), ef_index, ef) catch |err| switch (err) {
const tid: Zcu.PerThread.Id = .acquire(io);
defer tid.release(io);
comp.detectEmbedFileUpdate(tid, ef_index, ef) catch |err| switch (err) {
error.OutOfMemory => {
comp.mutex.lockUncancelable(io);
defer comp.mutex.unlock(io);
@ -5868,7 +5862,7 @@ pub fn translateC(
}
var stdout: []u8 = undefined;
try @import("main.zig").translateC(gpa, arena, io, argv.items, environ_map, prog_node, &stdout);
try @import("main.zig").translateC(gpa, arena, io, argv.items, environ_map, prog_node, comp.thread_limit, &stdout);
if (out_dep_path) |dep_file_path| add_deps: {
if (comp.verbose_cimport) log.info("processing dep file at {s}", .{dep_file_path});
@ -8394,17 +8388,3 @@ pub fn compilerRtOptMode(comp: Compilation) std.builtin.OptimizeMode {
pub fn compilerRtStrip(comp: Compilation) bool {
return comp.root_mod.strip;
}
/// This is a temporary workaround put in place to migrate from `std.Thread.Pool`
/// to `std.Io.Threaded` for asynchronous/concurrent work. The eventual solution
/// will likely involve significant changes to the `InternPool` implementation.
pub fn getTid() usize {
if (my_tid == null) my_tid = next_tid.fetchAdd(1, .monotonic);
return my_tid.?;
}
pub fn setMainThread() void {
my_tid = 0;
}
/// TID 0 is reserved for the main thread.
var next_tid: std.atomic.Value(usize) = .init(1);
threadlocal var my_tid: ?usize = null;


@ -3,6 +3,7 @@
const InternPool = @This();
const builtin = @import("builtin");
const build_options = @import("build_options");
const std = @import("std");
const Io = std.Io;
@ -86,13 +87,11 @@ dep_entries: std.ArrayList(DepEntry),
/// garbage collection pass.
free_dep_entries: std.ArrayList(DepEntry.Index),
/// Whether a multi-threaded intern pool is useful.
/// Currently `false` until the intern pool is actually accessed
/// from multiple threads to reduce the cost of this data structure.
const want_multi_threaded = true;
/// Whether a single-threaded intern pool impl is in use.
pub const single_threaded = builtin.single_threaded or !want_multi_threaded;
pub const single_threaded = switch (build_options.io_mode) {
.threaded => builtin.single_threaded,
.evented => false, // even without threads, evented Io can access it from multiple tasks at a time
};
pub const empty: InternPool = .{
.locals = &.{},
@ -6915,7 +6914,7 @@ pub fn init(ip: *InternPool, gpa: Allocator, io: Io, available_threads: usize) !
assert(ip.locals.len == 0 and ip.shards.len == 0);
assert(available_threads > 0 and available_threads <= std.math.maxInt(u8));
const used_threads = if (single_threaded) 1 else available_threads;
const used_threads = if (single_threaded) 1 else @max(available_threads, 2);
ip.locals = try gpa.alloc(Local, used_threads);
@memset(ip.locals, .{
.shared = .{


@ -4954,8 +4954,10 @@ pub const CodegenTaskPool = struct {
// We own `air` now, so we are responsible for freeing it.
var air = orig_air;
defer air.deinit(zcu.comp.gpa);
const tid = Compilation.getTid();
const pt: Zcu.PerThread = .activate(zcu, @enumFromInt(tid));
const io = zcu.comp.io;
const tid: Zcu.PerThread.Id = .acquire(io);
defer tid.release(io);
const pt: Zcu.PerThread = .activate(zcu, tid);
defer pt.deactivate();
return pt.runCodegen(func_index, &air);
}
@ -4964,8 +4966,10 @@ pub const CodegenTaskPool = struct {
func_index: InternPool.Index,
air: *Air,
) CodegenResult {
const tid = Compilation.getTid();
const pt: Zcu.PerThread = .activate(zcu, @enumFromInt(tid));
const io = zcu.comp.io;
const tid: Zcu.PerThread.Id = .acquire(io);
defer tid.release(io);
const pt: Zcu.PerThread = .activate(zcu, tid);
defer pt.deactivate();
return pt.runCodegen(func_index, air);
}


@ -41,13 +41,86 @@ zcu: *Zcu,
tid: Id,
pub const IdBacking = u7;
pub const Id = if (InternPool.single_threaded) enum { main } else enum(IdBacking) { main, _ };
pub const Id = if (InternPool.single_threaded) enum {
main,
pub fn allocate(arena: Allocator, n: usize) Allocator.Error!void {
_ = arena;
_ = n;
}
pub fn acquire(io: std.Io) Id {
_ = io;
return .main;
}
pub fn release(tid: Id, io: std.Io) void {
_ = io;
_ = tid;
}
} else enum(IdBacking) {
main,
_,
var tid_mutex: std.Io.Mutex = .init;
var tid_cond: std.Io.Condition = .init;
/// This is a temporary workaround put in place to migrate from `std.Thread.Pool`
/// to `std.Io.Threaded` for asynchronous/concurrent work. The eventual solution
/// will likely involve significant changes to the `InternPool` implementation.
var available_tids: std.ArrayList(Id) = .empty;
threadlocal var recursive_depth: usize = 0;
threadlocal var recursive_tid: Id = .main;
pub fn allocate(arena: Allocator, n: usize) Allocator.Error!void {
assert(available_tids.items.len == 0);
try available_tids.ensureTotalCapacityPrecise(arena, n - 1);
for (1..n) |tid| available_tids.appendAssumeCapacity(@enumFromInt(tid));
}
pub fn acquire(io: std.Io) Id {
switch (build_options.io_mode) {
.threaded => {
recursive_depth += 1;
if (recursive_depth > 1) {
assert(recursive_tid != .main);
return recursive_tid;
}
},
.evented => {},
}
tid_mutex.lockUncancelable(io);
defer tid_mutex.unlock(io);
while (true) {
if (available_tids.pop()) |tid| {
switch (build_options.io_mode) {
.threaded => recursive_tid = tid,
.evented => {},
}
return tid;
}
tid_cond.waitUncancelable(io, &tid_mutex);
}
}
pub fn release(tid: Id, io: std.Io) void {
switch (build_options.io_mode) {
.threaded => {
assert(recursive_tid == tid);
recursive_depth -= 1;
if (recursive_depth > 0) return;
recursive_tid = .main;
},
.evented => {},
}
{
tid_mutex.lockUncancelable(io);
defer tid_mutex.unlock(io);
available_tids.appendAssumeCapacity(tid);
}
tid_cond.signal(io);
}
};
pub fn activate(zcu: *Zcu, tid: Id) Zcu.PerThread {
zcu.intern_pool.activate();
return .{ .zcu = zcu, .tid = tid };
}
pub fn deactivate(pt: Zcu.PerThread) void {
pt.zcu.intern_pool.deactivate();
}
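Taken together, the pattern this commit installs at each worker entry point (verbatim from the codegen and `workerUpdateEmbedFile` hunks elsewhere in this diff):

const io = comp.io;
const tid: Zcu.PerThread.Id = .acquire(io);
defer tid.release(io);
const pt: Zcu.PerThread = .activate(zcu, tid);
defer pt.deactivate();
// ... per-thread compiler work ...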


@ -1,34 +1,14 @@
/// We override the panic implementation with our own, so we can print our own information before
/// calling the default panic handler. This declaration must be re-exposed from `@import("root")`.
pub const panic = if (dev.env == .bootstrap)
std.debug.simple_panic
else
std.debug.FullPanic(panicImpl);
/// We let std install its segfault handler, but we override the target-agnostic handler it calls,
/// so we can print our own information before calling the default segfault logic. This declaration
/// must be re-exposed from `@import("root")`.
pub const debug = struct {
pub const handleSegfault = handleSegfaultImpl;
};
/// Printed in panic messages when suggesting a command to run, allowing copy-pasting the command.
/// Set by `main` as soon as arguments are known. The value here is a default in case we somehow
/// crash earlier than that.
pub var zig_argv0: []const u8 = "zig";
fn handleSegfaultImpl(addr: ?usize, name: []const u8, opt_ctx: ?std.debug.CpuContextPtr) noreturn {
@branchHint(.cold);
dumpCrashContext() catch {};
std.debug.defaultHandleSegfault(addr, name, opt_ctx);
}
fn panicImpl(msg: []const u8, first_trace_addr: ?usize) noreturn {
@branchHint(.cold);
dumpCrashContext() catch {};
std.debug.defaultPanic(msg, first_trace_addr orelse @returnAddress());
}
const enabled = switch (build_options.io_mode) {
.threaded => build_options.enable_debug_extensions,
.evented => false, // would use threadlocals in a way incompatible with evented
};
pub const AnalyzeBody = if (build_options.enable_debug_extensions) struct {
pub const AnalyzeBody = if (enabled) struct {
parent: ?*AnalyzeBody,
sema: *Sema,
block: *Sema.Block,
@ -63,7 +43,7 @@ pub const AnalyzeBody = if (build_options.enable_debug_extensions) struct {
pub inline fn setBodyIndex(_: @This(), _: usize) void {}
};
pub const CodegenFunc = if (build_options.enable_debug_extensions) struct {
pub const CodegenFunc = if (enabled) struct {
zcu: *const Zcu,
func_index: InternPool.Index,
threadlocal var current: ?CodegenFunc = null;
@ -82,23 +62,14 @@ pub const CodegenFunc = if (build_options.enable_debug_extensions) struct {
pub fn stop(_: InternPool.Index) void {}
};
fn dumpCrashContext() Io.Writer.Error!void {
pub fn dumpCrashContext(terminal: Io.Terminal) Io.Writer.Error!void {
const S = struct {
/// In the case of recursive panics or segfaults, don't print the context for a second time.
threadlocal var already_dumped = false;
/// TODO: make this unnecessary. It exists because `print_zir` currently needs an allocator,
/// but that shouldn't be necessary---it's already only used in one place.
threadlocal var crash_heap: [64 * 1024]u8 = undefined;
var crash_heap: [64 * 1024]u8 = undefined;
};
if (S.already_dumped) return;
S.already_dumped = true;
// TODO: this does mean that a different thread could grab the stderr mutex between the context
// and the actual panic printing, which would be quite confusing.
const stderr = std.debug.lockStderr(&.{});
defer std.debug.unlockStderr();
const w = &stderr.file_writer.interface;
const w = terminal.writer;
try w.writeAll("Compiler crash context:\n");
if (CodegenFunc.current) |*cg| {


@ -54,11 +54,7 @@ pub fn findZigLibDir(gpa: Allocator, io: Io) !Cache.Directory {
/// Like `std.process.currentPathAlloc`, but also resolves the path with `Dir.path.resolve`. This
/// means the path has no repeated separators, no "." or ".." components, and no trailing separator.
/// On WASI, "" is returned instead of ".".
pub fn getResolvedCwd(io: Io, gpa: Allocator) error{
OutOfMemory,
CurrentDirUnlinked,
Unexpected,
}![]u8 {
pub fn getResolvedCwd(io: Io, gpa: Allocator) std.process.CurrentPathAllocError![]u8 {
if (builtin.target.os.tag == .wasi) {
if (std.debug.runtime_safety) {
const cwd = try std.process.currentPathAlloc(io, gpa);


@ -1500,12 +1500,12 @@ pub fn doPrelinkTask(comp: *Compilation, task: PrelinkTask) void {
},
}
}
pub fn doZcuTask(comp: *Compilation, tid: usize, task: ZcuTask) void {
pub fn doZcuTask(comp: *Compilation, tid: Zcu.PerThread.Id, task: ZcuTask) void {
const io = comp.io;
const diags = &comp.link_diags;
const zcu = comp.zcu.?;
const ip = &zcu.intern_pool;
const pt: Zcu.PerThread = .activate(zcu, @enumFromInt(tid));
const pt: Zcu.PerThread = .activate(zcu, tid);
defer pt.deactivate();
var timer = comp.startTimer();
@ -1610,8 +1610,8 @@ pub fn doZcuTask(comp: *Compilation, tid: usize, task: ZcuTask) void {
}
}
}
pub fn doIdleTask(comp: *Compilation, tid: usize) error{ OutOfMemory, LinkFailure }!bool {
return if (comp.bin_file) |lf| lf.idle(@enumFromInt(tid)) else false;
pub fn doIdleTask(comp: *Compilation, tid: Zcu.PerThread.Id) error{ OutOfMemory, LinkFailure }!bool {
return if (comp.bin_file) |lf| lf.idle(tid) else false;
}
/// After the main pipeline is done, but before flush, the compilation may need to link one final
/// `Nav` into the binary: the `builtin.test_functions` value. Since the link thread isn't running


@ -96,12 +96,12 @@ pub fn enqueuePrelink(q: *Queue, comp: *Compilation, tasks: []const PrelinkTask)
pub fn enqueueZcu(
q: *Queue,
comp: *Compilation,
tid: usize,
tid: Zcu.PerThread.Id,
task: ZcuTask,
) Io.Cancelable!void {
const io = comp.io;
assert(tid == 0);
assert(tid == .main);
if (q.future != null) {
if (q.zcu_queue.putOne(io, task)) |_| {
@ -148,8 +148,9 @@ pub fn finishZcuQueue(q: *Queue, comp: *Compilation) void {
}
fn runLinkTasks(q: *Queue, comp: *Compilation) void {
const tid = Compilation.getTid();
const io = comp.io;
const tid: Zcu.PerThread.Id = .acquire(io);
defer tid.release(io);
var have_idle_tasks = true;
@ -198,7 +199,7 @@ fn runLinkTasks(q: *Queue, comp: *Compilation) void {
}
}
}
fn runIdleTask(comp: *Compilation, tid: usize) bool {
fn runIdleTask(comp: *Compilation, tid: Zcu.PerThread.Id) bool {
return link.doIdleTask(comp, tid) catch |err| switch (err) {
error.OutOfMemory => have_more: {
comp.link_diags.setAllocFailure();
@ -217,5 +218,6 @@ const Compilation = @import("../Compilation.zig");
const InternPool = @import("../InternPool.zig");
const link = @import("../link.zig");
const PrelinkTask = link.PrelinkTask;
const ZcuTask = link.ZcuTask;
const Queue = @This();
const Zcu = @import("../Zcu.zig");
const ZcuTask = link.ZcuTask;


@ -52,8 +52,11 @@ pub const std_options: std.Options = .{
};
pub const std_options_cwd = if (native_os == .wasi) wasi_cwd else null;
pub const panic = crash_report.panic;
pub const debug = crash_report.debug;
pub const debug = struct {
pub fn printCrashContext(terminal: Io.Terminal) void {
crash_report.dumpCrashContext(terminal) catch {};
}
};
var preopens: std.process.Preopens = .empty;
pub fn wasi_cwd() Io.Dir {
@ -158,25 +161,55 @@ pub fn log(
std.log.defaultLog(level, scope, format, args);
}
var debug_allocator: std.heap.DebugAllocator(.{
.stack_trace_frames = build_options.mem_leak_frames,
}) = .init;
const use_debug_allocator = build_options.debug_gpa or
(native_os != .wasi and !builtin.link_libc and switch (builtin.mode) {
.Debug, .ReleaseSafe => true,
.ReleaseFast, .ReleaseSmall => false,
});
pub fn main(init: std.process.Init.Minimal) anyerror!void {
const gpa = gpa: {
if (use_debug_allocator) break :gpa debug_allocator.allocator();
if (native_os == .wasi) break :gpa std.heap.wasm_allocator;
if (builtin.link_libc) break :gpa std.heap.c_allocator;
break :gpa std.heap.smp_allocator;
const RootAllocator = if (use_debug_allocator) std.heap.DebugAllocator(.{
.stack_trace_frames = build_options.mem_leak_frames,
.thread_safe = switch (build_options.io_mode) {
.threaded => true,
.evented => false,
},
}) else struct {
pub const init: RootAllocator = .{};
pub fn allocator(_: RootAllocator) Allocator {
if (native_os == .wasi) return std.heap.wasm_allocator;
if (builtin.link_libc) return std.heap.c_allocator;
return std.heap.smp_allocator;
}
pub fn deinit(_: RootAllocator) std.heap.Check {
return .ok;
}
};
defer if (use_debug_allocator) {
_ = debug_allocator.deinit();
pub fn main(init: std.process.Init.Minimal) anyerror!void {
var root_allocator: RootAllocator = .init;
defer _ = root_allocator.deinit();
const root_gpa = root_allocator.allocator();
var io_impl: IoImpl = undefined;
switch (build_options.io_mode) {
.threaded => io_impl = .init(root_gpa, .{
.stack_size = thread_stack_size,
.argv0 = .init(init.args),
.environ = init.environ,
}),
.evented => try io_impl.init(root_gpa, .{
.argv0 = .init(init.args),
.environ = init.environ,
.backing_allocator_needs_mutex = use_debug_allocator,
}),
}
defer io_impl.deinit();
io_impl_ptr = &io_impl;
const io = io_impl.io();
const gpa = switch (build_options.io_mode) {
.threaded => root_gpa,
.evented => io_impl.allocator(),
};
var arena_instance = std.heap.ArenaAllocator.init(gpa);
defer arena_instance.deinit();
@ -193,17 +226,6 @@ pub fn main(init: std.process.Init.Minimal) anyerror!void {
var environ_map = init.environ.createMap(arena) catch |err| fatal("failed to parse environment: {t}", .{err});
Compilation.setMainThread();
var threaded: Io.Threaded = .init(gpa, .{
.argv0 = .init(init.args),
.environ = init.environ,
});
defer threaded.deinit();
threaded_impl_ptr = &threaded;
threaded.stack_size = thread_stack_size;
const io = threaded.io();
if (tracy.enable_allocation) {
var gpa_tracy = tracy.tracyAllocator(gpa);
return mainArgs(gpa_tracy.allocator(), arena, io, args, &environ_map);
@ -3400,7 +3422,7 @@ fn buildOutputType(
@max(n_jobs orelse std.Thread.getCpuCount() catch 1, 1),
std.math.maxInt(Zcu.PerThread.IdBacking),
);
setThreadLimit(thread_limit);
try setThreadLimit(arena, thread_limit);
for (create_module.c_source_files.items) |*src| {
dev.check(.c_compiler);
@ -4731,13 +4753,13 @@ pub fn translateC(
argv: []const []const u8,
environ_map: *const process.Environ.Map,
prog_node: std.Progress.Node,
thread_limit: usize,
capture: ?*[]u8,
) !void {
try jitCmd(gpa, arena, io, argv, environ_map, .{
try jitCmdInner(gpa, arena, io, argv, environ_map, prog_node, thread_limit, .{
.cmd_name = "translate-c",
.root_src_path = "translate-c/main.zig",
.depend_on_aro = true,
.progress_node = prog_node,
.capture = capture,
});
}
@ -5187,7 +5209,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, io: Io, args: []const []const u8,
@max(n_jobs orelse std.Thread.getCpuCount() catch 1, 1),
std.math.maxInt(Zcu.PerThread.IdBacking),
);
setThreadLimit(thread_limit);
try setThreadLimit(arena, thread_limit);
// Dummy http client that is not actually used when fetch_command is unsupported.
// Prevents bootstrap from depending on a bunch of unnecessary stuff.
@ -5651,7 +5673,7 @@ const JitCmdOptions = struct {
capture: ?*[]u8 = null,
/// Send error bundles via std.zig.Server over stdout
server: bool = false,
progress_node: ?std.Progress.Node = null,
color: Color = .auto,
};
fn jitCmd(
@ -5664,12 +5686,30 @@ fn jitCmd(
) !void {
dev.check(.jit_command);
const color: Color = .auto;
const root_prog_node = if (options.progress_node) |node| node else std.Progress.start(io, .{
.disable_printing = (color == .off),
const root_prog_node = std.Progress.start(io, .{
.disable_printing = (options.color == .off),
});
defer root_prog_node.end();
const thread_limit = @min(
@max(std.Thread.getCpuCount() catch 1, 1),
std.math.maxInt(Zcu.PerThread.IdBacking),
);
try setThreadLimit(arena, thread_limit);
return jitCmdInner(gpa, arena, io, args, environ_map, root_prog_node, thread_limit, options);
}
fn jitCmdInner(
gpa: Allocator,
arena: Allocator,
io: Io,
args: []const []const u8,
environ_map: *const process.Environ.Map,
root_prog_node: std.Progress.Node,
thread_limit: usize,
options: JitCmdOptions,
) !void {
const target_query: std.Target.Query = .{};
const resolved_target: Package.Module.ResolvedTarget = .{
.result = std.zig.resolveTargetQueryOrFatal(io, target_query),
@ -5702,12 +5742,6 @@ fn jitCmd(
);
defer dirs.deinit(io);
const thread_limit = @min(
@max(std.Thread.getCpuCount() catch 1, 1),
std.math.maxInt(Zcu.PerThread.IdBacking),
);
setThreadLimit(thread_limit);
var child_argv: std.ArrayList([]const u8) = .empty;
try child_argv.ensureUnusedCapacity(arena, args.len + 4);
@ -5795,7 +5829,7 @@ fn jitCmd(
process.exit(2);
}
} else {
updateModule(comp, color, root_prog_node) catch |err| switch (err) {
updateModule(comp, options.color, root_prog_node) catch |err| switch (err) {
error.CompileErrorsReported => process.exit(2),
else => |e| return e,
};
@ -7777,15 +7811,25 @@ fn addLibDirectoryWarn2(
});
}
var threaded_impl_ptr: *Io.Threaded = undefined;
fn setThreadLimit(n: usize) void {
const IoImpl = switch (build_options.io_mode) {
.threaded => Io.Threaded,
.evented => Io.Evented,
};
var io_impl_ptr: *IoImpl = undefined;
fn setThreadLimit(arena: std.mem.Allocator, n: usize) Allocator.Error!void {
switch (build_options.io_mode) {
.threaded => {
// We want a maximum of n total threads to keep the InternPool happy, but
// the main thread doesn't count towards the limits, so use n-1. Also, the
// linker can run concurrently, so we need to set both the async *and* the
// concurrency limit.
const limit: Io.Limit = .limited(n - 1);
threaded_impl_ptr.setAsyncLimit(limit);
threaded_impl_ptr.concurrent_limit = limit;
io_impl_ptr.setAsyncLimit(limit);
io_impl_ptr.concurrent_limit = limit;
},
.evented => {},
}
try Zcu.PerThread.Id.allocate(arena, @max(n, 2));
}
fn randInt(io: Io, comptime T: type) T {


@ -13,4 +13,5 @@ pub const value_tracing = false;
pub const skip_non_native = false;
pub const debug_gpa = false;
pub const dev = .core;
pub const io_mode: enum { threaded, evented } = .threaded;
pub const value_interpret_mode = .direct;