Merge pull request 'std: finish moving time to Io interface' (#31086) from time into master

Reviewed-on: https://codeberg.org/ziglang/zig/pulls/31086
This commit is contained in:
Andrew Kelley 2026-02-03 20:16:18 +01:00
commit e5e4602b18
23 changed files with 272 additions and 406 deletions

View file

@ -107,7 +107,7 @@ pub const Environment = struct {
if (parsed > max_timestamp) return error.InvalidEpoch;
return .{ .provided = parsed };
} else {
const timestamp = try Io.Clock.real.now(io);
const timestamp = Io.Clock.real.now(io);
const seconds = std.math.cast(u64, timestamp.toSeconds()) orelse return error.InvalidEpoch;
return .{ .system = std.math.clamp(seconds, 0, max_timestamp) };
}

View file

@ -301,7 +301,7 @@ pub fn init(comp: *Compilation, source_epoch: SourceEpoch) Preprocessor {
/// Initialize Preprocessor with builtin macros.
pub fn initDefault(comp: *Compilation) !Preprocessor {
const source_epoch: SourceEpoch = comp.environment.sourceEpoch(comp.io) catch |er| switch (er) {
error.InvalidEpoch, error.UnsupportedClock, error.Unexpected => blk: {
error.InvalidEpoch => blk: {
const diagnostic: Diagnostic = .invalid_source_epoch;
try comp.diagnostics.add(.{ .text = diagnostic.fmt, .kind = diagnostic.kind, .opt = diagnostic.opt, .location = null });
break :blk .default;

View file

@ -548,7 +548,7 @@ pub fn main(init: process.Init.Minimal) !void {
break :w try .init(graph.cache.cwd);
};
const now = Io.Clock.Timestamp.now(io, .awake) catch |err| fatal("failed to collect timestamp: {t}", .{err});
const now = Io.Clock.Timestamp.now(io, .awake);
run.web_server = if (webui_listen) |listen_address| ws: {
if (builtin.single_threaded) unreachable; // `fatal` above

View file

@ -266,16 +266,19 @@ pub fn init(options: StepOptions) Step {
/// here.
pub fn make(s: *Step, options: MakeOptions) error{ MakeFailed, MakeSkipped }!void {
const arena = s.owner.allocator;
const graph = s.owner.graph;
const io = graph.io;
var timer: ?std.time.Timer = t: {
if (!s.owner.graph.time_report) break :t null;
var start_ts: ?Io.Timestamp = t: {
if (!graph.time_report) break :t null;
if (s.id == .compile) break :t null;
if (s.id == .run and s.cast(Run).?.stdio == .zig_test) break :t null;
break :t std.time.Timer.start() catch @panic("--time-report not supported on this host");
break :t Io.Clock.awake.now(io);
};
const make_result = s.makeFn(s, options);
if (timer) |*t| {
options.web_server.?.updateTimeReportGeneric(s, t.read());
if (start_ts) |*ts| {
const duration = ts.untilNow(io, .awake);
options.web_server.?.updateTimeReportGeneric(s, duration);
}
make_result catch |err| switch (err) {
@ -534,7 +537,7 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool, web_server: ?*Build.
const arena = b.allocator;
const io = b.graph.io;
var timer = try std.time.Timer.start();
const start_ts = Io.Clock.awake.now(io);
try sendMessage(io, zp.child.stdin.?, .update);
if (!watch) try sendMessage(io, zp.child.stdin.?, .exit);
@ -637,7 +640,7 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool, web_server: ?*Build.
.compile = s.cast(Step.Compile).?,
.use_llvm = tr.flags.use_llvm,
.stats = tr.stats,
.ns_total = timer.read(),
.ns_total = @intCast(start_ts.untilNow(io, .awake).toNanoseconds()),
.llvm_pass_timings_len = tr.llvm_pass_timings_len,
.files_len = tr.files_len,
.decls_len = tr.decls_len,
@ -648,7 +651,7 @@ fn zigProcessUpdate(s: *Step, zp: *ZigProcess, watch: bool, web_server: ?*Build.
}
}
s.result_duration_ns = timer.read();
s.result_duration_ns = @intCast(start_ts.untilNow(io, .awake).toNanoseconds());
const stderr_contents = zp.multi_reader.reader(1).buffered();
if (stderr_contents.len > 0) {

View file

@ -1587,12 +1587,12 @@ fn spawnChildAndCollect(
};
if (run.stdio == .zig_test) {
const started: Io.Clock.Timestamp = try .now(io, .awake);
const started: Io.Clock.Timestamp = .now(io, .awake);
const result = evalZigTest(run, spawn_options, options, fuzz_context) catch |err| switch (err) {
error.Canceled => |e| return e,
else => |e| e,
};
run.step.result_duration_ns = @intCast((try started.untilNow(io)).raw.nanoseconds);
run.step.result_duration_ns = @intCast(started.untilNow(io).raw.nanoseconds);
try result;
return null;
} else {
@ -1607,12 +1607,12 @@ fn spawnChildAndCollect(
defer if (inherit) io.unlockStderr();
try setColorEnvironmentVariables(run, environ_map, terminal_mode);
const started: Io.Clock.Timestamp = try .now(io, .awake);
const started: Io.Clock.Timestamp = .now(io, .awake);
const result = evalGeneric(run, spawn_options) catch |err| switch (err) {
error.Canceled => |e| return e,
else => |e| e,
};
run.step.result_duration_ns = @intCast((try started.untilNow(io)).raw.nanoseconds);
run.step.result_duration_ns = @intCast(started.untilNow(io).raw.nanoseconds);
return try result;
}
}
@ -1869,7 +1869,7 @@ fn waitZigTest(
var active_test_index: ?u32 = null;
var last_update: Io.Clock.Timestamp = try .now(io, .awake);
var last_update: Io.Clock.Timestamp = .now(io, .awake);
var coverage_id: ?u64 = null;
@ -1908,11 +1908,11 @@ fn waitZigTest(
multi_reader.fill(64, timeout) catch |err| switch (err) {
error.Timeout => return .{ .timeout = .{
.active_test_index = active_test_index,
.ns_elapsed = @intCast((try last_update.untilNow(io)).raw.nanoseconds),
.ns_elapsed = @intCast(last_update.untilNow(io).raw.nanoseconds),
} },
error.EndOfStream => return .{ .no_poll = .{
.active_test_index = active_test_index,
.ns_elapsed = @intCast((try last_update.untilNow(io)).raw.nanoseconds),
.ns_elapsed = @intCast(last_update.untilNow(io).raw.nanoseconds),
} },
else => |e| return e,
};
@ -1926,11 +1926,11 @@ fn waitZigTest(
multi_reader.fill(64, timeout) catch |err| switch (err) {
error.Timeout => return .{ .timeout = .{
.active_test_index = active_test_index,
.ns_elapsed = @intCast((try last_update.untilNow(io)).raw.nanoseconds),
.ns_elapsed = @intCast(last_update.untilNow(io).raw.nanoseconds),
} },
error.EndOfStream => return .{ .no_poll = .{
.active_test_index = active_test_index,
.ns_elapsed = @intCast((try last_update.untilNow(io)).raw.nanoseconds),
.ns_elapsed = @intCast(last_update.untilNow(io).raw.nanoseconds),
} },
else => |e| return e,
};
@ -1976,13 +1976,13 @@ fn waitZigTest(
@memset(opt_metadata.*.?.ns_per_test, std.math.maxInt(u64));
active_test_index = null;
last_update = try .now(io, .awake);
last_update = .now(io, .awake);
requestNextTest(io, child.stdin.?, &opt_metadata.*.?, &sub_prog_node) catch |err| return .{ .write_failed = err };
},
.test_started => {
active_test_index = opt_metadata.*.?.next_index - 1;
last_update = try .now(io, .awake);
last_update = .now(io, .awake);
},
.test_results => {
assert(fuzz_context == null);
@ -2026,7 +2026,7 @@ fn waitZigTest(
active_test_index = null;
const now: Io.Clock.Timestamp = try .now(io, .awake);
const now: Io.Clock.Timestamp = .now(io, .awake);
md.ns_per_test[tr_hdr.index] = @intCast(last_update.durationTo(now).raw.nanoseconds);
last_update = now;
@ -2239,7 +2239,7 @@ fn evalGeneric(run: *Run, spawn_options: process.SpawnOptions) !EvalGenericResul
return error.StderrStreamTooLong;
}
} else |err| switch (err) {
error.UnsupportedClock, error.Timeout => unreachable,
error.Timeout => unreachable,
error.EndOfStream => {},
else => |e| return e,
}

View file

@ -243,7 +243,7 @@ pub fn finishBuild(ws: *WebServer, opts: struct {
pub fn now(s: *const WebServer) i64 {
const io = s.graph.io;
const ts = base_clock.now(io) catch s.base_timestamp;
const ts = base_clock.now(io);
return @intCast(s.base_timestamp.durationTo(ts).toNanoseconds());
}
@ -761,7 +761,7 @@ pub fn updateTimeReportCompile(ws: *WebServer, opts: struct {
ws.notifyUpdate();
}
pub fn updateTimeReportGeneric(ws: *WebServer, step: *Build.Step, ns_total: u64) void {
pub fn updateTimeReportGeneric(ws: *WebServer, step: *Build.Step, duration: Io.Duration) void {
const gpa = ws.gpa;
const io = ws.graph.io;
@ -780,7 +780,7 @@ pub fn updateTimeReportGeneric(ws: *WebServer, step: *Build.Step, ns_total: u64)
const out: *align(1) abi.time_report.GenericResult = @ptrCast(buf);
out.* = .{
.step_idx = step_idx,
.ns_total = ns_total,
.ns_total = @intCast(duration.toNanoseconds()),
};
{
ws.time_report_mutex.lock(io) catch return;

View file

@ -231,8 +231,9 @@ pub const VTable = struct {
progressParentFile: *const fn (?*anyopaque) std.Progress.ParentFileError!File,
now: *const fn (?*anyopaque, Clock) Clock.Error!Timestamp,
sleep: *const fn (?*anyopaque, Timeout) SleepError!void,
now: *const fn (?*anyopaque, Clock) Timestamp,
clockResolution: *const fn (?*anyopaque, Clock) Clock.ResolutionError!Duration,
sleep: *const fn (?*anyopaque, Timeout) Cancelable!void,
random: *const fn (?*anyopaque, buffer: []u8) void,
randomSecure: *const fn (?*anyopaque, buffer: []u8) RandomSecureError!void,
@ -701,30 +702,53 @@ pub const Clock = enum {
/// thread.
cpu_thread,
pub const Error = error{UnsupportedClock} || UnexpectedError;
/// This function is not cancelable because first of all it does not block,
/// but more importantly, the cancelation logic itself may want to check
/// the time.
pub fn now(clock: Clock, io: Io) Error!Io.Timestamp {
/// This function is not cancelable because it does not block.
///
/// Resolution is determined by `resolution` which may be 0 if the
/// clock is unsupported.
///
/// See also:
/// * `Clock.Timestamp.now`
pub fn now(clock: Clock, io: Io) Io.Timestamp {
return io.vtable.now(io.userdata, clock);
}
pub const ResolutionError = error{
ClockUnavailable,
Unexpected,
};
/// Reveals the granularity of `clock`. May be zero, indicating
/// unsupported clock.
pub fn resolution(clock: Clock, io: Io) ResolutionError!Io.Duration {
return io.vtable.clockResolution(io.userdata, clock);
}
pub const Timestamp = struct {
raw: Io.Timestamp,
clock: Clock,
/// This function is not cancelable because first of all it does not block,
/// but more importantly, the cancelation logic itself may want to check
/// the time.
pub fn now(io: Io, clock: Clock) Error!Clock.Timestamp {
/// This function is not cancelable because it does not block.
///
/// Resolution is determined by `resolution` which may be 0 if
/// the clock is unsupported.
///
/// See also:
/// * `Clock.now`
pub fn now(io: Io, clock: Clock) Clock.Timestamp {
return .{
.raw = try io.vtable.now(io.userdata, clock),
.raw = io.vtable.now(io.userdata, clock),
.clock = clock,
};
}
pub fn wait(t: Clock.Timestamp, io: Io) SleepError!void {
/// Sleeps until the timestamp arrives.
///
/// See also:
/// * `Io.sleep`
/// * `Clock.Duration.sleep`
/// * `Timeout.sleep`
pub fn wait(t: Clock.Timestamp, io: Io) Cancelable!void {
return io.vtable.sleep(io.userdata, .{ .deadline = t });
}
@ -752,30 +776,38 @@ pub const Clock = enum {
};
}
pub fn fromNow(io: Io, duration: Clock.Duration) Error!Clock.Timestamp {
/// Resolution is determined by `resolution` which may be 0 if
/// the clock is unsupported.
pub fn fromNow(io: Io, duration: Clock.Duration) Clock.Timestamp {
return .{
.clock = duration.clock,
.raw = (try duration.clock.now(io)).addDuration(duration.raw),
.raw = duration.clock.now(io).addDuration(duration.raw),
};
}
pub fn untilNow(timestamp: Clock.Timestamp, io: Io) Error!Clock.Duration {
const now_ts = try Clock.Timestamp.now(io, timestamp.clock);
/// Resolution is determined by `resolution` which may be 0 if
/// the clock is unsupported.
pub fn untilNow(timestamp: Clock.Timestamp, io: Io) Clock.Duration {
const now_ts = Clock.Timestamp.now(io, timestamp.clock);
return timestamp.durationTo(now_ts);
}
pub fn durationFromNow(timestamp: Clock.Timestamp, io: Io) Error!Clock.Duration {
const now_ts = try timestamp.clock.now(io);
/// Resolution is determined by `resolution` which may be 0 if
/// the clock is unsupported.
pub fn durationFromNow(timestamp: Clock.Timestamp, io: Io) Clock.Duration {
const now_ts = timestamp.clock.now(io);
return .{
.clock = timestamp.clock,
.raw = now_ts.durationTo(timestamp.raw),
};
}
pub fn toClock(t: Clock.Timestamp, io: Io, clock: Clock) Error!Clock.Timestamp {
/// Resolution is determined by `resolution` which may be 0 if
/// the clock is unsupported.
pub fn toClock(t: Clock.Timestamp, io: Io, clock: Clock) Clock.Timestamp {
if (t.clock == clock) return t;
const now_old = try t.clock.now(io);
const now_new = try clock.now(io);
const now_old = t.clock.now(io);
const now_new = clock.now(io);
const duration = now_old.durationTo(t);
return .{
.clock = clock,
@ -793,7 +825,13 @@ pub const Clock = enum {
raw: Io.Duration,
clock: Clock,
pub fn sleep(duration: Clock.Duration, io: Io) SleepError!void {
/// Waits until a specified amount of time has passed on `clock`.
///
/// See also:
/// * `Io.sleep`
/// * `Clock.Timestamp.wait`
/// * `Timeout.sleep`
pub fn sleep(duration: Clock.Duration, io: Io) Cancelable!void {
return io.vtable.sleep(io.userdata, .{ .duration = duration });
}
};
@ -802,6 +840,10 @@ pub const Clock = enum {
pub const Timestamp = struct {
nanoseconds: i96,
pub fn now(io: Io, clock: Clock) Io.Timestamp {
return io.vtable.now(io.userdata, clock);
}
pub const zero: Timestamp = .{ .nanoseconds = 0 };
pub fn durationTo(from: Timestamp, to: Timestamp) Duration {
@ -844,6 +886,13 @@ pub const Timestamp = struct {
.fill = n.fill,
});
}
/// Resolution is determined by `Clock.resolution` which may be 0 if
/// the clock is unsupported.
pub fn untilNow(t: Timestamp, io: Io, clock: Clock) Duration {
const now_ts = clock.now(io);
return t.durationTo(now_ts);
}
};
pub const Duration = struct {
@ -883,12 +932,12 @@ pub const Timeout = union(enum) {
duration: Clock.Duration,
deadline: Clock.Timestamp,
pub const Error = error{ Timeout, UnsupportedClock };
pub const Error = error{Timeout};
pub fn toTimestamp(t: Timeout, io: Io) Clock.Error!?Clock.Timestamp {
pub fn toTimestamp(t: Timeout, io: Io) ?Clock.Timestamp {
return switch (t) {
.none => null,
.duration => |d| try .fromNow(io, d),
.duration => |d| .fromNow(io, d),
.deadline => |d| d,
};
}
@ -896,20 +945,26 @@ pub const Timeout = union(enum) {
pub fn toDeadline(t: Timeout, io: Io) Timeout {
return switch (t) {
.none => .none,
.duration => |d| .{ .deadline = Clock.Timestamp.fromNow(io, d) catch @panic("TODO") },
.duration => |d| .{ .deadline = .fromNow(io, d) },
.deadline => |d| .{ .deadline = d },
};
}
pub fn toDurationFromNow(t: Timeout, io: Io) Clock.Error!?Clock.Duration {
pub fn toDurationFromNow(t: Timeout, io: Io) ?Clock.Duration {
return switch (t) {
.none => null,
.duration => |d| d,
.deadline => |d| try d.durationFromNow(io),
.deadline => |d| d.durationFromNow(io),
};
}
pub fn sleep(timeout: Timeout, io: Io) SleepError!void {
/// Waits until the timeout has passed.
///
/// See also:
/// * `Io.sleep`
/// * `Clock.Duration.sleep`
/// * `Clock.Timestamp.wait`
pub fn sleep(timeout: Timeout, io: Io) Cancelable!void {
return io.vtable.sleep(io.userdata, timeout);
}
};
@ -2027,9 +2082,13 @@ pub fn concurrent(
return future;
}
pub const SleepError = error{UnsupportedClock} || UnexpectedError || Cancelable;
pub fn sleep(io: Io, duration: Duration, clock: Clock) SleepError!void {
/// Waits until a specified amount of time has passed on `clock`.
///
/// See also:
/// * `Clock.Duration.sleep`
/// * `Clock.Timestamp.wait`
/// * `Timeout.sleep`
pub fn sleep(io: Io, duration: Duration, clock: Clock) Cancelable!void {
return io.vtable.sleep(io.userdata, .{ .duration = .{
.raw = duration,
.clock = clock,

View file

@ -179,7 +179,7 @@ fn rebase(r: *Io.Reader, capacity: usize) Io.Reader.RebaseError!void {
fn fillUntimed(context: *Context, capacity: usize) Io.Reader.Error!void {
fill(context.mr, capacity, .none) catch |err| switch (err) {
error.Timeout, error.UnsupportedClock => unreachable,
error.Timeout => unreachable,
error.Canceled, error.ConcurrencyUnavailable => |e| {
context.err = e;
return error.ReadFailed;

View file

@ -1712,6 +1712,7 @@ pub fn io(t: *Threaded) Io {
.progressParentFile = progressParentFile,
.now = now,
.clockResolution = clockResolution,
.sleep = sleep,
.random = random,
@ -1875,6 +1876,7 @@ pub fn ioBasic(t: *Threaded) Io {
.progressParentFile = progressParentFile,
.now = now,
.clockResolution = clockResolution,
.sleep = sleep,
.random = random,
@ -2487,7 +2489,7 @@ fn futexWait(userdata: ?*anyopaque, ptr: *const u32, expected: u32, timeout: Io.
const t: *Threaded = @ptrCast(@alignCast(userdata));
const t_io = ioBasic(t);
const timeout_ns: ?u64 = ns: {
const d = (timeout.toDurationFromNow(t_io) catch break :ns 10) orelse break :ns null;
const d = timeout.toDurationFromNow(t_io) orelse break :ns null;
break :ns std.math.lossyCast(u64, d.raw.toNanoseconds());
};
return Thread.futexWait(ptr, expected, timeout_ns);
@ -2655,24 +2657,12 @@ fn batchAwaitAsync(userdata: ?*anyopaque, b: *Io.Batch) Io.Cancelable!void {
fn batchAwaitConcurrent(userdata: ?*anyopaque, b: *Io.Batch, timeout: Io.Timeout) Io.Batch.AwaitConcurrentError!void {
const t: *Threaded = @ptrCast(@alignCast(userdata));
if (is_windows) {
const deadline: ?Io.Clock.Timestamp = timeout.toTimestamp(ioBasic(t)) catch |err| switch (err) {
error.Unexpected => deadline: {
recoverableOsBugDetected();
break :deadline .{ .raw = .{ .nanoseconds = 0 }, .clock = .awake };
},
error.UnsupportedClock => |e| return e,
};
const deadline: ?Io.Clock.Timestamp = timeout.toTimestamp(ioBasic(t));
try batchAwaitWindows(b, true);
while (b.pending.head != .none and b.completions.head == .none) {
var delay_interval: windows.LARGE_INTEGER = interval: {
const d = deadline orelse break :interval std.math.minInt(windows.LARGE_INTEGER);
break :interval t.deadlineToWindowsInterval(d) catch |err| switch (err) {
error.UnsupportedClock => |e| return e,
error.Unexpected => {
recoverableOsBugDetected();
break :interval -1;
},
};
break :interval t.deadlineToWindowsInterval(d);
};
const alertable_syscall = try AlertableSyscall.start();
const delay_rc = windows.ntdll.NtDelayExecution(windows.TRUE, &delay_interval);
@ -2754,7 +2744,7 @@ fn batchAwaitConcurrent(userdata: ?*anyopaque, b: *Io.Batch, timeout: Io.Timeout
else => {},
}
const t_io = ioBasic(t);
const deadline = timeout.toTimestamp(t_io) catch return error.UnsupportedClock;
const deadline = timeout.toTimestamp(t_io);
while (true) {
const timeout_ms: i32 = t: {
if (b.completions.head != .none) {
@ -2765,7 +2755,7 @@ fn batchAwaitConcurrent(userdata: ?*anyopaque, b: *Io.Batch, timeout: Io.Timeout
break :t 0;
}
const d = deadline orelse break :t -1;
const duration = d.durationFromNow(t_io) catch return error.UnsupportedClock;
const duration = d.durationFromNow(t_io);
if (duration.raw.nanoseconds <= 0) return error.Timeout;
const max_poll_ms = std.math.maxInt(i32);
break :t @intCast(@min(max_poll_ms, duration.raw.toMilliseconds()));
@ -10821,22 +10811,21 @@ fn fileWriteFilePositional(
return error.Unimplemented;
}
fn nowPosix(clock: Io.Clock) Io.Clock.Error!Io.Timestamp {
fn nowPosix(clock: Io.Clock) Io.Timestamp {
const clock_id: posix.clockid_t = clockToPosix(clock);
var tp: posix.timespec = undefined;
switch (posix.errno(posix.system.clock_gettime(clock_id, &tp))) {
.SUCCESS => return timestampFromPosix(&tp),
.INVAL => return error.UnsupportedClock,
else => |err| return posix.unexpectedErrno(err),
var timespec: posix.timespec = undefined;
switch (posix.errno(posix.system.clock_gettime(clock_id, &timespec))) {
.SUCCESS => return timestampFromPosix(&timespec),
else => return .zero,
}
}
fn now(userdata: ?*anyopaque, clock: Io.Clock) Io.Clock.Error!Io.Timestamp {
fn now(userdata: ?*anyopaque, clock: Io.Clock) Io.Timestamp {
const t: *Threaded = @ptrCast(@alignCast(userdata));
_ = t;
return nowInner(clock);
}
fn nowInner(clock: Io.Clock) Io.Clock.Error!Io.Timestamp {
fn nowInner(clock: Io.Clock) Io.Timestamp {
return switch (native_os) {
.windows => nowWindows(clock),
.wasi => nowWasi(clock),
@ -10844,7 +10833,55 @@ fn nowInner(clock: Io.Clock) Io.Clock.Error!Io.Timestamp {
};
}
fn nowWindows(clock: Io.Clock) Io.Clock.Error!Io.Timestamp {
fn clockResolution(userdata: ?*anyopaque, clock: Io.Clock) Io.Clock.ResolutionError!Io.Duration {
const t: *Threaded = @ptrCast(@alignCast(userdata));
_ = t;
return switch (native_os) {
.windows => switch (clock) {
.awake, .boot, .real => {
// We don't need to cache QPF as it's internally just a memory read to KUSER_SHARED_DATA
// (a read-only page of info updated and mapped by the kernel to all processes):
// https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/ntddk/ns-ntddk-kuser_shared_data
// https://www.geoffchappell.com/studies/windows/km/ntoskrnl/inc/api/ntexapi_x/kuser_shared_data/index.htm
var qpf: windows.LARGE_INTEGER = undefined;
if (windows.ntdll.RtlQueryPerformanceFrequency(&qpf) != 0) {
recoverableOsBugDetected();
return .zero;
}
// 10 MHz (1 QPC tick every 100ns) is a common enough QPF value that we can optimize on it.
// https://github.com/microsoft/STL/blob/785143a0c73f030238ef618890fd4d6ae2b3a3a0/stl/inc/chrono#L694-L701
const common_qpf = 10_000_000;
if (qpf == common_qpf) return .fromNanoseconds(std.time.ns_per_s / common_qpf);
// Convert to ns using fixed point.
const scale = @as(u64, std.time.ns_per_s << 32) / @as(u32, @intCast(qpf));
const result = scale >> 32;
return .fromNanoseconds(result);
},
.cpu_process, .cpu_thread => return .zero,
},
.wasi => {
if (builtin.link_libc) return clockResolutionPosix(clock);
var ns: std.os.wasi.timestamp_t = undefined;
return switch (std.os.wasi.clock_res_get(clockToWasi(clock), &ns)) {
.SUCCESS => .fromNanoseconds(ns),
else => .zero,
};
},
else => return clockResolutionPosix(clock),
};
}
fn clockResolutionPosix(clock: Io.Clock) Io.Clock.ResolutionError!Io.Duration {
const clock_id: posix.clockid_t = clockToPosix(clock);
var timespec: posix.timespec = undefined;
return switch (posix.errno(posix.system.clock_getres(clock_id, &timespec))) {
.SUCCESS => .fromNanoseconds(nanosecondsFromPosix(&timespec)),
else => .zero,
};
}
fn nowWindows(clock: Io.Clock) Io.Timestamp {
switch (clock) {
.real => {
// RtlGetSystemTimePrecise() has a granularity of 100 nanoseconds
@ -10882,8 +10919,7 @@ fn nowWindows(clock: Io.Clock) Io.Clock.Error!Io.Timestamp {
&times,
@sizeOf(windows.KERNEL_USER_TIMES),
null,
) != .SUCCESS)
return error.Unexpected;
) != .SUCCESS) return .zero;
const sum = @as(i96, times.UserTime) + @as(i96, times.KernelTime);
return .{ .nanoseconds = sum * 100 };
@ -10899,8 +10935,7 @@ fn nowWindows(clock: Io.Clock) Io.Clock.Error!Io.Timestamp {
&times,
@sizeOf(windows.KERNEL_USER_TIMES),
null,
) != .SUCCESS)
return error.Unexpected;
) != .SUCCESS) return .zero;
const sum = @as(i96, times.UserTime) + @as(i96, times.KernelTime);
return .{ .nanoseconds = sum * 100 };
@ -10908,23 +10943,23 @@ fn nowWindows(clock: Io.Clock) Io.Clock.Error!Io.Timestamp {
}
}
fn nowWasi(clock: Io.Clock) Io.Clock.Error!Io.Timestamp {
fn nowWasi(clock: Io.Clock) Io.Timestamp {
var ns: std.os.wasi.timestamp_t = undefined;
const err = std.os.wasi.clock_time_get(clockToWasi(clock), 1, &ns);
if (err != .SUCCESS) return error.Unexpected;
if (err != .SUCCESS) return .zero;
return .fromNanoseconds(ns);
}
fn sleep(userdata: ?*anyopaque, timeout: Io.Timeout) Io.SleepError!void {
fn sleep(userdata: ?*anyopaque, timeout: Io.Timeout) Io.Cancelable!void {
const t: *Threaded = @ptrCast(@alignCast(userdata));
if (timeout == .none) return;
if (use_parking_sleep) return parking_sleep.sleep(try timeout.toTimestamp(ioBasic(t)));
if (use_parking_sleep) return parking_sleep.sleep(timeout.toTimestamp(ioBasic(t)));
if (native_os == .wasi) return sleepWasi(t, timeout);
if (@TypeOf(posix.system.clock_nanosleep) != void) return sleepPosix(timeout);
return sleepNanosleep(t, timeout);
}
fn sleepPosix(timeout: Io.Timeout) Io.SleepError!void {
fn sleepPosix(timeout: Io.Timeout) Io.Cancelable!void {
const clock_id: posix.clockid_t = clockToPosix(switch (timeout) {
.none => .awake,
.duration => |d| d.clock,
@ -10944,25 +10979,27 @@ fn sleepPosix(timeout: Io.Timeout) Io.SleepError!void {
} }, &timespec, &timespec);
// POSIX-standard libc clock_nanosleep() returns *positive* errno values directly
switch (if (builtin.link_libc) @as(posix.E, @enumFromInt(rc)) else posix.errno(rc)) {
.SUCCESS => {
syscall.finish();
return;
},
.INTR => {
try syscall.checkCancel();
continue;
},
.INVAL => return syscall.fail(error.UnsupportedClock),
else => |err| return syscall.unexpectedErrno(err),
// Handles SUCCESS as well as clock not available and unexpected
// errors. The user had a chance to check clock resolution before
// getting here, which would have reported 0, making this a legal
// amount of time to sleep.
else => {
syscall.finish();
return;
},
}
}
}
fn sleepWasi(t: *Threaded, timeout: Io.Timeout) Io.SleepError!void {
fn sleepWasi(t: *Threaded, timeout: Io.Timeout) Io.Cancelable!void {
const t_io = ioBasic(t);
const w = std.os.wasi;
const clock: w.subscription_clock_t = if (try timeout.toDurationFromNow(t_io)) |d| .{
const clock: w.subscription_clock_t = if (timeout.toDurationFromNow(t_io)) |d| .{
.id = clockToWasi(d.clock),
.timeout = std.math.lossyCast(u64, d.raw.nanoseconds),
.precision = 0,
@ -10987,13 +11024,13 @@ fn sleepWasi(t: *Threaded, timeout: Io.Timeout) Io.SleepError!void {
syscall.finish();
}
fn sleepNanosleep(t: *Threaded, timeout: Io.Timeout) Io.SleepError!void {
fn sleepNanosleep(t: *Threaded, timeout: Io.Timeout) Io.Cancelable!void {
const t_io = ioBasic(t);
const sec_type = @typeInfo(posix.timespec).@"struct".fields[0].type;
const nsec_type = @typeInfo(posix.timespec).@"struct".fields[1].type;
var timespec: posix.timespec = t: {
const d = (try timeout.toDurationFromNow(t_io)) orelse break :t .{
const d = timeout.toDurationFromNow(t_io) orelse break :t .{
.sec = std.math.maxInt(sec_type),
.nsec = std.math.maxInt(nsec_type),
};
@ -12630,7 +12667,7 @@ fn netReceivePosix(
var message_i: usize = 0;
var data_i: usize = 0;
const deadline = timeout.toTimestamp(t_io) catch |err| return .{ err, message_i };
const deadline = timeout.toTimestamp(t_io);
recv: while (true) {
if (message_buffer.len - message_i == 0) return .{ null, message_i };
@ -12678,7 +12715,7 @@ fn netReceivePosix(
const max_poll_ms = std.math.maxInt(u31);
const timeout_ms: u31 = if (deadline) |d| t: {
const duration = d.durationFromNow(t_io) catch |err| return .{ err, message_i };
const duration = d.durationFromNow(t_io);
if (duration.raw.nanoseconds <= 0) return .{ error.Timeout, message_i };
break :t @intCast(@min(max_poll_ms, duration.raw.toMilliseconds()));
} else max_poll_ms;
@ -13875,7 +13912,11 @@ fn statFromWasi(st: *const std.os.wasi.filestat_t) File.Stat {
}
fn timestampFromPosix(timespec: *const posix.timespec) Io.Timestamp {
return .{ .nanoseconds = @intCast(@as(i128, timespec.sec) * std.time.ns_per_s + timespec.nsec) };
return .{ .nanoseconds = nanosecondsFromPosix(timespec) };
}
fn nanosecondsFromPosix(timespec: *const posix.timespec) i96 {
return @intCast(@as(i128, timespec.sec) * std.time.ns_per_s + timespec.nsec);
}
fn timestampToPosix(nanoseconds: i96) posix.timespec {
@ -14013,13 +14054,13 @@ fn lookupDns(
// boot clock is chosen because time the computer is suspended should count
// against time spent waiting for external messages to arrive.
const clock: Io.Clock = .boot;
var now_ts = try clock.now(t_io);
var now_ts = clock.now(t_io);
const final_ts = now_ts.addDuration(.fromSeconds(rc.timeout_seconds));
const attempt_duration: Io.Duration = .{
.nanoseconds = (std.time.ns_per_s / rc.attempts) * @as(i96, rc.timeout_seconds),
};
send: while (now_ts.nanoseconds < final_ts.nanoseconds) : (now_ts = try clock.now(t_io)) {
send: while (now_ts.nanoseconds < final_ts.nanoseconds) : (now_ts = clock.now(t_io)) {
const max_messages = queries_buffer.len * HostName.ResolvConf.max_nameservers;
{
var message_buffer: [max_messages]Io.net.OutgoingMessage = undefined;
@ -17021,7 +17062,7 @@ const parking_futex = struct {
const deadline: ?Io.Clock.Timestamp = switch (timeout) {
.none => null,
.duration => |d| .{
.raw = (nowInner(d.clock) catch unreachable).addDuration(d.raw),
.raw = nowInner(d.clock).addDuration(d.raw),
.clock = d.clock,
},
.deadline => |d| d,
@ -17143,7 +17184,7 @@ const parking_sleep = struct {
comptime {
assert(use_parking_sleep);
}
fn sleep(deadline: ?Io.Clock.Timestamp) Io.SleepError!void {
fn sleep(deadline: ?Io.Clock.Timestamp) Io.Cancelable!void {
const opt_thread = Thread.current;
cancelable: {
const thread = opt_thread orelse break :cancelable;
@ -17216,12 +17257,9 @@ const parking_sleep = struct {
}
/// Sleep for approximately `ms` awake milliseconds in an attempt to work around Windows kernel bugs.
fn windowsRetrySleep(ms: u32) (Io.Cancelable || Io.UnexpectedError)!void {
const now_timestamp = nowWindows(.awake) catch unreachable; // '.awake' is supported on Windows
const now_timestamp = nowWindows(.awake); // '.awake' is supported on Windows
const deadline = now_timestamp.addDuration(.fromMilliseconds(ms));
parking_sleep.sleep(.{ .raw = deadline, .clock = .awake }) catch |err| switch (err) {
error.UnsupportedClock => unreachable,
else => |e| return e,
};
try parking_sleep.sleep(.{ .raw = deadline, .clock = .awake });
}
};
@ -17234,7 +17272,7 @@ fn park(opt_deadline: ?Io.Clock.Timestamp, addr_hint: ?*const anyopaque) error{T
.windows => {
var timeout_buf: windows.LARGE_INTEGER = undefined;
const raw_timeout: ?*windows.LARGE_INTEGER = if (opt_deadline) |deadline| timeout: {
const now_timestamp = nowWindows(deadline.clock) catch unreachable;
const now_timestamp = nowWindows(deadline.clock);
const nanoseconds = now_timestamp.durationTo(deadline.raw).nanoseconds;
timeout_buf = @intCast(@divTrunc(-nanoseconds, 100));
break :timeout &timeout_buf;
@ -17284,17 +17322,17 @@ fn park(opt_deadline: ?Io.Clock.Timestamp, addr_hint: ?*const anyopaque) error{T
}
}
fn deadlineToWindowsInterval(t: *Io.Threaded, deadline: Io.Clock.Timestamp) Io.Clock.Error!windows.LARGE_INTEGER {
fn deadlineToWindowsInterval(t: *Io.Threaded, deadline: Io.Clock.Timestamp) windows.LARGE_INTEGER {
// ntdll only supports two combinations:
// * real-time (`.real`) sleeps with absolute deadlines
// * monotonic (`.awake`/`.boot`) sleeps with relative durations
switch (deadline.clock) {
.cpu_process, .cpu_thread => unreachable, // cannot sleep for CPU time
.cpu_process, .cpu_thread => return 0,
.real => {
return @intCast(@max(@divTrunc(deadline.raw.nanoseconds, 100), 0));
},
.awake, .boot => {
const duration = try deadline.durationFromNow(ioBasic(t));
const duration = deadline.durationFromNow(ioBasic(t));
return @intCast(@min(@divTrunc(-duration.raw.nanoseconds, 100), -1));
},
}

View file

@ -1137,7 +1137,7 @@ pub const Socket = struct {
const maybe_err, const count = io.vtable.netReceive(io.userdata, s.handle, (&message)[0..1], buffer, .{}, .none);
if (maybe_err) |err| switch (err) {
// No timeout is passed to `netReceive`, so it must not return timeout related errors.
error.Timeout, error.UnsupportedClock => unreachable,
error.Timeout => unreachable,
else => |e| return e,
};
assert(1 == count);

View file

@ -145,7 +145,7 @@ pub const LookupError = error{
NoAddressReturned,
/// Failed to open or read "/etc/hosts" or "/etc/resolv.conf".
DetectingNetworkConfigurationFailed,
} || Io.Clock.Error || IpAddress.BindError || Io.Cancelable;
} || IpAddress.BindError || Io.Cancelable;
pub const LookupResult = union(enum) {
address: IpAddress,

View file

@ -216,14 +216,12 @@ test "Group.cancel" {
defer result.* = 1;
io.sleep(.fromSeconds(100_000), .awake) catch |err| switch (err) {
error.Canceled => |e| return e,
else => {},
};
}
fn sleepRecancel(io: Io, result: *usize) void {
io.sleep(.fromSeconds(100_000), .awake) catch |err| switch (err) {
error.Canceled => io.recancel(),
else => {},
};
result.* = 1;
}
@ -523,8 +521,6 @@ test "cancel sleep" {
fn blockUntilCanceled(io: Io) void {
while (true) io.sleep(.fromSeconds(100_000), .awake) catch |err| switch (err) {
error.Canceled => return,
error.UnsupportedClock => @panic("unsupported clock"),
error.Unexpected => @panic("unexpected"),
};
}
};
@ -552,8 +548,6 @@ test "tasks spawned in group after Group.cancel are canceled" {
/// Loops forever on a long sleep until the surrounding task is canceled,
/// then propagates `error.Canceled` to the caller. Any other sleep failure
/// is treated as fatal and panics.
fn blockUntilCanceled(io: Io) Io.Cancelable!void {
    while (true) io.sleep(.fromSeconds(100_000), .awake) catch |err| switch (err) {
        error.Canceled => |e| return e,
        error.UnsupportedClock => @panic("unsupported clock"),
        error.Unexpected => @panic("unexpected"),
    };
}
};

View file

@ -212,7 +212,7 @@ pub fn addCertsFromDir(cb: *Bundle, gpa: Allocator, io: Io, now: Io.Timestamp, i
}
}
pub const AddCertsFromFilePathError = Io.File.OpenError || AddCertsFromFileError || Io.Clock.Error;
pub const AddCertsFromFilePathError = Io.File.OpenError || AddCertsFromFileError;
pub fn addCertsFromFilePathAbsolute(
cb: *Bundle,
@ -338,7 +338,7 @@ test "scan for OS-provided certificates" {
var bundle: Bundle = .{};
defer bundle.deinit(gpa);
const now = try Io.Clock.real.now(io);
const now = Io.Clock.real.now(io);
try bundle.rescan(gpa, io, now);
}

View file

@ -1700,7 +1700,7 @@ pub fn request(
defer client.ca_bundle_mutex.unlock(io);
if (client.now == null) {
const now = try Io.Clock.real.now(io);
const now = Io.Clock.real.now(io);
client.now = now;
client.ca_bundle.rescan(client.allocator, io, now) catch
return error.CertificateBundleLoadFailure;

View file

@ -1914,21 +1914,21 @@ fn init_vdso_clock_gettime(clk: clockid_t, ts: *timespec) callconv(.c) usize {
@atomicStore(?VdsoClockGettime, &vdso_clock_gettime, ptr, .monotonic);
// Call into the VDSO if available
if (ptr) |f| return f(clk, ts);
return @as(usize, @bitCast(-@as(isize, @intFromEnum(E.NOSYS))));
return @bitCast(-@as(isize, @intFromEnum(E.NOSYS)));
}
pub fn clock_getres(clk_id: i32, tp: *timespec) usize {
pub fn clock_getres(clk_id: clockid_t, tp: *timespec) usize {
return syscall2(
if (@hasField(SYS, "clock_getres") and native_arch != .hexagon) .clock_getres else .clock_getres_time64,
@as(usize, @bitCast(@as(isize, clk_id))),
@as(usize, @intFromEnum(clk_id)),
@intFromPtr(tp),
);
}
pub fn clock_settime(clk_id: i32, tp: *const timespec) usize {
pub fn clock_settime(clk_id: clockid_t, tp: *const timespec) usize {
return syscall2(
if (@hasField(SYS, "clock_settime") and native_arch != .hexagon) .clock_settime else .clock_settime64,
@as(usize, @bitCast(@as(isize, clk_id))),
@as(usize, @intFromEnum(clk_id)),
@intFromPtr(tp),
);
}

View file

@ -620,12 +620,12 @@ test "timeout (after a relative time)" {
const margin = 5;
const ts: linux.kernel_timespec = .{ .sec = 0, .nsec = ms * 1000000 };
const started = try std.Io.Clock.awake.now(io);
const started = std.Io.Clock.awake.now(io);
const sqe = try ring.timeout(0x55555555, &ts, 0, 0);
try testing.expectEqual(linux.IORING_OP.TIMEOUT, sqe.opcode);
try testing.expectEqual(@as(u32, 1), try ring.submit());
const cqe = try ring.copy_cqe();
const stopped = try std.Io.Clock.awake.now(io);
const stopped = std.Io.Clock.awake.now(io);
try testing.expectEqual(linux.io_uring_cqe{
.user_data = 0x55555555,

View file

@ -864,58 +864,6 @@ pub fn dl_iterate_phdr(
}
}
pub const ClockGetTimeError = error{UnsupportedClock} || UnexpectedError;

/// Reads the current value of the clock identified by `clock_id`.
///
/// Returns `error.UnsupportedClock` when the system rejects the clock id
/// (EINVAL) and `error.Unexpected` for any other errno.
/// Not available on Windows, which has no POSIX clock API; this is enforced
/// at compile time.
pub fn clock_gettime(clock_id: clockid_t) ClockGetTimeError!timespec {
    if (native_os == .windows) {
        @compileError("Windows does not support POSIX; use Windows-specific API or cross-platform std.time API");
    }
    if (native_os == .wasi and !builtin.link_libc) {
        // WASI without libc reports time as a single nanosecond counter;
        // split it into the seconds/nanoseconds form of `timespec`.
        var ns: timestamp_t = undefined;
        return switch (system.clock_time_get(clock_id, 1, &ns)) {
            .SUCCESS => .{
                .sec = @intCast(ns / std.time.ns_per_s),
                .nsec = @intCast(ns % std.time.ns_per_s),
            },
            .INVAL => error.UnsupportedClock,
            else => |err| unexpectedErrno(err),
        };
    }
    var result: timespec = undefined;
    return switch (errno(system.clock_gettime(clock_id, &result))) {
        .SUCCESS => result,
        // The out-pointer is a valid stack address, so EFAULT is impossible.
        .FAULT => unreachable,
        .INVAL => error.UnsupportedClock,
        else => |err| unexpectedErrno(err),
    };
}
/// Queries the resolution of the clock identified by `clock_id`,
/// storing it in `res`.
///
/// Returns `error.UnsupportedClock` when the system rejects the clock id
/// (EINVAL) and `error.Unexpected` for any other errno.
pub fn clock_getres(clock_id: clockid_t, res: *timespec) ClockGetTimeError!void {
    if (native_os == .wasi and !builtin.link_libc) {
        // WASI without libc reports resolution as a single nanosecond count;
        // split it into the seconds/nanoseconds form of `timespec`.
        var ns: timestamp_t = undefined;
        return switch (system.clock_res_get(@bitCast(clock_id), &ns)) {
            .SUCCESS => {
                res.* = .{
                    .sec = @intCast(ns / std.time.ns_per_s),
                    .nsec = @intCast(ns % std.time.ns_per_s),
                };
            },
            .INVAL => error.UnsupportedClock,
            else => |err| unexpectedErrno(err),
        };
    }
    return switch (errno(system.clock_getres(clock_id, res))) {
        .SUCCESS => {},
        // `res` is a valid pointer, so EFAULT is impossible.
        .FAULT => unreachable,
        .INVAL => error.UnsupportedClock,
        else => |err| unexpectedErrno(err),
    };
}
pub const SchedGetAffinityError = error{PermissionDenied} || UnexpectedError;
pub fn sched_getaffinity(pid: pid_t) SchedGetAffinityError!cpu_set_t {

View file

@ -1,11 +1,3 @@
const std = @import("std.zig");
const builtin = @import("builtin");
const assert = std.debug.assert;
const testing = std.testing;
const math = std.math;
const windows = std.os.windows;
const posix = std.posix;
pub const epoch = @import("time/epoch.zig");
// Divisions of a nanosecond.
@ -38,180 +30,6 @@ pub const s_per_hour = s_per_min * 60;
pub const s_per_day = s_per_hour * 24;
pub const s_per_week = s_per_day * 7;
/// An Instant represents a timestamp with respect to the currently
/// executing program that ticks during suspend and can be used to
/// record elapsed time unlike `nanoTimestamp`.
///
/// It tries to sample the system's fastest and most precise timer available.
/// It also tries to be monotonic, but this is not a guarantee due to OS/hardware bugs.
/// If you need monotonic readings for elapsed time, consider `Timer` instead.
pub const Instant = struct {
    // Representation is platform-dependent: a raw u64 tick/nanosecond count
    // on Windows/UEFI/WASI, a `timespec` on every POSIX target.
    timestamp: if (is_posix) posix.timespec else u64,

    // true if we should use clock_gettime()
    const is_posix = switch (builtin.os.tag) {
        .windows, .uefi, .wasi => false,
        else => true,
    };

    /// Queries the system for the current moment of time as an Instant.
    /// This is not guaranteed to be monotonic or steadily increasing, but for
    /// most implementations it is.
    /// Returns `error.Unsupported` when a suitable clock is not detected.
    pub fn now() error{Unsupported}!Instant {
        // Non-POSIX targets return directly from their switch arm; POSIX
        // targets yield the clock id to query via clock_gettime below.
        const clock_id = switch (builtin.os.tag) {
            .windows => {
                // QPC on windows doesn't fail on >= XP/2000 and includes time suspended.
                return .{ .timestamp = windows.QueryPerformanceCounter() };
            },
            .wasi => {
                var ns: std.os.wasi.timestamp_t = undefined;
                const rc = std.os.wasi.clock_time_get(.MONOTONIC, 1, &ns);
                if (rc != .SUCCESS) return error.Unsupported;
                return .{ .timestamp = ns };
            },
            .uefi => {
                const value, _ = std.os.uefi.system_table.runtime_services.getTime() catch return error.Unsupported;
                return .{ .timestamp = value.toEpoch() };
            },
            // On darwin, use UPTIME_RAW instead of MONOTONIC as it ticks while
            // suspended.
            .driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos => posix.CLOCK.UPTIME_RAW,
            // On freebsd derivatives, use MONOTONIC_FAST as currently there's
            // no precision tradeoff.
            .freebsd, .dragonfly => posix.CLOCK.MONOTONIC_FAST,
            // On linux, use BOOTTIME instead of MONOTONIC as it ticks while
            // suspended.
            .linux => posix.CLOCK.BOOTTIME,
            // On other posix systems, MONOTONIC is generally the fastest and
            // ticks while suspended.
            else => posix.CLOCK.MONOTONIC,
        };
        const ts = posix.clock_gettime(clock_id) catch return error.Unsupported;
        return .{ .timestamp = ts };
    }

    /// Quickly compares two instances between each other.
    pub fn order(self: Instant, other: Instant) std.math.Order {
        // windows and wasi timestamps are in u64 which is easily comparable
        if (!is_posix) {
            return std.math.order(self.timestamp, other.timestamp);
        }
        // Compare seconds first; only consult nanoseconds on a tie.
        var ord = std.math.order(self.timestamp.sec, other.timestamp.sec);
        if (ord == .eq) {
            ord = std.math.order(self.timestamp.nsec, other.timestamp.nsec);
        }
        return ord;
    }

    /// Returns elapsed time in nanoseconds since the `earlier` Instant.
    /// This assumes that the `earlier` Instant represents a moment in time before or equal to `self`.
    /// This also assumes that the time that has passed between both Instants fits inside a u64 (~585 yrs).
    pub fn since(self: Instant, earlier: Instant) u64 {
        switch (builtin.os.tag) {
            .windows => {
                // We don't need to cache QPF as it's internally just a memory read to KUSER_SHARED_DATA
                // (a read-only page of info updated and mapped by the kernel to all processes):
                // https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/ntddk/ns-ntddk-kuser_shared_data
                // https://www.geoffchappell.com/studies/windows/km/ntoskrnl/inc/api/ntexapi_x/kuser_shared_data/index.htm
                const qpc = self.timestamp - earlier.timestamp;
                const qpf = windows.QueryPerformanceFrequency();

                // 10 MHz (1 qpc tick every 100ns) is a common enough QPF value that we can optimize on it.
                // https://github.com/microsoft/STL/blob/785143a0c73f030238ef618890fd4d6ae2b3a3a0/stl/inc/chrono#L694-L701
                const common_qpf = 10_000_000;
                if (qpf == common_qpf) {
                    return qpc * (ns_per_s / common_qpf);
                }

                // Convert to ns using fixed point: scale is (ns_per_s / qpf)
                // in 32.32 fixed-point form, applied with a widened multiply.
                const scale = @as(u64, std.time.ns_per_s << 32) / @as(u32, @intCast(qpf));
                const result = (@as(u96, qpc) * scale) >> 32;
                return @as(u64, @truncate(result));
            },
            .uefi, .wasi => {
                // UEFI and WASI timestamps are directly in nanoseconds
                return self.timestamp - earlier.timestamp;
            },
            else => {
                // Convert timespec diff to ns
                const seconds = @as(u64, @intCast(self.timestamp.sec - earlier.timestamp.sec));
                const elapsed = (seconds * ns_per_s) + @as(u32, @intCast(self.timestamp.nsec));
                return elapsed - @as(u32, @intCast(earlier.timestamp.nsec));
            },
        }
    }
};
/// A monotonic, high performance timer.
///
/// Timer.start() is used to initialize the timer
/// and gives the caller an opportunity to check for the existence of a supported clock.
/// Once a supported clock is discovered,
/// it is assumed that it will be available for the duration of the Timer's use.
///
/// Monotonicity is ensured by saturating on the most previous sample.
/// This means that while timings reported are monotonic,
/// they're not guaranteed to tick at a steady rate as this is up to the underlying system.
pub const Timer = struct {
    started: Instant,
    previous: Instant,

    pub const Error = error{TimerUnsupported};

    /// Initialize the timer by querying for a supported clock.
    /// Returns `error.TimerUnsupported` when such a clock is unavailable.
    /// This should only fail in hostile environments such as linux seccomp misuse.
    pub fn start() Error!Timer {
        const first = Instant.now() catch return error.TimerUnsupported;
        return .{ .started = first, .previous = first };
    }

    /// Reads the timer value since start or the last reset in nanoseconds.
    pub fn read(self: *Timer) u64 {
        return self.sample().since(self.started);
    }

    /// Resets the timer value to 0/now.
    pub fn reset(self: *Timer) void {
        self.started = self.sample();
    }

    /// Returns the current value of the timer in nanoseconds, then resets it.
    pub fn lap(self: *Timer) u64 {
        const moment = self.sample();
        const elapsed = moment.since(self.started);
        self.started = moment;
        return elapsed;
    }

    /// Returns an Instant sampled at the callsite that is
    /// guaranteed to be monotonic with respect to the timer's starting point.
    fn sample(self: *Timer) Instant {
        // `now` succeeded in `start`, so it is assumed infallible here.
        const moment = Instant.now() catch unreachable;
        // Saturate on the previous sample to keep readings monotonic.
        if (moment.order(self.previous) == .gt) self.previous = moment;
        return self.previous;
    }
};
// Sanity-check Timer: after a short sleep the reading is positive, and a
// later `lap` reports at least as much elapsed time as the earlier read.
test Timer {
    const io = std.testing.io;
    var timer = try Timer.start();
    // Sleep ~10ms on the "awake" clock so some measurable time elapses.
    try std.Io.Clock.Duration.sleep(.{ .clock = .awake, .raw = .fromMilliseconds(10) }, io);
    const time_0 = timer.read();
    try testing.expect(time_0 > 0);
    const time_1 = timer.lap();
    try testing.expect(time_1 >= time_0);
}
// Reference the `epoch` submodule so its tests are included in this test run.
test {
    _ = epoch;
}

View file

@ -331,48 +331,42 @@ const QueuedJobs = struct {
pub const Timer = union(enum) {
unused,
active: struct {
start: std.time.Instant,
start: Io.Timestamp,
saved_ns: u64,
},
paused: u64,
stopped,
pub fn pause(t: *Timer) void {
pub fn pause(t: *Timer, io: Io) void {
switch (t.*) {
.unused => return,
.active => |a| {
const current = std.time.Instant.now() catch unreachable;
const new_ns = switch (current.order(a.start)) {
.lt, .eq => 0,
.gt => current.since(a.start),
};
const current: Io.Timestamp = .now(io, .awake);
const new_ns: u64 = @intCast(current.nanoseconds -| a.start.nanoseconds);
t.* = .{ .paused = a.saved_ns + new_ns };
},
.paused => unreachable,
.stopped => unreachable,
}
}
pub fn @"resume"(t: *Timer) void {
pub fn @"resume"(t: *Timer, io: Io) void {
switch (t.*) {
.unused => return,
.active => unreachable,
.paused => |saved_ns| t.* = .{ .active = .{
.start = std.time.Instant.now() catch unreachable,
.start = .now(io, .awake),
.saved_ns = saved_ns,
} },
.stopped => unreachable,
}
}
pub fn finish(t: *Timer) ?u64 {
pub fn finish(t: *Timer, io: Io) ?u64 {
defer t.* = .stopped;
switch (t.*) {
.unused => return null,
.active => |a| {
const current = std.time.Instant.now() catch unreachable;
const new_ns = switch (current.order(a.start)) {
.lt, .eq => 0,
.gt => current.since(a.start),
};
const current: Io.Timestamp = .now(io, .awake);
const new_ns: u64 = @intCast(current.nanoseconds -| a.start.nanoseconds);
return a.saved_ns + new_ns;
},
.paused => |ns| return ns,
@ -387,7 +381,8 @@ pub const Timer = union(enum) {
/// is set.
pub fn startTimer(comp: *Compilation) Timer {
if (comp.time_report == null) return .unused;
const now = std.time.Instant.now() catch @panic("std.time.Timer unsupported; cannot emit time report");
const io = comp.io;
const now: Io.Timestamp = .now(io, .awake);
return .{ .active = .{
.start = now,
.saved_ns = 0,
@ -3408,7 +3403,7 @@ fn flush(comp: *Compilation, arena: Allocator, tid: Zcu.PerThread.Id) (Io.Cancel
defer sub_prog_node.end();
var timer = comp.startTimer();
defer if (timer.finish()) |ns| {
defer if (timer.finish(io)) |ns| {
comp.mutex.lockUncancelable(io);
defer comp.mutex.unlock(io);
comp.time_report.?.stats.real_ns_llvm_emit = ns;
@ -3453,7 +3448,7 @@ fn flush(comp: *Compilation, arena: Allocator, tid: Zcu.PerThread.Id) (Io.Cancel
}
if (comp.bin_file) |lf| {
var timer = comp.startTimer();
defer if (timer.finish()) |ns| {
defer if (timer.finish(io)) |ns| {
comp.mutex.lockUncancelable(io);
defer comp.mutex.unlock(io);
comp.time_report.?.stats.real_ns_link_flush = ns;
@ -4686,7 +4681,7 @@ fn performAllTheWork(
var decl_work_timer: ?Timer = null;
defer commit_timer: {
const t = &(decl_work_timer orelse break :commit_timer);
const ns = t.finish() orelse break :commit_timer;
const ns = t.finish(io) orelse break :commit_timer;
comp.mutex.lockUncancelable(io);
defer comp.mutex.unlock(io);
comp.time_report.?.stats.real_ns_decls = ns;
@ -4719,7 +4714,7 @@ fn performAllTheWork(
defer zir_prog_node.end();
var timer = comp.startTimer();
defer if (timer.finish()) |ns| {
defer if (timer.finish(io)) |ns| {
comp.mutex.lockUncancelable(io);
defer comp.mutex.unlock(io);
comp.time_report.?.stats.real_ns_files = ns;

View file

@ -4754,6 +4754,7 @@ const TrackedUnitSema = struct {
analysis_timer_decl: ?InternPool.TrackedInst.Index,
pub fn end(tus: TrackedUnitSema, zcu: *Zcu) void {
const comp = zcu.comp;
const io = comp.io;
if (tus.old_name) |old_name| {
zcu.sema_prog_node.completeOne(); // we're just renaming, but it's effectively completion
zcu.cur_sema_prog_node.setName(&old_name);
@ -4762,9 +4763,8 @@ const TrackedUnitSema = struct {
zcu.cur_sema_prog_node = .none;
}
report_time: {
const sema_ns = zcu.cur_analysis_timer.?.finish() orelse break :report_time;
const sema_ns = zcu.cur_analysis_timer.?.finish(io) orelse break :report_time;
const zir_decl = tus.analysis_timer_decl orelse break :report_time;
const io = comp.io;
comp.mutex.lockUncancelable(io);
defer comp.mutex.unlock(io);
comp.time_report.?.stats.cpu_ns_sema += sema_ns;
@ -4779,11 +4779,13 @@ const TrackedUnitSema = struct {
gop.value_ptr.count += 1;
}
zcu.cur_analysis_timer = tus.old_analysis_timer;
if (zcu.cur_analysis_timer) |*t| t.@"resume"();
if (zcu.cur_analysis_timer) |*t| t.@"resume"(io);
}
};
pub fn trackUnitSema(zcu: *Zcu, name: []const u8, zir_inst: ?InternPool.TrackedInst.Index) TrackedUnitSema {
if (zcu.cur_analysis_timer) |*t| t.pause();
const comp = zcu.comp;
const io = comp.io;
if (zcu.cur_analysis_timer) |*t| t.pause(io);
const old_analysis_timer = zcu.cur_analysis_timer;
zcu.cur_analysis_timer = zcu.comp.startTimer();
const old_name: ?[std.Progress.Node.max_name_len]u8 = old_name: {

View file

@ -263,7 +263,7 @@ pub fn updateFile(
var timer = comp.startTimer();
// Any potential AST errors are converted to ZIR errors when we run AstGen/ZonGen.
file.tree = try Ast.parse(gpa, source, file.getMode());
if (timer.finish()) |ns_parse| {
if (timer.finish(io)) |ns_parse| {
comp.mutex.lockUncancelable(io);
defer comp.mutex.unlock(io);
comp.time_report.?.stats.cpu_ns_parse += ns_parse;
@ -295,7 +295,7 @@ pub fn updateFile(
else => |e| return e,
};
if (timer.finish()) |ns_astgen| {
if (timer.finish(io)) |ns_astgen| {
comp.mutex.lockUncancelable(io);
defer comp.mutex.unlock(io);
comp.time_report.?.stats.cpu_ns_astgen += ns_astgen;
@ -4485,7 +4485,7 @@ pub fn runCodegen(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) Ru
const codegen_result = runCodegenInner(pt, func_index, air);
if (timer.finish()) |ns_codegen| report_time: {
if (timer.finish(io)) |ns_codegen| report_time: {
const ip = &zcu.intern_pool;
const nav = ip.indexToKey(func_index).func.owner_nav;
const zir_decl = ip.getNav(nav).srcInst(ip);

View file

@ -1388,7 +1388,7 @@ pub fn doPrelinkTask(comp: *Compilation, task: PrelinkTask) void {
};
var timer = comp.startTimer();
defer if (timer.finish()) |ns| {
defer if (timer.finish(io)) |ns| {
comp.mutex.lockUncancelable(io);
defer comp.mutex.unlock(io);
comp.time_report.?.stats.cpu_ns_link += ns;
@ -1535,12 +1535,12 @@ pub fn doZcuTask(comp: *Compilation, tid: usize, task: ZcuTask) void {
break :nav nav_index;
},
.link_func => |codegen_task| nav: {
timer.pause();
timer.pause(io);
const func, var mir = codegen_task.wait(&zcu.codegen_task_pool, io) catch |err| switch (err) {
error.Canceled, error.AlreadyReported => return,
};
defer mir.deinit(zcu);
timer.@"resume"();
timer.@"resume"(io);
const nav = zcu.funcInfo(func).owner_nav;
const fqn_slice = ip.getNav(nav).fqn.toSlice(ip);
@ -1592,7 +1592,7 @@ pub fn doZcuTask(comp: *Compilation, tid: usize, task: ZcuTask) void {
},
};
if (timer.finish()) |ns_link| report_time: {
if (timer.finish(io)) |ns_link| report_time: {
comp.mutex.lockUncancelable(io);
defer comp.mutex.unlock(io);
const tr = &zcu.comp.time_report.?;

View file

// Stub implementation of WASI clock_res_get: the host does not expose clock
// resolution queries, so always report NOTCAPABLE. The guest out-pointer
// `res_timestamp` is therefore never written through.
// Fix: drop the dead locals `m` and `res_timestamp_ptr`, which were computed
// but never used (dead code / -Wunused-variable).
uint32_t wasi_snapshot_preview1_clock_res_get(uint32_t id, uint32_t res_timestamp) {
#if LOG_TRACE
    fprintf(stderr, "wasi_snapshot_preview1_clock_res_get(%u, %llu)\n", id, (unsigned long long)res_timestamp);
#else
    // Parameters are only consumed by the trace; silence unused warnings.
    (void)id;
    (void)res_timestamp;
#endif
    return wasi_errno_notcapable;
}
uint32_t wasi_snapshot_preview1_path_remove_directory(uint32_t fd, uint32_t path, uint32_t path_len) {
uint8_t *const m = *wasm_memory;
const char *path_ptr = (const char *)&m[path];