test-standalone: fix most compilation errors

This commit is contained in:
Andrew Kelley 2025-12-22 23:24:18 -08:00
parent 0870f17501
commit 669dae140c
28 changed files with 181 additions and 169 deletions

View file

@ -1,18 +1,22 @@
const builtin = @import("builtin");
const native_endian = builtin.cpu.arch.endian();
const std = @import("std");
const Io = std.Io;
const fatal = std.process.fatal;
const mem = std.mem;
const math = std.math;
const Allocator = mem.Allocator;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const panic = std.debug.panic;
const abi = std.Build.abi.fuzz;
const native_endian = builtin.cpu.arch.endian();
pub const std_options = std.Options{
.logFn = logOverride,
};
const io = std.Io.Threaded.global_single_threaded.ioBasic();
fn logOverride(
comptime level: std.log.Level,
comptime scope: @EnumLiteral(),
@ -21,12 +25,12 @@ fn logOverride(
) void {
const f = log_f orelse
panic("attempt to use log before initialization, message:\n" ++ format, args);
f.lock(.exclusive) catch |e| panic("failed to lock logging file: {t}", .{e});
defer f.unlock();
f.lock(io, .exclusive) catch |e| panic("failed to lock logging file: {t}", .{e});
defer f.unlock(io);
var buf: [256]u8 = undefined;
var fw = f.writer(&buf);
const end = f.getEndPos() catch |e| panic("failed to get fuzzer log file end: {t}", .{e});
var fw = f.writer(io, &buf);
const end = f.length(io) catch |e| panic("failed to get fuzzer log file end: {t}", .{e});
fw.seekTo(end) catch |e| panic("failed to seek to fuzzer log file end: {t}", .{e});
const prefix1 = comptime level.asText();
@ -45,7 +49,7 @@ const gpa = switch (builtin.mode) {
};
/// Part of `exec`, however separate to allow it to be set before `exec` is.
var log_f: ?std.fs.File = null;
var log_f: ?Io.File = null;
var exec: Executable = .preinit;
var inst: Instrumentation = .preinit;
var fuzzer: Fuzzer = undefined;
@ -59,7 +63,7 @@ const Executable = struct {
/// Tracks the hit count for each pc as updated by the process's instrumentation.
pc_counters: []u8,
cache_f: std.fs.Dir,
cache_f: Io.Dir,
/// Shared copy of all pcs that have been hit stored in a memory-mapped file that can be viewed
/// while the fuzzer is running.
shared_seen_pcs: MemoryMappedList,
@ -76,16 +80,16 @@ const Executable = struct {
.pc_digest = undefined,
};
fn getCoverageFile(cache_dir: std.fs.Dir, pcs: []const usize, pc_digest: u64) MemoryMappedList {
fn getCoverageFile(cache_dir: Io.Dir, pcs: []const usize, pc_digest: u64) MemoryMappedList {
const pc_bitset_usizes = bitsetUsizes(pcs.len);
const coverage_file_name = std.fmt.hex(pc_digest);
comptime assert(abi.SeenPcsHeader.trailing[0] == .pc_bits_usize);
comptime assert(abi.SeenPcsHeader.trailing[1] == .pc_addr);
var v = cache_dir.makeOpenPath("v", .{}) catch |e|
var v = cache_dir.createDirPathOpen(io, "v", .{}) catch |e|
panic("failed to create directory 'v': {t}", .{e});
defer v.close();
const coverage_file, const populate = if (v.createFile(&coverage_file_name, .{
defer v.close(io);
const coverage_file, const populate = if (v.createFile(io, &coverage_file_name, .{
.read = true,
// If we create the file, we want to block other processes while we populate it
.lock = .exclusive,
@ -93,7 +97,7 @@ const Executable = struct {
})) |f|
.{ f, true }
else |e| switch (e) {
error.PathAlreadyExists => .{ v.openFile(&coverage_file_name, .{
error.PathAlreadyExists => .{ v.openFile(io, &coverage_file_name, .{
.mode = .read_write,
.lock = .shared,
}) catch |e2| panic(
@ -108,7 +112,7 @@ const Executable = struct {
pcs.len * @sizeOf(usize);
if (populate) {
defer coverage_file.lock(.shared) catch |e| panic(
defer coverage_file.lock(io, .shared) catch |e| panic(
"failed to demote lock for coverage file '{s}': {t}",
.{ &coverage_file_name, e },
);
@ -130,10 +134,8 @@ const Executable = struct {
}
return map;
} else {
const size = coverage_file.getEndPos() catch |e| panic(
"failed to stat coverage file '{s}': {t}",
.{ &coverage_file_name, e },
);
const size = coverage_file.length(io) catch |e|
panic("failed to stat coverage file '{s}': {t}", .{ &coverage_file_name, e });
if (size != coverage_file_len) panic(
"incompatible existing coverage file '{s}' (differing lengths: {} != {})",
.{ &coverage_file_name, size, coverage_file_len },
@ -165,13 +167,11 @@ const Executable = struct {
pub fn init(cache_dir_path: []const u8) Executable {
var self: Executable = undefined;
const cache_dir = std.fs.cwd().makeOpenPath(cache_dir_path, .{}) catch |e| panic(
"failed to open directory '{s}': {t}",
.{ cache_dir_path, e },
);
log_f = cache_dir.createFile("tmp/libfuzzer.log", .{ .truncate = false }) catch |e|
const cache_dir = Io.Dir.cwd().createDirPathOpen(io, cache_dir_path, .{}) catch |e|
panic("failed to open directory '{s}': {t}", .{ cache_dir_path, e });
log_f = cache_dir.createFile(io, "tmp/libfuzzer.log", .{ .truncate = false }) catch |e|
panic("failed to create file 'tmp/libfuzzer.log': {t}", .{e});
self.cache_f = cache_dir.makeOpenPath("f", .{}) catch |e|
self.cache_f = cache_dir.createDirPathOpen(io, "f", .{}) catch |e|
panic("failed to open directory 'f': {t}", .{e});
// Linkers are expected to automatically add symbols prefixed with these for the start and
@ -391,7 +391,7 @@ const Fuzzer = struct {
mutations: std.ArrayList(Mutation) = .empty,
/// Filesystem directory containing found inputs for future runs
corpus_dir: std.fs.Dir,
corpus_dir: Io.Dir,
corpus_dir_idx: usize = 0,
pub fn init(test_one: abi.TestOne, unit_test_name: []const u8) Fuzzer {
@ -405,10 +405,10 @@ const Fuzzer = struct {
};
const arena = self.arena_ctx.allocator();
self.corpus_dir = exec.cache_f.makeOpenPath(unit_test_name, .{}) catch |e|
self.corpus_dir = exec.cache_f.createDirPathOpen(io, unit_test_name, .{}) catch |e|
panic("failed to open directory '{s}': {t}", .{ unit_test_name, e });
self.input = in: {
const f = self.corpus_dir.createFile("in", .{
const f = self.corpus_dir.createFile(io, "in", .{
.read = true,
.truncate = false,
// In case any other fuzz tests are running under the same test name,
@ -419,7 +419,7 @@ const Fuzzer = struct {
error.WouldBlock => @panic("input file 'in' is in use by another fuzzing process"),
else => panic("failed to create input file 'in': {t}", .{e}),
};
const size = f.getEndPos() catch |e| panic("failed to stat input file 'in': {t}", .{e});
const size = f.length(io) catch |e| panic("failed to stat input file 'in': {t}", .{e});
const map = (if (size < std.heap.page_size_max)
MemoryMappedList.create(f, 8, std.heap.page_size_max)
else
@ -445,6 +445,7 @@ const Fuzzer = struct {
while (true) {
var name_buf: [@sizeOf(usize) * 2]u8 = undefined;
const bytes = self.corpus_dir.readFileAlloc(
io,
std.fmt.bufPrint(&name_buf, "{x}", .{self.corpus_dir_idx}) catch unreachable,
arena,
.unlimited,
@ -466,7 +467,7 @@ const Fuzzer = struct {
self.input.deinit();
self.corpus.deinit(gpa);
self.mutations.deinit(gpa);
self.corpus_dir.close();
self.corpus_dir.close(io);
self.arena_ctx.deinit();
self.* = undefined;
}
@ -573,17 +574,10 @@ const Fuzzer = struct {
// Write new corpus to cache
var name_buf: [@sizeOf(usize) * 2]u8 = undefined;
self.corpus_dir.writeFile(.{
.sub_path = std.fmt.bufPrint(
&name_buf,
"{x}",
.{self.corpus_dir_idx},
) catch unreachable,
self.corpus_dir.writeFile(io, .{
.sub_path = std.fmt.bufPrint(&name_buf, "{x}", .{self.corpus_dir_idx}) catch unreachable,
.data = bytes,
}) catch |e| panic(
"failed to write corpus file '{x}': {t}",
.{ self.corpus_dir_idx, e },
);
}) catch |e| panic("failed to write corpus file '{x}': {t}", .{ self.corpus_dir_idx, e });
self.corpus_dir_idx += 1;
}
}
@ -1320,9 +1314,9 @@ pub const MemoryMappedList = struct {
/// How many bytes this list can hold without allocating additional memory.
capacity: usize,
/// The file is kept open so that it can be resized.
file: std.fs.File,
file: Io.File,
pub fn init(file: std.fs.File, length: usize, capacity: usize) !MemoryMappedList {
pub fn init(file: Io.File, length: usize, capacity: usize) !MemoryMappedList {
const ptr = try std.posix.mmap(
null,
capacity,
@ -1338,13 +1332,13 @@ pub const MemoryMappedList = struct {
};
}
pub fn create(file: std.fs.File, length: usize, capacity: usize) !MemoryMappedList {
try file.setEndPos(capacity);
pub fn create(file: Io.File, length: usize, capacity: usize) !MemoryMappedList {
try file.setLength(io, capacity);
return init(file, length, capacity);
}
pub fn deinit(l: *MemoryMappedList) void {
l.file.close();
l.file.close(io);
std.posix.munmap(@volatileCast(l.items.ptr[0..l.capacity]));
l.* = undefined;
}
@ -1369,7 +1363,7 @@ pub const MemoryMappedList = struct {
if (l.capacity >= new_capacity) return;
std.posix.munmap(@volatileCast(l.items.ptr[0..l.capacity]));
try l.file.setEndPos(new_capacity);
try l.file.setLength(io, new_capacity);
l.* = try init(l.file, l.items.len, new_capacity);
}

View file

@ -26,14 +26,14 @@ fn run(allocator: std.mem.Allocator, io: Io) !void {
const hello_arg = "hello arg";
const a1 = args.next() orelse unreachable;
if (!std.mem.eql(u8, a1, hello_arg)) {
testError("first arg: '{s}'; want '{s}'", .{ a1, hello_arg });
testError(io, "first arg: '{s}'; want '{s}'", .{ a1, hello_arg });
}
if (args.next()) |a2| {
testError("expected only one arg; got more: {s}", .{a2});
testError(io, "expected only one arg; got more: {s}", .{a2});
}
// test stdout pipe; parent verifies
try std.Io.File.stdout().writeAll("hello from stdout");
try std.Io.File.stdout().writeStreamingAll(io, "hello from stdout");
// test stdin pipe from parent
const hello_stdin = "hello from stdin";
@ -42,12 +42,12 @@ fn run(allocator: std.mem.Allocator, io: Io) !void {
var reader = stdin.reader(io, &.{});
const n = try reader.interface.readSliceShort(&buf);
if (!std.mem.eql(u8, buf[0..n], hello_stdin)) {
testError("stdin: '{s}'; want '{s}'", .{ buf[0..n], hello_stdin });
testError(io, "stdin: '{s}'; want '{s}'", .{ buf[0..n], hello_stdin });
}
}
fn testError(comptime fmt: []const u8, args: anytype) void {
var stderr_writer = std.Io.File.stderr().writer(&.{});
fn testError(io: Io, comptime fmt: []const u8, args: anytype) void {
var stderr_writer = std.Io.File.stderr().writer(io, &.{});
const stderr = &stderr_writer.interface;
stderr.print("CHILD TEST ERROR: ", .{}) catch {};
stderr.print(fmt, args) catch {};

View file

@ -31,7 +31,7 @@ pub fn main() !void {
child.stderr_behavior = .Inherit;
try child.spawn(io);
const child_stdin = child.stdin.?;
try child_stdin.writeAll("hello from stdin"); // verified in child
try child_stdin.writeStreamingAll(io, "hello from stdin"); // verified in child
child_stdin.close(io);
child.stdin = null;
@ -43,7 +43,7 @@ pub fn main() !void {
testError(io, "child stdout: '{s}'; want '{s}'", .{ buf[0..n], hello_stdout });
}
switch (try child.wait()) {
switch (try child.wait(io)) {
.Exited => |code| {
const child_ok_code = 42; // set by child if no test errors
if (code != child_ok_code) {

View file

@ -39,5 +39,5 @@ fn run(allocator: std.mem.Allocator) !void {
var dir = try std.Io.Dir.cwd().openDir(io, dir_path, .{});
defer dir.close(io);
_ = try dir.statFile(io, relpath);
_ = try dir.statFile(io, relpath, .{});
}

View file

@ -34,7 +34,7 @@ fn run(allocator: std.mem.Allocator) !void {
var dir = try std.Io.Dir.cwd().openDir(io, dir_path, .{});
defer dir.close(io);
_ = dir.statFile(io, basename) catch {
_ = dir.statFile(io, basename, .{}) catch {
var file = try dir.createFile(io, basename, .{});
file.close(io);
};

View file

@ -14,7 +14,7 @@ pub fn main() !void {
const io = std.Io.Threaded.global_single_threaded.ioBasic();
const cwd = std.Io.Dir.cwd();
const cwd_realpath = try cwd.realPathAlloc(io, arena, ".");
const cwd_realpath = try cwd.realPathFileAlloc(io, ".", arena);
while (arg_it.next()) |file_path| {
if (file_path.len > 0 and file_path[0] == '!') {
@ -22,7 +22,7 @@ pub fn main() !void {
"exclusive file check '{s}{c}{s}' failed",
.{ cwd_realpath, std.fs.path.sep, file_path[1..] },
);
if (cwd.statFile(io, file_path[1..])) |_| {
if (cwd.statFile(io, file_path[1..], .{})) |_| {
return error.FileFound;
} else |err| switch (err) {
error.FileNotFound => {},
@ -33,7 +33,7 @@ pub fn main() !void {
"inclusive file check '{s}{c}{s}' failed",
.{ cwd_realpath, std.fs.path.sep, file_path },
);
_ = try cwd.statFile(io, file_path);
_ = try cwd.statFile(io, file_path, .{});
}
}
}

View file

@ -7,5 +7,5 @@ pub fn main() !void {
const filename = args.next().?;
const file = try std.Io.Dir.cwd().createFile(io, filename, .{});
defer file.close(io);
try file.writeAll(io, filename);
try file.writeStreamingAll(io, filename);
}

View file

@ -10,8 +10,8 @@ pub fn main() !void {
else
dir_name, .{});
const file_name = args.next().?;
const file = try dir.createFile(file_name, .{});
var file_writer = file.writer(&.{});
const file = try dir.createFile(io, file_name, .{});
var file_writer = file.writer(io, &.{});
try file_writer.interface.print(
\\{s}
\\{s}

View file

@ -12,10 +12,11 @@ pub fn main() !void {
const self_path = try std.process.executablePathAlloc(io, gpa);
defer gpa.free(self_path);
var self_exe = try std.fs.openSelfExe(.{});
var self_exe = try std.process.openExecutable(io, .{});
defer self_exe.close(io);
var buf: [std.fs.max_path_bytes]u8 = undefined;
const self_exe_path = try std.os.getFdPath(self_exe.handle, &buf);
const self_exe_path = buf[0..try self_exe.realPath(io, &buf)];
try std.testing.expectEqualStrings(self_exe_path, self_path);
}

View file

@ -2,6 +2,7 @@
//! including file:line:column information for each PC.
const std = @import("std");
const Io = std.Io;
const fatal = std.process.fatal;
const Path = std.Build.Cache.Path;
const assert = std.debug.assert;
@ -16,7 +17,7 @@ pub fn main() !void {
defer arena_instance.deinit();
const arena = arena_instance.allocator();
var threaded: std.Io.Threaded = .init(gpa, .{});
var threaded: Io.Threaded = .init(gpa, .{});
defer threaded.deinit();
const io = threaded.io();
@ -57,6 +58,7 @@ pub fn main() !void {
defer debug_info.deinit(gpa);
const cov_bytes = cov_path.root_dir.handle.readFileAllocOptions(
io,
cov_path.sub_path,
arena,
.limited(1 << 30),
@ -67,7 +69,7 @@ pub fn main() !void {
};
var stdout_buffer: [4000]u8 = undefined;
var stdout_writer = std.fs.File.stdout().writerStreaming(&stdout_buffer);
var stdout_writer = Io.File.stdout().writerStreaming(io, &stdout_buffer);
const stdout = &stdout_writer.interface;
const header: *SeenPcsHeader = @ptrCast(cov_bytes);
@ -83,7 +85,7 @@ pub fn main() !void {
std.mem.sortUnstable(usize, sorted_pcs, {}, std.sort.asc(usize));
const source_locations = try arena.alloc(std.debug.Coverage.SourceLocation, sorted_pcs.len);
try debug_info.resolveAddresses(gpa, sorted_pcs, source_locations);
try debug_info.resolveAddresses(gpa, io, sorted_pcs, source_locations);
const seen_pcs = header.seenBits();

View file

@ -92,7 +92,7 @@ pub fn main() anyerror!void {
const sysroot_path = sysroot orelse blk: {
const target = try std.zig.system.resolveTargetQuery(io, .{});
break :blk std.zig.system.darwin.getSdk(allocator, &target) orelse
break :blk std.zig.system.darwin.getSdk(allocator, io, &target) orelse
fatal("no SDK found; you can provide one explicitly with '--sysroot' flag", .{});
};
@ -166,10 +166,7 @@ fn fetchTarget(
});
try cc_argv.appendSlice(args);
const res = try std.process.Child.run(.{
.allocator = arena,
.argv = cc_argv.items,
});
const res = try std.process.Child.run(arena, io, .{ .argv = cc_argv.items });
if (res.stderr.len != 0) {
std.log.err("{s}", .{res.stderr});
@ -179,7 +176,7 @@ fn fetchTarget(
const headers_list_file = try tmp.dir.openFile(io, headers_list_filename, .{});
defer headers_list_file.close(io);
var headers_dir = Dir.cwd().openDir(headers_source_prefix, .{}) catch |err| switch (err) {
var headers_dir = Dir.cwd().openDir(io, headers_source_prefix, .{}) catch |err| switch (err) {
error.FileNotFound,
error.NotDir,
=> fatal("path '{s}' not found or not a directory. Did you accidentally delete it?", .{
@ -215,15 +212,15 @@ fn fetchTarget(
const line_stripped = mem.trim(u8, line, " \\");
const abs_dirname = Dir.path.dirname(line_stripped).?;
var orig_subdir = try Dir.cwd().openDir(abs_dirname, .{});
var orig_subdir = try Dir.cwd().openDir(io, abs_dirname, .{});
defer orig_subdir.close(io);
try orig_subdir.copyFile(basename, maybe_dir.value_ptr.*, basename, .{});
try orig_subdir.copyFile(basename, maybe_dir.value_ptr.*, basename, io, .{});
}
}
var dir_it = dirs.iterator();
while (dir_it.next(io)) |entry| {
while (dir_it.next()) |entry| {
entry.value_ptr.close(io);
}
}

View file

@ -38,7 +38,7 @@ pub fn main() anyerror!void {
if (positionals.items.len != 1) fatal("expected one positional argument: [dir]", .{});
var dir = try std.fs.cwd().openDir(io, positionals.items[0], .{ .follow_symlinks = false });
var dir = try Io.Dir.cwd().openDir(io, positionals.items[0], .{ .follow_symlinks = false });
defer dir.close(io);
var paths = std.array_list.Managed([]const u8).init(arena);
try findHeaders(arena, io, dir, "", &paths);
@ -53,7 +53,7 @@ pub fn main() anyerror!void {
std.mem.sort([]const u8, paths.items, {}, SortFn.lessThan);
var buffer: [2000]u8 = undefined;
var stdout_writer = std.fs.File.stdout().writerStreaming(&buffer);
var stdout_writer = Io.File.stdout().writerStreaming(io, &buffer);
const w = &stdout_writer.interface;
try w.writeAll("#define _XOPEN_SOURCE\n");
for (paths.items) |path| {
@ -75,18 +75,18 @@ fn findHeaders(
paths: *std.array_list.Managed([]const u8),
) anyerror!void {
var it = dir.iterate();
while (try it.next()) |entry| {
while (try it.next(io)) |entry| {
switch (entry.kind) {
.directory => {
const path = try std.fs.path.join(arena, &.{ prefix, entry.name });
const path = try Io.Dir.path.join(arena, &.{ prefix, entry.name });
var subdir = try dir.openDir(io, entry.name, .{ .follow_symlinks = false });
defer subdir.close(io);
try findHeaders(arena, io, subdir, path, paths);
},
.file, .sym_link => {
const ext = std.fs.path.extension(entry.name);
const ext = Io.Dir.path.extension(entry.name);
if (!std.mem.eql(u8, ext, ".h")) continue;
const path = try std.fs.path.join(arena, &.{ prefix, entry.name });
const path = try Io.Dir.path.join(arena, &.{ prefix, entry.name });
try paths.append(path);
},
else => {},

View file

@ -1,4 +1,5 @@
const std = @import("std");
const Io = std.Io;
const Allocator = std.mem.Allocator;
const AtomicOp = enum {
@ -15,10 +16,14 @@ pub fn main() !void {
defer arena_instance.deinit();
const arena = arena_instance.allocator();
var threaded: std.Io.Threaded = .init(arena, .{});
defer threaded.deinit();
const io = threaded.io();
//const args = try std.process.argsAlloc(arena);
var stdout_buffer: [2000]u8 = undefined;
var stdout_writer = std.fs.File.stdout().writerStreaming(&stdout_buffer);
var stdout_writer = Io.File.stdout().writerStreaming(io, &stdout_buffer);
const w = &stdout_writer.interface;
try w.writeAll(

View file

@ -1,5 +1,7 @@
const std = @import("std");
const Io = std.Io;
const Allocator = std.mem.Allocator;
const g = @import("spirv/grammar.zig");
const CoreRegistry = g.CoreRegistry;
const ExtensionRegistry = g.ExtensionRegistry;
@ -63,24 +65,28 @@ pub fn main() !void {
usageAndExit(args[0], 1);
}
const json_path = try std.fs.path.join(allocator, &.{ args[1], "include/spirv/unified1/" });
const dir = try std.fs.cwd().openDir(json_path, .{ .iterate = true });
var threaded: std.Io.Threaded = .init(allocator, .{});
defer threaded.deinit();
const io = threaded.io();
const core_spec = try readRegistry(CoreRegistry, dir, "spirv.core.grammar.json");
const json_path = try Io.Dir.path.join(allocator, &.{ args[1], "include/spirv/unified1/" });
const dir = try Io.Dir.cwd().openDir(io, json_path, .{ .iterate = true });
const core_spec = try readRegistry(io, CoreRegistry, dir, "spirv.core.grammar.json");
std.mem.sortUnstable(Instruction, core_spec.instructions, CmpInst{}, CmpInst.lt);
var exts = std.array_list.Managed(Extension).init(allocator);
var it = dir.iterate();
while (try it.next()) |entry| {
while (try it.next(io)) |entry| {
if (entry.kind != .file) {
continue;
}
try readExtRegistry(&exts, dir, entry.name);
try readExtRegistry(io, &exts, dir, entry.name);
}
try readExtRegistry(&exts, std.fs.cwd(), args[2]);
try readExtRegistry(io, &exts, Io.Dir.cwd(), args[2]);
var allocating: std.Io.Writer.Allocating = .init(allocator);
defer allocating.deinit();
@ -91,7 +97,7 @@ pub fn main() !void {
var tree = try std.zig.Ast.parse(allocator, output, .zig);
if (tree.errors.len != 0) {
try std.zig.printAstErrorsToStderr(allocator, tree, "", .auto);
try std.zig.printAstErrorsToStderr(allocator, io, tree, "", .auto);
return;
}
@ -103,22 +109,22 @@ pub fn main() !void {
try wip_errors.addZirErrorMessages(zir, tree, output, "");
var error_bundle = try wip_errors.toOwnedBundle("");
defer error_bundle.deinit(allocator);
error_bundle.renderToStdErr(.{}, .auto);
try error_bundle.renderToStderr(io, .{}, .auto);
}
const formatted_output = try tree.renderAlloc(allocator);
_ = try std.fs.File.stdout().write(formatted_output);
try Io.File.stdout().writeStreamingAll(io, formatted_output);
}
fn readExtRegistry(exts: *std.array_list.Managed(Extension), dir: std.fs.Dir, sub_path: []const u8) !void {
const filename = std.fs.path.basename(sub_path);
fn readExtRegistry(io: Io, exts: *std.array_list.Managed(Extension), dir: Io.Dir, sub_path: []const u8) !void {
const filename = Io.Dir.path.basename(sub_path);
if (!std.mem.startsWith(u8, filename, "extinst.")) {
return;
}
std.debug.assert(std.mem.endsWith(u8, filename, ".grammar.json"));
const name = filename["extinst.".len .. filename.len - ".grammar.json".len];
const spec = try readRegistry(ExtensionRegistry, dir, sub_path);
const spec = try readRegistry(io, ExtensionRegistry, dir, sub_path);
const set_name = set_names.get(name) orelse {
std.log.info("ignored instruction set '{s}'", .{name});
@ -134,8 +140,8 @@ fn readExtRegistry(exts: *std.array_list.Managed(Extension), dir: std.fs.Dir, su
});
}
fn readRegistry(comptime RegistryType: type, dir: std.fs.Dir, path: []const u8) !RegistryType {
const spec = try dir.readFileAlloc(path, allocator, .unlimited);
fn readRegistry(io: Io, comptime RegistryType: type, dir: Io.Dir, path: []const u8) !RegistryType {
const spec = try dir.readFileAlloc(io, path, allocator, .unlimited);
// Required for json parsing.
// TODO: ALI
@setEvalBranchQuota(10000);

View file

@ -55,12 +55,14 @@
// - e.g. find a common previous symbol and put it after that one
// - they definitely need to go into the correct section
const builtin = @import("builtin");
const native_endian = builtin.cpu.arch.endian();
const std = @import("std");
const builtin = std.builtin;
const Io = std.Io;
const mem = std.mem;
const log = std.log;
const elf = std.elf;
const native_endian = @import("builtin").cpu.arch.endian();
const Arch = enum {
aarch64,
@ -284,10 +286,14 @@ pub fn main() !void {
defer arena_instance.deinit();
const arena = arena_instance.allocator();
var threaded: std.Io.Threaded = .init(arena, .{});
defer threaded.deinit();
const io = threaded.io();
const args = try std.process.argsAlloc(arena);
const build_all_path = args[1];
var build_all_dir = try std.fs.cwd().openDir(build_all_path, .{});
var build_all_dir = try Io.Dir.cwd().openDir(io, build_all_path, .{});
var sym_table = std.StringArrayHashMap(MultiSym).init(arena);
var sections = std.StringArrayHashMap(void).init(arena);
@ -299,6 +305,7 @@ pub fn main() !void {
// Read the ELF header.
const elf_bytes = build_all_dir.readFileAllocOptions(
io,
libc_so_path,
arena,
.limited(100 * 1024 * 1024),
@ -334,7 +341,7 @@ pub fn main() !void {
}
var stdout_buffer: [2000]u8 = undefined;
var stdout_writer = std.fs.File.stdout().writerStreaming(&stdout_buffer);
var stdout_writer = Io.File.stdout().writerStreaming(io, &stdout_buffer);
const stdout = &stdout_writer.interface;
try stdout.writeAll(
\\#ifdef PTR64
@ -539,7 +546,7 @@ pub fn main() !void {
try stdout.flush();
}
fn parseElf(parse: Parse, comptime is_64: bool, comptime endian: builtin.Endian) !void {
fn parseElf(parse: Parse, comptime is_64: bool, comptime endian: std.builtin.Endian) !void {
const arena = parse.arena;
const elf_bytes = parse.elf_bytes;
const header = parse.header;

View file

@ -1,13 +1,18 @@
// zig run this file inside the test_parsing/ directory of this repo: https://github.com/nst/JSONTestSuite
const std = @import("std");
const Io = std.Io;
pub fn main() !void {
var gpa: std.heap.GeneralPurposeAllocator(.{}) = .init;
var allocator = gpa.allocator();
var threaded: std.Io.Threaded = .init(allocator, .{});
defer threaded.deinit();
const io = threaded.io();
var stdout_buffer: [2000]u8 = undefined;
var stdout_writer = std.fs.File.stdout().writerStreaming(&stdout_buffer);
var stdout_writer = Io.File.stdout().writerStreaming(io, &stdout_buffer);
const output = &stdout_writer.interface;
try output.writeAll(
\\// This file was generated by _generate_JSONTestSuite.zig
@ -20,9 +25,9 @@ pub fn main() !void {
);
var names = std.array_list.Managed([]const u8).init(allocator);
var cwd = try std.fs.cwd().openDir(".", .{ .iterate = true });
var cwd = try Io.Dir.cwd().openDir(io, ".", .{ .iterate = true });
var it = cwd.iterate();
while (try it.next()) |entry| {
while (try it.next(io)) |entry| {
try names.append(try allocator.dupe(u8, entry.name));
}
std.mem.sort([]const u8, names.items, {}, (struct {
@ -32,7 +37,7 @@ pub fn main() !void {
}).lessThan);
for (names.items) |name| {
const contents = try std.fs.cwd().readFileAlloc(name, allocator, .limited(250001));
const contents = try Io.Dir.cwd().readFileAlloc(io, name, allocator, .limited(250001));
try output.writeAll("test ");
try writeString(output, name);
try output.writeAll(" {\n try ");

View file

@ -7,6 +7,7 @@
//! target.
const std = @import("std");
const Io = std.Io;
fn cName(ty: std.Target.CType) []const u8 {
return switch (ty) {
@ -47,7 +48,7 @@ pub fn main() !void {
const target = try std.zig.system.resolveTargetQuery(io, query);
var buffer: [2000]u8 = undefined;
var stdout_writer = std.fs.File.stdout().writerStreaming(&buffer);
var stdout_writer = Io.File.stdout().writerStreaming(io, &buffer);
const w = &stdout_writer.interface;
inline for (@typeInfo(std.Target.CType).@"enum".fields) |field| {
const c_type: std.Target.CType = @enumFromInt(field.value);

View file

@ -189,10 +189,10 @@ pub fn main() !void {
const linux_path = args[1];
var stdout_buffer: [2048]u8 = undefined;
var stdout_writer = std.fs.File.stdout().writerStreaming(&stdout_buffer);
var stdout_writer = Io.File.stdout().writerStreaming(io, &stdout_buffer);
const stdout = &stdout_writer.interface;
var linux_dir = try std.fs.cwd().openDir(io, linux_path, .{});
var linux_dir = try Io.Dir.cwd().openDir(io, linux_path, .{});
defer linux_dir.close(io);
// As of 6.11, the largest table is 24195 bytes.
@ -225,7 +225,7 @@ pub fn main() !void {
, .{version});
for (architectures, 0..) |arch, i| {
const table = try linux_dir.readFile(switch (arch.table) {
const table = try linux_dir.readFile(io, switch (arch.table) {
.generic => "scripts/syscall.tbl",
.specific => |f| f,
}, buf);

View file

@ -26,15 +26,15 @@ pub fn main() !void {
defer threaded.deinit();
const io = threaded.io();
var in_file = try Dir.cwd().openFile(input_file, .{ .mode = .read_only });
var in_file = try Dir.cwd().openFile(io, input_file, .{ .mode = .read_only });
defer in_file.close(io);
var out_file = try Dir.cwd().createFile(output_file, .{});
var out_file = try Dir.cwd().createFile(io, output_file, .{});
defer out_file.close(io);
var out_file_buffer: [4096]u8 = undefined;
var out_file_writer = out_file.writer(&out_file_buffer);
var out_file_writer = out_file.writer(io, &out_file_buffer);
var out_dir = try Dir.cwd().openDir(Dir.path.dirname(output_file).?, .{});
var out_dir = try Dir.cwd().openDir(io, Dir.path.dirname(output_file).?, .{});
defer out_dir.close(io);
var in_file_reader = in_file.reader(io, &.{});
@ -42,7 +42,7 @@ pub fn main() !void {
var tokenizer = Tokenizer.init(input_file, input_file_bytes);
try walk(arena, &tokenizer, out_dir, &out_file_writer.interface);
try walk(arena, io, &tokenizer, out_dir, &out_file_writer.interface);
try out_file_writer.end();
}
@ -387,12 +387,12 @@ fn walk(arena: Allocator, io: Io, tokenizer: *Tokenizer, out_dir: Dir, w: anytyp
const basename = try std.fmt.allocPrint(arena, "{s}.zig", .{name});
var file = out_dir.createFile(basename, .{ .exclusive = true }) catch |err| {
var file = out_dir.createFile(io, basename, .{ .exclusive = true }) catch |err| {
fatal("unable to create file '{s}': {s}", .{ name, @errorName(err) });
};
defer file.close(io);
var file_buffer: [1024]u8 = undefined;
var file_writer = file.writer(&file_buffer);
var file_writer = file.writer(io, &file_buffer);
const code = &file_writer.interface;
const source = tokenizer.buffer[source_token.start..source_token.end];

View file

@ -131,7 +131,7 @@ pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
const allocator = arena.allocator();
var threaded: Io.Threaded = .init(allocator);
var threaded: Io.Threaded = .init(allocator, .{});
defer threaded.deinit();
const io = threaded.io();
@ -253,14 +253,14 @@ pub fn main() !void {
var dir_it = dir.iterate();
while (try dir_it.next()) |entry| {
while (try dir_it.next(io)) |entry| {
const full_path = try Dir.path.join(allocator, &[_][]const u8{ full_dir_name, entry.name });
switch (entry.kind) {
.directory => try dir_stack.append(full_path),
.file, .sym_link => {
const rel_path = try Dir.path.relative(allocator, target_include_dir, full_path);
const max_size = 2 * 1024 * 1024 * 1024;
const raw_bytes = try Dir.cwd().readFileAlloc(full_path, allocator, .limited(max_size));
const raw_bytes = try Dir.cwd().readFileAlloc(io, full_path, allocator, .limited(max_size));
const trimmed = std.mem.trim(u8, raw_bytes, " \r\n\t");
total_bytes += raw_bytes.len;
const hash = try allocator.alloc(u8, 32);
@ -273,9 +273,7 @@ pub fn main() !void {
max_bytes_saved += raw_bytes.len;
gop.value_ptr.hit_count += 1;
std.debug.print("duplicate: {s} {s} ({B})\n", .{
libc_dir,
rel_path,
raw_bytes.len,
libc_dir, rel_path, raw_bytes.len,
});
} else {
gop.value_ptr.* = Contents{

View file

@ -206,7 +206,7 @@ pub fn main() !void {
var dir_it = dir.iterate();
while (try dir_it.next()) |entry| {
while (try dir_it.next(io)) |entry| {
const full_path = try Dir.path.join(arena, &[_][]const u8{ full_dir_name, entry.name });
switch (entry.kind) {
.directory => try dir_stack.append(full_path),

View file

@ -10,7 +10,7 @@
//! would mean that the next parameter specifies the target.
const std = @import("std");
const fs = std.fs;
const Io = std.Io;
const assert = std.debug.assert;
const json = std.json;
@ -634,8 +634,12 @@ pub fn main() anyerror!void {
const allocator = arena.allocator();
const args = try std.process.argsAlloc(allocator);
var threaded: std.Io.Threaded = .init(allocator, .{});
defer threaded.deinit();
const io = threaded.io();
var stdout_buffer: [4000]u8 = undefined;
var stdout_writer = fs.File.stdout().writerStreaming(&stdout_buffer);
var stdout_writer = Io.File.stdout().writerStreaming(io, &stdout_buffer);
const stdout = &stdout_writer.interface;
if (args.len <= 1) printUsageAndExit(args[0]);
@ -676,8 +680,7 @@ pub fn main() anyerror!void {
try std.fmt.allocPrint(allocator, "-I={s}/clang/include/clang/Driver", .{llvm_src_root}),
};
const child_result = try std.process.Child.run(.{
.allocator = allocator,
const child_result = try std.process.Child.run(allocator, io, .{
.argv = &child_args,
.max_output_bytes = 100 * 1024 * 1024,
});

View file

@ -1994,8 +1994,7 @@ fn processOneTarget(io: Io, job: Job) void {
}),
};
const child_result = try std.process.Child.run(.{
.allocator = arena,
const child_result = try std.process.Child.run(arena, io, .{
.argv = &child_args,
.max_output_bytes = 500 * 1024 * 1024,
});
@ -2250,7 +2249,7 @@ fn processOneTarget(io: Io, job: Job) void {
defer zig_code_file.close(io);
var zig_code_file_buffer: [4096]u8 = undefined;
var zig_code_file_writer = zig_code_file.writer(&zig_code_file_buffer);
var zig_code_file_writer = zig_code_file.writer(io, &zig_code_file_buffer);
const w = &zig_code_file_writer.interface;
try w.writeAll(

View file

@ -35,7 +35,7 @@ pub fn main() anyerror!void {
var zig_code_file = try hash_target_dir.createFile(io, "crc.zig", .{});
defer zig_code_file.close(io);
var zig_code_file_buffer: [4096]u8 = undefined;
var zig_code_file_writer = zig_code_file.writer(&zig_code_file_buffer);
var zig_code_file_writer = zig_code_file.writer(io, &zig_code_file_buffer);
const code_writer = &zig_code_file_writer.interface;
try code_writer.writeAll(
@ -59,7 +59,7 @@ pub fn main() anyerror!void {
var zig_test_file = try crc_target_dir.createFile(io, "test.zig", .{});
defer zig_test_file.close(io);
var zig_test_file_buffer: [4096]u8 = undefined;
var zig_test_file_writer = zig_test_file.writer(&zig_test_file_buffer);
var zig_test_file_writer = zig_test_file.writer(io, &zig_test_file_buffer);
const test_writer = &zig_test_file_writer.interface;
try test_writer.writeAll(

View file

@ -27,13 +27,13 @@ pub fn main() !void {
const dest_dir_path = try std.fmt.allocPrint(arena, "{s}/lib/libc/freebsd", .{zig_src_path});
var dest_dir = std.fs.cwd().openDir(io, dest_dir_path, .{ .iterate = true }) catch |err| {
var dest_dir = Io.Dir.cwd().openDir(io, dest_dir_path, .{ .iterate = true }) catch |err| {
std.log.err("unable to open destination directory '{s}': {t}", .{ dest_dir_path, err });
std.process.exit(1);
};
defer dest_dir.close(io);
var freebsd_src_dir = try std.fs.cwd().openDir(freebsd_src_path, .{});
var freebsd_src_dir = try Io.Dir.cwd().openDir(io, freebsd_src_path, .{});
defer freebsd_src_dir.close(io);
// Copy updated files from upstream.
@ -41,7 +41,7 @@ pub fn main() !void {
var walker = try dest_dir.walk(arena);
defer walker.deinit();
walk: while (try walker.next()) |entry| {
walk: while (try walker.next(io)) |entry| {
if (entry.kind != .file) continue;
if (std.mem.startsWith(u8, entry.basename, ".")) continue;
for (exempt_files) |p| {
@ -49,15 +49,12 @@ pub fn main() !void {
}
std.log.info("updating '{s}/{s}' from '{s}/{s}'", .{
dest_dir_path, entry.path,
freebsd_src_path, entry.path,
dest_dir_path, entry.path, freebsd_src_path, entry.path,
});
freebsd_src_dir.copyFile(entry.path, dest_dir, entry.path, .{}) catch |err| {
std.log.warn("unable to copy '{s}/{s}' to '{s}/{s}': {s}", .{
freebsd_src_path, entry.path,
dest_dir_path, entry.path,
@errorName(err),
freebsd_src_dir.copyFile(entry.path, dest_dir, entry.path, io, .{}) catch |err| {
std.log.warn("unable to copy '{s}/{s}' to '{s}/{s}': {t}", .{
freebsd_src_path, entry.path, dest_dir_path, entry.path, err,
});
if (err == error.FileNotFound) {
try dest_dir.deleteFile(io, entry.path);

View file

@ -66,7 +66,7 @@ pub fn main() !void {
var walker = try dest_dir.walk(arena);
defer walker.deinit();
walk: while (try walker.next()) |entry| {
walk: while (try walker.next(io)) |entry| {
if (entry.kind != .file) continue;
if (mem.startsWith(u8, entry.basename, ".")) continue;
for (exempt_files) |p| {
@ -76,7 +76,7 @@ pub fn main() !void {
if (mem.endsWith(u8, entry.path, ext)) continue :walk;
}
glibc_src_dir.copyFile(entry.path, dest_dir, entry.path, .{}) catch |err| {
glibc_src_dir.copyFile(entry.path, dest_dir, entry.path, io, .{}) catch |err| {
log.warn("unable to copy '{s}/{s}' to '{s}/{s}': {t}", .{
glibc_src_path, entry.path, dest_dir_path, entry.path, err,
});
@ -106,7 +106,7 @@ pub fn main() !void {
var walker = try include_dir.walk(arena);
defer walker.deinit();
walk: while (try walker.next()) |entry| {
walk: while (try walker.next(io)) |entry| {
if (entry.kind != .file) continue;
if (mem.startsWith(u8, entry.basename, ".")) continue;
for (exempt_files) |p| {
@ -116,23 +116,21 @@ pub fn main() !void {
const max_file_size = 10 * 1024 * 1024;
const generic_glibc_contents = generic_glibc_dir.readFileAlloc(
io,
entry.path,
arena,
.limited(max_file_size),
) catch |err| switch (err) {
error.FileNotFound => continue,
else => |e| fatal("unable to load '{s}/include/{s}': {s}", .{
generic_glibc_path, entry.path, @errorName(e),
}),
else => |e| fatal("unable to load '{s}/include/{s}': {t}", .{ generic_glibc_path, entry.path, e }),
};
const glibc_include_contents = include_dir.readFileAlloc(
io,
entry.path,
arena,
.limited(max_file_size),
) catch |err| {
fatal("unable to load '{s}/include/{s}': {s}", .{
dest_dir_path, entry.path, @errorName(err),
});
fatal("unable to load '{s}/include/{s}': {t}", .{ dest_dir_path, entry.path, err });
};
const whitespace = " \r\n\t";
@ -140,8 +138,7 @@ pub fn main() !void {
const glibc_include_trimmed = mem.trim(u8, glibc_include_contents, whitespace);
if (mem.eql(u8, generic_glibc_trimmed, glibc_include_trimmed)) {
log.warn("same contents: '{s}/include/{s}' and '{s}/include/{s}'", .{
generic_glibc_path, entry.path,
dest_dir_path, entry.path,
generic_glibc_path, entry.path, dest_dir_path, entry.path,
});
}
}

View file

@ -26,13 +26,13 @@ pub fn main() !void {
// in zig's installation.
var dest_crt_dir = Dir.cwd().openDir(io, dest_mingw_crt_path, .{ .iterate = true }) catch |err| {
std.log.err("unable to open directory '{s}': {s}", .{ dest_mingw_crt_path, @errorName(err) });
std.log.err("unable to open directory '{s}': {t}", .{ dest_mingw_crt_path, err });
std.process.exit(1);
};
defer dest_crt_dir.close(io);
var src_crt_dir = Dir.cwd().openDir(io, src_mingw_crt_path, .{ .iterate = true }) catch |err| {
std.log.err("unable to open directory '{s}': {s}", .{ src_mingw_crt_path, @errorName(err) });
std.log.err("unable to open directory '{s}': {t}", .{ src_mingw_crt_path, err });
std.process.exit(1);
};
defer src_crt_dir.close(io);
@ -43,10 +43,10 @@ pub fn main() !void {
var fail = false;
while (try walker.next()) |entry| {
while (try walker.next(io)) |entry| {
if (entry.kind != .file) continue;
src_crt_dir.copyFile(entry.path, dest_crt_dir, entry.path, .{}) catch |err| switch (err) {
src_crt_dir.copyFile(entry.path, dest_crt_dir, entry.path, io, .{}) catch |err| switch (err) {
error.FileNotFound => {
const keep = for (kept_crt_files) |item| {
if (std.mem.eql(u8, entry.path, item)) break true;
@ -94,10 +94,10 @@ pub fn main() !void {
var fail = false;
while (try walker.next()) |entry| {
while (try walker.next(io)) |entry| {
if (entry.kind != .file) continue;
src_winpthreads_dir.copyFile(entry.path, dest_winpthreads_dir, entry.path, .{}) catch |err| switch (err) {
src_winpthreads_dir.copyFile(entry.path, dest_winpthreads_dir, entry.path, io, .{}) catch |err| switch (err) {
error.FileNotFound => {
std.log.warn("deleting {s}", .{entry.path});
try dest_winpthreads_dir.deleteFile(io, entry.path);
@ -120,17 +120,17 @@ pub fn main() !void {
var fail = false;
while (try walker.next()) |entry| {
while (try walker.next(io)) |entry| {
switch (entry.kind) {
.directory => {
switch (entry.depth()) {
1 => if (def_dirs.has(entry.basename)) {
try walker.enter(entry);
try walker.enter(io, entry);
continue;
},
else => {
// The top-level directory was already validated
try walker.enter(entry);
try walker.enter(io, entry);
continue;
},
}
@ -157,15 +157,15 @@ pub fn main() !void {
if (std.mem.endsWith(u8, entry.basename, "_onecore.def"))
continue;
src_crt_dir.copyFile(entry.path, dest_crt_dir, entry.path, .{}) catch |err| {
std.log.err("unable to copy {s}: {s}", .{ entry.path, @errorName(err) });
src_crt_dir.copyFile(entry.path, dest_crt_dir, entry.path, io, .{}) catch |err| {
std.log.err("unable to copy {s}: {t}", .{ entry.path, err });
fail = true;
};
}
if (fail) std.process.exit(1);
}
return std.process.cleanExit();
return std.process.cleanExit(io);
}
const kept_crt_files = [_][]const u8{

View file

@ -27,13 +27,13 @@ pub fn main() !void {
const dest_dir_path = try std.fmt.allocPrint(arena, "{s}/lib/libc/netbsd", .{zig_src_path});
var dest_dir = std.fs.cwd().openDir(io, dest_dir_path, .{ .iterate = true }) catch |err| {
var dest_dir = Io.Dir.cwd().openDir(io, dest_dir_path, .{ .iterate = true }) catch |err| {
std.log.err("unable to open destination directory '{s}': {t}", .{ dest_dir_path, err });
std.process.exit(1);
};
defer dest_dir.close(io);
var netbsd_src_dir = try std.fs.cwd().openDir(io, netbsd_src_path, .{});
var netbsd_src_dir = try Io.Dir.cwd().openDir(io, netbsd_src_path, .{});
defer netbsd_src_dir.close(io);
// Copy updated files from upstream.
@ -41,7 +41,7 @@ pub fn main() !void {
var walker = try dest_dir.walk(arena);
defer walker.deinit();
walk: while (try walker.next()) |entry| {
walk: while (try walker.next(io)) |entry| {
if (entry.kind != .file) continue;
if (std.mem.startsWith(u8, entry.basename, ".")) continue;
for (exempt_files) |p| {
@ -53,7 +53,7 @@ pub fn main() !void {
netbsd_src_path, entry.path,
});
netbsd_src_dir.copyFile(entry.path, dest_dir, entry.path, .{}) catch |err| {
netbsd_src_dir.copyFile(entry.path, dest_dir, entry.path, io, .{}) catch |err| {
std.log.warn("unable to copy '{s}/{s}' to '{s}/{s}': {t}", .{
netbsd_src_path, entry.path, dest_dir_path, entry.path, err,
});