fix compilation of incr-check

This commit is contained in:
Andrew Kelley 2025-12-19 19:42:52 -08:00
parent 7d955274bb
commit 50c585227e
15 changed files with 262 additions and 213 deletions

View file

@ -73,15 +73,15 @@ pub fn main() !void {
const code_dir_path = opt_code_dir orelse fatal("missing --code-dir argument", .{});
var in_file = try fs.cwd().openFile(input_path, .{});
defer in_file.close();
defer in_file.close(io);
var out_file = try fs.cwd().createFile(output_path, .{});
defer out_file.close();
defer out_file.close(io);
var out_file_buffer: [4096]u8 = undefined;
var out_file_writer = out_file.writer(&out_file_buffer);
var code_dir = try fs.cwd().openDir(code_dir_path, .{});
defer code_dir.close();
defer code_dir.close(io);
var in_file_reader = in_file.reader(io, &.{});
const input_file_bytes = try in_file_reader.interface.allocRemaining(arena, .limited(max_doc_file_size));

View file

@ -85,16 +85,16 @@ pub fn main() !void {
const tmp_dir_path = try std.fmt.allocPrint(arena, "{s}/tmp/{x}", .{
cache_root, std.crypto.random.int(u64),
});
fs.cwd().makePath(tmp_dir_path) catch |err|
fatal("unable to create tmp dir '{s}': {s}", .{ tmp_dir_path, @errorName(err) });
defer fs.cwd().deleteTree(tmp_dir_path) catch |err| std.log.err("unable to delete '{s}': {s}", .{
tmp_dir_path, @errorName(err),
fs.cwd().createDirPath(io, tmp_dir_path) catch |err|
fatal("unable to create tmp dir '{s}': {t}", .{ tmp_dir_path, err });
defer fs.cwd().deleteTree(io, tmp_dir_path) catch |err| std.log.err("unable to delete '{s}': {t}", .{
tmp_dir_path, err,
});
var out_file = try fs.cwd().createFile(output_path, .{});
defer out_file.close();
var out_file = try fs.cwd().createFile(io, output_path, .{});
defer out_file.close(io);
var out_file_buffer: [4096]u8 = undefined;
var out_file_writer = out_file.writer(&out_file_buffer);
var out_file_writer = out_file.writer(io, &out_file_buffer);
const out = &out_file_writer.interface;

View file

@ -1,6 +1,6 @@
const std = @import("std");
const Io = std.Io;
const fs = std.fs;
const Dir = std.Io.Dir;
const mem = std.mem;
const process = std.process;
const assert = std.debug.assert;
@ -96,9 +96,9 @@ pub fn main() anyerror!void {
fatal("no SDK found; you can provide one explicitly with '--sysroot' flag", .{});
};
var sdk_dir = try std.fs.cwd().openDir(sysroot_path, .{});
defer sdk_dir.close();
const sdk_info = try sdk_dir.readFileAlloc("SDKSettings.json", allocator, .limited(std.math.maxInt(u32)));
var sdk_dir = try Dir.cwd().openDir(io, sysroot_path, .{});
defer sdk_dir.close(io);
const sdk_info = try sdk_dir.readFileAlloc(io, "SDKSettings.json", allocator, .limited(std.math.maxInt(u32)));
const parsed_json = try std.json.parseFromSlice(struct {
DefaultProperties: struct { MACOSX_DEPLOYMENT_TARGET: []const u8 },
@ -135,8 +135,8 @@ fn fetchTarget(
const tmp_filename = "macos-headers";
const headers_list_filename = "macos-headers.o.d";
const tmp_path = try tmp.dir.realpathAlloc(arena, ".");
const tmp_file_path = try fs.path.join(arena, &[_][]const u8{ tmp_path, tmp_filename });
const headers_list_path = try fs.path.join(arena, &[_][]const u8{ tmp_path, headers_list_filename });
const tmp_file_path = try Dir.path.join(arena, &[_][]const u8{ tmp_path, tmp_filename });
const headers_list_path = try Dir.path.join(arena, &[_][]const u8{ tmp_path, headers_list_filename });
const macos_version = try std.fmt.allocPrint(arena, "-mmacosx-version-min={d}.{d}", .{
ver.major,
@ -176,10 +176,10 @@ fn fetchTarget(
}
// Read in the contents of `macos-headers.o.d`
const headers_list_file = try tmp.dir.openFile(headers_list_filename, .{});
defer headers_list_file.close();
const headers_list_file = try tmp.dir.openFile(io, headers_list_filename, .{});
defer headers_list_file.close(io);
var headers_dir = fs.cwd().openDir(headers_source_prefix, .{}) catch |err| switch (err) {
var headers_dir = Dir.cwd().openDir(headers_source_prefix, .{}) catch |err| switch (err) {
error.FileNotFound,
error.NotDir,
=> fatal("path '{s}' not found or not a directory. Did you accidentally delete it?", .{
@ -187,13 +187,13 @@ fn fetchTarget(
}),
else => return err,
};
defer headers_dir.close();
defer headers_dir.close(io);
const dest_path = try target.fullName(arena);
try headers_dir.deleteTree(dest_path);
try headers_dir.deleteTree(io, dest_path);
var dest_dir = try headers_dir.makeOpenPath(dest_path, .{});
var dirs = std.StringHashMap(fs.Dir).init(arena);
var dest_dir = try headers_dir.createDirPathOpen(io, dest_path, .{});
var dirs = std.StringHashMap(Dir).init(arena);
try dirs.putNoClobber(".", dest_dir);
var headers_list_file_reader = headers_list_file.reader(io, &.{});
@ -206,25 +206,25 @@ fn fetchTarget(
if (mem.lastIndexOf(u8, line, prefix[0..])) |idx| {
const out_rel_path = line[idx + prefix.len + 1 ..];
const out_rel_path_stripped = mem.trim(u8, out_rel_path, " \\");
const dirname = fs.path.dirname(out_rel_path_stripped) orelse ".";
const dirname = Dir.path.dirname(out_rel_path_stripped) orelse ".";
const maybe_dir = try dirs.getOrPut(dirname);
if (!maybe_dir.found_existing) {
maybe_dir.value_ptr.* = try dest_dir.makeOpenPath(dirname, .{});
maybe_dir.value_ptr.* = try dest_dir.createDirPathOpen(io, dirname, .{});
}
const basename = fs.path.basename(out_rel_path_stripped);
const basename = Dir.path.basename(out_rel_path_stripped);
const line_stripped = mem.trim(u8, line, " \\");
const abs_dirname = fs.path.dirname(line_stripped).?;
var orig_subdir = try fs.cwd().openDir(abs_dirname, .{});
defer orig_subdir.close();
const abs_dirname = Dir.path.dirname(line_stripped).?;
var orig_subdir = try Dir.cwd().openDir(abs_dirname, .{});
defer orig_subdir.close(io);
try orig_subdir.copyFile(basename, maybe_dir.value_ptr.*, basename, .{});
}
}
var dir_it = dirs.iterator();
while (dir_it.next()) |entry| {
entry.value_ptr.close();
while (dir_it.next(io)) |entry| {
entry.value_ptr.close(io);
}
}

View file

@ -1,8 +1,9 @@
const std = @import("std");
const Io = std.Io;
const Dir = std.Io.Dir;
const assert = std.debug.assert;
const info = std.log.info;
const fatal = std.process.fatal;
const Allocator = std.mem.Allocator;
var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};
@ -20,6 +21,10 @@ pub fn main() anyerror!void {
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
var threaded: Io.Threaded = .init(gpa);
defer threaded.deinit();
const io = threaded.io();
const args = try std.process.argsAlloc(arena);
if (args.len == 1) fatal("no command or option specified", .{});
@ -33,10 +38,10 @@ pub fn main() anyerror!void {
if (positionals.items.len != 1) fatal("expected one positional argument: [dir]", .{});
var dir = try std.fs.cwd().openDir(positionals.items[0], .{ .follow_symlinks = false });
defer dir.close();
var dir = try std.fs.cwd().openDir(io, positionals.items[0], .{ .follow_symlinks = false });
defer dir.close(io);
var paths = std.array_list.Managed([]const u8).init(arena);
try findHeaders(arena, dir, "", &paths);
try findHeaders(arena, io, dir, "", &paths);
const SortFn = struct {
pub fn lessThan(ctx: void, lhs: []const u8, rhs: []const u8) bool {
@ -64,7 +69,8 @@ pub fn main() anyerror!void {
fn findHeaders(
arena: Allocator,
dir: std.fs.Dir,
io: Io,
dir: Dir,
prefix: []const u8,
paths: *std.array_list.Managed([]const u8),
) anyerror!void {
@ -73,9 +79,9 @@ fn findHeaders(
switch (entry.kind) {
.directory => {
const path = try std.fs.path.join(arena, &.{ prefix, entry.name });
var subdir = try dir.openDir(entry.name, .{ .follow_symlinks = false });
defer subdir.close();
try findHeaders(arena, subdir, path, paths);
var subdir = try dir.openDir(io, entry.name, .{ .follow_symlinks = false });
defer subdir.close(io);
try findHeaders(arena, io, subdir, path, paths);
},
.file, .sym_link => {
const ext = std.fs.path.extension(entry.name);

View file

@ -175,6 +175,10 @@ pub fn main() !void {
defer arena.deinit();
const gpa = arena.allocator();
var threaded: Io.Threaded = .init(gpa);
defer threaded.deinit();
const io = threaded.io();
const args = try std.process.argsAlloc(gpa);
if (args.len < 2 or mem.eql(u8, args[1], "--help")) {
const w, _ = std.debug.lockStderrWriter(&.{});
@ -188,8 +192,8 @@ pub fn main() !void {
var stdout_writer = std.fs.File.stdout().writerStreaming(&stdout_buffer);
const stdout = &stdout_writer.interface;
var linux_dir = try std.fs.cwd().openDir(linux_path, .{});
defer linux_dir.close();
var linux_dir = try std.fs.cwd().openDir(io, linux_path, .{});
defer linux_dir.close(io);
// As of 6.11, the largest table is 24195 bytes.
// 32k should be enough for now.
@ -198,7 +202,7 @@ pub fn main() !void {
// Fetch the kernel version from the Makefile variables.
const version = blk: {
const head = try linux_dir.readFile("Makefile", buf[0..128]);
const head = try linux_dir.readFile(io, "Makefile", buf[0..128]);
var lines = mem.tokenizeScalar(u8, head, '\n');
_ = lines.next(); // Skip SPDX identifier

View file

@ -1,5 +1,6 @@
const std = @import("std");
const Io = std.Io;
const Dir = std.Io.Dir;
const Allocator = std.mem.Allocator;
const Cache = std.Build.Cache;
@ -59,7 +60,7 @@ pub fn main() !void {
const zig_exe = opt_zig_exe orelse fatal("missing path to zig\n{s}", .{usage});
const input_file_name = opt_input_file_name orelse fatal("missing input file\n{s}", .{usage});
const input_file_bytes = try std.fs.cwd().readFileAlloc(input_file_name, arena, .limited(std.math.maxInt(u32)));
const input_file_bytes = try Dir.cwd().readFileAlloc(io, input_file_name, arena, .limited(std.math.maxInt(u32)));
const case = try Case.parse(arena, io, input_file_bytes);
// Check now: if there are any targets using the `cbe` backend, we need the lib dir.
@ -71,25 +72,25 @@ pub fn main() !void {
}
}
const prog_node = std.Progress.start(.{});
const prog_node = std.Progress.start(io, .{});
defer prog_node.end();
const rand_int = std.crypto.random.int(u64);
const tmp_dir_path = "tmp_" ++ std.fmt.hex(rand_int);
var tmp_dir = try std.fs.cwd().makeOpenPath(tmp_dir_path, .{});
var tmp_dir = try Dir.cwd().createDirPathOpen(io, tmp_dir_path, .{});
defer {
tmp_dir.close();
tmp_dir.close(io);
if (!preserve_tmp) {
std.fs.cwd().deleteTree(tmp_dir_path) catch |err| {
std.log.warn("failed to delete tree '{s}': {s}", .{ tmp_dir_path, @errorName(err) });
Dir.cwd().deleteTree(io, tmp_dir_path) catch |err| {
std.log.warn("failed to delete tree '{s}': {t}", .{ tmp_dir_path, err });
};
}
}
// Convert paths to be relative to the cwd of the subprocess.
const resolved_zig_exe = try std.fs.path.relative(arena, tmp_dir_path, zig_exe);
const resolved_zig_exe = try Dir.path.relative(arena, tmp_dir_path, zig_exe);
const opt_resolved_lib_dir = if (opt_lib_dir) |lib_dir|
try std.fs.path.relative(arena, tmp_dir_path, lib_dir)
try Dir.path.relative(arena, tmp_dir_path, lib_dir)
else
null;
@ -164,7 +165,7 @@ pub fn main() !void {
var cc_child_args: std.ArrayList([]const u8) = .empty;
if (target.backend == .cbe) {
const resolved_cc_zig_exe = if (opt_cc_zig) |cc_zig_exe|
try std.fs.path.relative(arena, tmp_dir_path, cc_zig_exe)
try Dir.path.relative(arena, tmp_dir_path, cc_zig_exe)
else
resolved_zig_exe;
@ -185,6 +186,7 @@ pub fn main() !void {
var eval: Eval = .{
.arena = arena,
.io = io,
.case = case,
.host = host,
.target = target,
@ -196,9 +198,9 @@ pub fn main() !void {
.cc_child_args = &cc_child_args,
};
try child.spawn();
try child.spawn(io);
errdefer {
_ = child.kill() catch {};
_ = child.kill(io) catch {};
}
var poller = Io.poll(arena, Eval.StreamEnum, .{
@ -228,10 +230,11 @@ pub fn main() !void {
const Eval = struct {
arena: Allocator,
io: Io,
host: std.Target,
case: Case,
target: Case.Target,
tmp_dir: std.fs.Dir,
tmp_dir: Dir,
tmp_dir_path: []const u8,
child: *std.process.Child,
allow_stderr: bool,
@ -245,17 +248,18 @@ const Eval = struct {
/// Currently this function assumes the previous updates have already been written.
fn write(eval: *Eval, update: Case.Update) void {
const io = eval.io;
for (update.changes) |full_contents| {
eval.tmp_dir.writeFile(.{
eval.tmp_dir.writeFile(io, .{
.sub_path = full_contents.name,
.data = full_contents.bytes,
}) catch |err| {
eval.fatal("failed to update '{s}': {s}", .{ full_contents.name, @errorName(err) });
eval.fatal("failed to update '{s}': {t}", .{ full_contents.name, err });
};
}
for (update.deletes) |doomed_name| {
eval.tmp_dir.deleteFile(doomed_name) catch |err| {
eval.fatal("failed to delete '{s}': {s}", .{ doomed_name, @errorName(err) });
eval.tmp_dir.deleteFile(io, doomed_name) catch |err| {
eval.fatal("failed to delete '{s}': {t}", .{ doomed_name, err });
};
}
}
@ -307,14 +311,14 @@ const Eval = struct {
}
const digest = r.takeArray(Cache.bin_digest_len) catch unreachable;
const result_dir = ".local-cache" ++ std.fs.path.sep_str ++ "o" ++ std.fs.path.sep_str ++ Cache.binToHex(digest.*);
const result_dir = ".local-cache" ++ Dir.path.sep_str ++ "o" ++ Dir.path.sep_str ++ Cache.binToHex(digest.*);
const bin_name = try std.zig.EmitArtifact.bin.cacheName(arena, .{
.root_name = "root", // corresponds to the module name "root"
.target = &eval.target.resolved,
.output_mode = .Exe,
});
const bin_path = try std.fs.path.join(arena, &.{ result_dir, bin_name });
const bin_path = try Dir.path.join(arena, &.{ result_dir, bin_name });
try eval.checkSuccessOutcome(update, bin_path, prog_node);
// This message indicates the end of the update.
@ -338,11 +342,12 @@ const Eval = struct {
}
fn checkErrorOutcome(eval: *Eval, update: Case.Update, error_bundle: std.zig.ErrorBundle) !void {
const io = eval.io;
const expected = switch (update.outcome) {
.unknown => return,
.compile_errors => |ce| ce,
.stdout, .exit_code => {
error_bundle.renderToStdErr(.{}, .auto);
try error_bundle.renderToStderr(io, .{}, .auto);
eval.fatal("update '{s}': unexpected compile errors", .{update.name});
},
};
@ -351,7 +356,7 @@ const Eval = struct {
for (error_bundle.getMessages()) |err_idx| {
if (expected_idx == expected.errors.len) {
error_bundle.renderToStdErr(.{}, .auto);
try error_bundle.renderToStderr(io, .{}, .auto);
eval.fatal("update '{s}': more errors than expected", .{update.name});
}
try eval.checkOneError(update, error_bundle, expected.errors[expected_idx], false, err_idx);
@ -359,7 +364,7 @@ const Eval = struct {
for (error_bundle.getNotes(err_idx)) |note_idx| {
if (expected_idx == expected.errors.len) {
error_bundle.renderToStdErr(.{}, .auto);
try error_bundle.renderToStderr(io, .{}, .auto);
eval.fatal("update '{s}': more error notes than expected", .{update.name});
}
try eval.checkOneError(update, error_bundle, expected.errors[expected_idx], true, note_idx);
@ -368,7 +373,7 @@ const Eval = struct {
}
if (!std.mem.eql(u8, error_bundle.getCompileLogOutput(), expected.compile_log_output)) {
error_bundle.renderToStdErr(.{}, .auto);
try error_bundle.renderToStderr(io, .{}, .auto);
eval.fatal("update '{s}': unexpected compile log output", .{update.name});
}
}
@ -388,6 +393,8 @@ const Eval = struct {
const src = eb.getSourceLocation(err.src_loc);
const raw_filename = eb.nullTerminatedString(src.src_path);
const io = eval.io;
// We need to replace backslashes for consistency between platforms.
const filename = name: {
if (std.mem.indexOfScalar(u8, raw_filename, '\\') == null) break :name raw_filename;
@ -402,7 +409,7 @@ const Eval = struct {
expected.column != src.column + 1 or
!std.mem.eql(u8, expected.msg, msg))
{
eb.renderToStdErr(.{}, .auto);
eb.renderToStderr(io, .{}, .auto) catch {};
eval.fatal("update '{s}': compile error did not match expected error", .{update.name});
}
}
@ -429,8 +436,11 @@ const Eval = struct {
},
};
const io = eval.io;
var argv_buf: [2][]const u8 = undefined;
const argv: []const []const u8, const is_foreign: bool = switch (std.zig.system.getExternalExecutor(
io,
&eval.host,
&eval.target.resolved,
.{ .link_libc = eval.target.backend == .cbe },
@ -459,8 +469,7 @@ const Eval = struct {
const run_prog_node = prog_node.start("run generated executable", 0);
defer run_prog_node.end();
const result = std.process.Child.run(.{
.allocator = eval.arena,
const result = std.process.Child.run(eval.arena, io, .{
.argv = argv,
.cwd_dir = eval.tmp_dir,
.cwd = eval.tmp_dir_path,
@ -468,17 +477,17 @@ const Eval = struct {
if (is_foreign) {
// Chances are the foreign executor isn't available. Skip this evaluation.
if (eval.allow_stderr) {
std.log.warn("update '{s}': skipping execution of '{s}' via executor for foreign target '{s}': {s}", .{
std.log.warn("update '{s}': skipping execution of '{s}' via executor for foreign target '{s}': {t}", .{
update.name,
binary_path,
try eval.target.resolved.zigTriple(eval.arena),
@errorName(err),
err,
});
}
return;
}
eval.fatal("update '{s}': failed to run the generated executable '{s}': {s}", .{
update.name, binary_path, @errorName(err),
eval.fatal("update '{s}': failed to run the generated executable '{s}': {t}", .{
update.name, binary_path, err,
});
};
@ -514,11 +523,12 @@ const Eval = struct {
}
fn requestUpdate(eval: *Eval) !void {
const io = eval.io;
const header: std.zig.Client.Message.Header = .{
.tag = .update,
.bytes_len = 0,
};
var w = eval.child.stdin.?.writer(&.{});
var w = eval.child.stdin.?.writer(io, &.{});
w.interface.writeStruct(header, .little) catch |err| switch (err) {
error.WriteFailed => return w.err.?,
};
@ -552,16 +562,13 @@ const Eval = struct {
try eval.cc_child_args.appendSlice(eval.arena, &.{ out_path, c_path });
defer eval.cc_child_args.items.len -= 2;
const result = std.process.Child.run(.{
.allocator = eval.arena,
const result = std.process.Child.run(eval.arena, eval.io, .{
.argv = eval.cc_child_args.items,
.cwd_dir = eval.tmp_dir,
.cwd = eval.tmp_dir_path,
.progress_node = child_prog_node,
}) catch |err| {
eval.fatal("update '{s}': failed to spawn zig cc for '{s}': {s}", .{
update.name, c_path, @errorName(err),
});
eval.fatal("update '{s}': failed to spawn zig cc for '{s}': {t}", .{ update.name, c_path, err });
};
switch (result.term) {
.Exited => |code| if (code != 0) {
@ -588,12 +595,13 @@ const Eval = struct {
}
fn fatal(eval: *Eval, comptime fmt: []const u8, args: anytype) noreturn {
eval.tmp_dir.close();
const io = eval.io;
eval.tmp_dir.close(io);
if (!eval.preserve_tmp_on_fatal) {
// Kill the child since it holds an open handle to its CWD which is the tmp dir path
_ = eval.child.kill() catch {};
std.fs.cwd().deleteTree(eval.tmp_dir_path) catch |err| {
std.log.warn("failed to delete tree '{s}': {s}", .{ eval.tmp_dir_path, @errorName(err) });
_ = eval.child.kill(io) catch {};
Dir.cwd().deleteTree(io, eval.tmp_dir_path) catch |err| {
std.log.warn("failed to delete tree '{s}': {t}", .{ eval.tmp_dir_path, err });
};
}
std.process.fatal(fmt, args);
@ -759,7 +767,7 @@ const Case = struct {
if (last_update.outcome != .unknown) fatal("line {d}: conflicting expect directive", .{line_n});
last_update.outcome = .{
.stdout = std.zig.string_literal.parseAlloc(arena, val) catch |err| {
fatal("line {d}: bad string literal: {s}", .{ line_n, @errorName(err) });
fatal("line {d}: bad string literal: {t}", .{ line_n, err });
},
};
} else if (std.mem.eql(u8, key, "expect_error")) {
@ -833,27 +841,29 @@ const Case = struct {
fn requestExit(child: *std.process.Child, eval: *Eval) void {
if (child.stdin == null) return;
const io = eval.io;
const header: std.zig.Client.Message.Header = .{
.tag = .exit,
.bytes_len = 0,
};
var w = eval.child.stdin.?.writer(&.{});
var w = eval.child.stdin.?.writer(io, &.{});
w.interface.writeStruct(header, .little) catch |err| switch (err) {
error.WriteFailed => switch (w.err.?) {
error.BrokenPipe => {},
else => |e| eval.fatal("failed to send exit: {s}", .{@errorName(e)}),
else => |e| eval.fatal("failed to send exit: {t}", .{e}),
},
};
// Send EOF to stdin.
child.stdin.?.close();
child.stdin.?.close(io);
child.stdin = null;
}
fn waitChild(child: *std.process.Child, eval: *Eval) void {
const io = eval.io;
requestExit(child, eval);
const term = child.wait() catch |err| eval.fatal("child process failed: {s}", .{@errorName(err)});
const term = child.wait(io) catch |err| eval.fatal("child process failed: {t}", .{err});
switch (term) {
.Exited => |code| if (code != 0) eval.fatal("compiler failed with code {d}", .{code}),
.Signal, .Stopped, .Unknown => eval.fatal("compiler terminated unexpectedly", .{}),

View file

@ -1,13 +1,16 @@
const std = @import("std");
const builtin = @import("builtin");
const fs = std.fs;
const std = @import("std");
const Io = std.Io;
const Dir = std.Io.Dir;
const print = std.debug.print;
const mem = std.mem;
const testing = std.testing;
const Allocator = std.mem.Allocator;
const max_doc_file_size = 10 * 1024 * 1024;
const fatal = std.process.fatal;
const max_doc_file_size = 10 * 1024 * 1024;
pub fn main() !void {
var arena_instance = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_instance.deinit();
@ -23,16 +26,16 @@ pub fn main() !void {
defer threaded.deinit();
const io = threaded.io();
var in_file = try fs.cwd().openFile(input_file, .{ .mode = .read_only });
defer in_file.close();
var in_file = try Dir.cwd().openFile(input_file, .{ .mode = .read_only });
defer in_file.close(io);
var out_file = try fs.cwd().createFile(output_file, .{});
defer out_file.close();
var out_file = try Dir.cwd().createFile(output_file, .{});
defer out_file.close(io);
var out_file_buffer: [4096]u8 = undefined;
var out_file_writer = out_file.writer(&out_file_buffer);
var out_dir = try fs.cwd().openDir(fs.path.dirname(output_file).?, .{});
defer out_dir.close();
var out_dir = try Dir.cwd().openDir(Dir.path.dirname(output_file).?, .{});
defer out_dir.close(io);
var in_file_reader = in_file.reader(io, &.{});
const input_file_bytes = try in_file_reader.interface.allocRemaining(arena, .unlimited);
@ -266,7 +269,7 @@ const Code = struct {
};
};
fn walk(arena: Allocator, tokenizer: *Tokenizer, out_dir: std.fs.Dir, w: anytype) !void {
fn walk(arena: Allocator, io: Io, tokenizer: *Tokenizer, out_dir: Dir, w: anytype) !void {
while (true) {
const token = tokenizer.next();
switch (token.id) {
@ -387,7 +390,7 @@ fn walk(arena: Allocator, tokenizer: *Tokenizer, out_dir: std.fs.Dir, w: anytype
var file = out_dir.createFile(basename, .{ .exclusive = true }) catch |err| {
fatal("unable to create file '{s}': {s}", .{ name, @errorName(err) });
};
defer file.close();
defer file.close(io);
var file_buffer: [1024]u8 = undefined;
var file_writer = file.writer(&file_buffer);
const code = &file_writer.interface;

View file

@ -12,6 +12,8 @@
//! You'll then have to manually update Zig source repo with these new files.
const std = @import("std");
const Io = std.Io;
const Dir = std.Io.Dir;
const Arch = std.Target.Cpu.Arch;
const Abi = std.Target.Abi;
const OsTag = std.Target.Os.Tag;
@ -128,6 +130,11 @@ const LibCVendor = enum {
pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
const allocator = arena.allocator();
var threaded: Io.Threaded = .init(allocator);
defer threaded.deinit();
const io = threaded.io();
const args = try std.process.argsAlloc(allocator);
var search_paths = std.array_list.Managed([]const u8).init(allocator);
var opt_out_dir: ?[]const u8 = null;
@ -232,28 +239,28 @@ pub fn main() !void {
=> &[_][]const u8{ search_path, libc_dir, "usr", "include" },
.musl => &[_][]const u8{ search_path, libc_dir, "usr", "local", "musl", "include" },
};
const target_include_dir = try std.fs.path.join(allocator, sub_path);
const target_include_dir = try Dir.path.join(allocator, sub_path);
var dir_stack = std.array_list.Managed([]const u8).init(allocator);
try dir_stack.append(target_include_dir);
while (dir_stack.pop()) |full_dir_name| {
var dir = std.fs.cwd().openDir(full_dir_name, .{ .iterate = true }) catch |err| switch (err) {
var dir = Dir.cwd().openDir(io, full_dir_name, .{ .iterate = true }) catch |err| switch (err) {
error.FileNotFound => continue :search,
error.AccessDenied => continue :search,
else => return err,
};
defer dir.close();
defer dir.close(io);
var dir_it = dir.iterate();
while (try dir_it.next()) |entry| {
const full_path = try std.fs.path.join(allocator, &[_][]const u8{ full_dir_name, entry.name });
const full_path = try Dir.path.join(allocator, &[_][]const u8{ full_dir_name, entry.name });
switch (entry.kind) {
.directory => try dir_stack.append(full_path),
.file, .sym_link => {
const rel_path = try std.fs.path.relative(allocator, target_include_dir, full_path);
const rel_path = try Dir.path.relative(allocator, target_include_dir, full_path);
const max_size = 2 * 1024 * 1024 * 1024;
const raw_bytes = try std.fs.cwd().readFileAlloc(full_path, allocator, .limited(max_size));
const raw_bytes = try Dir.cwd().readFileAlloc(full_path, allocator, .limited(max_size));
const trimmed = std.mem.trim(u8, raw_bytes, " \r\n\t");
total_bytes += raw_bytes.len;
const hash = try allocator.alloc(u8, 32);
@ -314,7 +321,7 @@ pub fn main() !void {
total_bytes,
total_bytes - max_bytes_saved,
});
try std.fs.cwd().makePath(out_dir);
try Dir.cwd().createDirPath(io, out_dir);
var missed_opportunity_bytes: usize = 0;
// iterate path_table. for each path, put all the hashes into a list. sort by hit_count.
@ -334,9 +341,9 @@ pub fn main() !void {
const best_contents = contents_list.pop().?;
if (best_contents.hit_count > 1) {
// worth it to make it generic
const full_path = try std.fs.path.join(allocator, &[_][]const u8{ out_dir, generic_name, path_kv.key_ptr.* });
try std.fs.cwd().makePath(std.fs.path.dirname(full_path).?);
try std.fs.cwd().writeFile(.{ .sub_path = full_path, .data = best_contents.bytes });
const full_path = try Dir.path.join(allocator, &[_][]const u8{ out_dir, generic_name, path_kv.key_ptr.* });
try Dir.cwd().createDirPath(io, Dir.path.dirname(full_path).?);
try Dir.cwd().writeFile(io, .{ .sub_path = full_path, .data = best_contents.bytes });
best_contents.is_generic = true;
while (contents_list.pop()) |contender| {
if (contender.hit_count > 1) {
@ -355,9 +362,9 @@ pub fn main() !void {
if (contents.is_generic) continue;
const dest_target = hash_kv.key_ptr.*;
const full_path = try std.fs.path.join(allocator, &[_][]const u8{ out_dir, dest_target, path_kv.key_ptr.* });
try std.fs.cwd().makePath(std.fs.path.dirname(full_path).?);
try std.fs.cwd().writeFile(.{ .sub_path = full_path, .data = contents.bytes });
const full_path = try Dir.path.join(allocator, &[_][]const u8{ out_dir, dest_target, path_kv.key_ptr.* });
try Dir.cwd().createDirPath(io, Dir.path.dirname(full_path).?);
try Dir.cwd().writeFile(io, .{ .sub_path = full_path, .data = contents.bytes });
}
}
}

View file

@ -15,6 +15,8 @@
//! You'll then have to manually update Zig source repo with these new files.
const std = @import("std");
const Io = std.Io;
const Dir = std.Io.Dir;
const Arch = std.Target.Cpu.Arch;
const Abi = std.Target.Abi;
const assert = std.debug.assert;
@ -142,6 +144,11 @@ const PathTable = std.StringHashMap(*TargetToHash);
pub fn main() !void {
var arena_state = std.heap.ArenaAllocator.init(std.heap.page_allocator);
const arena = arena_state.allocator();
var threaded: Io.Threaded = .init(arena);
defer threaded.deinit();
const io = threaded.io();
const args = try std.process.argsAlloc(arena);
var search_paths = std.array_list.Managed([]const u8).init(arena);
var opt_out_dir: ?[]const u8 = null;
@ -183,30 +190,30 @@ pub fn main() !void {
.arch = linux_target.arch,
};
search: for (search_paths.items) |search_path| {
const target_include_dir = try std.fs.path.join(arena, &.{
const target_include_dir = try Dir.path.join(arena, &.{
search_path, linux_target.name, "include",
});
var dir_stack = std.array_list.Managed([]const u8).init(arena);
try dir_stack.append(target_include_dir);
while (dir_stack.pop()) |full_dir_name| {
var dir = std.fs.cwd().openDir(full_dir_name, .{ .iterate = true }) catch |err| switch (err) {
var dir = Dir.cwd().openDir(full_dir_name, .{ .iterate = true }) catch |err| switch (err) {
error.FileNotFound => continue :search,
error.AccessDenied => continue :search,
else => return err,
};
defer dir.close();
defer dir.close(io);
var dir_it = dir.iterate();
while (try dir_it.next()) |entry| {
const full_path = try std.fs.path.join(arena, &[_][]const u8{ full_dir_name, entry.name });
const full_path = try Dir.path.join(arena, &[_][]const u8{ full_dir_name, entry.name });
switch (entry.kind) {
.directory => try dir_stack.append(full_path),
.file => {
const rel_path = try std.fs.path.relative(arena, target_include_dir, full_path);
const rel_path = try Dir.path.relative(arena, target_include_dir, full_path);
const max_size = 2 * 1024 * 1024 * 1024;
const raw_bytes = try std.fs.cwd().readFileAlloc(full_path, arena, .limited(max_size));
const raw_bytes = try Dir.cwd().readFileAlloc(full_path, arena, .limited(max_size));
const trimmed = std.mem.trim(u8, raw_bytes, " \r\n\t");
total_bytes += raw_bytes.len;
const hash = try arena.alloc(u8, 32);
@ -253,7 +260,7 @@ pub fn main() !void {
total_bytes,
total_bytes - max_bytes_saved,
});
try std.fs.cwd().makePath(out_dir);
try Dir.cwd().createDirPath(io, out_dir);
var missed_opportunity_bytes: usize = 0;
// iterate path_table. for each path, put all the hashes into a list. sort by hit_count.
@ -273,9 +280,9 @@ pub fn main() !void {
const best_contents = contents_list.pop().?;
if (best_contents.hit_count > 1) {
// worth it to make it generic
const full_path = try std.fs.path.join(arena, &[_][]const u8{ out_dir, generic_name, path_kv.key_ptr.* });
try std.fs.cwd().makePath(std.fs.path.dirname(full_path).?);
try std.fs.cwd().writeFile(.{ .sub_path = full_path, .data = best_contents.bytes });
const full_path = try Dir.path.join(arena, &[_][]const u8{ out_dir, generic_name, path_kv.key_ptr.* });
try Dir.cwd().createDirPath(io, Dir.path.dirname(full_path).?);
try Dir.cwd().writeFile(io, .{ .sub_path = full_path, .data = best_contents.bytes });
best_contents.is_generic = true;
while (contents_list.pop()) |contender| {
if (contender.hit_count > 1) {
@ -299,9 +306,9 @@ pub fn main() !void {
else => @tagName(dest_target.arch),
};
const out_subpath = try std.fmt.allocPrint(arena, "{s}-linux-any", .{arch_name});
const full_path = try std.fs.path.join(arena, &[_][]const u8{ out_dir, out_subpath, path_kv.key_ptr.* });
try std.fs.cwd().makePath(std.fs.path.dirname(full_path).?);
try std.fs.cwd().writeFile(.{ .sub_path = full_path, .data = contents.bytes });
const full_path = try Dir.path.join(arena, &[_][]const u8{ out_dir, out_subpath, path_kv.key_ptr.* });
try Dir.cwd().createDirPath(io, Dir.path.dirname(full_path).?);
try Dir.cwd().writeFile(io, .{ .sub_path = full_path, .data = contents.bytes });
}
}
@ -316,8 +323,8 @@ pub fn main() !void {
"any-linux-any/linux/netfilter_ipv6/ip6t_HL.h",
};
for (bad_files) |bad_file| {
const full_path = try std.fs.path.join(arena, &[_][]const u8{ out_dir, bad_file });
try std.fs.cwd().deleteFile(full_path);
const full_path = try Dir.path.join(arena, &[_][]const u8{ out_dir, bad_file });
try Dir.cwd().deleteFile(io, full_path);
}
}

View file

@ -1,6 +1,8 @@
const std = @import("std");
const builtin = @import("builtin");
const fs = std.fs;
const std = @import("std");
const Io = std.Io;
const Dir = std.Io.Dir;
const mem = std.mem;
const json = std.json;
const assert = std.debug.assert;
@ -1927,26 +1929,26 @@ pub fn main() anyerror!void {
// there shouldn't be any more argument after the optional filter
if (args.skip()) usageAndExit(args0, 1);
var zig_src_dir = try fs.cwd().openDir(zig_src_root, .{});
defer zig_src_dir.close();
var zig_src_dir = try Dir.cwd().openDir(io, zig_src_root, .{});
defer zig_src_dir.close(io);
const root_progress = std.Progress.start(.{ .estimated_total_items = targets.len });
const root_progress = std.Progress.start(io, .{ .estimated_total_items = targets.len });
defer root_progress.end();
var group: std.Io.Group = .init;
var group: Io.Group = .init;
defer group.cancel(io);
for (targets) |target| {
if (filter) |zig_name| {
if (!std.mem.eql(u8, target.zig_name, zig_name)) continue;
}
group.async(io, processOneTarget, .{.{
group.async(io, processOneTarget, .{ io, .{
.llvm_tblgen_exe = llvm_tblgen_exe,
.llvm_src_root = llvm_src_root,
.zig_src_dir = zig_src_dir,
.root_progress = root_progress,
.target = target,
}});
} });
}
group.wait(io);
@@ -1955,12 +1957,12 @@ pub fn main() anyerror!void {
const Job = struct {
llvm_tblgen_exe: []const u8,
llvm_src_root: []const u8,
zig_src_dir: std.fs.Dir,
zig_src_dir: Dir,
root_progress: std.Progress.Node,
target: ArchTarget,
};
fn processOneTarget(job: Job) void {
fn processOneTarget(io: Io, job: Job) void {
errdefer |err| std.debug.panic("panic: {s}", .{@errorName(err)});
const target = job.target;
@@ -2240,12 +2242,12 @@ fn processOneTarget(job: Job) void {
const render_progress = progress_node.start("rendering Zig code", 0);
var target_dir = try job.zig_src_dir.openDir("lib/std/Target", .{});
defer target_dir.close();
var target_dir = try job.zig_src_dir.openDir(io, "lib/std/Target", .{});
defer target_dir.close(io);
const zig_code_basename = try std.fmt.allocPrint(arena, "{s}.zig", .{target.zig_name});
var zig_code_file = try target_dir.createFile(zig_code_basename, .{});
defer zig_code_file.close();
var zig_code_file = try target_dir.createFile(io, zig_code_basename, .{});
defer zig_code_file.close(io);
var zig_code_file_buffer: [4096]u8 = undefined;
var zig_code_file_writer = zig_code_file.writer(&zig_code_file_buffer);

View file

@@ -1,5 +1,6 @@
const std = @import("std");
const fs = std.fs;
const Io = std.Io;
const Dir = std.Io.Dir;
const mem = std.mem;
const ascii = std.ascii;
@@ -10,25 +11,29 @@ pub fn main() anyerror!void {
defer arena_state.deinit();
const arena = arena_state.allocator();
var threaded: Io.Threaded = .init(arena);
defer threaded.deinit();
const io = threaded.io();
const args = try std.process.argsAlloc(arena);
if (args.len <= 1) printUsageAndExit(args[0]);
const zig_src_root = args[1];
if (mem.startsWith(u8, zig_src_root, "-")) printUsageAndExit(args[0]);
var zig_src_dir = try fs.cwd().openDir(zig_src_root, .{});
defer zig_src_dir.close();
var zig_src_dir = try Dir.cwd().openDir(io, zig_src_root, .{});
defer zig_src_dir.close(io);
const hash_sub_path = try fs.path.join(arena, &.{ "lib", "std", "hash" });
var hash_target_dir = try zig_src_dir.makeOpenPath(hash_sub_path, .{});
defer hash_target_dir.close();
const hash_sub_path = try Dir.path.join(arena, &.{ "lib", "std", "hash" });
var hash_target_dir = try zig_src_dir.createDirPathOpen(io, hash_sub_path, .{});
defer hash_target_dir.close(io);
const crc_sub_path = try fs.path.join(arena, &.{ "lib", "std", "hash", "crc" });
var crc_target_dir = try zig_src_dir.makeOpenPath(crc_sub_path, .{});
defer crc_target_dir.close();
const crc_sub_path = try Dir.path.join(arena, &.{ "lib", "std", "hash", "crc" });
var crc_target_dir = try zig_src_dir.createDirPathOpen(io, crc_sub_path, .{});
defer crc_target_dir.close(io);
var zig_code_file = try hash_target_dir.createFile("crc.zig", .{});
defer zig_code_file.close();
var zig_code_file = try hash_target_dir.createFile(io, "crc.zig", .{});
defer zig_code_file.close(io);
var zig_code_file_buffer: [4096]u8 = undefined;
var zig_code_file_writer = zig_code_file.writer(&zig_code_file_buffer);
const code_writer = &zig_code_file_writer.interface;
@@ -51,8 +56,8 @@ pub fn main() anyerror!void {
\\
);
var zig_test_file = try crc_target_dir.createFile("test.zig", .{});
defer zig_test_file.close();
var zig_test_file = try crc_target_dir.createFile(io, "test.zig", .{});
defer zig_test_file.close(io);
var zig_test_file_buffer: [4096]u8 = undefined;
var zig_test_file_writer = zig_test_file.writer(&zig_test_file_buffer);
const test_writer = &zig_test_file_writer.interface;

View file

@@ -5,6 +5,7 @@
//! `zig run tools/update_freebsd_libc.zig -- ~/Downloads/freebsd-src .`
const std = @import("std");
const Io = std.Io;
const exempt_files = [_][]const u8{
// This file is maintained by a separate project and does not come from FreeBSD.
@@ -16,22 +17,24 @@ pub fn main() !void {
defer arena_instance.deinit();
const arena = arena_instance.allocator();
var threaded: Io.Threaded = .init(arena);
defer threaded.deinit();
const io = threaded.io();
const args = try std.process.argsAlloc(arena);
const freebsd_src_path = args[1];
const zig_src_path = args[2];
const dest_dir_path = try std.fmt.allocPrint(arena, "{s}/lib/libc/freebsd", .{zig_src_path});
var dest_dir = std.fs.cwd().openDir(dest_dir_path, .{ .iterate = true }) catch |err| {
std.log.err("unable to open destination directory '{s}': {s}", .{
dest_dir_path, @errorName(err),
});
var dest_dir = std.fs.cwd().openDir(io, dest_dir_path, .{ .iterate = true }) catch |err| {
std.log.err("unable to open destination directory '{s}': {t}", .{ dest_dir_path, err });
std.process.exit(1);
};
defer dest_dir.close();
defer dest_dir.close(io);
var freebsd_src_dir = try std.fs.cwd().openDir(freebsd_src_path, .{});
defer freebsd_src_dir.close();
defer freebsd_src_dir.close(io);
// Copy updated files from upstream.
{
@@ -57,7 +60,7 @@ pub fn main() !void {
@errorName(err),
});
if (err == error.FileNotFound) {
try dest_dir.deleteFile(entry.path);
try dest_dir.deleteFile(io, entry.path);
}
};
}

View file

@@ -7,9 +7,11 @@
//! `zig run ../tools/update_glibc.zig -- ~/Downloads/glibc ..`
const std = @import("std");
const Io = std.Io;
const Dir = std.Io.Dir;
const mem = std.mem;
const log = std.log;
const fs = std.fs;
const fatal = std.process.fatal;
const exempt_files = [_][]const u8{
// This file is maintained by a separate project and does not come from glibc.
@@ -41,21 +43,23 @@ pub fn main() !void {
defer arena_instance.deinit();
const arena = arena_instance.allocator();
var threaded: Io.Threaded = .init(arena);
defer threaded.deinit();
const io = threaded.io();
const args = try std.process.argsAlloc(arena);
const glibc_src_path = args[1];
const zig_src_path = args[2];
const dest_dir_path = try std.fmt.allocPrint(arena, "{s}/lib/libc/glibc", .{zig_src_path});
var dest_dir = fs.cwd().openDir(dest_dir_path, .{ .iterate = true }) catch |err| {
fatal("unable to open destination directory '{s}': {s}", .{
dest_dir_path, @errorName(err),
});
var dest_dir = Dir.cwd().openDir(io, dest_dir_path, .{ .iterate = true }) catch |err| {
fatal("unable to open destination directory '{s}': {t}", .{ dest_dir_path, err });
};
defer dest_dir.close();
defer dest_dir.close(io);
var glibc_src_dir = try fs.cwd().openDir(glibc_src_path, .{});
defer glibc_src_dir.close();
var glibc_src_dir = try Dir.cwd().openDir(io, glibc_src_path, .{});
defer glibc_src_dir.close(io);
// Copy updated files from upstream.
{
@@ -73,13 +77,11 @@ pub fn main() !void {
}
glibc_src_dir.copyFile(entry.path, dest_dir, entry.path, .{}) catch |err| {
log.warn("unable to copy '{s}/{s}' to '{s}/{s}': {s}", .{
glibc_src_path, entry.path,
dest_dir_path, entry.path,
@errorName(err),
log.warn("unable to copy '{s}/{s}' to '{s}/{s}': {t}", .{
glibc_src_path, entry.path, dest_dir_path, entry.path, err,
});
if (err == error.FileNotFound) {
try dest_dir.deleteFile(entry.path);
try dest_dir.deleteFile(io, entry.path);
}
};
}
@@ -88,20 +90,18 @@ pub fn main() !void {
// Warn about duplicated files inside glibc/include/* that can be omitted
// because they are already in generic-glibc/*.
var include_dir = dest_dir.openDir("include", .{ .iterate = true }) catch |err| {
fatal("unable to open directory '{s}/include': {s}", .{
dest_dir_path, @errorName(err),
});
var include_dir = dest_dir.openDir(io, "include", .{ .iterate = true }) catch |err| {
fatal("unable to open directory '{s}/include': {t}", .{ dest_dir_path, err });
};
defer include_dir.close();
defer include_dir.close(io);
const generic_glibc_path = try std.fmt.allocPrint(
arena,
"{s}/lib/libc/include/generic-glibc",
.{zig_src_path},
);
var generic_glibc_dir = try fs.cwd().openDir(generic_glibc_path, .{});
defer generic_glibc_dir.close();
var generic_glibc_dir = try Dir.cwd().openDir(io, generic_glibc_path, .{});
defer generic_glibc_dir.close(io);
var walker = try include_dir.walk(arena);
defer walker.deinit();
@@ -146,8 +146,3 @@ pub fn main() !void {
}
}
}
fn fatal(comptime format: []const u8, args: anytype) noreturn {
log.err(format, args);
std.process.exit(1);
}

View file

@@ -1,35 +1,41 @@
const std = @import("std");
const Io = std.Io;
const Dir = std.Io.Dir;
pub fn main() !void {
var arena_instance = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_instance.deinit();
const arena = arena_instance.allocator();
var threaded: Io.Threaded = .init(arena);
defer threaded.deinit();
const io = threaded.io();
const args = try std.process.argsAlloc(arena);
const zig_src_lib_path = args[1];
const mingw_src_path = args[2];
const dest_mingw_crt_path = try std.fs.path.join(arena, &.{
const dest_mingw_crt_path = try Dir.path.join(arena, &.{
zig_src_lib_path, "libc", "mingw",
});
const src_mingw_crt_path = try std.fs.path.join(arena, &.{
const src_mingw_crt_path = try Dir.path.join(arena, &.{
mingw_src_path, "mingw-w64-crt",
});
// Update only the set of existing files we have already chosen to include
// in zig's installation.
var dest_crt_dir = std.fs.cwd().openDir(dest_mingw_crt_path, .{ .iterate = true }) catch |err| {
var dest_crt_dir = Dir.cwd().openDir(io, dest_mingw_crt_path, .{ .iterate = true }) catch |err| {
std.log.err("unable to open directory '{s}': {s}", .{ dest_mingw_crt_path, @errorName(err) });
std.process.exit(1);
};
defer dest_crt_dir.close();
defer dest_crt_dir.close(io);
var src_crt_dir = std.fs.cwd().openDir(src_mingw_crt_path, .{ .iterate = true }) catch |err| {
var src_crt_dir = Dir.cwd().openDir(io, src_mingw_crt_path, .{ .iterate = true }) catch |err| {
std.log.err("unable to open directory '{s}': {s}", .{ src_mingw_crt_path, @errorName(err) });
std.process.exit(1);
};
defer src_crt_dir.close();
defer src_crt_dir.close(io);
{
var walker = try dest_crt_dir.walk(arena);
@@ -49,11 +55,11 @@ pub fn main() !void {
if (!keep) {
std.log.warn("deleting {s}", .{entry.path});
try dest_crt_dir.deleteFile(entry.path);
try dest_crt_dir.deleteFile(io, entry.path);
}
},
else => {
std.log.err("unable to copy {s}: {s}", .{ entry.path, @errorName(err) });
std.log.err("unable to copy {s}: {t}", .{ entry.path, err });
fail = true;
},
};
@@ -63,24 +69,24 @@ pub fn main() !void {
}
{
const dest_mingw_winpthreads_path = try std.fs.path.join(arena, &.{
const dest_mingw_winpthreads_path = try Dir.path.join(arena, &.{
zig_src_lib_path, "libc", "mingw", "winpthreads",
});
const src_mingw_libraries_winpthreads_src_path = try std.fs.path.join(arena, &.{
const src_mingw_libraries_winpthreads_src_path = try Dir.path.join(arena, &.{
mingw_src_path, "mingw-w64-libraries", "winpthreads", "src",
});
var dest_winpthreads_dir = std.fs.cwd().openDir(dest_mingw_winpthreads_path, .{ .iterate = true }) catch |err| {
var dest_winpthreads_dir = Dir.cwd().openDir(io, dest_mingw_winpthreads_path, .{ .iterate = true }) catch |err| {
std.log.err("unable to open directory '{s}': {s}", .{ dest_mingw_winpthreads_path, @errorName(err) });
std.process.exit(1);
};
defer dest_winpthreads_dir.close();
defer dest_winpthreads_dir.close(io);
var src_winpthreads_dir = std.fs.cwd().openDir(src_mingw_libraries_winpthreads_src_path, .{ .iterate = true }) catch |err| {
var src_winpthreads_dir = Dir.cwd().openDir(io, src_mingw_libraries_winpthreads_src_path, .{ .iterate = true }) catch |err| {
std.log.err("unable to open directory '{s}': {s}", .{ src_mingw_libraries_winpthreads_src_path, @errorName(err) });
std.process.exit(1);
};
defer src_winpthreads_dir.close();
defer src_winpthreads_dir.close(io);
{
var walker = try dest_winpthreads_dir.walk(arena);
@@ -94,10 +100,10 @@ pub fn main() !void {
src_winpthreads_dir.copyFile(entry.path, dest_winpthreads_dir, entry.path, .{}) catch |err| switch (err) {
error.FileNotFound => {
std.log.warn("deleting {s}", .{entry.path});
try dest_winpthreads_dir.deleteFile(entry.path);
try dest_winpthreads_dir.deleteFile(io, entry.path);
},
else => {
std.log.err("unable to copy {s}: {s}", .{ entry.path, @errorName(err) });
std.log.err("unable to copy {s}: {t}", .{ entry.path, err });
fail = true;
},
};
@@ -164,7 +170,7 @@ pub fn main() !void {
const kept_crt_files = [_][]const u8{
"COPYING",
"include" ++ std.fs.path.sep_str ++ "config.h",
"include" ++ Dir.path.sep_str ++ "config.h",
};
const def_exts = [_][]const u8{

View file

@@ -5,6 +5,7 @@
//! `zig run tools/update_netbsd_libc.zig -- ~/Downloads/netbsd-src .`
const std = @import("std");
const Io = std.Io;
const exempt_files = [_][]const u8{
// This file is maintained by a separate project and does not come from NetBSD.
@@ -16,22 +17,24 @@ pub fn main() !void {
defer arena_instance.deinit();
const arena = arena_instance.allocator();
var threaded: Io.Threaded = .init(arena);
defer threaded.deinit();
const io = threaded.io();
const args = try std.process.argsAlloc(arena);
const netbsd_src_path = args[1];
const zig_src_path = args[2];
const dest_dir_path = try std.fmt.allocPrint(arena, "{s}/lib/libc/netbsd", .{zig_src_path});
var dest_dir = std.fs.cwd().openDir(dest_dir_path, .{ .iterate = true }) catch |err| {
std.log.err("unable to open destination directory '{s}': {s}", .{
dest_dir_path, @errorName(err),
});
var dest_dir = std.fs.cwd().openDir(io, dest_dir_path, .{ .iterate = true }) catch |err| {
std.log.err("unable to open destination directory '{s}': {t}", .{ dest_dir_path, err });
std.process.exit(1);
};
defer dest_dir.close();
defer dest_dir.close(io);
var netbsd_src_dir = try std.fs.cwd().openDir(netbsd_src_path, .{});
defer netbsd_src_dir.close();
var netbsd_src_dir = try std.fs.cwd().openDir(io, netbsd_src_path, .{});
defer netbsd_src_dir.close(io);
// Copy updated files from upstream.
{
@@ -51,13 +54,11 @@ pub fn main() !void {
});
netbsd_src_dir.copyFile(entry.path, dest_dir, entry.path, .{}) catch |err| {
std.log.warn("unable to copy '{s}/{s}' to '{s}/{s}': {s}", .{
netbsd_src_path, entry.path,
dest_dir_path, entry.path,
@errorName(err),
std.log.warn("unable to copy '{s}/{s}' to '{s}/{s}': {t}", .{
netbsd_src_path, entry.path, dest_dir_path, entry.path, err,
});
if (err == error.FileNotFound) {
try dest_dir.deleteFile(entry.path);
try dest_dir.deleteFile(io, entry.path);
}
};
}