mirror of
https://codeberg.org/ziglang/zig.git
synced 2026-03-08 02:24:33 +01:00
std.process.run: use Io.File.MultiReader
and delete the special-cased function
This commit is contained in:
parent
b2816f2698
commit
687123a85e
2 changed files with 28 additions and 91 deletions
|
|
@@ -453,16 +453,16 @@ pub fn spawnPath(io: Io, dir: Io.Dir, options: SpawnOptions) SpawnError!Child {
|
|||
return io.vtable.processSpawnPath(io.userdata, dir, options);
|
||||
}
|
||||
|
||||
pub const RunError = SpawnError || Child.CollectOutputError;
|
||||
pub const RunError = SpawnError || error{
|
||||
StreamTooLong,
|
||||
} || Io.ConcurrentError || Allocator.Error || Io.File.Reader.Error || Io.Timeout.Error;
|
||||
|
||||
pub const RunOptions = struct {
|
||||
argv: []const []const u8,
|
||||
stderr_limit: Io.Limit = .unlimited,
|
||||
stdout_limit: Io.Limit = .unlimited,
|
||||
/// How many bytes to initially allocate for stderr.
|
||||
stderr_reserve_amount: usize = 1,
|
||||
/// How many bytes to initially allocate for stdout.
|
||||
stdout_reserve_amount: usize = 1,
|
||||
/// How many bytes to initially allocate for stderr and stdout.
|
||||
reserve_amount: usize = 64,
|
||||
|
||||
/// Set to change the current working directory when spawning the child process.
|
||||
cwd: ?[]const u8 = null,
|
||||
|
|
@@ -516,29 +516,36 @@ pub fn run(gpa: Allocator, io: Io, options: RunOptions) RunError!RunResult {
|
|||
});
|
||||
defer child.kill(io);
|
||||
|
||||
var stdout: std.ArrayList(u8) = .empty;
|
||||
defer stdout.deinit(gpa);
|
||||
var stderr: std.ArrayList(u8) = .empty;
|
||||
defer stderr.deinit(gpa);
|
||||
var multi_reader_buffer: Io.File.MultiReader.Buffer(2) = undefined;
|
||||
var multi_reader: Io.File.MultiReader = undefined;
|
||||
multi_reader.init(gpa, io, multi_reader_buffer.toStreams(), &.{ child.stdout.?, child.stderr.? });
|
||||
defer multi_reader.deinit();
|
||||
|
||||
try stdout.ensureUnusedCapacity(gpa, options.stdout_reserve_amount);
|
||||
try stderr.ensureUnusedCapacity(gpa, options.stderr_reserve_amount);
|
||||
const stdout_reader = multi_reader.reader(0);
|
||||
const stderr_reader = multi_reader.reader(1);
|
||||
|
||||
try child.collectOutput(io, .{
|
||||
.allocator = gpa,
|
||||
.stdout = &stdout,
|
||||
.stderr = &stderr,
|
||||
.stdout_limit = options.stdout_limit,
|
||||
.stderr_limit = options.stderr_limit,
|
||||
.timeout = options.timeout,
|
||||
});
|
||||
while (multi_reader.fill(options.reserve_amount, options.timeout)) |_| {
|
||||
if (options.stdout_limit.toInt()) |limit| {
|
||||
if (stdout_reader.buffered().len > limit)
|
||||
return error.StreamTooLong;
|
||||
}
|
||||
if (options.stderr_limit.toInt()) |limit| {
|
||||
if (stderr_reader.buffered().len > limit)
|
||||
return error.StreamTooLong;
|
||||
}
|
||||
} else |err| switch (err) {
|
||||
error.EndOfStream => {},
|
||||
else => |e| return e,
|
||||
}
|
||||
|
||||
try multi_reader.checkAnyError();
|
||||
|
||||
const term = try child.wait(io);
|
||||
|
||||
const stdout_slice = try stdout.toOwnedSlice(gpa);
|
||||
const stdout_slice = try multi_reader.toOwnedSlice(0);
|
||||
errdefer gpa.free(stdout_slice);
|
||||
|
||||
const stderr_slice = try stderr.toOwnedSlice(gpa);
|
||||
const stderr_slice = try multi_reader.toOwnedSlice(1);
|
||||
errdefer gpa.free(stderr_slice);
|
||||
|
||||
return .{
|
||||
|
|
|
|||
|
|
@@ -124,73 +124,3 @@ pub fn wait(child: *Child, io: Io) WaitError!Term {
|
|||
assert(child.id != null);
|
||||
return io.vtable.childWait(io.userdata, child);
|
||||
}
|
||||
|
||||
/// Error set returned by `collectOutput`. `StreamTooLong` is returned when a
/// stream's collected bytes exceed its configured limit, or when the
/// destination list runs out of capacity and no allocator was provided.
pub const CollectOutputError = error{
    StreamTooLong,
} || Io.ConcurrentError || Allocator.Error || Io.File.Reader.Error || Io.Timeout.Error;
|
||||
|
||||
/// Options controlling `collectOutput`: destination buffers, per-stream size
/// limits, allocation policy, and wait timeout.
pub const CollectOutputOptions = struct {
    /// Receives the child's stdout bytes. Bytes are appended into this list's
    /// spare capacity; existing contents are preserved.
    stdout: *std.ArrayList(u8),
    /// Receives the child's stderr bytes. Bytes are appended into this list's
    /// spare capacity; existing contents are preserved.
    stderr: *std.ArrayList(u8),
    /// Used for `stdout` and `stderr`. If not provided, only the existing
    /// capacity will be used.
    allocator: ?Allocator = null,
    /// Maximum total size `stdout` may reach before `error.StreamTooLong`.
    stdout_limit: Io.Limit = .unlimited,
    /// Maximum total size `stderr` may reach before `error.StreamTooLong`.
    stderr_limit: Io.Limit = .unlimited,
    /// Passed to each wait on the underlying read batch.
    timeout: Io.Timeout = .none,
};
|
||||
|
||||
/// Collect the output from the process's stdout and stderr. Will return once
/// all output has been collected. This does not mean that the process has
/// ended. `wait` should still be called to wait for and clean up the process.
///
/// The process must have been started with stdout and stderr set to
/// `process.SpawnOptions.StdIo.pipe`.
pub fn collectOutput(child: *const Child, io: Io, options: CollectOutputOptions) CollectOutputError!void {
    // Index 0 is stdout and index 1 is stderr throughout the parallel arrays
    // below; `op` is that same index everywhere it appears.
    const files: [2]Io.File = .{ child.stdout.?, child.stderr.? };
    const lists: [2]*std.ArrayList(u8) = .{ options.stdout, options.stderr };
    const limits: [2]Io.Limit = .{ options.stdout_limit, options.stderr_limit };
    // One streaming-read operation per pipe, submitted as a single batch so
    // both pipes are drained concurrently.
    var reads: [2]Io.Operation = undefined;
    // Each read's data is a one-element vector pointing at the corresponding
    // list's unused capacity; bytes land directly in the destination buffer.
    var vecs: [2][1][]u8 = undefined;
    var ring: [2]u32 = undefined;
    var batch: Io.Batch = .init(&reads, &ring);
    defer {
        // On every exit path (success, error, or StreamTooLong): cancel any
        // still-pending reads, then account for bytes that completed before
        // cancellation so the lists' lengths stay consistent with what was
        // actually written into their capacity.
        batch.cancel(io);
        while (batch.next()) |op| {
            lists[op].items.len += reads[op].file_read_streaming.status.result catch continue;
        }
    }
    // Count of streams that have not yet reported end-of-stream.
    var remaining: usize = 0;
    for (0.., &reads, &lists, &files, &vecs) |op, *read, list, file, *vec| {
        // With an allocator we can grow on demand; without one, only the
        // capacity the caller reserved up front is available.
        if (options.allocator) |gpa| try list.ensureUnusedCapacity(gpa, 1);
        const cap = list.unusedCapacitySlice();
        if (cap.len == 0) return error.StreamTooLong;
        vec[0] = cap;
        read.* = .{ .file_read_streaming = .{
            .file = file,
            .data = vec,
        } };
        batch.add(op);
        remaining += 1;
    }
    while (remaining > 0) {
        try batch.wait(io, options.timeout);
        // Drain all completions, then re-arm each stream that has not yet
        // reached EOF.
        while (batch.next()) |op| {
            const n = reads[op].file_read_streaming.status.result catch |err| switch (err) {
                error.EndOfStream => {
                    // This stream is finished; do not resubmit it.
                    remaining -= 1;
                    continue;
                },
                else => |e| return e,
            };
            // Bytes were read directly into unused capacity; publish them by
            // extending the list's length.
            lists[op].items.len += n;
            // NOTE(review): assumes `Io.Limit`'s integer value is the byte cap
            // (with `.unlimited` as the maximum) — confirm against Io.Limit.
            if (lists[op].items.len > @intFromEnum(limits[op])) return error.StreamTooLong;
            if (options.allocator) |gpa| try lists[op].ensureUnusedCapacity(gpa, 1);
            const cap = lists[op].unusedCapacitySlice();
            if (cap.len == 0) return error.StreamTooLong;
            // Re-point the vector at the (possibly reallocated) spare capacity
            // before resubmitting — growth may have moved the buffer.
            vecs[op][0] = cap;
            reads[op].file_read_streaming.status = .{ .unstarted = {} };
            batch.add(op);
        }
    }
}
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue