Mirror of https://codeberg.org/ziglang/zig.git (synced 2026-03-08 04:24:33 +01:00).
Reviewed-on: https://codeberg.org/ziglang/zig/pulls/31205
Reviewed-by: Andrew Kelley <andrew@ziglang.org>
1853 lines · 76 KiB · Zig
const runner = @This();
|
|
const builtin = @import("builtin");
|
|
|
|
const std = @import("std");
|
|
const Io = std.Io;
|
|
const assert = std.debug.assert;
|
|
const fmt = std.fmt;
|
|
const mem = std.mem;
|
|
const process = std.process;
|
|
const File = std.Io.File;
|
|
const Step = std.Build.Step;
|
|
const Watch = std.Build.Watch;
|
|
const WebServer = std.Build.WebServer;
|
|
const Allocator = std.mem.Allocator;
|
|
const fatal = std.process.fatal;
|
|
const Writer = std.Io.Writer;
|
|
|
|
/// The user's `build.zig`, provided by the compiler under the "@build" module name.
pub const root = @import("@build");
/// Generated module describing the resolved package dependency tree.
pub const dependencies = @import("@dependencies");

pub const std_options: std.Options = .{
    // Disabled — presumably because the build runner handles no secrets, so
    // side-channel hardening is unnecessary overhead here. TODO confirm.
    .side_channels_mitigations = .none,
    // The build runner makes no TLS connections; disabling shrinks the binary.
    .http_disable_tls = true,
};
|
|
|
|
/// Entry point of the build runner: parses the CLI arguments forwarded by the
/// parent `zig build` process, configures the `std.Build` graph by running the
/// user's `build.zig` (`root`), then executes the requested steps — looping
/// under `--watch`, or waiting for rebuild requests under `--webui`.
pub fn main(init: process.Init.Minimal) !void {
    // The build runner is often short-lived, but thanks to `--watch` and `--webui`, that's not
    // always the case. So, we do need a true gpa for some things.
    var debug_gpa_state: std.heap.DebugAllocator(.{}) = .init;
    defer _ = debug_gpa_state.deinit();
    const gpa = debug_gpa_state.allocator();

    // Thread-pool backed `Io` implementation used for all I/O below.
    var threaded: std.Io.Threaded = .init(gpa, .{
        .environ = init.environ,
        .argv0 = .init(init.args),
    });
    defer threaded.deinit();
    const io = threaded.io();

    // ...but we'll back our arena by `std.heap.page_allocator` for efficiency.
    var arena_instance: std.heap.ArenaAllocator = .init(std.heap.page_allocator);
    defer arena_instance.deinit();
    const arena = arena_instance.allocator();

    const args = try init.args.toSlice(arena);

    // skip my own exe name
    var arg_idx: usize = 1;

    // The first five positional arguments are always supplied by the parent
    // `zig build` process, in this exact order.
    const zig_exe = nextArg(args, &arg_idx) orelse fatal("missing zig compiler path", .{});
    const zig_lib_dir = nextArg(args, &arg_idx) orelse fatal("missing zig lib directory path", .{});
    const build_root = nextArg(args, &arg_idx) orelse fatal("missing build root directory path", .{});
    const cache_root = nextArg(args, &arg_idx) orelse fatal("missing cache root directory path", .{});
    const global_cache_root = nextArg(args, &arg_idx) orelse fatal("missing global cache root directory path", .{});

    const cwd: Io.Dir = .cwd();

    const zig_lib_directory: std.Build.Cache.Directory = .{
        .path = zig_lib_dir,
        .handle = try cwd.openDir(io, zig_lib_dir, .{}),
    };

    const build_root_directory: std.Build.Cache.Directory = .{
        .path = build_root,
        .handle = try cwd.openDir(io, build_root, .{}),
    };

    // Cache directories are created if they do not already exist.
    const local_cache_directory: std.Build.Cache.Directory = .{
        .path = cache_root,
        .handle = try cwd.createDirPathOpen(io, cache_root, .{}),
    };

    const global_cache_directory: std.Build.Cache.Directory = .{
        .path = global_cache_root,
        .handle = try cwd.createDirPathOpen(io, global_cache_root, .{}),
    };

    var graph: std.Build.Graph = .{
        .io = io,
        .arena = arena,
        .cache = .{
            .io = io,
            .gpa = gpa,
            // "h" is the cache manifest subdirectory inside the local cache.
            .manifest_dir = try local_cache_directory.handle.createDirPathOpen(io, "h", .{}),
            .cwd = try process.currentPathAlloc(io, arena),
        },
        .zig_exe = zig_exe,
        .environ_map = try init.environ.createMap(arena),
        .global_cache_root = global_cache_directory,
        .zig_lib_directory = zig_lib_directory,
        .host = .{
            .query = .{},
            .result = try std.zig.system.resolveTargetQuery(io, .{}),
        },
        .time_report = false,
    };

    // Registration order matters: cache manifests reference files by prefix index.
    graph.cache.addPrefix(.{ .path = null, .handle = cwd });
    graph.cache.addPrefix(build_root_directory);
    graph.cache.addPrefix(local_cache_directory);
    graph.cache.addPrefix(global_cache_directory);
    // The compiler version participates in every cache hash.
    graph.cache.hash.addBytes(builtin.zig_version_string);

    const builder = try std.Build.create(
        &graph,
        build_root_directory,
        local_cache_directory,
        dependencies.root_deps,
    );

    // Bare (non-dash) CLI args name the top-level steps to run.
    var targets = std.array_list.Managed([]const u8).init(arena);
    var debug_log_scopes = std.array_list.Managed([]const u8).init(arena);

    // CLI-configurable state, with its defaults.
    var install_prefix: ?[]const u8 = null;
    var dir_list = std.Build.DirList{};
    var error_style: ErrorStyle = .verbose;
    var multiline_errors: MultilineErrors = .indent;
    var summary: ?Summary = null;
    var max_rss: u64 = 0;
    var skip_oom_steps = false;
    var test_timeout_ns: ?u64 = null;
    var color: Color = .auto;
    var help_menu = false;
    var steps_menu = false;
    var output_tmp_nonce: ?[16]u8 = null;
    var watch = false;
    var fuzz: ?std.Build.Fuzz.Mode = null;
    var debounce_interval_ms: u16 = 50;
    var webui_listen: ?Io.net.IpAddress = null;

    // Environment variables provide defaults that CLI flags below may override.
    if (std.zig.EnvVar.ZIG_BUILD_ERROR_STYLE.get(&graph.environ_map)) |str| {
        if (std.meta.stringToEnum(ErrorStyle, str)) |style| {
            error_style = style;
        }
    }

    if (std.zig.EnvVar.ZIG_BUILD_MULTILINE_ERRORS.get(&graph.environ_map)) |str| {
        if (std.meta.stringToEnum(MultilineErrors, str)) |style| {
            multiline_errors = style;
        }
    }

    while (nextArg(args, &arg_idx)) |arg| {
        if (mem.startsWith(u8, arg, "-Z")) {
            // -Z carries a 16-byte nonce naming the temp file used to report
            // needed lazy dependencies back to the parent process.
            if (arg.len != 18) fatalWithHint("bad argument: '{s}'", .{arg});
            output_tmp_nonce = arg[2..18].*;
        } else if (mem.startsWith(u8, arg, "-D")) {
            // -Dname[=value]: user-defined build option from build.zig.
            const option_contents = arg[2..];
            if (option_contents.len == 0)
                fatalWithHint("expected option name after '-D'", .{});
            if (mem.indexOfScalar(u8, option_contents, '=')) |name_end| {
                const option_name = option_contents[0..name_end];
                const option_value = option_contents[name_end + 1 ..];
                if (try builder.addUserInputOption(option_name, option_value))
                    fatal(" access the help menu with 'zig build -h'", .{});
            } else {
                if (try builder.addUserInputFlag(option_contents))
                    fatal(" access the help menu with 'zig build -h'", .{});
            }
        } else if (mem.startsWith(u8, arg, "-")) {
            if (mem.eql(u8, arg, "--verbose")) {
                builder.verbose = true;
            } else if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
                help_menu = true;
            } else if (mem.eql(u8, arg, "-p") or mem.eql(u8, arg, "--prefix")) {
                install_prefix = nextArgOrFatal(args, &arg_idx);
            } else if (mem.eql(u8, arg, "-l") or mem.eql(u8, arg, "--list-steps")) {
                steps_menu = true;
            } else if (mem.startsWith(u8, arg, "-fsys=")) {
                const name = arg["-fsys=".len..];
                graph.system_library_options.put(arena, name, .user_enabled) catch @panic("OOM");
            } else if (mem.startsWith(u8, arg, "-fno-sys=")) {
                const name = arg["-fno-sys=".len..];
                graph.system_library_options.put(arena, name, .user_disabled) catch @panic("OOM");
            } else if (mem.eql(u8, arg, "--release")) {
                builder.release_mode = .any;
            } else if (mem.startsWith(u8, arg, "--release=")) {
                const text = arg["--release=".len..];
                builder.release_mode = std.meta.stringToEnum(std.Build.ReleaseMode, text) orelse {
                    fatalWithHint("expected [off|any|fast|safe|small] in '{s}', found '{s}'", .{
                        arg, text,
                    });
                };
            } else if (mem.eql(u8, arg, "--prefix-lib-dir")) {
                dir_list.lib_dir = nextArgOrFatal(args, &arg_idx);
            } else if (mem.eql(u8, arg, "--prefix-exe-dir")) {
                dir_list.exe_dir = nextArgOrFatal(args, &arg_idx);
            } else if (mem.eql(u8, arg, "--prefix-include-dir")) {
                dir_list.include_dir = nextArgOrFatal(args, &arg_idx);
            } else if (mem.eql(u8, arg, "--sysroot")) {
                builder.sysroot = nextArgOrFatal(args, &arg_idx);
            } else if (mem.eql(u8, arg, "--maxrss")) {
                // Accepts size suffixes (e.g. "2GiB") via parseIntSizeSuffix.
                const max_rss_text = nextArgOrFatal(args, &arg_idx);
                max_rss = std.fmt.parseIntSizeSuffix(max_rss_text, 10) catch |err| {
                    std.debug.print("invalid byte size: '{s}': {s}\n", .{
                        max_rss_text, @errorName(err),
                    });
                    process.exit(1);
                };
            } else if (mem.eql(u8, arg, "--skip-oom-steps")) {
                skip_oom_steps = true;
            } else if (mem.eql(u8, arg, "--test-timeout")) {
                // Timeout syntax is "<float><unit>", e.g. "500ms" or "1.5m".
                // Each entry maps a unit name to its factor in nanoseconds.
                const units: []const struct { []const u8, u64 } = &.{
                    .{ "ns", 1 },
                    .{ "nanosecond", 1 },
                    .{ "us", std.time.ns_per_us },
                    .{ "microsecond", std.time.ns_per_us },
                    .{ "ms", std.time.ns_per_ms },
                    .{ "millisecond", std.time.ns_per_ms },
                    .{ "s", std.time.ns_per_s },
                    .{ "second", std.time.ns_per_s },
                    .{ "m", std.time.ns_per_min },
                    .{ "minute", std.time.ns_per_min },
                    .{ "h", std.time.ns_per_hour },
                    .{ "hour", std.time.ns_per_hour },
                };
                const timeout_str = nextArgOrFatal(args, &arg_idx);
                // Split at the last character that is not a lowercase letter:
                // everything before (inclusive) is the number, the rest the unit.
                const num_end_idx = std.mem.findLastNone(u8, timeout_str, "abcdefghijklmnopqrstuvwxyz") orelse fatal(
                    "invalid timeout '{s}': expected unit (ns, us, ms, s, m, h)",
                    .{timeout_str},
                );
                const num_str = timeout_str[0 .. num_end_idx + 1];
                const unit_str = timeout_str[num_end_idx + 1 ..];
                const unit_factor: f64 = for (units) |unit_and_factor| {
                    if (std.mem.eql(u8, unit_str, unit_and_factor[0])) {
                        break @floatFromInt(unit_and_factor[1]);
                    }
                } else fatal(
                    "invalid timeout '{s}': invalid unit '{s}' (expected ns, us, ms, s, m, h)",
                    .{ timeout_str, unit_str },
                );
                const num_parsed = std.fmt.parseFloat(f64, num_str) catch |err| fatal(
                    "invalid timeout '{s}': invalid number '{s}' ({t})",
                    .{ timeout_str, num_str, err },
                );
                // lossyCast saturates rather than erroring on overflow.
                test_timeout_ns = std.math.lossyCast(u64, unit_factor * num_parsed);
            } else if (mem.eql(u8, arg, "--search-prefix")) {
                const search_prefix = nextArgOrFatal(args, &arg_idx);
                builder.addSearchPrefix(search_prefix);
            } else if (mem.eql(u8, arg, "--libc")) {
                builder.libc_file = nextArgOrFatal(args, &arg_idx);
            } else if (mem.eql(u8, arg, "--color")) {
                const next_arg = nextArg(args, &arg_idx) orelse
                    fatalWithHint("expected [auto|on|off] after '{s}'", .{arg});
                color = std.meta.stringToEnum(Color, next_arg) orelse {
                    fatalWithHint("expected [auto|on|off] after '{s}', found '{s}'", .{
                        arg, next_arg,
                    });
                };
            } else if (mem.eql(u8, arg, "--error-style")) {
                const next_arg = nextArg(args, &arg_idx) orelse
                    fatalWithHint("expected style after '{s}'", .{arg});
                error_style = std.meta.stringToEnum(ErrorStyle, next_arg) orelse {
                    fatalWithHint("expected style after '{s}', found '{s}'", .{ arg, next_arg });
                };
            } else if (mem.eql(u8, arg, "--multiline-errors")) {
                const next_arg = nextArg(args, &arg_idx) orelse
                    fatalWithHint("expected style after '{s}'", .{arg});
                multiline_errors = std.meta.stringToEnum(MultilineErrors, next_arg) orelse {
                    fatalWithHint("expected style after '{s}', found '{s}'", .{ arg, next_arg });
                };
            } else if (mem.eql(u8, arg, "--summary")) {
                const next_arg = nextArg(args, &arg_idx) orelse
                    fatalWithHint("expected [all|new|failures|line|none] after '{s}'", .{arg});
                summary = std.meta.stringToEnum(Summary, next_arg) orelse {
                    fatalWithHint("expected [all|new|failures|line|none] after '{s}', found '{s}'", .{
                        arg, next_arg,
                    });
                };
            } else if (mem.eql(u8, arg, "--seed")) {
                const next_arg = nextArg(args, &arg_idx) orelse
                    fatalWithHint("expected u32 after '{s}'", .{arg});
                graph.random_seed = std.fmt.parseUnsigned(u32, next_arg, 0) catch |err| {
                    fatal("unable to parse seed '{s}' as unsigned 32-bit integer: {s}\n", .{
                        next_arg, @errorName(err),
                    });
                };
            } else if (mem.eql(u8, arg, "--build-id")) {
                builder.build_id = .fast;
            } else if (mem.startsWith(u8, arg, "--build-id=")) {
                const style = arg["--build-id=".len..];
                builder.build_id = std.zig.BuildId.parse(style) catch |err| {
                    fatal("unable to parse --build-id style '{s}': {s}", .{
                        style, @errorName(err),
                    });
                };
            } else if (mem.eql(u8, arg, "--debounce")) {
                const next_arg = nextArg(args, &arg_idx) orelse
                    fatalWithHint("expected u16 after '{s}'", .{arg});
                debounce_interval_ms = std.fmt.parseUnsigned(u16, next_arg, 0) catch |err| {
                    fatal("unable to parse debounce interval '{s}' as unsigned 16-bit integer: {t}\n", .{
                        next_arg, err,
                    });
                };
            } else if (mem.eql(u8, arg, "--webui")) {
                // Default listen address: IPv6 loopback, OS-assigned port.
                if (webui_listen == null) webui_listen = .{ .ip6 = .loopback(0) };
            } else if (mem.startsWith(u8, arg, "--webui=")) {
                const addr_str = arg["--webui=".len..];
                if (std.mem.eql(u8, addr_str, "-")) fatal("web interface cannot listen on stdio", .{});
                webui_listen = Io.net.IpAddress.parseLiteral(addr_str) catch |err| {
                    fatal("invalid web UI address '{s}': {s}", .{ addr_str, @errorName(err) });
                };
            } else if (mem.eql(u8, arg, "--debug-log")) {
                const next_arg = nextArgOrFatal(args, &arg_idx);
                try debug_log_scopes.append(next_arg);
            } else if (mem.eql(u8, arg, "--debug-pkg-config")) {
                builder.debug_pkg_config = true;
            } else if (mem.eql(u8, arg, "--debug-rt")) {
                graph.debug_compiler_runtime_libs = .Debug;
            } else if (mem.cutPrefix(u8, arg, "--debug-rt=")) |rest| {
                graph.debug_compiler_runtime_libs =
                    std.meta.stringToEnum(std.builtin.OptimizeMode, rest) orelse
                        fatal("unrecognized optimization mode: '{s}'", .{rest});
            } else if (mem.eql(u8, arg, "--debug-compile-errors")) {
                builder.debug_compile_errors = true;
            } else if (mem.eql(u8, arg, "--debug-incremental")) {
                builder.debug_incremental = true;
            } else if (mem.eql(u8, arg, "--system")) {
                // The usage text shows another argument after this parameter
                // but it is handled by the parent process. The build runner
                // only sees this flag.
                graph.system_package_mode = true;
            } else if (mem.eql(u8, arg, "--libc-runtimes") or mem.eql(u8, arg, "--glibc-runtimes")) {
                // --glibc-runtimes was the old name of the flag; kept for compatibility for now.
                builder.libc_runtimes_dir = nextArgOrFatal(args, &arg_idx);
            } else if (mem.eql(u8, arg, "--verbose-link")) {
                builder.verbose_link = true;
            } else if (mem.eql(u8, arg, "--verbose-air")) {
                builder.verbose_air = true;
            } else if (mem.eql(u8, arg, "--verbose-llvm-ir")) {
                // "-" means emit LLVM IR to stdout.
                builder.verbose_llvm_ir = "-";
            } else if (mem.startsWith(u8, arg, "--verbose-llvm-ir=")) {
                builder.verbose_llvm_ir = arg["--verbose-llvm-ir=".len..];
            } else if (mem.startsWith(u8, arg, "--verbose-llvm-bc=")) {
                builder.verbose_llvm_bc = arg["--verbose-llvm-bc=".len..];
            } else if (mem.eql(u8, arg, "--verbose-cimport")) {
                builder.verbose_cimport = true;
            } else if (mem.eql(u8, arg, "--verbose-cc")) {
                builder.verbose_cc = true;
            } else if (mem.eql(u8, arg, "--verbose-llvm-cpu-features")) {
                builder.verbose_llvm_cpu_features = true;
            } else if (mem.eql(u8, arg, "--watch")) {
                watch = true;
            } else if (mem.eql(u8, arg, "--time-report")) {
                // Time reporting is presented through the web UI, so it
                // implies --webui with the default listen address.
                graph.time_report = true;
                if (webui_listen == null) webui_listen = .{ .ip6 = .loopback(0) };
            } else if (mem.eql(u8, arg, "--fuzz")) {
                fuzz = .{ .forever = undefined };
                if (webui_listen == null) webui_listen = .{ .ip6 = .loopback(0) };
            } else if (mem.startsWith(u8, arg, "--fuzz=")) {
                // --fuzz=<N>[K|M|G]: run a bounded fuzzing campaign.
                const value = arg["--fuzz=".len..];
                if (value.len == 0) fatal("missing argument to --fuzz", .{});

                const unit: u8 = value[value.len - 1];
                const digits = switch (unit) {
                    '0'...'9' => value,
                    'K', 'M', 'G' => value[0 .. value.len - 1],
                    else => fatal(
                        "invalid argument to --fuzz, expected a positive number optionally suffixed by one of: [KMG]",
                        .{},
                    ),
                };

                const amount = std.fmt.parseInt(u64, digits, 10) catch {
                    fatal(
                        "invalid argument to --fuzz, expected a positive number optionally suffixed by one of: [KMG]",
                        .{},
                    );
                };

                // Decimal (SI) multipliers; overflow is a hard error.
                const normalized_amount = std.math.mul(u64, amount, switch (unit) {
                    else => unreachable,
                    '0'...'9' => 1,
                    'K' => 1000,
                    'M' => 1_000_000,
                    'G' => 1_000_000_000,
                }) catch fatal("fuzzing limit amount overflows u64", .{});

                fuzz = .{
                    .limit = .{
                        .amount = normalized_amount,
                    },
                };
            } else if (mem.eql(u8, arg, "-fincremental")) {
                graph.incremental = true;
            } else if (mem.eql(u8, arg, "-fno-incremental")) {
                graph.incremental = false;
            } else if (mem.eql(u8, arg, "-fwine")) {
                builder.enable_wine = true;
            } else if (mem.eql(u8, arg, "-fno-wine")) {
                builder.enable_wine = false;
            } else if (mem.eql(u8, arg, "-fqemu")) {
                builder.enable_qemu = true;
            } else if (mem.eql(u8, arg, "-fno-qemu")) {
                builder.enable_qemu = false;
            } else if (mem.eql(u8, arg, "-fwasmtime")) {
                builder.enable_wasmtime = true;
            } else if (mem.eql(u8, arg, "-fno-wasmtime")) {
                builder.enable_wasmtime = false;
            } else if (mem.eql(u8, arg, "-frosetta")) {
                builder.enable_rosetta = true;
            } else if (mem.eql(u8, arg, "-fno-rosetta")) {
                builder.enable_rosetta = false;
            } else if (mem.eql(u8, arg, "-fdarling")) {
                builder.enable_darling = true;
            } else if (mem.eql(u8, arg, "-fno-darling")) {
                builder.enable_darling = false;
            } else if (mem.eql(u8, arg, "-fallow-so-scripts")) {
                graph.allow_so_scripts = true;
            } else if (mem.eql(u8, arg, "-fno-allow-so-scripts")) {
                graph.allow_so_scripts = false;
            } else if (mem.eql(u8, arg, "-freference-trace")) {
                builder.reference_trace = 256;
            } else if (mem.startsWith(u8, arg, "-freference-trace=")) {
                const num = arg["-freference-trace=".len..];
                builder.reference_trace = std.fmt.parseUnsigned(u32, num, 10) catch |err| {
                    std.debug.print("unable to parse reference_trace count '{s}': {s}", .{ num, @errorName(err) });
                    process.exit(1);
                };
            } else if (mem.eql(u8, arg, "-fno-reference-trace")) {
                builder.reference_trace = null;
            } else if (mem.cutPrefix(u8, arg, "-j")) |text| {
                // -jN: cap the number of concurrently running async tasks.
                const n = std.fmt.parseUnsigned(u32, text, 10) catch |err|
                    fatal("unable to parse jobs count '{s}': {t}", .{ text, err });
                if (n < 1) fatal("number of jobs must be at least 1", .{});
                threaded.setAsyncLimit(.limited(n));
            } else if (mem.eql(u8, arg, "--")) {
                // Everything after "--" is forwarded verbatim to run steps.
                builder.args = argsRest(args, arg_idx);
                break;
            } else {
                fatalWithHint("unrecognized argument: '{s}'", .{arg});
            }
        } else {
            // Bare argument: names a top-level step to run.
            try targets.append(arg);
        }
    }

    const NO_COLOR = std.zig.EnvVar.NO_COLOR.isSet(&graph.environ_map);
    const CLICOLOR_FORCE = std.zig.EnvVar.CLICOLOR_FORCE.isSet(&graph.environ_map);

    graph.stderr_mode = switch (color) {
        .auto => try .detect(io, .stderr(), NO_COLOR, CLICOLOR_FORCE),
        .on => .escape_codes,
        .off => .no_color,
    };

    if (webui_listen != null) {
        if (watch) fatal("using '--webui' and '--watch' together is not yet supported; consider omitting '--watch' in favour of the web UI \"Rebuild\" button", .{});
        if (builtin.single_threaded) fatal("'--webui' is not yet supported on single-threaded hosts", .{});
    }

    const main_progress_node = std.Progress.start(io, .{
        .disable_printing = (color == .off),
    });
    defer main_progress_node.end();

    builder.debug_log_scopes = debug_log_scopes.items;
    builder.resolveInstallPrefix(install_prefix, dir_list);
    {
        // Configure phase: execute the user's build.zig to populate the graph.
        var prog_node = main_progress_node.start("Configure", 0);
        defer prog_node.end();
        try builder.runBuild(root);
        createModuleDependencies(builder) catch @panic("OOM");
    }

    if (graph.needed_lazy_dependencies.entries.len != 0) {
        // Some lazy dependencies are required but not yet fetched: write their
        // names (newline-separated) to a temp file named by the -Z nonce so the
        // parent process can fetch them and re-run the build runner.
        var buffer: std.ArrayList(u8) = .empty;
        for (graph.needed_lazy_dependencies.keys()) |k| {
            try buffer.appendSlice(arena, k);
            try buffer.append(arena, '\n');
        }
        const s = std.fs.path.sep_str;
        const tmp_sub_path = "tmp" ++ s ++ (output_tmp_nonce orelse fatal("missing -Z arg", .{}));
        local_cache_directory.handle.writeFile(io, .{
            .sub_path = tmp_sub_path,
            .data = buffer.items,
            .flags = .{ .exclusive = true },
        }) catch |err| {
            fatal("unable to write configuration results to '{f}{s}': {s}", .{
                local_cache_directory, tmp_sub_path, @errorName(err),
            });
        };
        process.exit(3); // Indicate configure phase failed with meaningful stdout.
    }

    if (builder.validateUserInputDidItFail()) {
        fatal(" access the help menu with 'zig build -h'", .{});
    }

    validateSystemLibraryOptions(builder);

    // -h/--help and -l/--list-steps are handled after the configure phase so
    // the user-declared options and steps can be printed.
    if (help_menu) {
        var w = initStdoutWriter(io);
        printUsage(builder, w) catch return stdout_writer_allocation.err.?;
        w.flush() catch return stdout_writer_allocation.err.?;
        return;
    }

    if (steps_menu) {
        var w = initStdoutWriter(io);
        printSteps(builder, w) catch return stdout_writer_allocation.err.?;
        w.flush() catch return stdout_writer_allocation.err.?;
        return;
    }

    var run: Run = .{
        .gpa = gpa,

        .available_rss = max_rss,
        .max_rss_is_default = false,
        .max_rss_mutex = .init,
        .skip_oom_steps = skip_oom_steps,
        .unit_test_timeout_ns = test_timeout_ns,

        .watch = watch,
        .web_server = undefined, // set after `prepare`
        .memory_blocked_steps = .empty,
        .step_stack = .empty,

        .error_style = error_style,
        .multiline_errors = multiline_errors,
        // Interactive modes default to the terse "line" summary.
        .summary = summary orelse if (watch or webui_listen != null) .line else .failures,
    };
    defer {
        run.memory_blocked_steps.deinit(gpa);
        run.step_stack.deinit(gpa);
    }

    if (run.available_rss == 0) {
        // No --maxrss given: budget against total system memory (or unlimited
        // if that cannot be determined).
        run.available_rss = process.totalSystemMemory() catch std.math.maxInt(u64);
        run.max_rss_is_default = true;
    }

    prepare(arena, builder, targets.items, &run, graph.random_seed) catch |err| switch (err) {
        error.DependencyLoopDetected => {
            // Perhaps in the future there could be an Advanced Options flag
            // such as --debug-build-runner-leaks which would make this code
            // return instead of calling exit.
            _ = io.lockStderr(&.{}, graph.stderr_mode) catch {};
            process.exit(1);
        },
        else => |e| return e,
    };

    var w: Watch = w: {
        if (!watch) break :w undefined;
        if (!Watch.have_impl) fatal("--watch not yet implemented for {t}", .{builtin.os.tag});
        break :w try .init(graph.cache.cwd);
    };

    const now = Io.Clock.Timestamp.now(io, .awake);

    run.web_server = if (webui_listen) |listen_address| ws: {
        if (builtin.single_threaded) unreachable; // `fatal` above
        break :ws .init(.{
            .gpa = gpa,
            .graph = &graph,
            .all_steps = run.step_stack.keys(),
            .root_prog_node = main_progress_node,
            .watch = watch,
            .listen_address = listen_address,
            .base_timestamp = now,
        });
    } else null;

    if (run.web_server) |*ws| {
        ws.start() catch |err| fatal("failed to start web server: {t}", .{err});
    }

    // Main build loop. The continue-expression optionally clears the terminal
    // between iterations when the error style requests it.
    rebuild: while (true) : (if (run.error_style.clearOnUpdate()) {
        const stderr = try io.lockStderr(&stdio_buffer_allocation, graph.stderr_mode);
        defer io.unlockStderr();
        // Clear screen + scrollback, home the cursor.
        try stderr.file_writer.interface.writeAll("\x1B[2J\x1B[3J\x1B[H");
    }) {
        if (run.web_server) |*ws| ws.startBuild();

        try runStepNames(
            builder,
            targets.items,
            main_progress_node,
            &run,
            fuzz,
        );

        if (run.web_server) |*web_server| {
            if (fuzz) |mode| if (mode != .forever) fatal(
                "error: limited fuzzing is not implemented yet for --webui",
                .{},
            );

            web_server.finishBuild(.{ .fuzz = fuzz != null });
        }

        if (run.web_server) |*ws| {
            assert(!watch); // fatal error after CLI parsing
            // Block until the web UI asks for a rebuild, then reset every step
            // to its pre-make state and loop.
            while (true) switch (try ws.wait()) {
                .rebuild => {
                    for (run.step_stack.keys()) |step| {
                        step.state = .precheck_done;
                        step.pending_deps = @intCast(step.dependencies.items.len);
                        step.reset(gpa);
                    }
                    continue :rebuild;
                },
            };
        }

        // Comptime-known guard to prevent including the logic below when `!Watch.have_impl`.
        if (!Watch.have_impl) unreachable;

        try w.update(gpa, run.step_stack.keys());

        // Wait until a file system notification arrives. Read all such events
        // until the buffer is empty. Then wait for a debounce interval, resetting
        // if any more events come in. After the debounce interval has passed,
        // trigger a rebuild on all steps with modified inputs, as well as their
        // recursive dependants.
        var caption_buf: [std.Progress.Node.max_name_len]u8 = undefined;
        const caption = std.fmt.bufPrint(&caption_buf, "watching {d} directories, {d} processes", .{
            w.dir_count, countSubProcesses(run.step_stack.keys()),
        }) catch &caption_buf;
        var debouncing_node = main_progress_node.start(caption, 0);
        var in_debounce = false;
        while (true) switch (try w.wait(gpa, io, if (in_debounce) .{ .ms = debounce_interval_ms } else .none)) {
            .timeout => {
                // Debounce interval elapsed with no further events: rebuild.
                assert(in_debounce);
                debouncing_node.end();
                markFailedStepsDirty(gpa, run.step_stack.keys());
                continue :rebuild;
            },
            .dirty => if (!in_debounce) {
                in_debounce = true;
                debouncing_node.end();
                debouncing_node = main_progress_node.start("Debouncing (Change Detected)", 0);
            },
            .clean => {},
        };
    }
}
|
|
|
|
/// Prepare step results for a `--watch` rebuild: invalidate the results of
/// steps that failed (directly, via a dependency, or by being skipped) on the
/// previous run, then mark the steps that succeeded as cached so the next
/// pass reuses their results.
fn markFailedStepsDirty(gpa: Allocator, all_steps: []const *Step) void {
    // First pass: anything that did not succeed last time must be re-run.
    for (all_steps) |s| {
        switch (s.state) {
            .dependency_failure, .failure, .skipped => {
                _ = s.invalidateResult(gpa);
            },
            else => {},
        }
    }
    // Second pass, only after every dirty step has been invalidated: the
    // remaining steps that succeeded from last run shall be marked "cached".
    for (all_steps) |s| {
        if (s.state == .success) s.result_cached = true;
    }
}
|
|
|
|
/// Returns how many of the given steps currently have a live `zig` child
/// process attached (used for the `--watch` progress caption).
fn countSubProcesses(all_steps: []const *Step) usize {
    var total: usize = 0;
    for (all_steps) |step| {
        if (step.getZigProcess() != null) total += 1;
    }
    return total;
}
|
|
|
|
/// Shared mutable state for one execution of the requested step graph.
/// Created in `main` and threaded through `prepare` and `runStepNames`.
const Run = struct {
    gpa: Allocator,

    // Remaining RSS budget that steps may reserve (seeded from `--maxrss`,
    // or from total system memory when the flag is absent).
    available_rss: usize,
    // True when `available_rss` was defaulted from system memory rather than
    // given explicitly via `--maxrss` (controls the hint printed on overflow).
    max_rss_is_default: bool,
    // Serializes concurrent updates to the RSS accounting.
    max_rss_mutex: Io.Mutex,
    // When true, steps whose declared `max_rss` exceeds the budget are marked
    // `.skipped_oom` instead of failing the build.
    skip_oom_steps: bool,
    // Per-unit-test timeout in nanoseconds; null means no timeout
    // (set by `--test-timeout`).
    unit_test_timeout_ns: ?u64,
    watch: bool,
    // Web UI server, if `--webui` was requested. `?noreturn` on
    // single-threaded hosts, where `--webui` is rejected during CLI parsing.
    web_server: if (!builtin.single_threaded) ?WebServer else ?noreturn,
    /// Allocated into `gpa`.
    memory_blocked_steps: std.ArrayList(*Step),
    /// Allocated into `gpa`.
    step_stack: std.AutoArrayHashMapUnmanaged(*Step, void),

    error_style: ErrorStyle,
    multiline_errors: MultilineErrors,
    summary: Summary,
};
|
|
|
|
/// Populates `run.step_stack` with the requested top-level steps (or the
/// default step when `step_names` is empty), constructs the dependency graph
/// while checking for cycles, and verifies per-step `max_rss` declarations
/// against the available memory budget.
///
/// The starting steps are shuffled using `seed` so that accidental ordering
/// dependencies between steps do not go unnoticed.
///
/// Exits the process when a named step does not exist, or when declared
/// memory bounds cannot be satisfied (and `--skip-oom-steps` is off).
fn prepare(
    arena: Allocator,
    b: *std.Build,
    step_names: []const []const u8,
    run: *Run,
    seed: u32,
) !void {
    const gpa = run.gpa;
    const step_stack = &run.step_stack;

    if (step_names.len == 0) {
        try step_stack.put(gpa, b.default_step, {});
    } else {
        try step_stack.ensureUnusedCapacity(gpa, step_names.len);
        // Inserted in reverse so the first-named step ends up processed in the
        // expected order by the stack-based traversal — TODO confirm intent.
        for (0..step_names.len) |i| {
            const step_name = step_names[step_names.len - i - 1];
            const s = b.top_level_steps.get(step_name) orelse {
                std.debug.print("no step named '{s}'\n access the help menu with 'zig build -h'\n", .{step_name});
                process.exit(1);
            };
            step_stack.putAssumeCapacity(&s.step, {});
        }
    }

    const starting_steps = try arena.dupe(*Step, step_stack.keys());

    // Randomize traversal order to flush out hidden inter-step ordering
    // assumptions; `--seed` makes a particular order reproducible.
    var rng = std.Random.DefaultPrng.init(seed);
    const rand = rng.random();
    rand.shuffle(*Step, starting_steps);

    for (starting_steps) |s| {
        try constructGraphAndCheckForDependencyLoop(gpa, b, s, &run.step_stack, rand);
    }

    {
        // Check that we have enough memory to complete the build.
        var any_problems = false;
        for (step_stack.keys()) |s| {
            // A max_rss of 0 means the step declares no memory bound.
            if (s.max_rss == 0) continue;
            if (s.max_rss > run.available_rss) {
                if (run.skip_oom_steps) {
                    // Skip this step and unblock its dependants.
                    s.state = .skipped_oom;
                    for (s.dependants.items) |dependant| {
                        dependant.pending_deps -= 1;
                    }
                } else {
                    std.debug.print("{s}{s}: this step declares an upper bound of {d} bytes of memory, exceeding the available {d} bytes of memory\n", .{
                        s.owner.dep_prefix, s.name, s.max_rss, run.available_rss,
                    });
                    any_problems = true;
                }
            }
        }
        if (any_problems) {
            if (run.max_rss_is_default) {
                std.debug.print("note: use --maxrss to override the default", .{});
            }
            // Fix: previously this fell through and started a build that was
            // already known to exceed its declared memory bounds. Abort instead.
            process.exit(1);
        }
    }
}
|
|
|
|
fn runStepNames(
|
|
b: *std.Build,
|
|
step_names: []const []const u8,
|
|
parent_prog_node: std.Progress.Node,
|
|
run: *Run,
|
|
fuzz: ?std.Build.Fuzz.Mode,
|
|
) !void {
|
|
const gpa = run.gpa;
|
|
const graph = b.graph;
|
|
const io = graph.io;
|
|
const step_stack = &run.step_stack;
|
|
|
|
{
|
|
// Collect the initial set of tasks (those with no outstanding dependencies) into a buffer,
|
|
// then spawn them. The buffer is so that we don't race with `makeStep` and end up thinking
|
|
// a step is initial when it actually became ready due to an earlier initial step.
|
|
var initial_set: std.ArrayList(*Step) = .empty;
|
|
defer initial_set.deinit(gpa);
|
|
try initial_set.ensureUnusedCapacity(gpa, step_stack.count());
|
|
for (step_stack.keys()) |s| {
|
|
if (s.state == .precheck_done and s.pending_deps == 0) {
|
|
initial_set.appendAssumeCapacity(s);
|
|
}
|
|
}
|
|
|
|
const step_prog = parent_prog_node.start("steps", step_stack.count());
|
|
defer step_prog.end();
|
|
|
|
var group: Io.Group = .init;
|
|
defer group.cancel(io);
|
|
// Start working on all of the initial steps...
|
|
for (initial_set.items) |s| try stepReady(&group, b, s, step_prog, run);
|
|
// ...and `makeStep` will trigger every other step when their last dependency finishes.
|
|
try group.await(io);
|
|
}
|
|
|
|
assert(run.memory_blocked_steps.items.len == 0);
|
|
|
|
var test_pass_count: usize = 0;
|
|
var test_skip_count: usize = 0;
|
|
var test_fail_count: usize = 0;
|
|
var test_crash_count: usize = 0;
|
|
var test_timeout_count: usize = 0;
|
|
|
|
var test_count: usize = 0;
|
|
|
|
var success_count: usize = 0;
|
|
var skipped_count: usize = 0;
|
|
var failure_count: usize = 0;
|
|
var pending_count: usize = 0;
|
|
var total_compile_errors: usize = 0;
|
|
|
|
var cleanup_task = io.async(cleanTmpFiles, .{ io, step_stack.keys() });
|
|
defer cleanup_task.await(io);
|
|
|
|
for (step_stack.keys()) |s| {
|
|
test_pass_count += s.test_results.passCount();
|
|
test_skip_count += s.test_results.skip_count;
|
|
test_fail_count += s.test_results.fail_count;
|
|
test_crash_count += s.test_results.crash_count;
|
|
test_timeout_count += s.test_results.timeout_count;
|
|
|
|
test_count += s.test_results.test_count;
|
|
|
|
switch (s.state) {
|
|
.precheck_unstarted => unreachable,
|
|
.precheck_started => unreachable,
|
|
.precheck_done => unreachable,
|
|
.dependency_failure => pending_count += 1,
|
|
.success => success_count += 1,
|
|
.skipped, .skipped_oom => skipped_count += 1,
|
|
.failure => {
|
|
failure_count += 1;
|
|
const compile_errors_len = s.result_error_bundle.errorMessageCount();
|
|
if (compile_errors_len > 0) {
|
|
total_compile_errors += compile_errors_len;
|
|
}
|
|
},
|
|
}
|
|
}
|
|
|
|
if (fuzz) |mode| blk: {
|
|
switch (builtin.os.tag) {
|
|
// Current implementation depends on two things that need to be ported to Windows:
|
|
// * Memory-mapping to share data between the fuzzer and build runner.
|
|
// * COFF/PE support added to `std.debug.Info` (it needs a batching API for resolving
|
|
// many addresses to source locations).
|
|
.windows => fatal("--fuzz not yet implemented for {t}", .{builtin.os.tag}),
|
|
else => {},
|
|
}
|
|
if (@bitSizeOf(usize) != 64) {
|
|
// Current implementation depends on posix.mmap()'s second parameter, `length: usize`,
|
|
// being compatible with file system's u64 return value. This is not the case
|
|
// on 32-bit platforms.
|
|
// Affects or affected by issues #5185, #22523, and #22464.
|
|
fatal("--fuzz not yet implemented on {d}-bit platforms", .{@bitSizeOf(usize)});
|
|
}
|
|
|
|
switch (mode) {
|
|
.forever => break :blk,
|
|
.limit => {},
|
|
}
|
|
|
|
assert(mode == .limit);
|
|
var f = std.Build.Fuzz.init(
|
|
gpa,
|
|
io,
|
|
step_stack.keys(),
|
|
parent_prog_node,
|
|
mode,
|
|
) catch |err| fatal("failed to start fuzzer: {t}", .{err});
|
|
defer f.deinit();
|
|
|
|
f.start();
|
|
try f.waitAndPrintReport();
|
|
}
|
|
|
|
// Every test has a state
|
|
assert(test_pass_count + test_skip_count + test_fail_count + test_crash_count + test_timeout_count == test_count);
|
|
|
|
if (failure_count == 0) {
|
|
std.Progress.setStatus(.success);
|
|
} else {
|
|
std.Progress.setStatus(.failure);
|
|
}
|
|
|
|
summary: {
|
|
switch (run.summary) {
|
|
.all, .new, .line => {},
|
|
.failures => if (failure_count == 0) break :summary,
|
|
.none => break :summary,
|
|
}
|
|
|
|
const stderr = try io.lockStderr(&stdio_buffer_allocation, graph.stderr_mode);
|
|
defer io.unlockStderr();
|
|
const t = stderr.terminal();
|
|
const w = &stderr.file_writer.interface;
|
|
|
|
const total_count = success_count + failure_count + pending_count + skipped_count;
|
|
t.setColor(.cyan) catch {};
|
|
t.setColor(.bold) catch {};
|
|
w.writeAll("Build Summary: ") catch {};
|
|
t.setColor(.reset) catch {};
|
|
w.print("{d}/{d} steps succeeded", .{ success_count, total_count }) catch {};
|
|
{
|
|
t.setColor(.dim) catch {};
|
|
var first = true;
|
|
if (skipped_count > 0) {
|
|
w.print("{s}{d} skipped", .{ if (first) " (" else ", ", skipped_count }) catch {};
|
|
first = false;
|
|
}
|
|
if (failure_count > 0) {
|
|
w.print("{s}{d} failed", .{ if (first) " (" else ", ", failure_count }) catch {};
|
|
first = false;
|
|
}
|
|
if (!first) w.writeByte(')') catch {};
|
|
t.setColor(.reset) catch {};
|
|
}
|
|
|
|
if (test_count > 0) {
|
|
w.print("; {d}/{d} tests passed", .{ test_pass_count, test_count }) catch {};
|
|
t.setColor(.dim) catch {};
|
|
var first = true;
|
|
if (test_skip_count > 0) {
|
|
w.print("{s}{d} skipped", .{ if (first) " (" else ", ", test_skip_count }) catch {};
|
|
first = false;
|
|
}
|
|
if (test_fail_count > 0) {
|
|
w.print("{s}{d} failed", .{ if (first) " (" else ", ", test_fail_count }) catch {};
|
|
first = false;
|
|
}
|
|
if (test_crash_count > 0) {
|
|
w.print("{s}{d} crashed", .{ if (first) " (" else ", ", test_crash_count }) catch {};
|
|
first = false;
|
|
}
|
|
if (test_timeout_count > 0) {
|
|
w.print("{s}{d} timed out", .{ if (first) " (" else ", ", test_timeout_count }) catch {};
|
|
first = false;
|
|
}
|
|
if (!first) w.writeByte(')') catch {};
|
|
t.setColor(.reset) catch {};
|
|
}
|
|
|
|
w.writeAll("\n") catch {};
|
|
|
|
if (run.summary == .line) break :summary;
|
|
|
|
// Print a fancy tree with build results.
|
|
var step_stack_copy = try step_stack.clone(gpa);
|
|
defer step_stack_copy.deinit(gpa);
|
|
|
|
var print_node: PrintNode = .{ .parent = null };
|
|
if (step_names.len == 0) {
|
|
print_node.last = true;
|
|
printTreeStep(b, b.default_step, run, t, &print_node, &step_stack_copy) catch {};
|
|
} else {
|
|
const last_index = if (run.summary == .all) b.top_level_steps.count() else blk: {
|
|
var i: usize = step_names.len;
|
|
while (i > 0) {
|
|
i -= 1;
|
|
const step = b.top_level_steps.get(step_names[i]).?.step;
|
|
const found = switch (run.summary) {
|
|
.all, .line, .none => unreachable,
|
|
.failures => step.state != .success,
|
|
.new => !step.result_cached,
|
|
};
|
|
if (found) break :blk i;
|
|
}
|
|
break :blk b.top_level_steps.count();
|
|
};
|
|
for (step_names, 0..) |step_name, i| {
|
|
const tls = b.top_level_steps.get(step_name).?;
|
|
print_node.last = i + 1 == last_index;
|
|
printTreeStep(b, &tls.step, run, t, &print_node, &step_stack_copy) catch {};
|
|
}
|
|
}
|
|
w.writeByte('\n') catch {};
|
|
}
|
|
|
|
if (run.watch or run.web_server != null) return;
|
|
|
|
// Perhaps in the future there could be an Advanced Options flag such as
|
|
// --debug-build-runner-leaks which would make this code return instead of
|
|
// calling exit.
|
|
|
|
const code: u8 = code: {
|
|
if (failure_count == 0) break :code 0; // success
|
|
if (run.error_style.verboseContext()) break :code 1; // failure; print build command
|
|
break :code 2; // failure; do not print build command
|
|
};
|
|
_ = io.lockStderr(&.{}, graph.stderr_mode) catch {};
|
|
process.exit(code);
|
|
}
|
|
|
|
/// One node in the parent chain threaded through `printTreeStep` while
/// rendering the build summary tree. Each node records whether it is the
/// last child at its depth so ancestors can be drawn with either a
/// continuation bar or blank space.
const PrintNode = struct {
    /// The node one level up in the tree, or null at the root.
    parent: ?*PrintNode,
    /// Whether this node is the last child printed under its parent.
    last: bool = false,
};
|
|
|
|
/// Recursively writes the tree indentation for all ancestors of `node`:
/// a vertical continuation bar where an ancestor still has siblings below
/// it, blank space where it was the last child. The root and its direct
/// children receive no prefix.
fn printPrefix(node: *PrintNode, stderr: Io.Terminal) !void {
    const parent = node.parent orelse return;
    const writer = stderr.writer;
    // Direct children of the root are printed flush left.
    if (parent.parent == null) return;
    // Recurse first so segments are emitted outermost-to-innermost.
    try printPrefix(parent, stderr);
    if (parent.last) {
        // Ancestor has no further siblings: blank continuation.
        try writer.writeAll("   ");
    } else {
        try writer.writeAll(switch (stderr.mode) {
            // DEC special-graphics escape sequence for a vertical bar.
            .escape_codes => "\x1B\x28\x30\x78\x1B\x28\x42 ", // │
            else => "| ",
        });
    }
}
|
|
|
|
/// Writes the tree connector used in front of a last-child node:
/// "└─ " when escape codes are supported, ASCII "+- " otherwise.
fn printChildNodePrefix(stderr: Io.Terminal) !void {
    const connector: []const u8 = switch (stderr.mode) {
        // DEC special-graphics escape sequence for └─.
        .escape_codes => "\x1B\x28\x30\x6d\x71\x1B\x28\x42 ", // └─
        else => "+- ",
    };
    try stderr.writer.writeAll(connector);
}
|
|
|
|
/// Prints the colorized one-line status suffix for step `s` in the summary
/// tree (e.g. " cached", " success", test tallies, duration, peak RSS),
/// terminated by a newline. Assumes the step name was already written.
fn printStepStatus(s: *Step, stderr: Io.Terminal, run: *const Run) !void {
    const writer = stderr.writer;
    switch (s.state) {
        // Precheck states are finished before any step executes, so they
        // cannot be observed when printing results.
        .precheck_unstarted => unreachable,
        .precheck_started => unreachable,
        .precheck_done => unreachable,

        .dependency_failure => {
            try stderr.setColor(.dim);
            try writer.writeAll(" transitive failure\n");
            try stderr.setColor(.reset);
        },

        .success => {
            try stderr.setColor(.green);
            if (s.result_cached) {
                try writer.writeAll(" cached");
            } else if (s.test_results.test_count > 0) {
                const pass_count = s.test_results.passCount();
                // A successful test step contains only passes and skips.
                assert(s.test_results.test_count == pass_count + s.test_results.skip_count);
                try writer.print(" {d} pass", .{pass_count});
                if (s.test_results.skip_count > 0) {
                    try stderr.setColor(.reset);
                    try writer.writeAll(", ");
                    try stderr.setColor(.yellow);
                    try writer.print("{d} skip", .{s.test_results.skip_count});
                }
                try stderr.setColor(.reset);
                try writer.print(" ({d} total)", .{s.test_results.test_count});
            } else {
                try writer.writeAll(" success");
            }
            try stderr.setColor(.reset);
            if (s.result_duration_ns) |ns| {
                // Render elapsed time using the largest unit that fits.
                try stderr.setColor(.dim);
                if (ns >= std.time.ns_per_min) {
                    try writer.print(" {d}m", .{ns / std.time.ns_per_min});
                } else if (ns >= std.time.ns_per_s) {
                    try writer.print(" {d}s", .{ns / std.time.ns_per_s});
                } else if (ns >= std.time.ns_per_ms) {
                    try writer.print(" {d}ms", .{ns / std.time.ns_per_ms});
                } else if (ns >= std.time.ns_per_us) {
                    try writer.print(" {d}us", .{ns / std.time.ns_per_us});
                } else {
                    try writer.print(" {d}ns", .{ns});
                }
                try stderr.setColor(.reset);
            }
            if (s.result_peak_rss != 0) {
                const rss = s.result_peak_rss;
                // Render peak RSS with decimal (1000-based) unit prefixes.
                try stderr.setColor(.dim);
                if (rss >= 1000_000_000) {
                    try writer.print(" MaxRSS:{d}G", .{rss / 1000_000_000});
                } else if (rss >= 1000_000) {
                    try writer.print(" MaxRSS:{d}M", .{rss / 1000_000});
                } else if (rss >= 1000) {
                    try writer.print(" MaxRSS:{d}K", .{rss / 1000});
                } else {
                    try writer.print(" MaxRSS:{d}B", .{rss});
                }
                try stderr.setColor(.reset);
            }
            try writer.writeAll("\n");
        },
        .skipped => {
            try stderr.setColor(.yellow);
            try writer.writeAll(" skipped\n");
            try stderr.setColor(.reset);
        },
        .skipped_oom => {
            // Skipped because its RSS claim could never fit under --maxrss.
            try stderr.setColor(.yellow);
            try writer.writeAll(" skipped (not enough memory)");
            try stderr.setColor(.dim);
            try writer.print(" upper bound of {d} exceeded runner limit ({d})\n", .{ s.max_rss, run.available_rss });
            try stderr.setColor(.reset);
        },
        .failure => {
            // Failure details are delegated to the shared failure printer.
            try printStepFailure(s, stderr, false);
            try stderr.setColor(.reset);
        },
    }
}
|
|
|
|
/// Prints the colorized one-line explanation of why step `s` failed
/// (compile error count, test tallies, or a generic label), terminated by a
/// newline. Assumes the step name was already written by the caller.
///
/// When `dim` is true the caller is rendering the surrounding line in dim
/// style, so `.dim` is re-applied after every `.reset`.
fn printStepFailure(s: *Step, stderr: Io.Terminal, dim: bool) !void {
    const w = stderr.writer;
    if (s.result_error_bundle.errorMessageCount() > 0) {
        // Compile errors take precedence: just report how many there were.
        try stderr.setColor(.red);
        try w.print(" {d} errors\n", .{
            s.result_error_bundle.errorMessageCount(),
        });
    } else if (!s.test_results.isSuccess()) {
        // These first values include all of the test "statuses". Every test is either passed,
        // skipped, failed, crashed, or timed out.
        try stderr.setColor(.green);
        try w.print(" {d} pass", .{s.test_results.passCount()});
        try stderr.setColor(.reset);
        if (dim) try stderr.setColor(.dim);
        if (s.test_results.skip_count > 0) {
            try w.writeAll(", ");
            try stderr.setColor(.yellow);
            try w.print("{d} skip", .{s.test_results.skip_count});
            try stderr.setColor(.reset);
            if (dim) try stderr.setColor(.dim);
        }
        if (s.test_results.fail_count > 0) {
            try w.writeAll(", ");
            try stderr.setColor(.red);
            try w.print("{d} fail", .{s.test_results.fail_count});
            try stderr.setColor(.reset);
            if (dim) try stderr.setColor(.dim);
        }
        if (s.test_results.crash_count > 0) {
            try w.writeAll(", ");
            try stderr.setColor(.red);
            try w.print("{d} crash", .{s.test_results.crash_count});
            try stderr.setColor(.reset);
            if (dim) try stderr.setColor(.dim);
        }
        if (s.test_results.timeout_count > 0) {
            try w.writeAll(", ");
            try stderr.setColor(.red);
            try w.print("{d} timeout", .{s.test_results.timeout_count});
            try stderr.setColor(.reset);
            if (dim) try stderr.setColor(.dim);
        }
        try w.print(" ({d} total)", .{s.test_results.test_count});

        // Memory leaks are intentionally written after the total, because it isn't a test *status*,
        // but just a flag that any tests -- even passed ones -- can have. We also use a different
        // separator, so it looks like:
        //   2 pass, 1 skip, 2 fail (5 total); 2 leaks
        if (s.test_results.leak_count > 0) {
            try w.writeAll("; ");
            try stderr.setColor(.red);
            try w.print("{d} leaks", .{s.test_results.leak_count});
            try stderr.setColor(.reset);
            if (dim) try stderr.setColor(.dim);
        }

        // It's usually not helpful to know how many error logs there were because they tend to
        // just come with other errors (e.g. crashes and leaks print stack traces, and clean
        // failures print error traces). So only mention them if they're the only thing causing
        // the failure.
        const show_err_logs: bool = show: {
            var alt_results = s.test_results;
            alt_results.log_err_count = 0;
            break :show alt_results.isSuccess();
        };
        if (show_err_logs) {
            try w.writeAll("; ");
            try stderr.setColor(.red);
            try w.print("{d} error logs", .{s.test_results.log_err_count});
            try stderr.setColor(.reset);
            if (dim) try stderr.setColor(.dim);
        }

        try w.writeAll("\n");
    } else if (s.result_error_msgs.items.len > 0) {
        try stderr.setColor(.red);
        try w.writeAll(" failure\n");
    } else {
        // The only remaining failure indicator is captured stderr output, so
        // label it as such. (Previously this printed the garbled label " w".)
        assert(s.result_stderr.len > 0);
        try stderr.setColor(.red);
        try w.writeAll(" stderr\n");
    }
}
|
|
|
|
/// Prints `s` and, recursively, its dependency subtree as part of the build
/// summary. `step_stack` holds every step not yet printed; a step is only
/// expanded the first time it is encountered, and later occurrences are
/// rendered dimmed as "(reused)".
fn printTreeStep(
    b: *std.Build,
    s: *Step,
    run: *const Run,
    stderr: Io.Terminal,
    parent_node: *PrintNode,
    step_stack: *std.AutoArrayHashMapUnmanaged(*Step, void),
) !void {
    const writer = stderr.writer;
    // True only on the first visit; removing `s` from the set marks it as
    // already printed for subsequent encounters.
    const first = step_stack.swapRemove(s);
    const summary = run.summary;
    const skip = switch (summary) {
        // These summary modes never reach the tree printer.
        .none, .line => unreachable,
        .all => false,
        .new => s.result_cached,
        .failures => s.state == .success,
    };
    if (skip) return;
    try printPrefix(parent_node, stderr);

    if (parent_node.parent != null) {
        if (parent_node.last) {
            try printChildNodePrefix(stderr);
        } else {
            try writer.writeAll(switch (stderr.mode) {
                // DEC special-graphics escape sequence for ├─.
                .escape_codes => "\x1B\x28\x30\x74\x71\x1B\x28\x42 ", // ├─
                else => "+- ",
            });
        }
    }

    // Repeated steps are dimmed to distinguish them from first occurrences.
    if (!first) try stderr.setColor(.dim);

    // dep_prefix omitted here because it is redundant with the tree.
    try writer.writeAll(s.name);

    if (first) {
        try printStepStatus(s, stderr, run);

        // Determine which dependency will be printed last, so that tree
        // connectors can be drawn correctly for filtered summary modes.
        const last_index = if (run.summary == .all) s.dependencies.items.len -| 1 else blk: {
            var i: usize = s.dependencies.items.len;
            while (i > 0) {
                i -= 1;

                const step = s.dependencies.items[i];
                const found = switch (summary) {
                    .all, .line, .none => unreachable,
                    .failures => step.state != .success,
                    .new => !step.result_cached,
                };
                if (found) break :blk i;
            }
            // No dependency will be printed; the value is unused then.
            break :blk s.dependencies.items.len -| 1;
        };
        for (s.dependencies.items, 0..) |dep, i| {
            var print_node: PrintNode = .{
                .parent = parent_node,
                .last = i == last_index,
            };
            try printTreeStep(b, dep, run, stderr, &print_node, step_stack);
        }
    } else {
        // Already printed elsewhere: summarize instead of re-expanding.
        if (s.dependencies.items.len == 0) {
            try writer.writeAll(" (reused)\n");
        } else {
            try writer.print(" (+{d} more reused dependencies)\n", .{
                s.dependencies.items.len,
            });
        }
        try stderr.setColor(.reset);
    }
}
|
|
|
|
/// Traverse the dependency graph depth-first and make it undirected by having
/// steps know their dependants (they only know dependencies at start).
/// Along the way, check that there is no dependency loop, and record the steps
/// in traversal order in `step_stack`.
/// Each step has its dependencies traversed in random order, this accomplishes
/// two things:
/// - `step_stack` will be in randomized-depth-first order, so the build runner
///   spawns initial steps in a random order
/// - each step's `dependants` list is also filled in a random order, so that
///   when it finishes executing in `makeStep`, it spawns next steps to run in
///   random order
fn constructGraphAndCheckForDependencyLoop(
    gpa: Allocator,
    b: *std.Build,
    s: *Step,
    step_stack: *std.AutoArrayHashMapUnmanaged(*Step, void),
    rand: std.Random,
) !void {
    switch (s.state) {
        .precheck_started => {
            // Re-entering a step that is still on the DFS path means a cycle;
            // the unwinding error path below prints the rest of the loop.
            std.debug.print("dependency loop detected:\n {s}\n", .{s.name});
            return error.DependencyLoopDetected;
        },
        .precheck_unstarted => {
            // Mark as on-path before recursing so cycles are detectable.
            s.state = .precheck_started;

            try step_stack.ensureUnusedCapacity(gpa, s.dependencies.items.len);

            // We dupe to avoid shuffling the steps in the summary, it depends
            // on s.dependencies' order.
            const deps = gpa.dupe(*Step, s.dependencies.items) catch @panic("OOM");
            defer gpa.free(deps);

            rand.shuffle(*Step, deps);

            for (deps) |dep| {
                try step_stack.put(gpa, dep, {});
                // Record the reverse edge so `makeStep` can notify dependants.
                try dep.dependants.append(b.allocator, s);
                constructGraphAndCheckForDependencyLoop(gpa, b, dep, step_stack, rand) catch |err| {
                    if (err == error.DependencyLoopDetected) {
                        // Print each step along the loop while unwinding.
                        std.debug.print(" {s}\n", .{s.name});
                    }
                    return err;
                };
            }

            s.state = .precheck_done;
            // Counted down atomically in `makeStep` as dependencies finish.
            s.pending_deps = @intCast(s.dependencies.items.len);
        },
        .precheck_done => {},

        // These don't happen until we actually run the step graph.
        .dependency_failure => unreachable,
        .success => unreachable,
        .failure => unreachable,
        .skipped => unreachable,
        .skipped_oom => unreachable,
    }
}
|
|
|
|
/// Runs the "make" function of the single step `s`, updates its state, and then spawns newly-ready
/// dependant steps in `group`. If `s` makes an RSS claim (i.e. `s.max_rss != 0`), the caller must
/// have already subtracted this value from `run.available_rss`. This function will release the RSS
/// claim (i.e. add `s.max_rss` back into `run.available_rss`) and queue any viable memory-blocked
/// steps after "make" completes for `s`.
fn makeStep(
    group: *Io.Group,
    b: *std.Build,
    s: *Step,
    root_prog_node: std.Progress.Node,
    run: *Run,
) Io.Cancelable!void {
    const graph = b.graph;
    const io = graph.io;
    const gpa = run.gpa;

    {
        const step_prog_node = root_prog_node.start(s.name, 0);
        defer step_prog_node.end();

        if (run.web_server) |*ws| ws.updateStepStatus(s, .wip);

        // If any dependency ended in failure (or was skipped for memory
        // reasons), this step becomes a dependency failure without running
        // its make function; otherwise the make result decides the state.
        const new_state: Step.State = for (s.dependencies.items) |dep| {
            switch (@atomicLoad(Step.State, &dep.state, .monotonic)) {
                .precheck_unstarted => unreachable,
                .precheck_started => unreachable,
                .precheck_done => unreachable,

                .failure,
                .dependency_failure,
                .skipped_oom,
                => break .dependency_failure,

                .success, .skipped => {},
            }
        } else if (s.make(.{
            .progress_node = step_prog_node,
            .watch = run.watch,
            .web_server = if (run.web_server) |*ws| ws else null,
            .unit_test_timeout_ns = run.unit_test_timeout_ns,
            .gpa = gpa,
        })) state: {
            break :state .success;
        } else |err| switch (err) {
            error.MakeFailed => .failure,
            error.MakeSkipped => .skipped,
        };

        // Other tasks read this state when processing their own dependencies.
        @atomicStore(Step.State, &s.state, new_state, .monotonic);

        switch (new_state) {
            .precheck_unstarted => unreachable,
            .precheck_started => unreachable,
            .precheck_done => unreachable,

            .failure,
            .dependency_failure,
            .skipped_oom,
            => {
                if (run.web_server) |*ws| ws.updateStepStatus(s, .failure);
                std.Progress.setStatus(.failure_working);
            },

            .success,
            .skipped,
            => {
                if (run.web_server) |*ws| ws.updateStepStatus(s, .success);
            },
        }
    }

    // No matter the result, we want to display error/warning messages.
    if (s.result_error_bundle.errorMessageCount() > 0 or
        s.result_error_msgs.items.len > 0 or
        s.result_stderr.len > 0)
    {
        const stderr = try io.lockStderr(&stdio_buffer_allocation, graph.stderr_mode);
        defer io.unlockStderr();
        printErrorMessages(gpa, s, .{}, stderr.terminal(), run.error_style, run.multiline_errors) catch {};
    }

    if (s.max_rss != 0) {
        var dispatch_set: std.ArrayList(*Step) = .empty;
        defer dispatch_set.deinit(gpa);

        // Release our RSS claim and kick off some blocked steps if possible. We use `dispatch_set`
        // as a staging buffer to avoid recursing into `makeStep` while `run.max_rss_mutex` is held.
        {
            try run.max_rss_mutex.lock(io);
            defer run.max_rss_mutex.unlock(io);
            run.available_rss += s.max_rss;
            dispatch_set.ensureUnusedCapacity(gpa, run.memory_blocked_steps.items.len) catch @panic("OOM");
            while (run.memory_blocked_steps.getLastOrNull()) |candidate| {
                if (run.available_rss < candidate.max_rss) break;
                assert(run.memory_blocked_steps.pop() == candidate);
                dispatch_set.appendAssumeCapacity(candidate);
            }
        }
        for (dispatch_set.items) |candidate| {
            group.async(io, makeStep, .{ group, b, candidate, root_prog_node, run });
        }
    }

    // Notify dependants; the one observing the counter reach zero spawns it.
    for (s.dependants.items) |dependant| {
        // `.acq_rel` synchronizes with itself to ensure all dependencies' final states are visible when this hits 0.
        if (@atomicRmw(u32, &dependant.pending_deps, .Sub, 1, .acq_rel) == 1) {
            try stepReady(group, b, dependant, root_prog_node, run);
        }
    }
}
|
|
|
|
/// Called once every dependency of `s` has finished. Spawns `s` in `group`,
/// first claiming its RSS budget; if the claim does not currently fit, the
/// step is parked on `run.memory_blocked_steps` instead and will be spawned
/// later by `makeStep` when memory is released.
fn stepReady(
    group: *Io.Group,
    b: *std.Build,
    s: *Step,
    root_prog_node: std.Progress.Node,
    run: *Run,
) !void {
    const io = b.graph.io;
    if (s.max_rss != 0) claim: {
        try run.max_rss_mutex.lock(io);
        defer run.max_rss_mutex.unlock(io);
        if (run.available_rss >= s.max_rss) {
            // Claim succeeded; `makeStep` releases it when done.
            run.available_rss -= s.max_rss;
            break :claim;
        }
        // Running this step right now could possibly exceed the allotted RSS.
        run.memory_blocked_steps.append(run.gpa, s) catch @panic("OOM");
        return;
    }
    group.async(io, makeStep, .{ group, b, s, root_prog_node, run });
}
|
|
|
|
/// Prints everything relevant to a failed step to `stderr`: a header naming
/// the step (with the chain of dependant steps for context when verbose),
/// the step's captured stderr, its rendered error bundle, any ad-hoc error
/// messages, and (when verbose) the failed command line.
pub fn printErrorMessages(
    gpa: Allocator,
    failing_step: *Step,
    options: std.zig.ErrorBundle.RenderOptions,
    stderr: Io.Terminal,
    error_style: ErrorStyle,
    multiline_errors: MultilineErrors,
) !void {
    const writer = stderr.writer;
    if (error_style.verboseContext()) {
        // Provide context for where these error messages are coming from by
        // printing the corresponding Step subtree.
        var step_stack: std.ArrayList(*Step) = .empty;
        defer step_stack.deinit(gpa);
        try step_stack.append(gpa, failing_step);
        // Walk up via the first dependant at each level to build the chain.
        while (step_stack.items[step_stack.items.len - 1].dependants.items.len != 0) {
            try step_stack.append(gpa, step_stack.items[step_stack.items.len - 1].dependants.items[0]);
        }

        // Now, `step_stack` has the subtree that we want to print, in reverse order.
        try stderr.setColor(.dim);
        var indent: usize = 0;
        while (step_stack.pop()) |s| : (indent += 1) {
            if (indent > 0) {
                try writer.splatByteAll(' ', (indent - 1) * 3);
                try printChildNodePrefix(stderr);
            }

            try writer.writeAll(s.name);

            if (s == failing_step) {
                try printStepFailure(s, stderr, true);
            } else {
                try writer.writeAll("\n");
            }
        }
        try stderr.setColor(.reset);
    } else {
        // Just print the failing step itself.
        try stderr.setColor(.dim);
        try writer.writeAll(failing_step.name);
        try printStepFailure(failing_step, stderr, true);
        try stderr.setColor(.reset);
    }

    // Echo captured stderr, ensuring the output ends with a newline.
    if (failing_step.result_stderr.len > 0) {
        try writer.writeAll(failing_step.result_stderr);
        if (!mem.endsWith(u8, failing_step.result_stderr, "\n")) {
            try writer.writeAll("\n");
        }
    }

    try failing_step.result_error_bundle.renderToTerminal(options, stderr);

    for (failing_step.result_error_msgs.items) |msg| {
        try stderr.setColor(.red);
        try writer.writeAll("error:");
        try stderr.setColor(.reset);
        if (std.mem.indexOfScalar(u8, msg, '\n') == null) {
            try writer.print(" {s}\n", .{msg});
        } else switch (multiline_errors) {
            // See `--multiline-errors` in the usage text for these modes.
            .indent => {
                var it = std.mem.splitScalar(u8, msg, '\n');
                try writer.print(" {s}\n", .{it.first()});
                while (it.next()) |line| {
                    try writer.print(" {s}\n", .{line});
                }
            },
            .newline => try writer.print("\n{s}\n", .{msg}),
            .none => try writer.print(" {s}\n", .{msg}),
        }
    }

    if (error_style.verboseContext()) {
        if (failing_step.result_failed_command) |cmd_str| {
            try stderr.setColor(.red);
            try writer.writeAll("failed command: ");
            try stderr.setColor(.reset);
            try writer.writeAll(cmd_str);
            try writer.writeByte('\n');
        }
    }

    try writer.writeByte('\n');
}
|
|
|
|
/// Writes one line per top-level step to `w`, appending "(default)" to the
/// step that runs when no step name is given on the command line.
fn printSteps(builder: *std.Build, w: *Writer) !void {
    const arena = builder.graph.arena;
    for (builder.top_level_steps.values()) |tls| {
        const is_default = &tls.step == builder.default_step;
        const display_name = if (is_default)
            try fmt.allocPrint(arena, "{s} (default)", .{tls.step.name})
        else
            tls.step.name;
        try w.print(" {s:<28} {s}\n", .{ display_name, tls.description });
    }
}
|
|
|
|
/// Writes the full `zig build` help text to `w`: usage line, the project's
/// top-level steps, user-declared `-D` options, system integrations, and the
/// fixed general/package-management/advanced option listings.
fn printUsage(b: *std.Build, w: *Writer) !void {
    const arena = b.graph.arena;

    try w.print(
        \\Usage: {s} build [steps] [options]
        \\
        \\Steps:
        \\
    , .{b.graph.zig_exe});
    try printSteps(b, w);
    try w.writeAll(
        \\
        \\Project-Specific Options:
        \\
    );

    if (b.available_options_list.items.len == 0) {
        try w.print(" (none)\n", .{});
    } else {
        for (b.available_options_list.items) |option| {
            const name = try fmt.allocPrint(arena, " -D{s}=[{t}]", .{ option.name, option.type_id });
            try w.print("{s:<30} {s}\n", .{ name, option.description });
            // Enum-typed options additionally list their accepted values.
            if (option.enum_options) |enum_options| {
                const padding = " " ** 33;
                try w.writeAll(padding ++ "Supported Values:\n");
                for (enum_options) |enum_option| {
                    try w.print(padding ++ " {s}\n", .{enum_option});
                }
            }
        }
    }

    try w.writeAll(
        \\
        \\System Integration Options:
        \\ --search-prefix [path] Add a path to look for binaries, libraries, headers
        \\ --sysroot [path] Set the system root directory (usually /)
        \\ --libc [file] Provide a file which specifies libc paths
        \\
        \\ --system [pkgdir] Disable package fetching; enable all integrations
        \\ -fsys=[name] Enable a system integration
        \\ -fno-sys=[name] Disable a system integration
        \\
        \\ -fdarling, -fno-darling Integration with system-installed Darling to
        \\ execute macOS programs on Linux hosts
        \\ (default: no)
        \\ -fqemu, -fno-qemu Integration with system-installed QEMU to execute
        \\ foreign-architecture programs on Linux hosts
        \\ (default: no)
        \\ --libc-runtimes [path] Enhances QEMU integration by providing dynamic libc
        \\ (e.g. glibc or musl) built for multiple foreign
        \\ architectures, allowing execution of non-native
        \\ programs that link with libc.
        \\ -frosetta, -fno-rosetta Rely on Rosetta to execute x86_64 programs on
        \\ ARM64 macOS hosts. (default: no)
        \\ -fwasmtime, -fno-wasmtime Integration with system-installed wasmtime to
        \\ execute WASI binaries. (default: no)
        \\ -fwine, -fno-wine Integration with system-installed Wine to execute
        \\ Windows programs on Linux hosts. (default: no)
        \\
        \\ Available System Integrations: Enabled:
        \\
    );
    if (b.graph.system_library_options.entries.len == 0) {
        try w.writeAll(" (none) -\n");
    } else {
        // List each declared integration with its current enabled state.
        for (b.graph.system_library_options.keys(), b.graph.system_library_options.values()) |k, v| {
            const status = switch (v) {
                .declared_enabled => "yes",
                .declared_disabled => "no",
                .user_enabled, .user_disabled => unreachable, // already emitted error
            };
            try w.print(" {s:<43} {s}\n", .{ k, status });
        }
    }

    try w.writeAll(
        \\
        \\General Options:
        \\ -h, --help Print this help and exit
        \\ -l, --list-steps Print available steps
        \\
        \\ -p, --prefix [path] Where to install files (default: zig-out)
        \\ --prefix-lib-dir [path] Where to install libraries
        \\ --prefix-exe-dir [path] Where to install executables
        \\ --prefix-include-dir [path] Where to install C header files
        \\ --release[=mode] Request release mode, optionally specifying a
        \\ preferred optimization mode: fast, safe, small
        \\
        \\ --verbose Print commands before executing them
        \\ --color [auto|off|on] Enable or disable colored error messages
        \\ --error-style [style] Control how build errors are printed
        \\ verbose (Default) Report errors with full context
        \\ minimal Report errors after summary, excluding context like command lines
        \\ verbose_clear Like 'verbose', but clear the terminal at the start of each update
        \\ minimal_clear Like 'minimal', but clear the terminal at the start of each update
        \\ --multiline-errors [style] Control how multi-line error messages are printed
        \\ indent (Default) Indent non-initial lines to align with initial line
        \\ newline Include a leading newline so that the error message is on its own lines
        \\ none Print as usual so the first line is misaligned
        \\ --summary [mode] Control the printing of the build summary
        \\ all Print the build summary in its entirety
        \\ new Omit cached steps
        \\ failures (Default if short-lived) Only print failed steps
        \\ line (Default if long-lived) Only print the single-line summary
        \\ none Do not print the build summary
        \\ -j<N> Limit concurrent jobs (default is to use all CPU cores)
        \\ --maxrss <bytes> Limit memory usage (default is to use available memory)
        \\ --skip-oom-steps Instead of failing, skip steps that would exceed --maxrss
        \\ --test-timeout <timeout> Limit execution time of unit tests, terminating if exceeded.
        \\ The timeout must include a unit: ns, us, ms, s, m, h
        \\ --watch Continuously rebuild when source files are modified
        \\ --debounce <ms> Delay before rebuilding after changed file detected
        \\ --webui[=ip] Enable the web interface on the given IP address
        \\ --fuzz[=limit] Continuously search for unit test failures with an optional
        \\ limit to the max number of iterations. The argument supports
        \\ an optional 'K', 'M', or 'G' suffix (e.g. '10K'). Implies
        \\ '--webui' when no limit is specified.
        \\ --time-report Force full rebuild and provide detailed information on
        \\ compilation time of Zig source code (implies '--webui')
        \\ -fincremental Enable incremental compilation
        \\ -fno-incremental Disable incremental compilation
        \\
        \\Package Management Options:
        \\ --fetch[=mode] Fetch dependency tree (optionally choose laziness) and exit
        \\ needed (Default) Lazy dependencies are fetched as needed
        \\ all Lazy dependencies are always fetched
        \\ --fork=[path] Override one or more projects from dependency tree
        \\
        \\Advanced Options:
        \\ -freference-trace[=num] How many lines of reference trace should be shown per compile error
        \\ -fno-reference-trace Disable reference trace
        \\ -fallow-so-scripts Allows .so files to be GNU ld scripts
        \\ -fno-allow-so-scripts (default) .so files must be ELF files
        \\ --build-file [file] Override path to build.zig
        \\ --cache-dir [path] Override path to local Zig cache directory
        \\ --global-cache-dir [path] Override path to global Zig cache directory
        \\ --zig-lib-dir [arg] Override path to Zig lib directory
        \\ --build-runner [file] Override path to build runner
        \\ --seed [integer] For shuffling dependency traversal order (default: random)
        \\ --build-id[=style] At a minor link-time expense, embeds a build ID in binaries
        \\ fast 8-byte non-cryptographic hash (COFF, ELF, WASM)
        \\ sha1, tree 20-byte cryptographic hash (ELF, WASM)
        \\ md5 16-byte cryptographic hash (ELF)
        \\ uuid 16-byte random UUID (ELF, WASM)
        \\ 0x[hexstring] Constant ID, maximum 32 bytes (ELF, WASM)
        \\ none (default) No build ID
        \\ --debug-log [scope] Enable debugging the compiler
        \\ --debug-pkg-config Fail if unknown pkg-config flags encountered
        \\ --debug-rt Debug compiler runtime libraries
        \\ --verbose-link Enable compiler debug output for linking
        \\ --verbose-air Enable compiler debug output for Zig AIR
        \\ --verbose-llvm-ir[=file] Enable compiler debug output for LLVM IR
        \\ --verbose-llvm-bc=[file] Enable compiler debug output for LLVM BC
        \\ --verbose-cimport Enable compiler debug output for C imports
        \\ --verbose-cc Enable compiler debug output for C compilation
        \\ --verbose-llvm-cpu-features Enable compiler debug output for LLVM CPU features
        \\
    );
}
|
|
|
|
/// Returns the argument at `idx.*` and advances the cursor, or null when all
/// arguments are consumed (in which case the cursor is left unchanged).
fn nextArg(args: []const [:0]const u8, idx: *usize) ?[:0]const u8 {
    if (idx.* >= args.len) return null;
    const arg = args[idx.*];
    idx.* += 1;
    return arg;
}
|
|
|
|
/// Like `nextArg`, but when no argument remains, reports the flag at
/// `idx.* - 1` as missing its argument and exits the process.
fn nextArgOrFatal(args: []const [:0]const u8, idx: *usize) [:0]const u8 {
    if (nextArg(args, idx)) |arg| return arg;
    std.debug.print("expected argument after '{s}'\n access the help menu with 'zig build -h'\n", .{args[idx.* - 1]});
    process.exit(1);
}
|
|
|
|
/// Returns the remaining arguments from `idx` onward, or null when `idx` is
/// past the end of `args`.
fn argsRest(args: []const [:0]const u8, idx: usize) ?[]const [:0]const u8 {
    return if (idx < args.len) args[idx..] else null;
}
|
|
|
|
/// Alias for the shared color-preference enum (the `--color` option value).
const Color = std.zig.Color;
|
|
/// How build errors are reported; corresponds to the `--error-style`
/// command line option (see the usage text for each mode's description).
const ErrorStyle = enum {
    verbose,
    minimal,
    verbose_clear,
    minimal_clear,

    /// Whether errors are reported with full context (e.g. the step subtree
    /// and the failed command line).
    fn verboseContext(s: ErrorStyle) bool {
        return s == .verbose or s == .verbose_clear;
    }

    /// Whether the terminal is cleared at the start of each update.
    fn clearOnUpdate(s: ErrorStyle) bool {
        return s == .verbose_clear or s == .minimal_clear;
    }
};
|
|
/// How multi-line error messages are laid out (the `--multiline-errors` option).
const MultilineErrors = enum { indent, newline, none };

/// How much of the build summary is printed (the `--summary` option).
const Summary = enum { all, new, failures, line, none };
|
|
|
|
/// Prints the formatted message to stderr, followed by a hint to run
/// `zig build -h`, then terminates the process with a nonzero exit code.
fn fatalWithHint(comptime f: []const u8, args: anytype) noreturn {
    std.debug.print(f ++ "\n access the help menu with 'zig build -h'\n", args);
    process.exit(1);
}
|
|
|
|
/// Verifies that every `-fsys=`/`-fno-sys=` option supplied on the command
/// line was recognized by the build script; reports each unknown name and
/// exits the process if any were found.
fn validateSystemLibraryOptions(b: *std.Build) void {
    var saw_unknown = false;
    const opts = &b.graph.system_library_options;
    for (opts.keys(), opts.values()) |name, state| {
        switch (state) {
            .user_disabled, .user_enabled => {
                // The user tried to enable or disable a system library
                // integration that the build script never declared.
                std.debug.print("system library name not recognized by build script: '{s}'\n", .{name});
                saw_unknown = true;
            },
            .declared_disabled, .declared_enabled => {},
        }
    }
    if (!saw_unknown) return;
    std.debug.print(" access the help menu with 'zig build -h'\n", .{});
    process.exit(1);
}
|
|
|
|
/// Starting from all top-level steps in `b`, traverses the entire step graph
/// and adds all step dependencies implied by module graphs.
fn createModuleDependencies(b: *std.Build) Allocator.Error!void {
    const arena = b.graph.arena;

    // Worklist of discovered steps; `next_step_idx` is the queue front, and
    // the array-hash-map de-duplicates steps reachable via multiple paths.
    var all_steps: std.AutoArrayHashMapUnmanaged(*Step, void) = .empty;
    var next_step_idx: usize = 0;

    try all_steps.ensureUnusedCapacity(arena, b.top_level_steps.count());
    for (b.top_level_steps.values()) |tls| {
        all_steps.putAssumeCapacityNoClobber(&tls.step, {});
    }

    while (next_step_idx < all_steps.count()) {
        const step = all_steps.keys()[next_step_idx];
        next_step_idx += 1;

        // Set up any implied dependencies for this step. It's important that we do this first, so
        // that the loop below discovers steps implied by the module graph.
        try createModuleDependenciesForStep(step);

        // Enqueue this step's dependencies (including any just added above).
        try all_steps.ensureUnusedCapacity(arena, step.dependencies.items.len);
        for (step.dependencies.items) |other_step| {
            all_steps.putAssumeCapacity(other_step, {});
        }
    }
}
|
|
|
|
/// If the given `Step` is a `Step.Compile`, adds any dependencies for that step which
/// are implied by the module graph rooted at `step.cast(Step.Compile).?.root_module`.
fn createModuleDependenciesForStep(step: *Step) Allocator.Error!void {
    // Only compile steps carry a module graph; every other step kind has no
    // implied module dependencies.
    const compile = step.cast(Step.Compile) orelse return;

    // For every module reachable from the root module, register the step
    // dependencies implied by its lazy paths and referenced steps.
    for (compile.root_module.getGraph().modules) |mod| {
        if (mod.root_source_file) |lp| lp.addStepDependencies(step);

        for (mod.include_dirs.items) |include_dir| switch (include_dir) {
            .path,
            .path_system,
            .path_after,
            .framework_path,
            .framework_path_system,
            .embed_path,
            => |lp| lp.addStepDependencies(step),

            .other_step => |other| {
                other.getEmittedIncludeTree().addStepDependencies(step);
                step.dependOn(&other.step);
            },

            .config_header_step => |other| step.dependOn(&other.step),
        };

        for (mod.lib_paths.items) |lp| lp.addStepDependencies(step);

        for (mod.rpaths.items) |rpath| switch (rpath) {
            .lazy_path => |lp| lp.addStepDependencies(step),
            .special => {},
        };

        for (mod.link_objects.items) |link_object| switch (link_object) {
            .static_path, .assembly_file => |lp| lp.addStepDependencies(step),
            .other_step => |other| step.dependOn(&other.step),
            .system_lib => {},
            .c_source_file => |source| source.file.addStepDependencies(step),
            .c_source_files => |source_files| source_files.root.addStepDependencies(step),
            .win32_resource_file => |rc_source| {
                rc_source.file.addStepDependencies(step);
                for (rc_source.include_paths) |lp| lp.addStepDependencies(step);
            },
        };
    }
}
|
|
|
|
// Fixed buffer backing the buffered stdout writer below.
var stdio_buffer_allocation: [256]u8 = undefined;
// Global storage for the stdout writer so the `*Writer` returned by
// `initStdoutWriter` remains valid after that function returns.
var stdout_writer_allocation: Io.File.Writer = undefined;
|
|
|
|
/// (Re)initializes the global streaming writer over stdout, backed by the
/// fixed global buffer, and returns its generic `Writer` interface.
fn initStdoutWriter(io: Io) *Writer {
    const fw = &stdout_writer_allocation;
    fw.* = Io.File.stdout().writerStreaming(io, &stdio_buffer_allocation);
    return &fw.interface;
}
|
|
|
|
/// Deletes the generated directories of any `WriteFile` steps that were run
/// in `.tmp` mode. Failures are logged as warnings rather than propagated.
fn cleanTmpFiles(io: Io, steps: []const *Step) void {
    for (steps) |step| {
        const write_file = step.cast(std.Build.Step.WriteFile) orelse continue;
        if (write_file.mode != .tmp) continue;
        // A null path means the directory was never generated; nothing to do.
        if (write_file.generated_directory.path) |tmp_path| {
            Io.Dir.cwd().deleteTree(io, tmp_path) catch |err| {
                std.log.warn("failed to delete {s}: {t}", .{ tmp_path, err });
            };
        }
    }
}
|