std.debug: simplify printLineFromFile

This commit is contained in:
Andrew Kelley 2025-12-08 16:41:24 -08:00
parent 9ccd68de0b
commit 314c906dba
9 changed files with 46 additions and 79 deletions

View file

@ -824,7 +824,7 @@ fn runStepNames(
}
if (@bitSizeOf(usize) != 64) {
// Current implementation depends on posix.mmap()'s second parameter, `length: usize`,
// being compatible with `std.fs.getEndPos() u64`'s return value. This is not the case
// being compatible with the file system's u64 return value. This is not the case
// on 32-bit platforms.
// Affects or affected by issues #5185, #22523, and #22464.
fatal("--fuzz not yet implemented on {d}-bit platforms", .{@bitSizeOf(usize)});

View file

@ -413,7 +413,7 @@ fn prepareTables(fuzz: *Fuzz, run_step: *Step.Run, coverage_id: u64) error{ OutO
};
defer coverage_file.close(io);
const file_size = coverage_file.getEndPos() catch |err| {
const file_size = coverage_file.length(io) catch |err| {
log.err("unable to check len of coverage file '{f}': {t}", .{ coverage_file_path, err });
return error.AlreadyReported;
};

View file

@ -47,7 +47,7 @@ test "write a file, read it, then delete it" {
var file = try tmp.dir.openFile(io, tmp_file_name, .{});
defer file.close(io);
const file_size = try file.getEndPos();
const file_size = try file.length(io);
const expected_file_size: u64 = "begin".len + data.len + "end".len;
try expectEqual(expected_file_size, file_size);
@ -77,7 +77,7 @@ test "File seek ops" {
// Seek to the end
try file.seekFromEnd(0);
try expect((try file.getPos()) == try file.getEndPos());
try expect((try file.getPos()) == try file.length(io));
// Negative delta
try file.seekBy(-4096);
try expect((try file.getPos()) == 4096);
@ -100,17 +100,17 @@ test "setEndPos" {
defer file.close(io);
// Verify that the file size changes and the file offset is not moved
try expect((try file.getEndPos()) == 0);
try expect((try file.length(io)) == 0);
try expect((try file.getPos()) == 0);
try file.setEndPos(8192);
try expect((try file.getEndPos()) == 8192);
try expect((try file.length(io)) == 8192);
try expect((try file.getPos()) == 0);
try file.seekTo(100);
try file.setEndPos(4096);
try expect((try file.getEndPos()) == 4096);
try expect((try file.length(io)) == 4096);
try expect((try file.getPos()) == 100);
try file.setEndPos(0);
try expect((try file.getEndPos()) == 0);
try expect((try file.length(io)) == 0);
try expect((try file.getPos()) == 100);
}

View file

@ -558,9 +558,9 @@ pub fn defaultPanic(
stderr.print("{s}\n", .{msg}) catch break :trace;
if (@errorReturnTrace()) |t| if (t.index > 0) {
stderr.writeStreamingAll("error return context:\n") catch break :trace;
stderr.writeAll("error return context:\n") catch break :trace;
writeStackTrace(t, stderr, tty_config) catch break :trace;
stderr.writeStreamingAll("\nstack trace:\n") catch break :trace;
stderr.writeAll("\nstack trace:\n") catch break :trace;
};
writeCurrentStackTrace(.{
.first_address = first_trace_addr orelse @returnAddress(),
@ -617,6 +617,8 @@ pub const StackUnwindOptions = struct {
///
/// See `writeCurrentStackTrace` to immediately print the trace instead of capturing it.
pub noinline fn captureCurrentStackTrace(options: StackUnwindOptions, addr_buf: []usize) StackTrace {
var threaded: Io.Threaded = .init_single_threaded;
const io = threaded.ioBasic();
const empty_trace: StackTrace = .{ .index = 0, .instruction_addresses = &.{} };
if (!std.options.allow_stack_tracing) return empty_trace;
var it: StackIterator = .init(options.context);
@ -628,7 +630,7 @@ pub noinline fn captureCurrentStackTrace(options: StackUnwindOptions, addr_buf:
// Ideally, we would iterate the whole stack so that the `index` in the returned trace was
// indicative of how many frames were skipped. However, this has a significant runtime cost
// in some cases, so at least for now, we don't do that.
while (index < addr_buf.len) switch (it.next()) {
while (index < addr_buf.len) switch (it.next(io)) {
.switch_to_fp => if (!it.stratOk(options.allow_unsafe_unwind)) break,
.end => break,
.frame => |ret_addr| {
@ -684,7 +686,7 @@ pub noinline fn writeCurrentStackTrace(options: StackUnwindOptions, writer: *Wri
var total_frames: usize = 0;
var wait_for = options.first_address;
var printed_any_frame = false;
while (true) switch (it.next()) {
while (true) switch (it.next(io)) {
.switch_to_fp => |unwind_error| {
switch (StackIterator.fp_usability) {
.useless, .unsafe => {},
@ -1196,54 +1198,26 @@ fn printLineFromFile(io: Io, writer: *Writer, source_location: SourceLocation) !
// Need this to always block even in async I/O mode, because this could potentially
// be called from e.g. the event loop code crashing.
const cwd: Io.Dir = .cwd();
var f = try cwd.openFile(io, source_location.file_name, .{});
defer f.close(io);
var file = try cwd.openFile(io, source_location.file_name, .{});
defer file.close(io);
// TODO fstat and make sure that the file has the correct size
var buf: [4096]u8 = undefined;
var amt_read = try f.read(buf[0..]);
const line_start = seek: {
var current_line_start: usize = 0;
var next_line: usize = 1;
while (next_line != source_location.line) {
const slice = buf[current_line_start..amt_read];
if (mem.findScalar(u8, slice, '\n')) |pos| {
next_line += 1;
if (pos == slice.len - 1) {
amt_read = try f.read(buf[0..]);
current_line_start = 0;
} else current_line_start += pos + 1;
} else if (amt_read < buf.len) {
return error.EndOfFile;
} else {
amt_read = try f.read(buf[0..]);
current_line_start = 0;
}
var buffer: [4096]u8 = undefined;
var file_reader: File.Reader = .init(file, io, &buffer);
const r = &file_reader.interface;
var line_index: usize = 0;
while (r.takeDelimiterExclusive('\n')) |line| {
line_index += 1;
if (line_index == source_location.line) {
// TODO delete hard tabs from the language
mem.replaceScalar(u8, line, '\t', ' ');
try writer.writeAll(line);
// Make sure printing last line of file inserts extra newline.
try writer.writeByte('\n');
return;
}
break :seek current_line_start;
};
const slice = buf[line_start..amt_read];
if (mem.findScalar(u8, slice, '\n')) |pos| {
const line = slice[0 .. pos + 1];
mem.replaceScalar(u8, line, '\t', ' ');
return writer.writeAll(line);
} else { // Line is the last inside the buffer, and requires another read to find delimiter. Alternatively the file ends.
mem.replaceScalar(u8, slice, '\t', ' ');
try writer.writeAll(slice);
while (amt_read == buf.len) {
amt_read = try f.read(buf[0..]);
if (mem.findScalar(u8, buf[0..amt_read], '\n')) |pos| {
const line = buf[0 .. pos + 1];
mem.replaceScalar(u8, line, '\t', ' ');
return writer.writeAll(line);
} else {
const line = buf[0..amt_read];
mem.replaceScalar(u8, line, '\t', ' ');
try writer.writeAll(line);
}
}
// Make sure printing last line of file inserts extra newline
try writer.writeByte('\n');
} else |err| {
return err;
}
}
@ -1598,7 +1572,7 @@ pub fn defaultHandleSegfault(addr: ?usize, name: []const u8, opt_ctx: ?CpuContex
// We're still holding the mutex but that's fine as we're going to
// call abort().
const stderr, _ = lockStderrWriter(&.{});
stderr().writeAll("aborting due to recursive panic\n") catch {};
stderr.writeAll("aborting due to recursive panic\n") catch {};
},
else => {}, // Panicked while printing the recursive panic message.
}

View file

@ -123,6 +123,7 @@ pub const LoadError = error{
pub fn load(
gpa: Allocator,
io: Io,
elf_file: Io.File,
opt_build_id: ?[]const u8,
di_search_paths: *const DebugInfoSearchPaths,
@ -131,7 +132,7 @@ pub fn load(
errdefer arena_instance.deinit();
const arena = arena_instance.allocator();
var result = loadInner(arena, elf_file, null) catch |err| switch (err) {
var result = loadInner(arena, io, elf_file, null) catch |err| switch (err) {
error.CrcMismatch => unreachable, // we passed crc as null
else => |e| return e,
};
@ -156,7 +157,7 @@ pub fn load(
if (build_id.len < 3) break :build_id;
for (di_search_paths.global_debug) |global_debug| {
if (try loadSeparateDebugFile(arena, &result, null, "{s}/.build-id/{x}/{x}.debug", .{
if (try loadSeparateDebugFile(arena, io, &result, null, "{s}/.build-id/{x}/{x}.debug", .{
global_debug,
build_id[0..1],
build_id[1..],
@ -164,7 +165,7 @@ pub fn load(
}
if (di_search_paths.debuginfod_client) |components| {
if (try loadSeparateDebugFile(arena, &result, null, "{s}{s}/{x}/debuginfo", .{
if (try loadSeparateDebugFile(arena, io, &result, null, "{s}{s}/{x}/debuginfo", .{
components[0],
components[1],
build_id,
@ -181,18 +182,18 @@ pub fn load(
const exe_dir = di_search_paths.exe_dir orelse break :debug_link;
if (try loadSeparateDebugFile(arena, &result, debug_crc, "{s}/{s}", .{
if (try loadSeparateDebugFile(arena, io, &result, debug_crc, "{s}/{s}", .{
exe_dir,
debug_filename,
})) |mapped| break :load_di mapped;
if (try loadSeparateDebugFile(arena, &result, debug_crc, "{s}/.debug/{s}", .{
if (try loadSeparateDebugFile(arena, io, &result, debug_crc, "{s}/.debug/{s}", .{
exe_dir,
debug_filename,
})) |mapped| break :load_di mapped;
for (di_search_paths.global_debug) |global_debug| {
// This looks like a bug; it isn't. They really do embed the absolute path to the
// exe's dirname, *under* the global debug path.
if (try loadSeparateDebugFile(arena, &result, debug_crc, "{s}/{s}/{s}", .{
if (try loadSeparateDebugFile(arena, io, &result, debug_crc, "{s}/{s}/{s}", .{
global_debug,
exe_dir,
debug_filename,
@ -378,7 +379,7 @@ fn loadSeparateDebugFile(
const elf_file = Io.Dir.cwd().openFile(io, path, .{}) catch return null;
defer elf_file.close(io);
const result = loadInner(arena, elf_file, opt_crc) catch |err| switch (err) {
const result = loadInner(arena, io, elf_file, opt_crc) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
error.CrcMismatch => return null,
else => return null,
@ -423,13 +424,14 @@ const LoadInnerResult = struct {
};
fn loadInner(
arena: Allocator,
io: Io,
elf_file: Io.File,
opt_crc: ?u32,
) (LoadError || error{ CrcMismatch, Streaming, Canceled })!LoadInnerResult {
const mapped_mem: []align(std.heap.page_size_min) const u8 = mapped: {
const file_len = std.math.cast(
usize,
elf_file.getEndPos() catch |err| switch (err) {
elf_file.length(io) catch |err| switch (err) {
error.PermissionDenied => unreachable, // not asking for PROT_EXEC
else => |e| return e,
},

View file

@ -520,7 +520,7 @@ fn mapDebugInfoFile(io: Io, path: []const u8) ![]align(std.heap.page_size_min) c
const file_len = std.math.cast(
usize,
file.getEndPos() catch return error.ReadFailed,
file.length(io) catch return error.ReadFailed,
) orelse return error.ReadFailed;
return posix.mmap(

View file

@ -326,7 +326,7 @@ const Module = struct {
const load_result = if (mod.name.len > 0) res: {
var file = Io.Dir.cwd().openFile(io, mod.name, .{}) catch return error.MissingDebugInfo;
defer file.close(io);
break :res std.debug.ElfFile.load(gpa, file, mod.build_id, &.native(mod.name));
break :res std.debug.ElfFile.load(gpa, io, file, mod.build_id, &.native(mod.name));
} else res: {
const path = std.process.executablePathAlloc(io, gpa) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
@ -335,7 +335,7 @@ const Module = struct {
defer gpa.free(path);
var file = Io.Dir.cwd().openFile(io, path, .{}) catch return error.MissingDebugInfo;
defer file.close(io);
break :res std.debug.ElfFile.load(gpa, file, mod.build_id, &.native(path));
break :res std.debug.ElfFile.load(gpa, io, file, mod.build_id, &.native(path));
};
var elf_file = load_result catch |err| switch (err) {

View file

@ -622,7 +622,7 @@ fn mapDebugInfoFile(io: Io, path: []const u8) ![]align(std.heap.page_size_min) c
};
defer file.close(io);
const file_end_pos = file.getEndPos() catch |err| switch (err) {
const file_end_pos = file.length(io) catch |err| switch (err) {
error.Unexpected => |e| return e,
else => return error.ReadFailed,
};

View file

@ -163,15 +163,6 @@ pub const File = extern struct {
}
}
/// Returns the end-of-file position (file length in bytes) by seeking to
/// `end_of_file` and reading back the resulting position.
/// The original file offset is restored before returning.
fn getEndPos(self: *File) SeekError!u64 {
const start_pos = try self.getPosition();
// Best-effort restore of the caller's offset on all exit paths; the seek
// error is deliberately ignored because cleanup must not fail.
// NOTE(review): if this restore fails, the offset is left at end-of-file.
defer self.setPosition(start_pos) catch {};
try self.setPosition(end_of_file);
return self.getPosition();
}
pub fn setPosition(self: *File, position: u64) SeekError!void {
switch (self._set_position(self, position)) {
.success => {},