link: update to new file system APIs

This commit is contained in:
Andrew Kelley 2025-12-12 16:30:44 -08:00
parent 22b0eea3c0
commit 54e4a3456c
26 changed files with 489 additions and 358 deletions

View file

@ -494,7 +494,14 @@ pub const WritePositionalError = Writer.Error || error{Unseekable};
/// See also:
/// * `writer`
pub fn writePositional(file: File, io: Io, buffer: []const []const u8, offset: u64) WritePositionalError!usize {
return io.vtable.fileWritePositional(io.userdata, file, buffer, offset);
return io.vtable.fileWritePositional(io.userdata, file, &.{}, buffer, 1, offset);
}
/// Equivalent to creating a positional writer, writing `bytes`, and then flushing.
pub fn writePositionalAll(file: File, io: Io, bytes: []const u8, offset: u64) WritePositionalError!void {
    // Issue positional writes until every byte has been accepted, advancing
    // both the slice position and the file offset by the amount each call
    // reports as written.
    var written: usize = 0;
    while (written < bytes.len) {
        const n = try io.vtable.fileWritePositional(io.userdata, file, &.{}, &.{bytes[written..]}, 1, offset + written);
        written += n;
    }
}
pub const SeekError = error{

View file

@ -620,7 +620,7 @@ pub const File = struct {
emit.sub_path, std.crypto.random.int(u32),
});
defer gpa.free(tmp_sub_path);
try emit.root_dir.handle.copyFile(emit.sub_path, emit.root_dir.handle, tmp_sub_path, .{});
try emit.root_dir.handle.copyFile(emit.sub_path, emit.root_dir.handle, tmp_sub_path, io, .{});
try emit.root_dir.handle.rename(tmp_sub_path, emit.root_dir.handle, emit.sub_path, io);
switch (builtin.os.tag) {
.linux => std.posix.ptrace(std.os.linux.PTRACE.ATTACH, pid, 0, 0) catch |err| {
@ -852,10 +852,12 @@ pub const File = struct {
}
}
pub fn releaseLock(self: *File) void {
if (self.lock) |*lock| {
lock.release();
self.lock = null;
/// Releases the file lock held by this linker `File`, if any, and clears it.
/// Safe to call when no lock is held.
pub fn releaseLock(base: *File) void {
    const io = base.comp.io;
    if (base.lock) |*lock| {
        lock.release(io);
        base.lock = null;
    }
}
@ -908,6 +910,7 @@ pub const File = struct {
/// `arena` has the lifetime of the call to `Compilation.update`.
pub fn flush(base: *File, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) FlushError!void {
const comp = base.comp;
const io = comp.io;
if (comp.clang_preprocessor_mode == .yes or comp.clang_preprocessor_mode == .pch) {
dev.check(.clang_command);
const emit = base.emit;
@ -918,12 +921,19 @@ pub const File = struct {
assert(comp.c_object_table.count() == 1);
const the_key = comp.c_object_table.keys()[0];
const cached_pp_file_path = the_key.status.success.object_path;
cached_pp_file_path.root_dir.handle.copyFile(cached_pp_file_path.sub_path, emit.root_dir.handle, emit.sub_path, .{}) catch |err| {
Io.Dir.copyFile(
cached_pp_file_path.root_dir.handle,
cached_pp_file_path.sub_path,
emit.root_dir.handle,
emit.sub_path,
io,
.{},
) catch |err| {
const diags = &base.comp.link_diags;
return diags.fail("failed to copy '{f}' to '{f}': {s}", .{
return diags.fail("failed to copy '{f}' to '{f}': {t}", .{
std.fmt.alt(@as(Path, cached_pp_file_path), .formatEscapeChar),
std.fmt.alt(@as(Path, emit), .formatEscapeChar),
@errorName(err),
err,
});
};
return;
@ -1119,7 +1129,7 @@ pub const File = struct {
const size = std.math.cast(u32, stat.size) orelse return error.FileTooBig;
const buf = try gpa.alloc(u8, size);
defer gpa.free(buf);
const n = try file.preadAll(buf, 0);
const n = try file.readPositionalAll(io, buf, 0);
if (buf.len != n) return error.UnexpectedEndOfFile;
var ld_script = try LdScript.parse(gpa, diags, path, buf);
defer ld_script.deinit(gpa);
@ -1184,6 +1194,32 @@ pub const File = struct {
}
}
/// Legacy function for old linker code.
/// Copies `size` bytes within this linker's own output file, from
/// `old_offset` to `new_offset`, by delegating to `copyRangeAll2` with the
/// same file as both source and destination.
pub fn copyRangeAll(base: *File, old_offset: u64, new_offset: u64, size: u64) !void {
    const io = base.comp.io;
    const f = base.file.?;
    return copyRangeAll2(io, f, f, old_offset, new_offset, size);
}
/// Legacy function for old linker code.
/// Copies exactly `size` bytes from `src_file` at `old_offset` to `dst_file`
/// at `new_offset` using a positional reader/writer pair, then flushes.
pub fn copyRangeAll2(io: Io, src_file: Io.File, dst_file: Io.File, old_offset: u64, new_offset: u64, size: u64) !void {
    // Buffered on the write side only; the reader is unbuffered (&.{}).
    var write_buffer: [2048]u8 = undefined;
    var file_reader = src_file.reader(io, &.{});
    file_reader.pos = old_offset;
    var file_writer = dst_file.writer(io, &write_buffer);
    file_writer.pos = new_offset;
    const size_u = std.math.cast(usize, size) orelse return error.Overflow;
    // sendFileAll reports failures as ReadFailed/WriteFailed; the underlying
    // cause is stored on the reader/writer and re-raised here.
    const n = file_writer.interface.sendFileAll(&file_reader, .limited(size_u)) catch |err| switch (err) {
        error.ReadFailed => return file_reader.err.?,
        error.WriteFailed => return file_writer.err.?,
    };
    // NOTE(review): a short read (source range ending before `size_u` bytes)
    // trips this assert instead of returning an error — presumably callers
    // only copy ranges known to exist in the file; confirm.
    assert(n == size_u);
    // Flush the buffered writer; translate WriteFailed to the stored cause.
    file_writer.interface.flush() catch |err| switch (err) {
        error.WriteFailed => return file_writer.err.?,
    };
}
pub const Tag = enum {
coff2,
elf,
@ -1243,7 +1279,7 @@ pub const File = struct {
// with 0o755 permissions, but it works appropriately if the system is configured
// more leniently. As another data point, C's fopen seems to open files with the
// 666 mode.
const executable_mode: Io.FilePermissions = if (builtin.target.os.tag == .windows)
const executable_mode: Io.File.Permissions = if (builtin.target.os.tag == .windows)
.default_file
else
.fromMode(0o777);

View file

@ -1,3 +1,22 @@
const Coff = @This();
const builtin = @import("builtin");
const native_endian = builtin.cpu.arch.endian();
const std = @import("std");
const assert = std.debug.assert;
const log = std.log.scoped(.link);
const codegen = @import("../codegen.zig");
const Compilation = @import("../Compilation.zig");
const InternPool = @import("../InternPool.zig");
const link = @import("../link.zig");
const MappedFile = @import("MappedFile.zig");
const target_util = @import("../target.zig");
const Type = @import("../Type.zig");
const Value = @import("../Value.zig");
const Zcu = @import("../Zcu.zig");
base: link.File,
mf: MappedFile,
nodes: std.MultiArrayList(Node),
@ -1729,22 +1748,20 @@ pub fn flush(
const comp = coff.base.comp;
if (comp.compiler_rt_dyn_lib) |crt_file| {
const gpa = comp.gpa;
const io = comp.io;
const compiler_rt_sub_path = try std.fs.path.join(gpa, &.{
std.fs.path.dirname(coff.base.emit.sub_path) orelse "",
std.fs.path.basename(crt_file.full_object_path.sub_path),
});
defer gpa.free(compiler_rt_sub_path);
crt_file.full_object_path.root_dir.handle.copyFile(
std.Io.Dir.copyFile(
crt_file.full_object_path.root_dir.handle,
crt_file.full_object_path.sub_path,
coff.base.emit.root_dir.handle,
compiler_rt_sub_path,
io,
.{},
) catch |err| switch (err) {
else => |e| return comp.link_diags.fail("Copy '{s}' failed: {s}", .{
compiler_rt_sub_path,
@errorName(e),
}),
};
) catch |err| return comp.link_diags.fail("copy '{s}' failed: {t}", .{ compiler_rt_sub_path, err });
}
}
@ -2461,19 +2478,3 @@ pub fn printNode(
}
}
}
const assert = std.debug.assert;
const builtin = @import("builtin");
const codegen = @import("../codegen.zig");
const Compilation = @import("../Compilation.zig");
const Coff = @This();
const InternPool = @import("../InternPool.zig");
const link = @import("../link.zig");
const log = std.log.scoped(.link);
const MappedFile = @import("MappedFile.zig");
const native_endian = builtin.cpu.arch.endian();
const std = @import("std");
const target_util = @import("../target.zig");
const Type = @import("../Type.zig");
const Value = @import("../Value.zig");
const Zcu = @import("../Zcu.zig");

View file

@ -48,6 +48,7 @@ pub const UpdateError = error{
EndOfStream,
Underflow,
UnexpectedEndOfFile,
NonResizable,
} ||
codegen.GenerateSymbolError ||
Io.File.OpenError ||
@ -155,11 +156,14 @@ const DebugInfo = struct {
fn declAbbrevCode(debug_info: *DebugInfo, unit: Unit.Index, entry: Entry.Index) !AbbrevCode {
const dwarf: *Dwarf = @fieldParentPtr("debug_info", debug_info);
const comp = dwarf.bin_file.comp;
const io = comp.io;
const unit_ptr = debug_info.section.getUnit(unit);
const entry_ptr = unit_ptr.getEntry(entry);
if (entry_ptr.len < AbbrevCode.decl_bytes) return .null;
var abbrev_code_buf: [AbbrevCode.decl_bytes]u8 = undefined;
if (try dwarf.getFile().?.preadAll(
if (try dwarf.getFile().?.readPositionalAll(
io,
&abbrev_code_buf,
debug_info.section.off(dwarf) + unit_ptr.off + unit_ptr.header_len + entry_ptr.off,
) != abbrev_code_buf.len) return error.InputOutput;
@ -639,13 +643,10 @@ const Unit = struct {
fn move(unit: *Unit, sec: *Section, dwarf: *Dwarf, new_off: u32) UpdateError!void {
if (unit.off == new_off) return;
const n = try dwarf.getFile().?.copyRangeAll(
sec.off(dwarf) + unit.off,
dwarf.getFile().?,
sec.off(dwarf) + new_off,
unit.len,
);
if (n != unit.len) return error.InputOutput;
const comp = dwarf.bin_file.comp;
const io = comp.io;
const file = dwarf.getFile().?;
try link.File.copyRangeAll2(io, file, file, sec.off(dwarf) + unit.off, sec.off(dwarf) + new_off, unit.len);
unit.off = new_off;
}
@ -675,10 +676,14 @@ const Unit = struct {
fn replaceHeader(unit: *Unit, sec: *Section, dwarf: *Dwarf, contents: []const u8) UpdateError!void {
assert(contents.len == unit.header_len);
try dwarf.getFile().?.pwriteAll(contents, sec.off(dwarf) + unit.off);
const comp = dwarf.bin_file.comp;
const io = comp.io;
try dwarf.getFile().?.writePositionalAll(io, contents, sec.off(dwarf) + unit.off);
}
fn writeTrailer(unit: *Unit, sec: *Section, dwarf: *Dwarf) UpdateError!void {
const comp = dwarf.bin_file.comp;
const io = comp.io;
const start = unit.off + unit.header_len + if (unit.last.unwrap()) |last_entry| end: {
const last_entry_ptr = unit.getEntry(last_entry);
break :end last_entry_ptr.off + last_entry_ptr.len;
@ -708,7 +713,7 @@ const Unit = struct {
assert(fw.end == extended_op_bytes + op_len_bytes);
fw.writeByte(DW.LNE.padding) catch unreachable;
assert(fw.end >= unit.trailer_len and fw.end <= len);
return dwarf.getFile().?.pwriteAll(fw.buffered(), sec.off(dwarf) + start);
return dwarf.getFile().?.writePositionalAll(io, fw.buffered(), sec.off(dwarf) + start);
}
var trailer_aw: Writer.Allocating = try .initCapacity(dwarf.gpa, len);
defer trailer_aw.deinit();
@ -768,7 +773,7 @@ const Unit = struct {
assert(tw.end == unit.trailer_len);
tw.splatByteAll(fill_byte, len - unit.trailer_len) catch unreachable;
assert(tw.end == len);
try dwarf.getFile().?.pwriteAll(trailer_aw.written(), sec.off(dwarf) + start);
try dwarf.getFile().?.writePositionalAll(io, trailer_aw.written(), sec.off(dwarf) + start);
}
fn resolveRelocs(unit: *Unit, sec: *Section, dwarf: *Dwarf) RelocError!void {
@ -854,6 +859,8 @@ const Entry = struct {
dwarf: *Dwarf,
) (UpdateError || Writer.Error)!void {
assert(entry.len > 0);
const comp = dwarf.bin_file.comp;
const io = comp.io;
const start = entry.off + entry.len;
if (sec == &dwarf.debug_frame.section) {
const len = if (entry.next.unwrap()) |next_entry|
@ -863,11 +870,11 @@ const Entry = struct {
var unit_len_buf: [8]u8 = undefined;
const unit_len_bytes = unit_len_buf[0..dwarf.sectionOffsetBytes()];
dwarf.writeInt(unit_len_bytes, len - dwarf.unitLengthBytes());
try dwarf.getFile().?.pwriteAll(unit_len_bytes, sec.off(dwarf) + unit.off + unit.header_len + entry.off);
try dwarf.getFile().?.writePositionalAll(io, unit_len_bytes, sec.off(dwarf) + unit.off + unit.header_len + entry.off);
const buf = try dwarf.gpa.alloc(u8, len - entry.len);
defer dwarf.gpa.free(buf);
@memset(buf, DW.CFA.nop);
try dwarf.getFile().?.pwriteAll(buf, sec.off(dwarf) + unit.off + unit.header_len + start);
try dwarf.getFile().?.writePositionalAll(io, buf, sec.off(dwarf) + unit.off + unit.header_len + start);
return;
}
const len = unit.getEntry(entry.next.unwrap() orelse return).off - start;
@ -926,7 +933,7 @@ const Entry = struct {
},
} else assert(!sec.pad_entries_to_ideal and len == 0);
assert(fw.end <= len);
try dwarf.getFile().?.pwriteAll(fw.buffered(), sec.off(dwarf) + unit.off + unit.header_len + start);
try dwarf.getFile().?.writePositionalAll(io, fw.buffered(), sec.off(dwarf) + unit.off + unit.header_len + start);
}
fn resize(
@ -969,11 +976,13 @@ const Entry = struct {
fn replace(entry_ptr: *Entry, unit: *Unit, sec: *Section, dwarf: *Dwarf, contents: []const u8) UpdateError!void {
assert(contents.len == entry_ptr.len);
try dwarf.getFile().?.pwriteAll(contents, sec.off(dwarf) + unit.off + unit.header_len + entry_ptr.off);
const comp = dwarf.bin_file.comp;
const io = comp.io;
try dwarf.getFile().?.writePositionalAll(io, contents, sec.off(dwarf) + unit.off + unit.header_len + entry_ptr.off);
if (false) {
const buf = try dwarf.gpa.alloc(u8, sec.len);
defer dwarf.gpa.free(buf);
_ = try dwarf.getFile().?.preadAll(buf, sec.off(dwarf));
_ = try dwarf.getFile().?.readPositionalAll(io, buf, sec.off(dwarf));
log.info("Section{{ .first = {}, .last = {}, .off = 0x{x}, .len = 0x{x} }}", .{
@intFromEnum(sec.first),
@intFromEnum(sec.last),
@ -4702,6 +4711,8 @@ fn updateContainerTypeWriterError(
}
pub fn updateLineNumber(dwarf: *Dwarf, zcu: *Zcu, zir_index: InternPool.TrackedInst.Index) UpdateError!void {
const comp = dwarf.bin_file.comp;
const io = comp.io;
const ip = &zcu.intern_pool;
const inst_info = zir_index.resolveFull(ip).?;
@ -4721,7 +4732,7 @@ pub fn updateLineNumber(dwarf: *Dwarf, zcu: *Zcu, zir_index: InternPool.TrackedI
const unit = dwarf.debug_info.section.getUnit(dwarf.getUnitIfExists(file.mod.?) orelse return);
const entry = unit.getEntry(dwarf.decls.get(zir_index) orelse return);
try dwarf.getFile().?.pwriteAll(&line_buf, dwarf.debug_info.section.off(dwarf) + unit.off + unit.header_len + entry.off + DebugInfo.declEntryLineOff(dwarf));
try dwarf.getFile().?.writePositionalAll(io, &line_buf, dwarf.debug_info.section.off(dwarf) + unit.off + unit.header_len + entry.off + DebugInfo.declEntryLineOff(dwarf));
}
pub fn freeNav(dwarf: *Dwarf, nav_index: InternPool.Nav.Index) void {
@ -4758,6 +4769,8 @@ pub fn flush(dwarf: *Dwarf, pt: Zcu.PerThread) FlushError!void {
fn flushWriterError(dwarf: *Dwarf, pt: Zcu.PerThread) (FlushError || Writer.Error)!void {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const comp = dwarf.bin_file.comp;
const io = comp.io;
{
const type_gop = try dwarf.types.getOrPut(dwarf.gpa, .anyerror_type);
@ -4977,7 +4990,7 @@ fn flushWriterError(dwarf: *Dwarf, pt: Zcu.PerThread) (FlushError || Writer.Erro
if (dwarf.debug_str.section.dirty) {
const contents = dwarf.debug_str.contents.items;
try dwarf.debug_str.section.resize(dwarf, contents.len);
try dwarf.getFile().?.pwriteAll(contents, dwarf.debug_str.section.off(dwarf));
try dwarf.getFile().?.writePositionalAll(io, contents, dwarf.debug_str.section.off(dwarf));
dwarf.debug_str.section.dirty = false;
}
if (dwarf.debug_line.section.dirty) {
@ -5089,7 +5102,7 @@ fn flushWriterError(dwarf: *Dwarf, pt: Zcu.PerThread) (FlushError || Writer.Erro
if (dwarf.debug_line_str.section.dirty) {
const contents = dwarf.debug_line_str.contents.items;
try dwarf.debug_line_str.section.resize(dwarf, contents.len);
try dwarf.getFile().?.pwriteAll(contents, dwarf.debug_line_str.section.off(dwarf));
try dwarf.getFile().?.writePositionalAll(io, contents, dwarf.debug_line_str.section.off(dwarf));
dwarf.debug_line_str.section.dirty = false;
}
if (dwarf.debug_loclists.section.dirty) {
@ -6411,9 +6424,11 @@ fn writeInt(dwarf: *Dwarf, buf: []u8, int: u64) void {
}
fn resolveReloc(dwarf: *Dwarf, source: u64, target: u64, size: u32) RelocError!void {
const comp = dwarf.bin_file.comp;
const io = comp.io;
var buf: [8]u8 = undefined;
dwarf.writeInt(buf[0..size], target);
try dwarf.getFile().?.pwriteAll(buf[0..size], source);
try dwarf.getFile().?.writePositionalAll(io, buf[0..size], source);
}
fn unitLengthBytes(dwarf: *Dwarf) u32 {

View file

@ -582,14 +582,7 @@ pub fn growSection(self: *Elf, shdr_index: u32, needed_size: u64, min_alignment:
new_offset,
});
const amt = try self.base.file.?.copyRangeAll(
shdr.sh_offset,
self.base.file.?,
new_offset,
existing_size,
);
// TODO figure out what to about this error condition - how to communicate it up.
if (amt != existing_size) return error.InputOutput;
try self.base.copyRangeAll(shdr.sh_offset, new_offset, existing_size);
shdr.sh_offset = new_offset;
} else if (shdr.sh_offset + allocated_size == std.math.maxInt(u64)) {
@ -745,7 +738,7 @@ pub fn loadInput(self: *Elf, input: link.Input) !void {
.res => unreachable,
.dso_exact => @panic("TODO"),
.object => |obj| try parseObject(self, obj),
.archive => |obj| try parseArchive(gpa, diags, &self.file_handles, &self.files, target, debug_fmt_strip, default_sym_version, &self.objects, obj, is_static_lib),
.archive => |obj| try parseArchive(gpa, io, diags, &self.file_handles, &self.files, target, debug_fmt_strip, default_sym_version, &self.objects, obj, is_static_lib),
.dso => |dso| try parseDso(gpa, io, diags, dso, &self.shared_objects, &self.files, target),
}
}
@ -1055,9 +1048,11 @@ fn dumpArgvInit(self: *Elf, arena: Allocator) !void {
}
pub fn openParseObjectReportingFailure(self: *Elf, path: Path) void {
const diags = &self.base.comp.link_diags;
const obj = link.openObject(path, false, false) catch |err| {
switch (diags.failParse(path, "failed to open object: {s}", .{@errorName(err)})) {
const comp = self.base.comp;
const io = comp.io;
const diags = &comp.link_diags;
const obj = link.openObject(io, path, false, false) catch |err| {
switch (diags.failParse(path, "failed to open object: {t}", .{err})) {
error.LinkFailure => return,
}
};
@ -1065,10 +1060,11 @@ pub fn openParseObjectReportingFailure(self: *Elf, path: Path) void {
}
fn parseObjectReportingFailure(self: *Elf, obj: link.Input.Object) void {
const diags = &self.base.comp.link_diags;
const comp = self.base.comp;
const diags = &comp.link_diags;
self.parseObject(obj) catch |err| switch (err) {
error.LinkFailure => return, // already reported
else => |e| diags.addParseError(obj.path, "failed to parse object: {s}", .{@errorName(e)}),
else => |e| diags.addParseError(obj.path, "failed to parse object: {t}", .{e}),
};
}
@ -1076,10 +1072,12 @@ fn parseObject(self: *Elf, obj: link.Input.Object) !void {
const tracy = trace(@src());
defer tracy.end();
const gpa = self.base.comp.gpa;
const diags = &self.base.comp.link_diags;
const target = &self.base.comp.root_mod.resolved_target.result;
const debug_fmt_strip = self.base.comp.config.debug_format == .strip;
const comp = self.base.comp;
const io = comp.io;
const gpa = comp.gpa;
const diags = &comp.link_diags;
const target = &comp.root_mod.resolved_target.result;
const debug_fmt_strip = comp.config.debug_format == .strip;
const default_sym_version = self.default_sym_version;
const file_handles = &self.file_handles;
@ -1098,14 +1096,15 @@ fn parseObject(self: *Elf, obj: link.Input.Object) !void {
try self.objects.append(gpa, index);
const object = self.file(index).?.object;
try object.parseCommon(gpa, diags, obj.path, handle, target);
try object.parseCommon(gpa, io, diags, obj.path, handle, target);
if (!self.base.isStaticLib()) {
try object.parse(gpa, diags, obj.path, handle, target, debug_fmt_strip, default_sym_version);
try object.parse(gpa, io, diags, obj.path, handle, target, debug_fmt_strip, default_sym_version);
}
}
fn parseArchive(
gpa: Allocator,
io: Io,
diags: *Diags,
file_handles: *std.ArrayList(File.Handle),
files: *std.MultiArrayList(File.Entry),
@ -1120,7 +1119,7 @@ fn parseArchive(
defer tracy.end();
const fh = try addFileHandle(gpa, file_handles, obj.file);
var archive = try Archive.parse(gpa, diags, file_handles, obj.path, fh);
var archive = try Archive.parse(gpa, io, diags, file_handles, obj.path, fh);
defer archive.deinit(gpa);
const init_alive = if (is_static_lib) true else obj.must_link;
@ -1131,9 +1130,9 @@ fn parseArchive(
const object = &files.items(.data)[index].object;
object.index = index;
object.alive = init_alive;
try object.parseCommon(gpa, diags, obj.path, obj.file, target);
try object.parseCommon(gpa, io, diags, obj.path, obj.file, target);
if (!is_static_lib)
try object.parse(gpa, diags, obj.path, obj.file, target, debug_fmt_strip, default_sym_version);
try object.parse(gpa, io, diags, obj.path, obj.file, target, debug_fmt_strip, default_sym_version);
try objects.append(gpa, index);
}
}
@ -1153,7 +1152,7 @@ fn parseDso(
const handle = dso.file;
const stat = Stat.fromFs(try handle.stat(io));
var header = try SharedObject.parseHeader(gpa, diags, dso.path, handle, stat, target);
var header = try SharedObject.parseHeader(gpa, io, diags, dso.path, handle, stat, target);
defer header.deinit(gpa);
const soname = header.soname() orelse dso.path.basename();
@ -1167,7 +1166,7 @@ fn parseDso(
gop.value_ptr.* = index;
var parsed = try SharedObject.parse(gpa, &header, handle);
var parsed = try SharedObject.parse(gpa, io, &header, handle);
errdefer parsed.deinit(gpa);
const duped_path: Path = .{
@ -2897,13 +2896,7 @@ pub fn allocateAllocSections(self: *Elf) !void {
if (shdr.sh_offset > 0) {
// Get size actually commited to the output file.
const existing_size = self.sectionSize(shndx);
const amt = try self.base.file.?.copyRangeAll(
shdr.sh_offset,
self.base.file.?,
new_offset,
existing_size,
);
if (amt != existing_size) return error.InputOutput;
try self.base.copyRangeAll(shdr.sh_offset, new_offset, existing_size);
}
shdr.sh_offset = new_offset;
@ -2939,13 +2932,7 @@ pub fn allocateNonAllocSections(self: *Elf) !void {
if (shdr.sh_offset > 0) {
const existing_size = self.sectionSize(@intCast(shndx));
const amt = try self.base.file.?.copyRangeAll(
shdr.sh_offset,
self.base.file.?,
new_offset,
existing_size,
);
if (amt != existing_size) return error.InputOutput;
try self.base.copyRangeAll(shdr.sh_offset, new_offset, existing_size);
}
shdr.sh_offset = new_offset;
@ -4075,10 +4062,10 @@ fn fmtDumpState(self: *Elf, writer: *std.Io.Writer) std.Io.Writer.Error!void {
}
/// Caller owns the memory.
pub fn preadAllAlloc(allocator: Allocator, handle: Io.File, offset: u64, size: u64) ![]u8 {
pub fn preadAllAlloc(allocator: Allocator, io: Io, io_file: Io.File, offset: u64, size: u64) ![]u8 {
const buffer = try allocator.alloc(u8, math.cast(usize, size) orelse return error.Overflow);
errdefer allocator.free(buffer);
const amt = try handle.preadAll(buffer, offset);
const amt = try io_file.readPositionalAll(io, buffer, offset);
if (amt != size) return error.InputOutput;
return buffer;
}
@ -4444,10 +4431,10 @@ pub fn stringTableLookup(strtab: []const u8, off: u32) [:0]const u8 {
pub fn pwriteAll(elf_file: *Elf, bytes: []const u8, offset: u64) error{LinkFailure}!void {
const comp = elf_file.base.comp;
const io = comp.io;
const diags = &comp.link_diags;
elf_file.base.file.?.pwriteAll(bytes, offset) catch |err| {
return diags.fail("failed to write: {s}", .{@errorName(err)});
};
elf_file.base.file.?.writePositionalAll(io, bytes, offset) catch |err|
return diags.fail("failed to write: {t}", .{err});
}
pub fn setLength(elf_file: *Elf, length: u64) error{LinkFailure}!void {

View file

@ -34,17 +34,17 @@ pub fn parse(
path: Path,
handle_index: File.HandleIndex,
) !Archive {
const handle = file_handles.items[handle_index];
const file = file_handles.items[handle_index];
var pos: usize = 0;
{
var magic_buffer: [elf.ARMAG.len]u8 = undefined;
const n = try handle.preadAll(&magic_buffer, pos);
const n = try file.readPositionalAll(io, &magic_buffer, pos);
if (n != magic_buffer.len) return error.BadMagic;
if (!mem.eql(u8, &magic_buffer, elf.ARMAG)) return error.BadMagic;
pos += magic_buffer.len;
}
const size = (try handle.stat(io)).size;
const size = (try file.stat(io)).size;
var objects: std.ArrayList(Object) = .empty;
defer objects.deinit(gpa);
@ -55,7 +55,7 @@ pub fn parse(
while (pos < size) {
var hdr: elf.ar_hdr = undefined;
{
const n = try handle.preadAll(mem.asBytes(&hdr), pos);
const n = try file.readPositionalAll(io, mem.asBytes(&hdr), pos);
if (n != @sizeOf(elf.ar_hdr)) return error.UnexpectedEndOfFile;
}
pos += @sizeOf(elf.ar_hdr);
@ -72,7 +72,7 @@ pub fn parse(
if (hdr.isSymtab() or hdr.isSymtab64()) continue;
if (hdr.isStrtab()) {
try strtab.resize(gpa, obj_size);
const amt = try handle.preadAll(strtab.items, pos);
const amt = try file.readPositionalAll(io, strtab.items, pos);
if (amt != obj_size) return error.InputOutput;
continue;
}

View file

@ -90,7 +90,9 @@ pub fn allocate(list: *AtomList, elf_file: *Elf) !void {
}
pub fn write(list: AtomList, buffer: *std.Io.Writer.Allocating, undefs: anytype, elf_file: *Elf) !void {
const gpa = elf_file.base.comp.gpa;
const comp = elf_file.base.comp;
const gpa = comp.gpa;
const io = comp.io;
const osec = elf_file.sections.items(.shdr)[list.output_section_index];
assert(osec.sh_type != elf.SHT_NOBITS);
assert(!list.dirty);
@ -121,12 +123,14 @@ pub fn write(list: AtomList, buffer: *std.Io.Writer.Allocating, undefs: anytype,
try atom_ptr.resolveRelocsAlloc(elf_file, out_code);
}
try elf_file.base.file.?.pwriteAll(buffer.written(), list.offset(elf_file));
try elf_file.base.file.?.writePositionalAll(io, buffer.written(), list.offset(elf_file));
buffer.clearRetainingCapacity();
}
pub fn writeRelocatable(list: AtomList, buffer: *std.array_list.Managed(u8), elf_file: *Elf) !void {
const gpa = elf_file.base.comp.gpa;
const comp = elf_file.base.comp;
const gpa = comp.gpa;
const io = comp.io;
const osec = elf_file.sections.items(.shdr)[list.output_section_index];
assert(osec.sh_type != elf.SHT_NOBITS);
@ -152,7 +156,7 @@ pub fn writeRelocatable(list: AtomList, buffer: *std.array_list.Managed(u8), elf
@memcpy(out_code, code);
}
try elf_file.base.file.?.pwriteAll(buffer.items, list.offset(elf_file));
try elf_file.base.file.?.writePositionalAll(io, buffer.items, list.offset(elf_file));
buffer.clearRetainingCapacity();
}

View file

@ -92,6 +92,7 @@ pub fn deinit(self: *Object, gpa: Allocator) void {
pub fn parse(
self: *Object,
gpa: Allocator,
io: Io,
diags: *Diags,
/// For error reporting purposes only.
path: Path,
@ -105,7 +106,7 @@ pub fn parse(
// Allocate atom index 0 to null atom
try self.atoms.append(gpa, .{ .extra_index = try self.addAtomExtra(gpa, .{}) });
try self.initAtoms(gpa, diags, path, handle, debug_fmt_strip, target);
try self.initAtoms(gpa, io, diags, path, handle, debug_fmt_strip, target);
try self.initSymbols(gpa, default_sym_version);
for (self.shdrs.items, 0..) |shdr, i| {
@ -114,7 +115,7 @@ pub fn parse(
if ((target.cpu.arch == .x86_64 and shdr.sh_type == elf.SHT_X86_64_UNWIND) or
mem.eql(u8, self.getString(atom_ptr.name_offset), ".eh_frame"))
{
try self.parseEhFrame(gpa, handle, @intCast(i), target);
try self.parseEhFrame(gpa, io, handle, @intCast(i), target);
}
}
}
@ -131,7 +132,7 @@ pub fn parseCommon(
const offset = if (self.archive) |ar| ar.offset else 0;
const file_size = (try handle.stat(io)).size;
const header_buffer = try Elf.preadAllAlloc(gpa, handle, offset, @sizeOf(elf.Elf64_Ehdr));
const header_buffer = try Elf.preadAllAlloc(gpa, io, handle, offset, @sizeOf(elf.Elf64_Ehdr));
defer gpa.free(header_buffer);
self.header = @as(*align(1) const elf.Elf64_Ehdr, @ptrCast(header_buffer)).*;
if (!mem.eql(u8, self.header.?.e_ident[0..4], elf.MAGIC)) {
@ -155,7 +156,7 @@ pub fn parseCommon(
return diags.failParse(path, "corrupt header: section header table extends past the end of file", .{});
}
const shdrs_buffer = try Elf.preadAllAlloc(gpa, handle, offset + shoff, shsize);
const shdrs_buffer = try Elf.preadAllAlloc(gpa, io, handle, offset + shoff, shsize);
defer gpa.free(shdrs_buffer);
const shdrs = @as([*]align(1) const elf.Elf64_Shdr, @ptrCast(shdrs_buffer.ptr))[0..shnum];
try self.shdrs.appendUnalignedSlice(gpa, shdrs);
@ -168,7 +169,7 @@ pub fn parseCommon(
}
}
const shstrtab = try self.preadShdrContentsAlloc(gpa, handle, self.header.?.e_shstrndx);
const shstrtab = try self.preadShdrContentsAlloc(gpa, io, handle, self.header.?.e_shstrndx);
defer gpa.free(shstrtab);
for (self.shdrs.items) |shdr| {
if (shdr.sh_name >= shstrtab.len) {
@ -186,7 +187,7 @@ pub fn parseCommon(
const shdr = self.shdrs.items[index];
self.first_global = shdr.sh_info;
const raw_symtab = try self.preadShdrContentsAlloc(gpa, handle, index);
const raw_symtab = try self.preadShdrContentsAlloc(gpa, io, handle, index);
defer gpa.free(raw_symtab);
const nsyms = math.divExact(usize, raw_symtab.len, @sizeOf(elf.Elf64_Sym)) catch {
return diags.failParse(path, "symbol table not evenly divisible", .{});
@ -194,7 +195,7 @@ pub fn parseCommon(
const symtab = @as([*]align(1) const elf.Elf64_Sym, @ptrCast(raw_symtab.ptr))[0..nsyms];
const strtab_bias = @as(u32, @intCast(self.strtab.items.len));
const strtab = try self.preadShdrContentsAlloc(gpa, handle, shdr.sh_link);
const strtab = try self.preadShdrContentsAlloc(gpa, io, handle, shdr.sh_link);
defer gpa.free(strtab);
try self.strtab.appendSlice(gpa, strtab);
@ -290,6 +291,7 @@ pub fn validateEFlags(
fn initAtoms(
self: *Object,
gpa: Allocator,
io: Io,
diags: *Diags,
path: Path,
handle: Io.File,
@ -325,7 +327,7 @@ fn initAtoms(
};
const shndx: u32 = @intCast(i);
const group_raw_data = try self.preadShdrContentsAlloc(gpa, handle, shndx);
const group_raw_data = try self.preadShdrContentsAlloc(gpa, io, handle, shndx);
defer gpa.free(group_raw_data);
const group_nmembers = math.divExact(usize, group_raw_data.len, @sizeOf(u32)) catch {
return diags.failParse(path, "corrupt section group: not evenly divisible ", .{});
@ -366,7 +368,7 @@ fn initAtoms(
const shndx: u32 = @intCast(i);
if (self.skipShdr(shndx, debug_fmt_strip)) continue;
const size, const alignment = if (shdr.sh_flags & elf.SHF_COMPRESSED != 0) blk: {
const data = try self.preadShdrContentsAlloc(gpa, handle, shndx);
const data = try self.preadShdrContentsAlloc(gpa, io, handle, shndx);
defer gpa.free(data);
const chdr = @as(*align(1) const elf.Elf64_Chdr, @ptrCast(data.ptr)).*;
break :blk .{ chdr.ch_size, Alignment.fromNonzeroByteUnits(chdr.ch_addralign) };
@ -387,7 +389,7 @@ fn initAtoms(
elf.SHT_REL, elf.SHT_RELA => {
const atom_index = self.atoms_indexes.items[shdr.sh_info];
if (self.atom(atom_index)) |atom_ptr| {
const relocs = try self.preadRelocsAlloc(gpa, handle, @intCast(i));
const relocs = try self.preadRelocsAlloc(gpa, io, handle, @intCast(i));
defer gpa.free(relocs);
atom_ptr.relocs_section_index = @intCast(i);
const rel_index: u32 = @intCast(self.relocs.items.len);
@ -449,6 +451,7 @@ fn initSymbols(
fn parseEhFrame(
self: *Object,
gpa: Allocator,
io: Io,
handle: Io.File,
shndx: u32,
target: *const std.Target,
@ -458,12 +461,12 @@ fn parseEhFrame(
else => {},
} else null;
const raw = try self.preadShdrContentsAlloc(gpa, handle, shndx);
const raw = try self.preadShdrContentsAlloc(gpa, io, handle, shndx);
defer gpa.free(raw);
const data_start: u32 = @intCast(self.eh_frame_data.items.len);
try self.eh_frame_data.appendSlice(gpa, raw);
const relocs = if (relocs_shndx) |index|
try self.preadRelocsAlloc(gpa, handle, index)
try self.preadRelocsAlloc(gpa, io, handle, index)
else
&[0]elf.Elf64_Rela{};
defer gpa.free(relocs);
@ -1132,6 +1135,9 @@ pub fn updateArSize(self: *Object, elf_file: *Elf) !void {
}
pub fn writeAr(self: Object, elf_file: *Elf, writer: anytype) !void {
const comp = elf_file.base.comp;
const gpa = comp.gpa;
const io = comp.io;
const size = std.math.cast(usize, self.output_ar_state.size) orelse return error.Overflow;
const offset: u64 = if (self.archive) |ar| ar.offset else 0;
const name = fs.path.basename(self.path.sub_path);
@ -1144,10 +1150,9 @@ pub fn writeAr(self: Object, elf_file: *Elf, writer: anytype) !void {
});
try writer.writeAll(mem.asBytes(&hdr));
const handle = elf_file.fileHandle(self.file_handle);
const gpa = elf_file.base.comp.gpa;
const data = try gpa.alloc(u8, size);
defer gpa.free(data);
const amt = try handle.preadAll(data, offset);
const amt = try handle.readPositionalAll(io, data, offset);
if (amt != size) return error.InputOutput;
try writer.writeAll(data);
}
@ -1220,11 +1225,12 @@ pub fn writeSymtab(self: *Object, elf_file: *Elf) void {
/// Caller owns the memory.
pub fn codeDecompressAlloc(self: *Object, elf_file: *Elf, atom_index: Atom.Index) ![]u8 {
const comp = elf_file.base.comp;
const io = comp.io;
const gpa = comp.gpa;
const atom_ptr = self.atom(atom_index).?;
const shdr = atom_ptr.inputShdr(elf_file);
const handle = elf_file.fileHandle(self.file_handle);
const data = try self.preadShdrContentsAlloc(gpa, handle, atom_ptr.input_section_index);
const data = try self.preadShdrContentsAlloc(gpa, io, handle, atom_ptr.input_section_index);
defer if (shdr.sh_flags & elf.SHF_COMPRESSED != 0) gpa.free(data);
if (shdr.sh_flags & elf.SHF_COMPRESSED != 0) {
@ -1340,18 +1346,18 @@ fn addString(self: *Object, gpa: Allocator, str: []const u8) !u32 {
}
/// Caller owns the memory.
fn preadShdrContentsAlloc(self: Object, gpa: Allocator, handle: Io.File, index: u32) ![]u8 {
fn preadShdrContentsAlloc(self: Object, gpa: Allocator, io: Io, handle: Io.File, index: u32) ![]u8 {
assert(index < self.shdrs.items.len);
const offset = if (self.archive) |ar| ar.offset else 0;
const shdr = self.shdrs.items[index];
const sh_offset = math.cast(u64, shdr.sh_offset) orelse return error.Overflow;
const sh_size = math.cast(u64, shdr.sh_size) orelse return error.Overflow;
return Elf.preadAllAlloc(gpa, handle, offset + sh_offset, sh_size);
return Elf.preadAllAlloc(gpa, io, handle, offset + sh_offset, sh_size);
}
/// Caller owns the memory.
fn preadRelocsAlloc(self: Object, gpa: Allocator, handle: Io.File, shndx: u32) ![]align(1) const elf.Elf64_Rela {
const raw = try self.preadShdrContentsAlloc(gpa, handle, shndx);
fn preadRelocsAlloc(self: Object, gpa: Allocator, io: Io, handle: Io.File, shndx: u32) ![]align(1) const elf.Elf64_Rela {
const raw = try self.preadShdrContentsAlloc(gpa, io, handle, shndx);
const num = @divExact(raw.len, @sizeOf(elf.Elf64_Rela));
return @as([*]align(1) const elf.Elf64_Rela, @ptrCast(raw.ptr))[0..num];
}

View file

@ -109,16 +109,17 @@ pub const Parsed = struct {
pub fn parseHeader(
gpa: Allocator,
io: Io,
diags: *Diags,
file_path: Path,
fs_file: Io.File,
file: Io.File,
stat: Stat,
target: *const std.Target,
) !Header {
var ehdr: elf.Elf64_Ehdr = undefined;
{
const buf = mem.asBytes(&ehdr);
const amt = try fs_file.preadAll(buf, 0);
const amt = try file.readPositionalAll(io, buf, 0);
if (amt != buf.len) return error.UnexpectedEndOfFile;
}
if (!mem.eql(u8, ehdr.e_ident[0..4], "\x7fELF")) return error.BadMagic;
@ -135,7 +136,7 @@ pub fn parseHeader(
errdefer gpa.free(sections);
{
const buf = mem.sliceAsBytes(sections);
const amt = try fs_file.preadAll(buf, shoff);
const amt = try file.readPositionalAll(io, buf, shoff);
if (amt != buf.len) return error.UnexpectedEndOfFile;
}
@ -160,7 +161,7 @@ pub fn parseHeader(
const dynamic_table = try gpa.alloc(elf.Elf64_Dyn, n);
errdefer gpa.free(dynamic_table);
const buf = mem.sliceAsBytes(dynamic_table);
const amt = try fs_file.preadAll(buf, shdr.sh_offset);
const amt = try file.readPositionalAll(io, buf, shdr.sh_offset);
if (amt != buf.len) return error.UnexpectedEndOfFile;
break :dt dynamic_table;
} else &.{};
@ -175,7 +176,7 @@ pub fn parseHeader(
const strtab_shdr = sections[dynsym_shdr.sh_link];
const n = std.math.cast(usize, strtab_shdr.sh_size) orelse return error.Overflow;
const buf = try strtab.addManyAsSlice(gpa, n);
const amt = try fs_file.preadAll(buf, strtab_shdr.sh_offset);
const amt = try file.readPositionalAll(io, buf, strtab_shdr.sh_offset);
if (amt != buf.len) return error.UnexpectedEndOfFile;
}
@ -207,9 +208,10 @@ pub fn parseHeader(
pub fn parse(
gpa: Allocator,
io: Io,
/// Moves resources from header. Caller may unconditionally deinit.
header: *Header,
fs_file: Io.File,
file: Io.File,
) !Parsed {
const symtab = if (header.dynsym_sect_index) |index| st: {
const shdr = header.sections[index];
@ -217,7 +219,7 @@ pub fn parse(
const symtab = try gpa.alloc(elf.Elf64_Sym, n);
errdefer gpa.free(symtab);
const buf = mem.sliceAsBytes(symtab);
const amt = try fs_file.preadAll(buf, shdr.sh_offset);
const amt = try file.readPositionalAll(io, buf, shdr.sh_offset);
if (amt != buf.len) return error.UnexpectedEndOfFile;
break :st symtab;
} else &.{};
@ -228,7 +230,7 @@ pub fn parse(
if (header.verdef_sect_index) |shndx| {
const shdr = header.sections[shndx];
const verdefs = try Elf.preadAllAlloc(gpa, fs_file, shdr.sh_offset, shdr.sh_size);
const verdefs = try Elf.preadAllAlloc(gpa, io, file, shdr.sh_offset, shdr.sh_size);
defer gpa.free(verdefs);
var offset: u32 = 0;
@ -254,7 +256,7 @@ pub fn parse(
const versyms = try gpa.alloc(elf.Versym, symtab.len);
errdefer gpa.free(versyms);
const buf = mem.sliceAsBytes(versyms);
const amt = try fs_file.preadAll(buf, shdr.sh_offset);
const amt = try file.readPositionalAll(io, buf, shdr.sh_offset);
if (amt != buf.len) return error.UnexpectedEndOfFile;
break :vs versyms;
} else &.{};

View file

@ -740,7 +740,9 @@ pub fn checkDuplicates(self: *ZigObject, dupes: anytype, elf_file: *Elf) error{O
/// We need this so that we can write to an archive.
/// TODO implement writing ZigObject data directly to a buffer instead.
pub fn readFileContents(self: *ZigObject, elf_file: *Elf) !void {
const gpa = elf_file.base.comp.gpa;
const comp = elf_file.base.comp;
const gpa = comp.gpa;
const io = comp.io;
const shsize: u64 = switch (elf_file.ptr_width) {
.p32 => @sizeOf(elf.Elf32_Shdr),
.p64 => @sizeOf(elf.Elf64_Shdr),
@ -753,7 +755,7 @@ pub fn readFileContents(self: *ZigObject, elf_file: *Elf) !void {
const size = std.math.cast(usize, end_pos) orelse return error.Overflow;
try self.data.resize(gpa, size);
const amt = try elf_file.base.file.?.preadAll(self.data.items, 0);
const amt = try elf_file.base.file.?.readPositionalAll(io, self.data.items, 0);
if (amt != size) return error.InputOutput;
}
@ -901,13 +903,15 @@ pub fn writeSymtab(self: ZigObject, elf_file: *Elf) void {
/// Returns atom's code.
/// Caller owns the memory.
pub fn codeAlloc(self: *ZigObject, elf_file: *Elf, atom_index: Atom.Index) ![]u8 {
const gpa = elf_file.base.comp.gpa;
const comp = elf_file.base.comp;
const gpa = comp.gpa;
const io = comp.io;
const atom_ptr = self.atom(atom_index).?;
const file_offset = atom_ptr.offset(elf_file);
const size = std.math.cast(usize, atom_ptr.size) orelse return error.Overflow;
const code = try gpa.alloc(u8, size);
errdefer gpa.free(code);
const amt = try elf_file.base.file.?.preadAll(code, file_offset);
const amt = try elf_file.base.file.?.readPositionalAll(io, code, file_offset);
if (amt != code.len) {
log.err("fetching code for {s} failed", .{atom_ptr.name(elf_file)});
return error.InputOutput;
@ -1365,6 +1369,8 @@ fn updateNavCode(
) link.File.UpdateNavError!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const comp = elf_file.base.comp;
const io = comp.io;
const ip = &zcu.intern_pool;
const nav = ip.getNav(nav_index);
@ -1449,8 +1455,8 @@ fn updateNavCode(
const shdr = elf_file.sections.items(.shdr)[shdr_index];
if (shdr.sh_type != elf.SHT_NOBITS) {
const file_offset = atom_ptr.offset(elf_file);
elf_file.base.file.?.pwriteAll(code, file_offset) catch |err|
return elf_file.base.cgFail(nav_index, "failed to write to output file: {s}", .{@errorName(err)});
elf_file.base.file.?.writePositionalAll(io, code, file_offset) catch |err|
return elf_file.base.cgFail(nav_index, "failed to write to output file: {t}", .{err});
log.debug("writing {f} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), file_offset, file_offset + code.len });
}
}
@ -1467,6 +1473,8 @@ fn updateTlv(
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const gpa = zcu.gpa;
const comp = elf_file.base.comp;
const io = comp.io;
const nav = ip.getNav(nav_index);
log.debug("updateTlv {f}({d})", .{ nav.fqn.fmt(ip), nav_index });
@ -1503,8 +1511,8 @@ fn updateTlv(
const shdr = elf_file.sections.items(.shdr)[shndx];
if (shdr.sh_type != elf.SHT_NOBITS) {
const file_offset = atom_ptr.offset(elf_file);
elf_file.base.file.?.pwriteAll(code, file_offset) catch |err|
return elf_file.base.cgFail(nav_index, "failed to write to output file: {s}", .{@errorName(err)});
elf_file.base.file.?.writePositionalAll(io, code, file_offset) catch |err|
return elf_file.base.cgFail(nav_index, "failed to write to output file: {t}", .{err});
log.debug("writing TLV {s} from 0x{x} to 0x{x}", .{
atom_ptr.name(elf_file),
file_offset,
@ -2003,6 +2011,8 @@ fn trampolineSize(cpu_arch: std.Target.Cpu.Arch) u64 {
}
fn writeTrampoline(tr_sym: Symbol, target: Symbol, elf_file: *Elf) !void {
const comp = elf_file.base.comp;
const io = comp.io;
const atom_ptr = tr_sym.atom(elf_file).?;
const fileoff = atom_ptr.offset(elf_file);
const source_addr = tr_sym.address(.{}, elf_file);
@ -2012,7 +2022,7 @@ fn writeTrampoline(tr_sym: Symbol, target: Symbol, elf_file: *Elf) !void {
.x86_64 => try x86_64.writeTrampolineCode(source_addr, target_addr, &buf),
else => @panic("TODO implement write trampoline for this CPU arch"),
};
try elf_file.base.file.?.pwriteAll(out, fileoff);
try elf_file.base.file.?.writePositionalAll(io, out, fileoff);
if (elf_file.base.child_pid) |pid| {
switch (builtin.os.tag) {

View file

@ -1,3 +1,23 @@
const std = @import("std");
const assert = std.debug.assert;
const elf = std.elf;
const math = std.math;
const mem = std.mem;
const Path = std.Build.Cache.Path;
const log = std.log.scoped(.link);
const state_log = std.log.scoped(.link_state);
const build_options = @import("build_options");
const eh_frame = @import("eh_frame.zig");
const link = @import("../../link.zig");
const Archive = @import("Archive.zig");
const Compilation = @import("../../Compilation.zig");
const Elf = @import("../Elf.zig");
const File = @import("file.zig").File;
const Object = @import("Object.zig");
const Symbol = @import("Symbol.zig");
pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation) !void {
const gpa = comp.gpa;
const io = comp.io;
@ -127,7 +147,7 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation) !void {
assert(writer.buffered().len == total_size);
try elf_file.base.file.?.setLength(io, total_size);
try elf_file.base.file.?.pwriteAll(writer.buffered(), 0);
try elf_file.base.file.?.writePositionalAll(io, writer.buffered(), 0);
if (diags.hasErrors()) return error.LinkFailure;
}
@ -331,13 +351,7 @@ fn allocateAllocSections(elf_file: *Elf) !void {
if (shdr.sh_offset > 0) {
const existing_size = elf_file.sectionSize(@intCast(shndx));
const amt = try elf_file.base.file.?.copyRangeAll(
shdr.sh_offset,
elf_file.base.file.?,
new_offset,
existing_size,
);
if (amt != existing_size) return error.InputOutput;
try elf_file.base.copyRangeAll(shdr.sh_offset, new_offset, existing_size);
}
shdr.sh_offset = new_offset;
@ -361,7 +375,9 @@ fn writeAtoms(elf_file: *Elf) !void {
}
fn writeSyntheticSections(elf_file: *Elf) !void {
const gpa = elf_file.base.comp.gpa;
const comp = elf_file.base.comp;
const io = comp.io;
const gpa = comp.gpa;
const slice = elf_file.sections.slice();
const SortRelocs = struct {
@ -398,7 +414,7 @@ fn writeSyntheticSections(elf_file: *Elf) !void {
shdr.sh_offset + shdr.sh_size,
});
try elf_file.base.file.?.pwriteAll(@ptrCast(relocs.items), shdr.sh_offset);
try elf_file.base.file.?.writePositionalAll(io, @ptrCast(relocs.items), shdr.sh_offset);
}
if (elf_file.section_indexes.eh_frame) |shndx| {
@ -418,7 +434,7 @@ fn writeSyntheticSections(elf_file: *Elf) !void {
shdr.sh_offset + sh_size,
});
assert(writer.buffered().len == sh_size - existing_size);
try elf_file.base.file.?.pwriteAll(writer.buffered(), shdr.sh_offset + existing_size);
try elf_file.base.file.?.writePositionalAll(io, writer.buffered(), shdr.sh_offset + existing_size);
}
if (elf_file.section_indexes.eh_frame_rela) |shndx| {
const shdr = slice.items(.shdr)[shndx];
@ -436,7 +452,7 @@ fn writeSyntheticSections(elf_file: *Elf) !void {
shdr.sh_offset,
shdr.sh_offset + shdr.sh_size,
});
try elf_file.base.file.?.pwriteAll(@ptrCast(relocs.items), shdr.sh_offset);
try elf_file.base.file.?.writePositionalAll(io, @ptrCast(relocs.items), shdr.sh_offset);
}
try writeGroups(elf_file);
@ -445,7 +461,9 @@ fn writeSyntheticSections(elf_file: *Elf) !void {
}
fn writeGroups(elf_file: *Elf) !void {
const gpa = elf_file.base.comp.gpa;
const comp = elf_file.base.comp;
const io = comp.io;
const gpa = comp.gpa;
for (elf_file.group_sections.items) |cgs| {
const shdr = elf_file.sections.items(.shdr)[cgs.shndx];
const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow;
@ -458,25 +476,6 @@ fn writeGroups(elf_file: *Elf) !void {
shdr.sh_offset,
shdr.sh_offset + shdr.sh_size,
});
try elf_file.base.file.?.pwriteAll(writer.buffered(), shdr.sh_offset);
try elf_file.base.file.?.writePositionalAll(io, writer.buffered(), shdr.sh_offset);
}
}
const assert = std.debug.assert;
const build_options = @import("build_options");
const eh_frame = @import("eh_frame.zig");
const elf = std.elf;
const link = @import("../../link.zig");
const log = std.log.scoped(.link);
const math = std.math;
const mem = std.mem;
const state_log = std.log.scoped(.link_state);
const Path = std.Build.Cache.Path;
const std = @import("std");
const Archive = @import("Archive.zig");
const Compilation = @import("../../Compilation.zig");
const Elf = @import("../Elf.zig");
const File = @import("file.zig").File;
const Object = @import("Object.zig");
const Symbol = @import("Symbol.zig");

View file

@ -406,6 +406,7 @@ fn coffLink(lld: *Lld, arena: Allocator) !void {
the_object_path.sub_path,
directory.handle,
base.emit.sub_path,
io,
.{},
);
} else {
@ -756,6 +757,7 @@ fn findLib(arena: Allocator, io: Io, name: []const u8, lib_directories: []const
fn elfLink(lld: *Lld, arena: Allocator) !void {
const comp = lld.base.comp;
const gpa = comp.gpa;
const io = comp.io;
const diags = &comp.link_diags;
const base = &lld.base;
const elf = &lld.ofmt.elf;
@ -822,6 +824,7 @@ fn elfLink(lld: *Lld, arena: Allocator) !void {
the_object_path.sub_path,
directory.handle,
base.emit.sub_path,
io,
.{},
);
} else {
@ -1336,6 +1339,7 @@ fn wasmLink(lld: *Lld, arena: Allocator) !void {
const wasm = &lld.ofmt.wasm;
const gpa = comp.gpa;
const io = comp.io;
const directory = base.emit.root_dir; // Just an alias to make it shorter to type.
const full_out_path = try directory.join(arena, &[_][]const u8{base.emit.sub_path});
@ -1378,6 +1382,7 @@ fn wasmLink(lld: *Lld, arena: Allocator) !void {
the_object_path.sub_path,
directory.handle,
base.emit.sub_path,
io,
.{},
);
} else {
@ -1571,7 +1576,7 @@ fn wasmLink(lld: *Lld, arena: Allocator) !void {
comp.config.output_mode == .Exe)
{
// chmod does not interact with umask, so we use a conservative -rwxr--r-- here.
Io.Dir.cwd().setFilePermissions(full_out_path, .fromMode(0o744), .{}) catch |err|
Io.Dir.cwd().setFilePermissions(io, full_out_path, .fromMode(0o744), .{}) catch |err|
return diags.fail("{s}: failed to enable executable permissions: {t}", .{ full_out_path, err });
}
}
@ -1579,6 +1584,7 @@ fn wasmLink(lld: *Lld, arena: Allocator) !void {
fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !void {
const io = comp.io;
const gpa = comp.gpa;
if (comp.verbose_link) {
// Skip over our own name so that the LLD linker name is the first argv item.
@ -1596,7 +1602,7 @@ fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !voi
}
var stderr: []u8 = &.{};
defer comp.gpa.free(stderr);
defer gpa.free(stderr);
var child = std.process.Child.init(argv, arena);
const term = (if (comp.clang_passthrough_mode) term: {
@ -1612,8 +1618,8 @@ fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !voi
child.spawn(io) catch |err| break :term err;
var stderr_reader = child.stderr.?.readerStreaming(io, &.{});
stderr = try stderr_reader.interface.allocRemaining(comp.gpa, .unlimited);
break :term child.wait();
stderr = try stderr_reader.interface.allocRemaining(gpa, .unlimited);
break :term child.wait(io);
}) catch |first_err| term: {
const err = switch (first_err) {
error.NameTooLong => err: {
@ -1622,8 +1628,8 @@ fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !voi
const rsp_path = "tmp" ++ s ++ std.fmt.hex(rand_int) ++ ".rsp";
const rsp_file = try comp.dirs.local_cache.handle.createFile(io, rsp_path, .{});
defer comp.dirs.local_cache.handle.deleteFileZ(rsp_path) catch |err|
log.warn("failed to delete response file {s}: {s}", .{ rsp_path, @errorName(err) });
defer comp.dirs.local_cache.handle.deleteFile(io, rsp_path) catch |err|
log.warn("failed to delete response file {s}: {t}", .{ rsp_path, err });
{
defer rsp_file.close(io);
var rsp_file_buffer: [1024]u8 = undefined;
@ -1662,8 +1668,8 @@ fn spawnLld(comp: *Compilation, arena: Allocator, argv: []const []const u8) !voi
rsp_child.spawn(io) catch |err| break :err err;
var stderr_reader = rsp_child.stderr.?.readerStreaming(io, &.{});
stderr = try stderr_reader.interface.allocRemaining(comp.gpa, .unlimited);
break :term rsp_child.wait() catch |err| break :err err;
stderr = try stderr_reader.interface.allocRemaining(gpa, .unlimited);
break :term rsp_child.wait(io) catch |err| break :err err;
}
},
else => first_err,

View file

@ -347,7 +347,8 @@ pub fn flush(
const comp = self.base.comp;
const gpa = comp.gpa;
const diags = &self.base.comp.link_diags;
const io = comp.io;
const diags = &comp.link_diags;
const sub_prog_node = prog_node.start("MachO Flush", 0);
defer sub_prog_node.end();
@ -380,26 +381,26 @@ pub fn flush(
// in this set.
try positionals.ensureUnusedCapacity(comp.c_object_table.keys().len);
for (comp.c_object_table.keys()) |key| {
positionals.appendAssumeCapacity(try link.openObjectInput(diags, key.status.success.object_path));
positionals.appendAssumeCapacity(try link.openObjectInput(io, diags, key.status.success.object_path));
}
if (zcu_obj_path) |path| try positionals.append(try link.openObjectInput(diags, path));
if (zcu_obj_path) |path| try positionals.append(try link.openObjectInput(io, diags, path));
if (comp.config.any_sanitize_thread) {
try positionals.append(try link.openObjectInput(diags, comp.tsan_lib.?.full_object_path));
try positionals.append(try link.openObjectInput(io, diags, comp.tsan_lib.?.full_object_path));
}
if (comp.config.any_fuzz) {
try positionals.append(try link.openArchiveInput(diags, comp.fuzzer_lib.?.full_object_path, false, false));
try positionals.append(try link.openArchiveInput(io, diags, comp.fuzzer_lib.?.full_object_path, false, false));
}
if (comp.ubsan_rt_lib) |crt_file| {
const path = crt_file.full_object_path;
self.classifyInputFile(try link.openArchiveInput(diags, path, false, false)) catch |err|
self.classifyInputFile(try link.openArchiveInput(io, diags, path, false, false)) catch |err|
diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(err)});
} else if (comp.ubsan_rt_obj) |crt_file| {
const path = crt_file.full_object_path;
self.classifyInputFile(try link.openObjectInput(diags, path)) catch |err|
self.classifyInputFile(try link.openObjectInput(io, diags, path)) catch |err|
diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(err)});
}
@ -434,7 +435,7 @@ pub fn flush(
if (comp.config.link_libc and is_exe_or_dyn_lib) {
if (comp.zigc_static_lib) |zigc| {
const path = zigc.full_object_path;
self.classifyInputFile(try link.openArchiveInput(diags, path, false, false)) catch |err|
self.classifyInputFile(try link.openArchiveInput(io, diags, path, false, false)) catch |err|
diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(err)});
}
}
@ -457,12 +458,12 @@ pub fn flush(
for (system_libs.items) |lib| {
switch (Compilation.classifyFileExt(lib.path.sub_path)) {
.shared_library => {
const dso_input = try link.openDsoInput(diags, lib.path, lib.needed, lib.weak, lib.reexport);
const dso_input = try link.openDsoInput(io, diags, lib.path, lib.needed, lib.weak, lib.reexport);
self.classifyInputFile(dso_input) catch |err|
diags.addParseError(lib.path, "failed to parse input file: {s}", .{@errorName(err)});
},
.static_library => {
const archive_input = try link.openArchiveInput(diags, lib.path, lib.must_link, lib.hidden);
const archive_input = try link.openArchiveInput(io, diags, lib.path, lib.must_link, lib.hidden);
self.classifyInputFile(archive_input) catch |err|
diags.addParseError(lib.path, "failed to parse input file: {s}", .{@errorName(err)});
},
@ -473,11 +474,11 @@ pub fn flush(
// Finally, link against compiler_rt.
if (comp.compiler_rt_lib) |crt_file| {
const path = crt_file.full_object_path;
self.classifyInputFile(try link.openArchiveInput(diags, path, false, false)) catch |err|
self.classifyInputFile(try link.openArchiveInput(io, diags, path, false, false)) catch |err|
diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(err)});
} else if (comp.compiler_rt_obj) |crt_file| {
const path = crt_file.full_object_path;
self.classifyInputFile(try link.openObjectInput(diags, path)) catch |err|
self.classifyInputFile(try link.openObjectInput(io, diags, path)) catch |err|
diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(err)});
}
@ -568,7 +569,7 @@ pub fn flush(
self.writeLinkeditSectionsToFile() catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.LinkFailure => return error.LinkFailure,
else => |e| return diags.fail("failed to write linkedit sections to file: {s}", .{@errorName(e)}),
else => |e| return diags.fail("failed to write linkedit sections to file: {t}", .{e}),
};
var codesig: ?CodeSignature = if (self.requiresCodeSig()) blk: {
@ -579,8 +580,8 @@ pub fn flush(
// where the code signature goes into.
var codesig = CodeSignature.init(self.getPageSize());
codesig.code_directory.ident = fs.path.basename(self.base.emit.sub_path);
if (self.entitlements) |path| codesig.addEntitlements(gpa, path) catch |err|
return diags.fail("failed to add entitlements from {s}: {s}", .{ path, @errorName(err) });
if (self.entitlements) |path| codesig.addEntitlements(gpa, io, path) catch |err|
return diags.fail("failed to add entitlements from {s}: {t}", .{ path, err });
try self.writeCodeSignaturePadding(&codesig);
break :blk codesig;
} else null;
@ -866,6 +867,9 @@ pub fn classifyInputFile(self: *MachO, input: link.Input) !void {
const tracy = trace(@src());
defer tracy.end();
const comp = self.base.comp;
const io = comp.io;
const path, const file = input.pathAndFile().?;
// TODO don't classify now, it's too late. The input file has already been classified
log.debug("classifying input file {f}", .{path});
@ -876,7 +880,7 @@ pub fn classifyInputFile(self: *MachO, input: link.Input) !void {
const fat_arch: ?fat.Arch = try self.parseFatFile(file, path);
const offset = if (fat_arch) |fa| fa.offset else 0;
if (readMachHeader(file, offset) catch null) |h| blk: {
if (readMachHeader(io, file, offset) catch null) |h| blk: {
if (h.magic != macho.MH_MAGIC_64) break :blk;
switch (h.filetype) {
macho.MH_OBJECT => try self.addObject(path, fh, offset),
@ -885,7 +889,7 @@ pub fn classifyInputFile(self: *MachO, input: link.Input) !void {
}
return;
}
if (readArMagic(file, offset, &buffer) catch null) |ar_magic| blk: {
if (readArMagic(io, file, offset, &buffer) catch null) |ar_magic| blk: {
if (!mem.eql(u8, ar_magic, Archive.ARMAG)) break :blk;
try self.addArchive(input.archive, fh, fat_arch);
return;
@ -894,11 +898,13 @@ pub fn classifyInputFile(self: *MachO, input: link.Input) !void {
}
fn parseFatFile(self: *MachO, file: Io.File, path: Path) !?fat.Arch {
const diags = &self.base.comp.link_diags;
const fat_h = fat.readFatHeader(file) catch return null;
const comp = self.base.comp;
const io = comp.io;
const diags = &comp.link_diags;
const fat_h = fat.readFatHeader(io, file) catch return null;
if (fat_h.magic != macho.FAT_MAGIC and fat_h.magic != macho.FAT_MAGIC_64) return null;
var fat_archs_buffer: [2]fat.Arch = undefined;
const fat_archs = try fat.parseArchs(file, fat_h, &fat_archs_buffer);
const fat_archs = try fat.parseArchs(io, file, fat_h, &fat_archs_buffer);
const cpu_arch = self.getTarget().cpu.arch;
for (fat_archs) |arch| {
if (arch.tag == cpu_arch) return arch;
@ -906,16 +912,16 @@ fn parseFatFile(self: *MachO, file: Io.File, path: Path) !?fat.Arch {
return diags.failParse(path, "missing arch in universal file: expected {s}", .{@tagName(cpu_arch)});
}
pub fn readMachHeader(file: Io.File, offset: usize) !macho.mach_header_64 {
pub fn readMachHeader(io: Io, file: Io.File, offset: usize) !macho.mach_header_64 {
var buffer: [@sizeOf(macho.mach_header_64)]u8 = undefined;
const nread = try file.preadAll(&buffer, offset);
const nread = try file.readPositionalAll(io, &buffer, offset);
if (nread != buffer.len) return error.InputOutput;
const hdr = @as(*align(1) const macho.mach_header_64, @ptrCast(&buffer)).*;
return hdr;
}
pub fn readArMagic(file: Io.File, offset: usize, buffer: *[Archive.SARMAG]u8) ![]const u8 {
const nread = try file.preadAll(buffer, offset);
pub fn readArMagic(io: Io, file: Io.File, offset: usize, buffer: *[Archive.SARMAG]u8) ![]const u8 {
const nread = try file.readPositionalAll(io, buffer, offset);
if (nread != buffer.len) return error.InputOutput;
return buffer[0..Archive.SARMAG];
}
@ -1212,7 +1218,8 @@ fn parseDependentDylibs(self: *MachO) !void {
const rel_path = try fs.path.join(arena, &.{ prefix, path });
try checked_paths.append(rel_path);
var buffer: [fs.max_path_bytes]u8 = undefined;
const full_path = fs.realpath(rel_path, &buffer) catch continue;
// TODO don't use realpath
const full_path = buffer[0 .. Io.Dir.realPathAbsolute(io, rel_path, &buffer) catch continue];
break :full_path try arena.dupe(u8, full_path);
}
} else if (eatPrefix(id.name, "@loader_path/")) |_| {
@ -1225,8 +1232,9 @@ fn parseDependentDylibs(self: *MachO) !void {
try checked_paths.append(try arena.dupe(u8, id.name));
var buffer: [fs.max_path_bytes]u8 = undefined;
if (fs.realpath(id.name, &buffer)) |full_path| {
break :full_path try arena.dupe(u8, full_path);
// TODO don't use realpath
if (Io.Dir.realPathAbsolute(io, id.name, &buffer)) |full_path_n| {
break :full_path try arena.dupe(u8, buffer[0..full_path_n]);
} else |_| {
try self.reportMissingDependencyError(
self.getFile(dylib_index).?.dylib.getUmbrella(self).index,
@ -1248,7 +1256,7 @@ fn parseDependentDylibs(self: *MachO) !void {
const fat_arch = try self.parseFatFile(file, lib.path);
const offset = if (fat_arch) |fa| fa.offset else 0;
const file_index = file_index: {
if (readMachHeader(file, offset) catch null) |h| blk: {
if (readMachHeader(io, file, offset) catch null) |h| blk: {
if (h.magic != macho.MH_MAGIC_64) break :blk;
switch (h.filetype) {
macho.MH_DYLIB => break :file_index try self.addDylib(lib, false, fh, offset),
@ -3244,21 +3252,36 @@ pub fn findFreeSpaceVirtual(self: *MachO, object_size: u64, min_alignment: u32)
}
pub fn copyRangeAll(self: *MachO, old_offset: u64, new_offset: u64, size: u64) !void {
const file = self.base.file.?;
const amt = try file.copyRangeAll(old_offset, file, new_offset, size);
if (amt != size) return error.InputOutput;
return self.base.copyRangeAll(old_offset, new_offset, size);
}
/// Like File.copyRangeAll but also ensures the source region is zeroed out after copy.
/// Like copyRangeAll but also ensures the source region is zeroed out after copy.
/// This is so that we guarantee zeroed out regions for mapping of zerofill sections by the loader.
fn copyRangeAllZeroOut(self: *MachO, old_offset: u64, new_offset: u64, size: u64) !void {
const gpa = self.base.comp.gpa;
try self.copyRangeAll(old_offset, new_offset, size);
const comp = self.base.comp;
const io = comp.io;
const file = self.base.file.?;
var write_buffer: [2048]u8 = undefined;
var file_reader = file.reader(io, &.{});
file_reader.pos = old_offset;
var file_writer = file.writer(io, &write_buffer);
file_writer.pos = new_offset;
const size_u = math.cast(usize, size) orelse return error.Overflow;
const zeroes = try gpa.alloc(u8, size_u); // TODO no need to allocate here.
defer gpa.free(zeroes);
@memset(zeroes, 0);
try self.base.file.?.pwriteAll(zeroes, old_offset);
const n = file_writer.interface.sendFileAll(&file_reader, .limited(size_u)) catch |err| switch (err) {
error.ReadFailed => return file_reader.err.?,
error.WriteFailed => return file_writer.err.?,
};
assert(n == size_u);
file_writer.seekTo(old_offset) catch |err| switch (err) {
error.WriteFailed => return file_writer.err.?,
else => |e| return e,
};
file_writer.interface.splatByteAll(0, size_u) catch |err| switch (err) {
error.WriteFailed => return file_writer.err.?,
};
file_writer.interface.flush() catch |err| switch (err) {
error.WriteFailed => return file_writer.err.?,
};
}
const InitMetadataOptions = struct {
@ -5355,10 +5378,10 @@ fn isReachable(atom: *const Atom, rel: Relocation, macho_file: *MachO) bool {
pub fn pwriteAll(macho_file: *MachO, bytes: []const u8, offset: u64) error{LinkFailure}!void {
const comp = macho_file.base.comp;
const io = comp.io;
const diags = &comp.link_diags;
macho_file.base.file.?.pwriteAll(bytes, offset) catch |err| {
return diags.fail("failed to write: {s}", .{@errorName(err)});
};
macho_file.base.file.?.writePositionalAll(io, bytes, offset) catch |err|
return diags.fail("failed to write: {t}", .{err});
}
pub fn setLength(macho_file: *MachO, length: u64) error{LinkFailure}!void {

View file

@ -24,7 +24,7 @@ pub fn unpack(self: *Archive, macho_file: *MachO, path: Path, handle_index: File
var hdr_buffer: [@sizeOf(ar_hdr)]u8 = undefined;
{
const amt = try handle.preadAll(&hdr_buffer, pos);
const amt = try handle.readPositionalAll(io, &hdr_buffer, pos);
if (amt != @sizeOf(ar_hdr)) return error.InputOutput;
}
const hdr = @as(*align(1) const ar_hdr, @ptrCast(&hdr_buffer)).*;
@ -42,7 +42,7 @@ pub fn unpack(self: *Archive, macho_file: *MachO, path: Path, handle_index: File
if (try hdr.nameLength()) |len| {
hdr_size -= len;
const buf = try arena.allocator().alloc(u8, len);
const amt = try handle.preadAll(buf, pos);
const amt = try handle.readPositionalAll(io, buf, pos);
if (amt != len) return error.InputOutput;
pos += len;
const actual_len = mem.indexOfScalar(u8, buf, @as(u8, 0)) orelse len;

View file

@ -135,20 +135,12 @@ pub fn growSection(
const new_offset = try self.findFreeSpace(needed_size, 1);
log.debug("moving {s} section: {} bytes from 0x{x} to 0x{x}", .{
sect.sectName(),
existing_size,
sect.offset,
new_offset,
sect.sectName(), existing_size, sect.offset, new_offset,
});
if (requires_file_copy) {
const amt = try self.file.?.copyRangeAll(
sect.offset,
self.file.?,
new_offset,
existing_size,
);
if (amt != existing_size) return error.InputOutput;
const file = self.file.?;
try link.File.copyRangeAll2(io, file, file, sect.offset, new_offset, existing_size);
}
sect.offset = @intCast(new_offset);
@ -204,6 +196,7 @@ fn findFreeSpace(self: *DebugSymbols, object_size: u64, min_alignment: u64) !u64
}
pub fn flush(self: *DebugSymbols, macho_file: *MachO) !void {
const io = self.io;
const zo = macho_file.getZigObject().?;
for (self.relocs.items) |*reloc| {
const sym = zo.symbols.items[reloc.target];
@ -215,12 +208,9 @@ pub fn flush(self: *DebugSymbols, macho_file: *MachO) !void {
const sect = &self.sections.items[self.debug_info_section_index.?];
const file_offset = sect.offset + reloc.offset;
log.debug("resolving relocation: {d}@{x} ('{s}') at offset {x}", .{
reloc.target,
addr,
sym_name,
file_offset,
reloc.target, addr, sym_name, file_offset,
});
try self.file.?.pwriteAll(mem.asBytes(&addr), file_offset);
try self.file.?.writePositionalAll(io, mem.asBytes(&addr), file_offset);
}
self.finalizeDwarfSegment(macho_file);
@ -294,6 +284,7 @@ fn finalizeDwarfSegment(self: *DebugSymbols, macho_file: *MachO) void {
}
fn writeLoadCommands(self: *DebugSymbols, macho_file: *MachO) !struct { usize, usize } {
const io = self.io;
const gpa = self.allocator;
const needed_size = load_commands.calcLoadCommandsSizeDsym(macho_file, self);
const buffer = try gpa.alloc(u8, needed_size);
@ -345,12 +336,13 @@ fn writeLoadCommands(self: *DebugSymbols, macho_file: *MachO) !struct { usize, u
assert(writer.end == needed_size);
try self.file.?.pwriteAll(buffer, @sizeOf(macho.mach_header_64));
try self.file.?.writePositionalAll(io, buffer, @sizeOf(macho.mach_header_64));
return .{ ncmds, buffer.len };
}
fn writeHeader(self: *DebugSymbols, macho_file: *MachO, ncmds: usize, sizeofcmds: usize) !void {
const io = self.io;
var header: macho.mach_header_64 = .{};
header.filetype = macho.MH_DSYM;
@ -371,7 +363,7 @@ fn writeHeader(self: *DebugSymbols, macho_file: *MachO, ncmds: usize, sizeofcmds
log.debug("writing Mach-O header {}", .{header});
try self.file.?.pwriteAll(mem.asBytes(&header), 0);
try self.file.?.writePositionalAll(io, mem.asBytes(&header), 0);
}
fn allocatedSize(self: *DebugSymbols, start: u64) u64 {
@ -406,6 +398,8 @@ fn writeLinkeditSegmentData(self: *DebugSymbols, macho_file: *MachO) !void {
pub fn writeSymtab(self: *DebugSymbols, off: u32, macho_file: *MachO) !u32 {
const tracy = trace(@src());
defer tracy.end();
const io = self.io;
const gpa = self.allocator;
const cmd = &self.symtab_cmd;
cmd.nsyms = macho_file.symtab_cmd.nsyms;
@ -429,15 +423,16 @@ pub fn writeSymtab(self: *DebugSymbols, off: u32, macho_file: *MachO) !u32 {
internal.writeSymtab(macho_file, self);
}
try self.file.?.pwriteAll(@ptrCast(self.symtab.items), cmd.symoff);
try self.file.?.writePositionalAll(io, @ptrCast(self.symtab.items), cmd.symoff);
return off + cmd.nsyms * @sizeOf(macho.nlist_64);
}
pub fn writeStrtab(self: *DebugSymbols, off: u32) !u32 {
const io = self.io;
const cmd = &self.symtab_cmd;
cmd.stroff = off;
try self.file.?.pwriteAll(self.strtab.items, cmd.stroff);
try self.file.?.writePositionalAll(io, self.strtab.items, cmd.stroff);
return off + cmd.strsize;
}

View file

@ -57,7 +57,9 @@ fn parseBinary(self: *Dylib, macho_file: *MachO) !void {
const tracy = trace(@src());
defer tracy.end();
const gpa = macho_file.base.comp.gpa;
const comp = macho_file.base.comp;
const io = comp.io;
const gpa = comp.gpa;
const file = macho_file.getFileHandle(self.file_handle);
const offset = self.offset;
@ -65,7 +67,7 @@ fn parseBinary(self: *Dylib, macho_file: *MachO) !void {
var header_buffer: [@sizeOf(macho.mach_header_64)]u8 = undefined;
{
const amt = try file.preadAll(&header_buffer, offset);
const amt = try file.readPositionalAll(io, &header_buffer, offset);
if (amt != @sizeOf(macho.mach_header_64)) return error.InputOutput;
}
const header = @as(*align(1) const macho.mach_header_64, @ptrCast(&header_buffer)).*;
@ -86,7 +88,7 @@ fn parseBinary(self: *Dylib, macho_file: *MachO) !void {
const lc_buffer = try gpa.alloc(u8, header.sizeofcmds);
defer gpa.free(lc_buffer);
{
const amt = try file.preadAll(lc_buffer, offset + @sizeOf(macho.mach_header_64));
const amt = try file.readPositionalAll(io, lc_buffer, offset + @sizeOf(macho.mach_header_64));
if (amt != lc_buffer.len) return error.InputOutput;
}
@ -103,7 +105,7 @@ fn parseBinary(self: *Dylib, macho_file: *MachO) !void {
const dyld_cmd = cmd.cast(macho.dyld_info_command).?;
const data = try gpa.alloc(u8, dyld_cmd.export_size);
defer gpa.free(data);
const amt = try file.preadAll(data, dyld_cmd.export_off + offset);
const amt = try file.readPositionalAll(io, data, dyld_cmd.export_off + offset);
if (amt != data.len) return error.InputOutput;
try self.parseTrie(data, macho_file);
},
@ -111,7 +113,7 @@ fn parseBinary(self: *Dylib, macho_file: *MachO) !void {
const ld_cmd = cmd.cast(macho.linkedit_data_command).?;
const data = try gpa.alloc(u8, ld_cmd.datasize);
defer gpa.free(data);
const amt = try file.preadAll(data, ld_cmd.dataoff + offset);
const amt = try file.readPositionalAll(io, data, ld_cmd.dataoff + offset);
if (amt != data.len) return error.InputOutput;
try self.parseTrie(data, macho_file);
},
@ -238,13 +240,15 @@ fn parseTbd(self: *Dylib, macho_file: *MachO) !void {
const tracy = trace(@src());
defer tracy.end();
const gpa = macho_file.base.comp.gpa;
const comp = macho_file.base.comp;
const gpa = comp.gpa;
const io = comp.io;
log.debug("parsing dylib from stub: {f}", .{self.path});
const file = macho_file.getFileHandle(self.file_handle);
var lib_stub = LibStub.loadFromFile(gpa, file) catch |err| {
try macho_file.reportParseError2(self.index, "failed to parse TBD file: {s}", .{@errorName(err)});
var lib_stub = LibStub.loadFromFile(gpa, io, file) catch |err| {
try macho_file.reportParseError2(self.index, "failed to parse TBD file: {t}", .{err});
return error.MalformedTbd;
};
defer lib_stub.deinit();

View file

@ -1,3 +1,30 @@
const Object = @This();
const trace = @import("../../tracy.zig").trace;
const Archive = @import("Archive.zig");
const Atom = @import("Atom.zig");
const Dwarf = @import("Dwarf.zig");
const File = @import("file.zig").File;
const MachO = @import("../MachO.zig");
const Relocation = @import("Relocation.zig");
const Symbol = @import("Symbol.zig");
const UnwindInfo = @import("UnwindInfo.zig");
const std = @import("std");
const Io = std.Io;
const Writer = std.Io.Writer;
const assert = std.debug.assert;
const log = std.log.scoped(.link);
const macho = std.macho;
const LoadCommandIterator = macho.LoadCommandIterator;
const math = std.math;
const mem = std.mem;
const Allocator = std.mem.Allocator;
const eh_frame = @import("eh_frame.zig");
const Cie = eh_frame.Cie;
const Fde = eh_frame.Fde;
/// Non-zero for fat object files or archives
offset: u64,
/// If `in_archive` is not `null`, this is the basename of the object in the archive. Otherwise,
@ -75,7 +102,9 @@ pub fn parse(self: *Object, macho_file: *MachO) !void {
log.debug("parsing {f}", .{self.fmtPath()});
const gpa = macho_file.base.comp.gpa;
const comp = macho_file.base.comp;
const io = comp.io;
const gpa = comp.gpa;
const handle = macho_file.getFileHandle(self.file_handle);
const cpu_arch = macho_file.getTarget().cpu.arch;
@ -84,7 +113,7 @@ pub fn parse(self: *Object, macho_file: *MachO) !void {
var header_buffer: [@sizeOf(macho.mach_header_64)]u8 = undefined;
{
const amt = try handle.preadAll(&header_buffer, self.offset);
const amt = try handle.readPositionalAll(io, &header_buffer, self.offset);
if (amt != @sizeOf(macho.mach_header_64)) return error.InputOutput;
}
self.header = @as(*align(1) const macho.mach_header_64, @ptrCast(&header_buffer)).*;
@ -105,7 +134,7 @@ pub fn parse(self: *Object, macho_file: *MachO) !void {
const lc_buffer = try gpa.alloc(u8, self.header.?.sizeofcmds);
defer gpa.free(lc_buffer);
{
const amt = try handle.preadAll(lc_buffer, self.offset + @sizeOf(macho.mach_header_64));
const amt = try handle.readPositionalAll(io, lc_buffer, self.offset + @sizeOf(macho.mach_header_64));
if (amt != self.header.?.sizeofcmds) return error.InputOutput;
}
@ -129,14 +158,14 @@ pub fn parse(self: *Object, macho_file: *MachO) !void {
const cmd = lc.cast(macho.symtab_command).?;
try self.strtab.resize(gpa, cmd.strsize);
{
const amt = try handle.preadAll(self.strtab.items, cmd.stroff + self.offset);
const amt = try handle.readPositionalAll(io, self.strtab.items, cmd.stroff + self.offset);
if (amt != self.strtab.items.len) return error.InputOutput;
}
const symtab_buffer = try gpa.alloc(u8, cmd.nsyms * @sizeOf(macho.nlist_64));
defer gpa.free(symtab_buffer);
{
const amt = try handle.preadAll(symtab_buffer, cmd.symoff + self.offset);
const amt = try handle.readPositionalAll(io, symtab_buffer, cmd.symoff + self.offset);
if (amt != symtab_buffer.len) return error.InputOutput;
}
const symtab = @as([*]align(1) const macho.nlist_64, @ptrCast(symtab_buffer.ptr))[0..cmd.nsyms];
@ -154,7 +183,7 @@ pub fn parse(self: *Object, macho_file: *MachO) !void {
const buffer = try gpa.alloc(u8, cmd.datasize);
defer gpa.free(buffer);
{
const amt = try handle.preadAll(buffer, self.offset + cmd.dataoff);
const amt = try handle.readPositionalAll(io, buffer, self.offset + cmd.dataoff);
if (amt != buffer.len) return error.InputOutput;
}
const ndice = @divExact(cmd.datasize, @sizeOf(macho.data_in_code_entry));
@ -440,12 +469,14 @@ fn initCstringLiterals(self: *Object, allocator: Allocator, file: File.Handle, m
const tracy = trace(@src());
defer tracy.end();
const comp = macho_file.base.comp;
const io = comp.io;
const slice = self.sections.slice();
for (slice.items(.header), 0..) |sect, n_sect| {
if (!isCstringLiteral(sect)) continue;
const data = try self.readSectionData(allocator, file, @intCast(n_sect));
const data = try self.readSectionData(allocator, io, file, @intCast(n_sect));
defer allocator.free(data);
var count: u32 = 0;
@ -628,7 +659,9 @@ pub fn resolveLiterals(self: *Object, lp: *MachO.LiteralPool, macho_file: *MachO
const tracy = trace(@src());
defer tracy.end();
const gpa = macho_file.base.comp.gpa;
const comp = macho_file.base.comp;
const io = comp.io;
const gpa = comp.gpa;
const file = macho_file.getFileHandle(self.file_handle);
var buffer = std.array_list.Managed(u8).init(gpa);
@ -647,7 +680,7 @@ pub fn resolveLiterals(self: *Object, lp: *MachO.LiteralPool, macho_file: *MachO
const slice = self.sections.slice();
for (slice.items(.header), slice.items(.subsections), 0..) |header, subs, n_sect| {
if (isCstringLiteral(header) or isFixedSizeLiteral(header)) {
const data = try self.readSectionData(gpa, file, @intCast(n_sect));
const data = try self.readSectionData(gpa, io, file, @intCast(n_sect));
defer gpa.free(data);
for (subs.items) |sub| {
@ -682,7 +715,7 @@ pub fn resolveLiterals(self: *Object, lp: *MachO.LiteralPool, macho_file: *MachO
buffer.resize(target_size) catch unreachable;
const gop = try sections_data.getOrPut(target.n_sect);
if (!gop.found_existing) {
gop.value_ptr.* = try self.readSectionData(gpa, file, @intCast(target.n_sect));
gop.value_ptr.* = try self.readSectionData(gpa, io, file, @intCast(target.n_sect));
}
const data = gop.value_ptr.*;
const target_off = try macho_file.cast(usize, target.off);
@ -1037,9 +1070,11 @@ fn initEhFrameRecords(self: *Object, allocator: Allocator, sect_id: u8, file: Fi
const sect = slice.items(.header)[sect_id];
const relocs = slice.items(.relocs)[sect_id];
const comp = macho_file.base.comp;
const io = comp.io;
const size = try macho_file.cast(usize, sect.size);
try self.eh_frame_data.resize(allocator, size);
const amt = try file.preadAll(self.eh_frame_data.items, sect.offset + self.offset);
const amt = try file.readPositionalAll(io, self.eh_frame_data.items, sect.offset + self.offset);
if (amt != self.eh_frame_data.items.len) return error.InputOutput;
// Check for non-personality relocs in FDEs and apply them
@ -1138,8 +1173,10 @@ fn initUnwindRecords(self: *Object, allocator: Allocator, sect_id: u8, file: Fil
}
};
const comp = macho_file.base.comp;
const io = comp.io;
const header = self.sections.items(.header)[sect_id];
const data = try self.readSectionData(allocator, file, sect_id);
const data = try self.readSectionData(allocator, io, file, sect_id);
defer allocator.free(data);
const nrecs = @divExact(data.len, @sizeOf(macho.compact_unwind_entry));
@ -1348,7 +1385,9 @@ fn parseDebugInfo(self: *Object, macho_file: *MachO) !void {
const tracy = trace(@src());
defer tracy.end();
const gpa = macho_file.base.comp.gpa;
const comp = macho_file.base.comp;
const io = comp.io;
const gpa = comp.gpa;
const file = macho_file.getFileHandle(self.file_handle);
var dwarf: Dwarf = .{};
@ -1358,18 +1397,18 @@ fn parseDebugInfo(self: *Object, macho_file: *MachO) !void {
const n_sect: u8 = @intCast(index);
if (sect.attrs() & macho.S_ATTR_DEBUG == 0) continue;
if (mem.eql(u8, sect.sectName(), "__debug_info")) {
dwarf.debug_info = try self.readSectionData(gpa, file, n_sect);
dwarf.debug_info = try self.readSectionData(gpa, io, file, n_sect);
}
if (mem.eql(u8, sect.sectName(), "__debug_abbrev")) {
dwarf.debug_abbrev = try self.readSectionData(gpa, file, n_sect);
dwarf.debug_abbrev = try self.readSectionData(gpa, io, file, n_sect);
}
if (mem.eql(u8, sect.sectName(), "__debug_str")) {
dwarf.debug_str = try self.readSectionData(gpa, file, n_sect);
dwarf.debug_str = try self.readSectionData(gpa, io, file, n_sect);
}
// __debug_str_offs[ets] section is a new addition in DWARFv5 and is generally
// required in order to correctly parse strings.
if (mem.eql(u8, sect.sectName(), "__debug_str_offs")) {
dwarf.debug_str_offsets = try self.readSectionData(gpa, file, n_sect);
dwarf.debug_str_offsets = try self.readSectionData(gpa, io, file, n_sect);
}
}
@ -1611,12 +1650,14 @@ pub fn parseAr(self: *Object, macho_file: *MachO) !void {
const tracy = trace(@src());
defer tracy.end();
const gpa = macho_file.base.comp.gpa;
const comp = macho_file.base.comp;
const io = comp.io;
const gpa = comp.gpa;
const handle = macho_file.getFileHandle(self.file_handle);
var header_buffer: [@sizeOf(macho.mach_header_64)]u8 = undefined;
{
const amt = try handle.preadAll(&header_buffer, self.offset);
const amt = try handle.readPositionalAll(io, &header_buffer, self.offset);
if (amt != @sizeOf(macho.mach_header_64)) return error.InputOutput;
}
self.header = @as(*align(1) const macho.mach_header_64, @ptrCast(&header_buffer)).*;
@ -1637,7 +1678,7 @@ pub fn parseAr(self: *Object, macho_file: *MachO) !void {
const lc_buffer = try gpa.alloc(u8, self.header.?.sizeofcmds);
defer gpa.free(lc_buffer);
{
const amt = try handle.preadAll(lc_buffer, self.offset + @sizeOf(macho.mach_header_64));
const amt = try handle.readPositionalAll(io, lc_buffer, self.offset + @sizeOf(macho.mach_header_64));
if (amt != self.header.?.sizeofcmds) return error.InputOutput;
}
@ -1647,14 +1688,14 @@ pub fn parseAr(self: *Object, macho_file: *MachO) !void {
const cmd = lc.cast(macho.symtab_command).?;
try self.strtab.resize(gpa, cmd.strsize);
{
const amt = try handle.preadAll(self.strtab.items, cmd.stroff + self.offset);
const amt = try handle.readPositionalAll(io, self.strtab.items, cmd.stroff + self.offset);
if (amt != self.strtab.items.len) return error.InputOutput;
}
const symtab_buffer = try gpa.alloc(u8, cmd.nsyms * @sizeOf(macho.nlist_64));
defer gpa.free(symtab_buffer);
{
const amt = try handle.preadAll(symtab_buffer, cmd.symoff + self.offset);
const amt = try handle.readPositionalAll(io, symtab_buffer, cmd.symoff + self.offset);
if (amt != symtab_buffer.len) return error.InputOutput;
}
const symtab = @as([*]align(1) const macho.nlist_64, @ptrCast(symtab_buffer.ptr))[0..cmd.nsyms];
@ -1697,7 +1738,7 @@ pub fn updateArSize(self: *Object, macho_file: *MachO) !void {
};
}
pub fn writeAr(self: Object, ar_format: Archive.Format, macho_file: *MachO, writer: anytype) !void {
pub fn writeAr(self: Object, ar_format: Archive.Format, macho_file: *MachO, writer: *Writer) !void {
// Header
const size = try macho_file.cast(usize, self.output_ar_state.size);
const basename = std.fs.path.basename(self.path);
@ -1705,10 +1746,12 @@ pub fn writeAr(self: Object, ar_format: Archive.Format, macho_file: *MachO, writ
// Data
const file = macho_file.getFileHandle(self.file_handle);
// TODO try using copyRangeAll
const gpa = macho_file.base.comp.gpa;
const comp = macho_file.base.comp;
const io = comp.io;
const gpa = comp.gpa;
const data = try gpa.alloc(u8, size);
defer gpa.free(data);
const amt = try file.preadAll(data, self.offset);
const amt = try file.readPositionalAll(io, data, self.offset);
if (amt != size) return error.InputOutput;
try writer.writeAll(data);
}
@ -1813,7 +1856,9 @@ pub fn writeAtoms(self: *Object, macho_file: *MachO) !void {
const tracy = trace(@src());
defer tracy.end();
const gpa = macho_file.base.comp.gpa;
const comp = macho_file.base.comp;
const io = comp.io;
const gpa = comp.gpa;
const headers = self.sections.items(.header);
const sections_data = try gpa.alloc([]const u8, headers.len);
defer {
@ -1829,7 +1874,7 @@ pub fn writeAtoms(self: *Object, macho_file: *MachO) !void {
if (header.isZerofill()) continue;
const size = try macho_file.cast(usize, header.size);
const data = try gpa.alloc(u8, size);
const amt = try file.preadAll(data, header.offset + self.offset);
const amt = try file.readPositionalAll(io, data, header.offset + self.offset);
if (amt != data.len) return error.InputOutput;
sections_data[n_sect] = data;
}
@ -1852,7 +1897,9 @@ pub fn writeAtomsRelocatable(self: *Object, macho_file: *MachO) !void {
const tracy = trace(@src());
defer tracy.end();
const gpa = macho_file.base.comp.gpa;
const comp = macho_file.base.comp;
const io = comp.io;
const gpa = comp.gpa;
const headers = self.sections.items(.header);
const sections_data = try gpa.alloc([]const u8, headers.len);
defer {
@ -1868,7 +1915,7 @@ pub fn writeAtomsRelocatable(self: *Object, macho_file: *MachO) !void {
if (header.isZerofill()) continue;
const size = try macho_file.cast(usize, header.size);
const data = try gpa.alloc(u8, size);
const amt = try file.preadAll(data, header.offset + self.offset);
const amt = try file.readPositionalAll(io, data, header.offset + self.offset);
if (amt != data.len) return error.InputOutput;
sections_data[n_sect] = data;
}
@ -2484,11 +2531,11 @@ pub fn getUnwindRecord(self: *Object, index: UnwindInfo.Record.Index) *UnwindInf
}
/// Caller owns the memory.
pub fn readSectionData(self: Object, allocator: Allocator, file: File.Handle, n_sect: u8) ![]u8 {
pub fn readSectionData(self: Object, allocator: Allocator, io: Io, file: File.Handle, n_sect: u8) ![]u8 {
const header = self.sections.items(.header)[n_sect];
const size = math.cast(usize, header.size) orelse return error.Overflow;
const data = try allocator.alloc(u8, size);
const amt = try file.preadAll(data, header.offset + self.offset);
const amt = try file.readPositionalAll(io, data, header.offset + self.offset);
errdefer allocator.free(data);
if (amt != data.len) return error.InputOutput;
return data;
@ -2712,15 +2759,17 @@ const x86_64 = struct {
handle: File.Handle,
macho_file: *MachO,
) !void {
const gpa = macho_file.base.comp.gpa;
const comp = macho_file.base.comp;
const io = comp.io;
const gpa = comp.gpa;
const relocs_buffer = try gpa.alloc(u8, sect.nreloc * @sizeOf(macho.relocation_info));
defer gpa.free(relocs_buffer);
const amt = try handle.preadAll(relocs_buffer, sect.reloff + self.offset);
const amt = try handle.readPositionalAll(io, relocs_buffer, sect.reloff + self.offset);
if (amt != relocs_buffer.len) return error.InputOutput;
const relocs = @as([*]align(1) const macho.relocation_info, @ptrCast(relocs_buffer.ptr))[0..sect.nreloc];
const code = try self.readSectionData(gpa, handle, n_sect);
const code = try self.readSectionData(gpa, io, handle, n_sect);
defer gpa.free(code);
try out.ensureTotalCapacityPrecise(gpa, relocs.len);
@ -2879,15 +2928,17 @@ const aarch64 = struct {
handle: File.Handle,
macho_file: *MachO,
) !void {
const gpa = macho_file.base.comp.gpa;
const comp = macho_file.base.comp;
const io = comp.io;
const gpa = comp.gpa;
const relocs_buffer = try gpa.alloc(u8, sect.nreloc * @sizeOf(macho.relocation_info));
defer gpa.free(relocs_buffer);
const amt = try handle.preadAll(relocs_buffer, sect.reloff + self.offset);
const amt = try handle.readPositionalAll(io, relocs_buffer, sect.reloff + self.offset);
if (amt != relocs_buffer.len) return error.InputOutput;
const relocs = @as([*]align(1) const macho.relocation_info, @ptrCast(relocs_buffer.ptr))[0..sect.nreloc];
const code = try self.readSectionData(gpa, handle, n_sect);
const code = try self.readSectionData(gpa, io, handle, n_sect);
defer gpa.free(code);
try out.ensureTotalCapacityPrecise(gpa, relocs.len);
@ -3063,27 +3114,3 @@ const aarch64 = struct {
}
}
};
const std = @import("std");
const assert = std.debug.assert;
const log = std.log.scoped(.link);
const macho = std.macho;
const math = std.math;
const mem = std.mem;
const Allocator = std.mem.Allocator;
const Writer = std.Io.Writer;
const eh_frame = @import("eh_frame.zig");
const trace = @import("../../tracy.zig").trace;
const Archive = @import("Archive.zig");
const Atom = @import("Atom.zig");
const Cie = eh_frame.Cie;
const Dwarf = @import("Dwarf.zig");
const Fde = eh_frame.Fde;
const File = @import("file.zig").File;
const LoadCommandIterator = macho.LoadCommandIterator;
const MachO = @import("../MachO.zig");
const Object = @This();
const Relocation = @import("Relocation.zig");
const Symbol = @import("Symbol.zig");
const UnwindInfo = @import("UnwindInfo.zig");

View file

@ -171,6 +171,9 @@ pub fn getAtomData(self: ZigObject, macho_file: *MachO, atom: Atom, buffer: []u8
const isec = atom.getInputSection(macho_file);
assert(!isec.isZerofill());
const comp = macho_file.base.comp;
const io = comp.io;
switch (isec.type()) {
macho.S_THREAD_LOCAL_REGULAR => {
const tlv = self.tlv_initializers.get(atom.atom_index).?;
@ -182,7 +185,7 @@ pub fn getAtomData(self: ZigObject, macho_file: *MachO, atom: Atom, buffer: []u8
else => {
const sect = macho_file.sections.items(.header)[atom.out_n_sect];
const file_offset = sect.offset + atom.value;
const amt = try macho_file.base.file.?.preadAll(buffer, file_offset);
const amt = try macho_file.base.file.?.readPositionalAll(io, buffer, file_offset);
if (amt != buffer.len) return error.InputOutput;
},
}
@ -290,12 +293,14 @@ pub fn dedupLiterals(self: *ZigObject, lp: MachO.LiteralPool, macho_file: *MachO
/// We need this so that we can write to an archive.
/// TODO implement writing ZigObject data directly to a buffer instead.
pub fn readFileContents(self: *ZigObject, macho_file: *MachO) !void {
const diags = &macho_file.base.comp.link_diags;
const comp = macho_file.base.comp;
const gpa = comp.gpa;
const io = comp.io;
const diags = &comp.link_diags;
// Size of the output object file is always the offset + size of the strtab
const size = macho_file.symtab_cmd.stroff + macho_file.symtab_cmd.strsize;
const gpa = macho_file.base.comp.gpa;
try self.data.resize(gpa, size);
const amt = macho_file.base.file.?.preadAll(self.data.items, 0) catch |err|
const amt = macho_file.base.file.?.readPositionalAll(io, self.data.items, 0) catch |err|
return diags.fail("failed to read output file: {s}", .{@errorName(err)});
if (amt != size)
return diags.fail("unexpected EOF reading from output file", .{});
@ -945,6 +950,8 @@ fn updateNavCode(
) link.File.UpdateNavError!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const comp = zcu.comp;
const io = comp.io;
const ip = &zcu.intern_pool;
const nav = ip.getNav(nav_index);
@ -1012,8 +1019,8 @@ fn updateNavCode(
if (!sect.isZerofill()) {
const file_offset = sect.offset + atom.value;
macho_file.base.file.?.pwriteAll(code, file_offset) catch |err|
return macho_file.base.cgFail(nav_index, "failed to write output file: {s}", .{@errorName(err)});
macho_file.base.file.?.writePositionalAll(io, code, file_offset) catch |err|
return macho_file.base.cgFail(nav_index, "failed to write output file: {t}", .{err});
}
}
@ -1493,7 +1500,7 @@ fn writeTrampoline(tr_sym: Symbol, target: Symbol, macho_file: *MachO) !void {
.x86_64 => try x86_64.writeTrampolineCode(source_addr, target_addr, &buf),
else => @panic("TODO implement write trampoline for this CPU arch"),
};
try macho_file.base.file.?.pwriteAll(out, fileoff);
return macho_file.pwriteAll(out, fileoff);
}
pub fn getOrCreateMetadataForNav(

View file

@ -10,13 +10,13 @@ const mem = std.mem;
const MachO = @import("../MachO.zig");
pub fn readFatHeader(file: Io.File) !macho.fat_header {
return readFatHeaderGeneric(macho.fat_header, file, 0);
pub fn readFatHeader(io: Io, file: Io.File) !macho.fat_header {
return readFatHeaderGeneric(io, macho.fat_header, file, 0);
}
fn readFatHeaderGeneric(comptime Hdr: type, file: Io.File, offset: usize) !Hdr {
fn readFatHeaderGeneric(io: Io, comptime Hdr: type, file: Io.File, offset: usize) !Hdr {
var buffer: [@sizeOf(Hdr)]u8 = undefined;
const nread = try file.preadAll(&buffer, offset);
const nread = try file.readPositionalAll(io, &buffer, offset);
if (nread != buffer.len) return error.InputOutput;
var hdr = @as(*align(1) const Hdr, @ptrCast(&buffer)).*;
mem.byteSwapAllFields(Hdr, &hdr);
@ -29,12 +29,12 @@ pub const Arch = struct {
size: u32,
};
pub fn parseArchs(file: Io.File, fat_header: macho.fat_header, out: *[2]Arch) ![]const Arch {
pub fn parseArchs(io: Io, file: Io.File, fat_header: macho.fat_header, out: *[2]Arch) ![]const Arch {
var count: usize = 0;
var fat_arch_index: u32 = 0;
while (fat_arch_index < fat_header.nfat_arch and count < out.len) : (fat_arch_index += 1) {
const offset = @sizeOf(macho.fat_header) + @sizeOf(macho.fat_arch) * fat_arch_index;
const fat_arch = try readFatHeaderGeneric(macho.fat_arch, file, offset);
const fat_arch = try readFatHeaderGeneric(io, macho.fat_arch, file, offset);
// If we come across an architecture that we do not know how to handle, that's
// fine because we can keep looking for one that might match.
const arch: std.Target.Cpu.Arch = switch (fat_arch.cputype) {

View file

@ -9,7 +9,7 @@ pub fn ParallelHasher(comptime Hasher: type) type {
const hash_size = Hasher.digest_length;
return struct {
pub fn hash(self: Self, io: Io, file: Io.File, out: [][hash_size]u8, opts: struct {
pub fn hash(gpa: Allocator, io: Io, file: Io.File, out: [][hash_size]u8, opts: struct {
chunk_size: u64 = 0x4000,
max_file_size: ?u64 = null,
}) !void {
@ -22,11 +22,11 @@ pub fn ParallelHasher(comptime Hasher: type) type {
};
const chunk_size = std.math.cast(usize, opts.chunk_size) orelse return error.Overflow;
const buffer = try self.allocator.alloc(u8, chunk_size * out.len);
defer self.allocator.free(buffer);
const buffer = try gpa.alloc(u8, chunk_size * out.len);
defer gpa.free(buffer);
const results = try self.allocator.alloc(Io.File.ReadPositionalError!usize, out.len);
defer self.allocator.free(results);
const results = try gpa.alloc(Io.File.ReadPositionalError!usize, out.len);
defer gpa.free(results);
{
var group: Io.Group = .init;
@ -38,7 +38,8 @@ pub fn ParallelHasher(comptime Hasher: type) type {
file_size - fstart
else
chunk_size;
group.async(worker, .{
group.async(io, worker, .{
io,
file,
fstart,
buffer[fstart..][0..fsize],
@ -53,16 +54,15 @@ pub fn ParallelHasher(comptime Hasher: type) type {
}
fn worker(
io: Io,
file: Io.File,
fstart: usize,
buffer: []u8,
out: *[hash_size]u8,
err: *Io.File.ReadPositionalError!usize,
) void {
err.* = file.readPositionalAll(buffer, fstart);
err.* = file.readPositionalAll(io, buffer, fstart);
Hasher.hash(buffer, out, .{});
}
const Self = @This();
};
}

View file

@ -10,10 +10,10 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Pat
positionals.appendSliceAssumeCapacity(comp.link_inputs);
for (comp.c_object_table.keys()) |key| {
try positionals.append(try link.openObjectInput(diags, key.status.success.object_path));
try positionals.append(try link.openObjectInput(io, diags, key.status.success.object_path));
}
if (module_obj_path) |path| try positionals.append(try link.openObjectInput(diags, path));
if (module_obj_path) |path| try positionals.append(try link.openObjectInput(io, diags, path));
if (macho_file.getZigObject() == null and positionals.items.len == 1) {
// Instead of invoking a full-blown `-r` mode on the input which sadly will strip all
@ -24,10 +24,8 @@ pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Pat
return diags.fail("failed to open {f}: {s}", .{ path, @errorName(err) });
const stat = in_file.stat(io) catch |err|
return diags.fail("failed to stat {f}: {s}", .{ path, @errorName(err) });
const amt = in_file.copyRangeAll(0, macho_file.base.file.?, 0, stat.size) catch |err|
return diags.fail("failed to copy range of file {f}: {s}", .{ path, @errorName(err) });
if (amt != stat.size)
return diags.fail("unexpected short write in copy range of file {f}", .{path});
link.File.copyRangeAll2(io, in_file, macho_file.base.file.?, 0, 0, stat.size) catch |err|
return diags.fail("failed to copy range of file {f}: {t}", .{ path, err });
return;
}
@ -90,17 +88,17 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
positionals.appendSliceAssumeCapacity(comp.link_inputs);
for (comp.c_object_table.keys()) |key| {
try positionals.append(try link.openObjectInput(diags, key.status.success.object_path));
try positionals.append(try link.openObjectInput(io, diags, key.status.success.object_path));
}
if (module_obj_path) |path| try positionals.append(try link.openObjectInput(diags, path));
if (module_obj_path) |path| try positionals.append(try link.openObjectInput(io, diags, path));
if (comp.compiler_rt_strat == .obj) {
try positionals.append(try link.openObjectInput(diags, comp.compiler_rt_obj.?.full_object_path));
try positionals.append(try link.openObjectInput(io, diags, comp.compiler_rt_obj.?.full_object_path));
}
if (comp.ubsan_rt_strat == .obj) {
try positionals.append(try link.openObjectInput(diags, comp.ubsan_rt_obj.?.full_object_path));
try positionals.append(try link.openObjectInput(io, diags, comp.ubsan_rt_obj.?.full_object_path));
}
for (positionals.items) |link_input| {
@ -231,7 +229,7 @@ pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?
assert(writer.end == total_size);
try macho_file.setLength(io, total_size);
try macho_file.setLength(total_size);
try macho_file.pwriteAll(writer.buffered(), 0);
if (diags.hasErrors()) return error.LinkFailure;

View file

@ -246,6 +246,7 @@ pub fn flush(
const comp = linker.base.comp;
const diags = &comp.link_diags;
const gpa = comp.gpa;
const io = comp.io;
// We need to export the list of error names somewhere so that we can pretty-print them in the
// executor. This is not really an important thing though, so we can just dump it in any old
@ -287,8 +288,8 @@ pub fn flush(
};
// TODO endianness bug. use file writer and call writeSliceEndian instead
linker.base.file.?.writeAll(@ptrCast(linked_module)) catch |err|
return diags.fail("failed to write: {s}", .{@errorName(err)});
linker.base.file.?.writeStreamingAll(io, @ptrCast(linked_module)) catch |err|
return diags.fail("failed to write: {t}", .{err});
}
fn linkModule(arena: Allocator, module: []Word, progress: std.Progress.Node) ![]Word {

View file

@ -3016,8 +3016,10 @@ pub fn createEmpty(
}
fn openParseObjectReportingFailure(wasm: *Wasm, path: Path) void {
const diags = &wasm.base.comp.link_diags;
const obj = link.openObject(path, false, false) catch |err| {
const comp = wasm.base.comp;
const io = comp.io;
const diags = &comp.link_diags;
const obj = link.openObject(io, path, false, false) catch |err| {
switch (diags.failParse(path, "failed to open object: {t}", .{err})) {
error.LinkFailure => return,
}

View file

@ -108,6 +108,7 @@ pub fn deinit(f: *Flush, gpa: Allocator) void {
pub fn finish(f: *Flush, wasm: *Wasm) !void {
const comp = wasm.base.comp;
const io = comp.io;
const shared_memory = comp.config.shared_memory;
const diags = &comp.link_diags;
const gpa = comp.gpa;
@ -1067,7 +1068,7 @@ pub fn finish(f: *Flush, wasm: *Wasm) !void {
}
// Finally, write the entire binary into the file.
var file_writer = wasm.base.file.?.writer(&.{});
var file_writer = wasm.base.file.?.writer(io, &.{});
file_writer.interface.writeAll(binary_bytes.items) catch |err| switch (err) {
error.WriteFailed => return file_writer.err.?,
};

View file

@ -130,7 +130,7 @@ pub const Tbd = union(enum) {
pub const TapiError = error{
NotLibStub,
InputOutput,
} || yaml.YamlError || Io.File.PReadError;
} || yaml.YamlError || Io.File.ReadPositionalError;
pub const LibStub = struct {
/// Underlying memory for stub's contents.
@ -146,7 +146,7 @@ pub const LibStub = struct {
};
const source = try allocator.alloc(u8, filesize);
defer allocator.free(source);
const amt = try file.preadAll(source, 0);
const amt = try file.readPositionalAll(io, source, 0);
if (amt != filesize) return error.InputOutput;
var lib_stub = LibStub{

View file

@ -3677,7 +3677,7 @@ fn buildOutputType(
}
{
const root_prog_node = std.Progress.start(.{
const root_prog_node = std.Progress.start(io, .{
.disable_printing = (color == .off),
});
defer root_prog_node.end();