Merge pull request 'std: move memory locking and memory protection to process' (#30758) from mmap into master

Reviewed-on: https://codeberg.org/ziglang/zig/pulls/30758
This commit is contained in:
Andrew Kelley 2026-01-10 04:14:48 +01:00
commit ec25b13848
24 changed files with 312 additions and 272 deletions

View file

@ -519,10 +519,7 @@ pub fn build(b: *std.Build) !void {
.skip_libc = true,
.no_builtin = true,
.max_rss = switch (b.graph.host.result.os.tag) {
.freebsd => switch (b.graph.host.result.cpu.arch) {
.x86_64 => 743_802_470,
else => 800_000_000,
},
.freebsd => 800_000_000,
.linux => switch (b.graph.host.result.cpu.arch) {
.aarch64 => 639_565_414,
.loongarch64 => 598_884_352,

View file

@ -1320,7 +1320,7 @@ pub const MemoryMappedList = struct {
const ptr = try std.posix.mmap(
null,
capacity,
std.posix.PROT.READ | std.posix.PROT.WRITE,
.{ .READ = true, .WRITE = true },
.{ .TYPE = .SHARED },
file.handle,
0,

View file

@ -422,7 +422,7 @@ fn prepareTables(fuzz: *Fuzz, run_step: *Step.Run, coverage_id: u64) error{ OutO
const mapped_memory = std.posix.mmap(
null,
file_size,
std.posix.PROT.READ,
.{ .READ = true },
.{ .TYPE = .SHARED },
coverage_file.handle,
0,

View file

@ -1535,7 +1535,7 @@ const LinuxThreadImpl = struct {
const mapped = posix.mmap(
null,
map_bytes,
posix.PROT.NONE,
.{},
.{ .TYPE = .PRIVATE, .ANONYMOUS = true },
-1,
0,
@ -1551,14 +1551,14 @@ const LinuxThreadImpl = struct {
assert(mapped.len >= map_bytes);
errdefer posix.munmap(mapped);
// map everything but the guard page as read/write
posix.mprotect(
@alignCast(mapped[guard_offset..]),
posix.PROT.READ | posix.PROT.WRITE,
) catch |err| switch (err) {
error.AccessDenied => unreachable,
else => |e| return e,
};
// Map everything but the guard page as read/write.
const guarded: []align(std.heap.page_size_min) u8 = @alignCast(mapped[guard_offset..]);
const protection: posix.PROT = .{ .READ = true, .WRITE = true };
switch (posix.errno(posix.system.mprotect(guarded.ptr, guarded.len, protection))) {
.SUCCESS => {},
.NOMEM => return error.OutOfMemory,
else => |err| return posix.unexpectedErrno(err),
}
// Prepare the TLS segment and prepare a user_desc struct when needed on x86
var tls_ptr = linux.tls.prepareArea(mapped[tls_offset..]);

View file

@ -1672,10 +1672,10 @@ pub const MCL = switch (native_os) {
// https://github.com/NetBSD/src/blob/fd2741deca927c18e3ba15acdf78b8b14b2abe36/sys/sys/mman.h#L179
// https://github.com/openbsd/src/blob/39404228f6d36c0ca4be5f04ab5385568ebd6aa3/sys/sys/mman.h#L129
// https://github.com/illumos/illumos-gate/blob/5280477614f83fea20fc938729df6adb3e44340d/usr/src/uts/common/sys/mman.h#L343
.freebsd, .dragonfly, .netbsd, .openbsd, .illumos => packed struct(c_int) {
CURRENT: bool = 0,
FUTURE: bool = 0,
_: std.meta.Int(.unsigned, @bitSizeOf(c_int) - 2) = 0,
.freebsd, .dragonfly, .netbsd, .openbsd, .illumos => packed struct(u32) {
CURRENT: bool = false,
FUTURE: bool = false,
_: u30 = 0,
},
else => void,
};
@ -1887,32 +1887,13 @@ pub const PROT = switch (native_os) {
.linux => linux.PROT,
.emscripten => emscripten.PROT,
// https://github.com/SerenityOS/serenity/blob/6d59d4d3d9e76e39112842ec487840828f1c9bfe/Kernel/API/POSIX/sys/mman.h#L28-L31
.openbsd, .haiku, .dragonfly, .netbsd, .illumos, .freebsd, .windows, .serenity => struct {
/// page can not be accessed
pub const NONE = 0x0;
/// page can be read
pub const READ = 0x1;
/// page can be written
pub const WRITE = 0x2;
/// page can be executed
pub const EXEC = 0x4;
},
.driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos => struct {
/// [MC2] no permissions
pub const NONE: vm_prot_t = 0x00;
/// [MC2] pages can be read
pub const READ: vm_prot_t = 0x01;
/// [MC2] pages can be written
pub const WRITE: vm_prot_t = 0x02;
/// [MC2] pages can be executed
pub const EXEC: vm_prot_t = 0x04;
/// When a caller finds that they cannot obtain write permission on a
/// mapped entry, the following flag can be used. The entry will be
/// made "needs copy" effectively copying the object (using COW),
/// and write permission will be added to the maximum protections for
/// the associated entry.
pub const COPY: vm_prot_t = 0x10;
.openbsd, .haiku, .dragonfly, .netbsd, .illumos, .freebsd, .windows, .serenity => packed struct(u32) {
READ: bool = false,
WRITE: bool = false,
EXEC: bool = false,
_: u29 = 0,
},
.driverkit, .ios, .maccatalyst, .macos, .tvos, .visionos, .watchos => vm_prot_t,
else => void,
};
@ -10349,7 +10330,7 @@ pub extern "c" fn getgrgid(gid: gid_t) ?*group;
pub extern "c" fn getgrgid_r(gid: gid_t, grp: *group, buf: [*]u8, buflen: usize, result: *?*group) c_int;
pub extern "c" fn getrlimit64(resource: rlimit_resource, rlim: *rlimit) c_int;
pub extern "c" fn lseek64(fd: fd_t, offset: i64, whence: c_int) i64;
pub extern "c" fn mmap64(addr: ?*align(page_size) anyopaque, len: usize, prot: c_uint, flags: c_uint, fd: fd_t, offset: i64) *anyopaque;
pub extern "c" fn mmap64(addr: ?*align(page_size) anyopaque, len: usize, prot: PROT, flags: c_uint, fd: fd_t, offset: i64) *anyopaque;
pub extern "c" fn open64(path: [*:0]const u8, oflag: O, ...) c_int;
pub extern "c" fn openat64(fd: c_int, path: [*:0]const u8, oflag: O, ...) c_int;
pub extern "c" fn pread64(fd: fd_t, buf: [*]u8, nbyte: usize, offset: i64) isize;
@ -10478,7 +10459,7 @@ pub const mlock = switch (native_os) {
};
pub const mlock2 = switch (native_os) {
linux => private.mlock2,
.linux => private.mlock2,
else => {},
};
@ -10667,10 +10648,10 @@ pub extern "c" fn writev(fd: c_int, iov: [*]const iovec_const, iovcnt: c_uint) i
pub extern "c" fn pwritev(fd: c_int, iov: [*]const iovec_const, iovcnt: c_uint, offset: off_t) isize;
pub extern "c" fn write(fd: fd_t, buf: [*]const u8, nbyte: usize) isize;
pub extern "c" fn pwrite(fd: fd_t, buf: [*]const u8, nbyte: usize, offset: off_t) isize;
pub extern "c" fn mmap(addr: ?*align(page_size) anyopaque, len: usize, prot: c_uint, flags: MAP, fd: fd_t, offset: off_t) *anyopaque;
pub extern "c" fn mmap(addr: ?*align(page_size) anyopaque, len: usize, prot: PROT, flags: MAP, fd: fd_t, offset: off_t) *anyopaque;
pub extern "c" fn munmap(addr: *align(page_size) const anyopaque, len: usize) c_int;
pub extern "c" fn mremap(addr: ?*align(page_size) const anyopaque, old_len: usize, new_len: usize, flags: MREMAP, ...) *anyopaque;
pub extern "c" fn mprotect(addr: *align(page_size) anyopaque, len: usize, prot: c_uint) c_int;
pub extern "c" fn mprotect(addr: *align(page_size) anyopaque, len: usize, prot: PROT) c_int;
pub extern "c" fn link(oldpath: [*:0]const u8, newpath: [*:0]const u8) c_int;
pub extern "c" fn linkat(oldfd: fd_t, oldpath: [*:0]const u8, newfd: fd_t, newpath: [*:0]const u8, flags: c_uint) c_int;
pub extern "c" fn unlink(path: [*:0]const u8) c_int;

View file

@ -808,7 +808,7 @@ pub const task_vm_info = extern struct {
pub const task_vm_info_data_t = task_vm_info;
pub const vm_prot_t = c_int;
pub const vm_prot_t = std.macho.vm_prot_t;
pub const boolean_t = c_int;
pub extern "c" fn mach_vm_protect(

View file

@ -442,7 +442,7 @@ fn loadInner(
break :mapped std.posix.mmap(
null,
file_len,
std.posix.PROT.READ,
.{ .READ = true },
.{ .TYPE = .SHARED },
elf_file.handle,
0,

View file

@ -526,7 +526,7 @@ fn mapDebugInfoFile(io: Io, path: []const u8) ![]align(std.heap.page_size_min) c
return posix.mmap(
null,
file_len,
posix.PROT.READ,
.{ .READ = true },
.{ .TYPE = .SHARED },
file.handle,
0,

View file

@ -631,7 +631,7 @@ fn mapDebugInfoFile(io: Io, path: []const u8) ![]align(std.heap.page_size_min) c
return posix.mmap(
null,
file_len,
posix.PROT.READ,
.{ .READ = true },
.{ .TYPE = .SHARED },
file.handle,
0,

View file

@ -238,7 +238,7 @@ pub const ElfDynLib = struct {
const file_bytes = try posix.mmap(
null,
mem.alignForward(usize, size, page_size),
posix.PROT.READ,
.{ .READ = true },
.{ .TYPE = .PRIVATE },
file.handle,
0,
@ -276,7 +276,7 @@ pub const ElfDynLib = struct {
const all_loaded_mem = try posix.mmap(
null,
virt_addr_end,
posix.PROT.NONE,
.{},
.{ .TYPE = .PRIVATE, .ANONYMOUS = true },
-1,
0,
@ -302,7 +302,7 @@ pub const ElfDynLib = struct {
const extra_bytes = (base + ph.p_vaddr) - aligned_addr;
const extended_memsz = mem.alignForward(usize, ph.p_memsz + extra_bytes, page_size);
const ptr = @as([*]align(std.heap.page_size_min) u8, @ptrFromInt(aligned_addr));
const prot = elfToMmapProt(ph.p_flags);
const prot = elfToProt(ph.p_flags);
if ((ph.p_flags & elf.PF_W) == 0) {
// If it does not need write access, it can be mapped from the fd.
_ = try posix.mmap(
@ -531,12 +531,12 @@ pub const ElfDynLib = struct {
return null;
}
fn elfToMmapProt(elf_prot: u64) u32 {
var result: u32 = posix.PROT.NONE;
if ((elf_prot & elf.PF_R) != 0) result |= posix.PROT.READ;
if ((elf_prot & elf.PF_W) != 0) result |= posix.PROT.WRITE;
if ((elf_prot & elf.PF_X) != 0) result |= posix.PROT.EXEC;
return result;
/// Translate ELF program-header flags (PF_R/PF_W/PF_X) into mmap protection bits.
fn elfToProt(elf_prot: u64) posix.PROT {
    var prot: posix.PROT = .{};
    if (elf_prot & elf.PF_R != 0) prot.READ = true;
    if (elf_prot & elf.PF_W != 0) prot.WRITE = true;
    if (elf_prot & elf.PF_X != 0) prot.EXEC = true;
    return prot;
}
};

View file

@ -96,7 +96,7 @@ pub fn map(n: usize, alignment: mem.Alignment) ?[*]u8 {
const slice = posix.mmap(
hint,
overalloc_len,
posix.PROT.READ | posix.PROT.WRITE,
.{ .READ = true, .WRITE = true },
.{ .TYPE = .PRIVATE, .ANONYMOUS = true },
-1,
0,

View file

@ -9,7 +9,19 @@ const Allocator = mem.Allocator;
pub const cpu_type_t = c_int;
pub const cpu_subtype_t = c_int;
pub const vm_prot_t = c_int;
/// Mach virtual-memory protection bits (`vm_prot_t`).
/// Bit positions mirror the integer constants this type replaced:
/// READ=0x01, WRITE=0x02, EXEC=0x04, COPY=0x10 (bit 3 is unassigned padding).
pub const vm_prot_t = packed struct(u32) {
    /// Pages can be read.
    READ: bool = false,
    /// Pages can be written.
    WRITE: bool = false,
    /// Pages can be executed.
    EXEC: bool = false,
    // Bit 3 (0x8) carries no meaning here; padding so COPY lands on 0x10.
    _: u1 = 0,
    /// When a caller finds that they cannot obtain write permission on a
    /// mapped entry, the following flag can be used. The entry will be
    /// made "needs copy" effectively copying the object (using COW),
    /// and write permission will be added to the maximum protections for
    /// the associated entry.
    COPY: bool = false,
    __: u27 = 0,
};
pub const mach_header = extern struct {
magic: u32,
@ -648,10 +660,10 @@ pub const segment_command_64 = extern struct {
filesize: u64 = 0,
/// maximum VM protection
maxprot: vm_prot_t = PROT.NONE,
maxprot: vm_prot_t = .{},
/// initial VM protection
initprot: vm_prot_t = PROT.NONE,
initprot: vm_prot_t = .{},
/// number of sections in segment
nsects: u32 = 0,
@ -662,7 +674,7 @@ pub const segment_command_64 = extern struct {
}
/// Returns true when the segment's initial VM protection allows writing.
pub fn isWriteable(seg: segment_command_64) bool {
    // `vm_prot_t` is a packed struct whose write bit is named `WRITE`
    // (uppercase); `.write` does not exist and would fail to compile.
    return seg.initprot.WRITE;
}
};

View file

@ -331,13 +331,14 @@ pub const POLL = struct {
pub const RDBAND = 0x080;
};
pub const PROT = struct {
pub const NONE = 0x0;
pub const READ = 0x1;
pub const WRITE = 0x2;
pub const EXEC = 0x4;
pub const GROWSDOWN = 0x01000000;
pub const GROWSUP = 0x02000000;
/// Page protection flags for mmap/mprotect, as a packed bit set.
/// Bit layout matches the integer constants this struct replaced:
/// READ=0x1, WRITE=0x2, EXEC=0x4, GROWSDOWN=0x01000000, GROWSUP=0x02000000.
pub const PROT = packed struct(u32) {
    /// Page can be read.
    READ: bool = false,
    /// Page can be written.
    WRITE: bool = false,
    /// Page can be executed.
    EXEC: bool = false,
    _: u21 = 0,
    /// mprotect flag: extend change to start of growsdown vma.
    GROWSDOWN: bool = false,
    /// mprotect flag: extend change to end of growsup vma.
    GROWSUP: bool = false,
    __: u6 = 0,
};
pub const rlim_t = u64;

View file

@ -986,13 +986,13 @@ pub fn pivot_root(new_root: [*:0]const u8, put_old: [*:0]const u8) usize {
return syscall2(.pivot_root, @intFromPtr(new_root), @intFromPtr(put_old));
}
pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: MAP, fd: i32, offset: i64) usize {
pub fn mmap(address: ?[*]u8, length: usize, prot: PROT, flags: MAP, fd: i32, offset: i64) usize {
if (@hasField(SYS, "mmap2")) {
return syscall6(
.mmap2,
@intFromPtr(address),
length,
prot,
@as(u32, @bitCast(prot)),
@as(u32, @bitCast(flags)),
@bitCast(@as(isize, fd)),
@truncate(@as(u64, @bitCast(offset)) / std.heap.pageSize()),
@ -1005,7 +1005,7 @@ pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: MAP, fd: i32, of
@intFromPtr(&[_]usize{
@intFromPtr(address),
length,
prot,
@as(u32, @bitCast(prot)),
@as(u32, @bitCast(flags)),
@bitCast(@as(isize, fd)),
@as(u64, @bitCast(offset)),
@ -1014,7 +1014,7 @@ pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: MAP, fd: i32, of
.mmap,
@intFromPtr(address),
length,
prot,
@as(u32, @bitCast(prot)),
@as(u32, @bitCast(flags)),
@bitCast(@as(isize, fd)),
@as(u64, @bitCast(offset)),
@ -1022,8 +1022,8 @@ pub fn mmap(address: ?[*]u8, length: usize, prot: usize, flags: MAP, fd: i32, of
}
}
pub fn mprotect(address: [*]const u8, length: usize, protection: usize) usize {
return syscall3(.mprotect, @intFromPtr(address), length, protection);
pub fn mprotect(address: [*]const u8, length: usize, protection: PROT) usize {
return syscall3(.mprotect, @intFromPtr(address), length, @as(u32, @bitCast(protection)));
}
pub fn mremap(old_addr: ?[*]const u8, old_len: usize, new_len: usize, flags: MREMAP, new_addr: ?[*]const u8) usize {
@ -3616,24 +3616,30 @@ pub const FUTEX2_FLAGS = packed struct(u32) {
_undefined: u24 = 0,
};
pub const PROT = struct {
/// page can not be accessed
pub const NONE = 0x0;
/// page can be read
pub const READ = 0x1;
/// page can be written
pub const WRITE = 0x2;
/// page can be executed
pub const EXEC = 0x4;
/// page may be used for atomic ops
pub const SEM = switch (native_arch) {
.mips, .mipsel, .mips64, .mips64el, .xtensa, .xtensaeb => 0x10,
else => 0x8,
};
/// mprotect flag: extend change to start of growsdown vma
pub const GROWSDOWN = 0x01000000;
/// mprotect flag: extend change to end of growsup vma
pub const GROWSUP = 0x02000000;
/// Page protection flags for mmap/mprotect. The layout is arch-dependent
/// only in where PROT_SEM lives: 0x10 on mips/xtensa, 0x8 elsewhere
/// (matching the integer constants this type replaced).
pub const PROT = switch (native_arch) {
    .mips, .mipsel, .mips64, .mips64el, .xtensa, .xtensaeb => packed struct(u32) {
        /// Page can be read.
        READ: bool = false,
        /// Page can be written.
        WRITE: bool = false,
        /// Page can be executed.
        EXEC: bool = false,
        // Bit 3 unused on these arches; SEM is 0x10.
        _: u1 = 0,
        /// Page may be used for atomic ops.
        SEM: bool = false,
        __: u19 = 0,
        /// mprotect flag: extend change to start of growsdown vma.
        GROWSDOWN: bool = false,
        /// mprotect flag: extend change to end of growsup vma.
        GROWSUP: bool = false,
        ___: u6 = 0,
    },
    else => packed struct(u32) {
        /// Page can be read.
        READ: bool = false,
        /// Page can be written.
        WRITE: bool = false,
        /// Page can be executed.
        EXEC: bool = false,
        /// Page may be used for atomic ops.
        SEM: bool = false,
        __: u20 = 0,
        /// mprotect flag: extend change to start of growsdown vma.
        GROWSDOWN: bool = false,
        /// mprotect flag: extend change to end of growsup vma.
        GROWSUP: bool = false,
        ___: u6 = 0,
    },
};
pub const FD_CLOEXEC = 1;

View file

@ -1526,7 +1526,7 @@ pub const SubmissionQueue = struct {
const mmap = try posix.mmap(
null,
size,
posix.PROT.READ | posix.PROT.WRITE,
.{ .READ = true, .WRITE = true },
.{ .TYPE = .SHARED, .POPULATE = true },
fd,
linux.IORING_OFF_SQ_RING,
@ -1540,7 +1540,7 @@ pub const SubmissionQueue = struct {
const mmap_sqes = try posix.mmap(
null,
size_sqes,
posix.PROT.READ | posix.PROT.WRITE,
.{ .READ = true, .WRITE = true },
.{ .TYPE = .SHARED, .POPULATE = true },
fd,
linux.IORING_OFF_SQES,
@ -1747,7 +1747,7 @@ pub fn setup_buf_ring(
const mmap = try posix.mmap(
null,
mmap_size,
posix.PROT.READ | posix.PROT.WRITE,
.{ .READ = true, .WRITE = true },
.{ .TYPE = .PRIVATE, .ANONYMOUS = true },
-1,
0,

View file

@ -568,7 +568,7 @@ pub fn initStatic(phdrs: []elf.Phdr) void {
}
inline fn mmap_tls(length: usize) usize {
const prot = linux.PROT.READ | linux.PROT.WRITE;
const prot: linux.PROT = .{ .READ = true, .WRITE = true };
const flags: linux.MAP = .{ .TYPE = .PRIVATE, .ANONYMOUS = true };
if (@hasField(linux.SYS, "mmap2")) {
@ -576,7 +576,7 @@ inline fn mmap_tls(length: usize) usize {
.mmap2,
0,
length,
prot,
@as(u32, @bitCast(prot)),
@as(u32, @bitCast(flags)),
@as(usize, @bitCast(@as(isize, -1))),
0,
@ -589,7 +589,7 @@ inline fn mmap_tls(length: usize) usize {
@intFromPtr(&[_]usize{
0,
length,
prot,
@as(u32, @bitCast(prot)),
@as(u32, @bitCast(flags)),
@as(usize, @bitCast(@as(isize, -1))),
0,
@ -598,7 +598,7 @@ inline fn mmap_tls(length: usize) usize {
.mmap,
0,
length,
prot,
@as(u32, @bitCast(prot)),
@as(u32, @bitCast(flags)),
@as(usize, @bitCast(@as(isize, -1))),
0,

View file

@ -3765,40 +3765,6 @@ pub fn NtFreeVirtualMemory(hProcess: HANDLE, addr: ?*PVOID, size: *SIZE_T, free_
};
}
pub const VirtualProtectError = error{
InvalidAddress,
Unexpected,
};
pub fn VirtualProtect(lpAddress: ?LPVOID, dwSize: SIZE_T, flNewProtect: DWORD, lpflOldProtect: *DWORD) VirtualProtectError!void {
// ntdll takes an extra level of indirection here
var addr = lpAddress;
var size = dwSize;
switch (ntdll.NtProtectVirtualMemory(GetCurrentProcess(), &addr, &size, flNewProtect, lpflOldProtect)) {
.SUCCESS => {},
.INVALID_ADDRESS => return error.InvalidAddress,
else => |st| return unexpectedStatus(st),
}
}
pub fn VirtualProtectEx(handle: HANDLE, addr: ?LPVOID, size: SIZE_T, new_prot: DWORD) VirtualProtectError!DWORD {
var old_prot: DWORD = undefined;
var out_addr = addr;
var out_size = size;
switch (ntdll.NtProtectVirtualMemory(
handle,
&out_addr,
&out_size,
new_prot,
&old_prot,
)) {
.SUCCESS => return old_prot,
.INVALID_ADDRESS => return error.InvalidAddress,
// TODO: map errors
else => |rc| return unexpectedStatus(rc),
}
}
pub const SetConsoleTextAttributeError = error{Unexpected};
pub fn SetConsoleTextAttribute(hConsoleOutput: HANDLE, wAttributes: WORD) SetConsoleTextAttributeError!void {

View file

@ -934,128 +934,14 @@ pub fn fanotify_markZ(
}
}
pub const MlockError = error{
PermissionDenied,
LockedMemoryLimitExceeded,
SystemResources,
} || UnexpectedError;
pub fn mlock(memory: []align(page_size_min) const u8) MlockError!void {
if (@TypeOf(system.mlock) == void)
@compileError("mlock not supported on this OS");
return switch (errno(system.mlock(memory.ptr, memory.len))) {
.SUCCESS => {},
.INVAL => unreachable, // unaligned, negative, runs off end of addrspace
.PERM => error.PermissionDenied,
.NOMEM => error.LockedMemoryLimitExceeded,
.AGAIN => error.SystemResources,
else => |err| unexpectedErrno(err),
};
}
pub fn mlock2(memory: []align(page_size_min) const u8, flags: MLOCK) MlockError!void {
if (@TypeOf(system.mlock2) == void)
@compileError("mlock2 not supported on this OS");
return switch (errno(system.mlock2(memory.ptr, memory.len, flags))) {
.SUCCESS => {},
.INVAL => unreachable, // bad memory or bad flags
.PERM => error.PermissionDenied,
.NOMEM => error.LockedMemoryLimitExceeded,
.AGAIN => error.SystemResources,
else => |err| unexpectedErrno(err),
};
}
pub fn munlock(memory: []align(page_size_min) const u8) MlockError!void {
if (@TypeOf(system.munlock) == void)
@compileError("munlock not supported on this OS");
return switch (errno(system.munlock(memory.ptr, memory.len))) {
.SUCCESS => {},
.INVAL => unreachable, // unaligned or runs off end of addr space
.PERM => return error.PermissionDenied,
.NOMEM => return error.LockedMemoryLimitExceeded,
.AGAIN => return error.SystemResources,
else => |err| unexpectedErrno(err),
};
}
pub fn mlockall(flags: MCL) MlockError!void {
if (@TypeOf(system.mlockall) == void)
@compileError("mlockall not supported on this OS");
return switch (errno(system.mlockall(flags))) {
.SUCCESS => {},
.INVAL => unreachable, // bad flags
.PERM => error.PermissionDenied,
.NOMEM => error.LockedMemoryLimitExceeded,
.AGAIN => error.SystemResources,
else => |err| unexpectedErrno(err),
};
}
pub fn munlockall() MlockError!void {
if (@TypeOf(system.munlockall) == void)
@compileError("munlockall not supported on this OS");
return switch (errno(system.munlockall())) {
.SUCCESS => {},
.PERM => error.PermissionDenied,
.NOMEM => error.LockedMemoryLimitExceeded,
.AGAIN => error.SystemResources,
else => |err| unexpectedErrno(err),
};
}
pub const MProtectError = error{
/// The memory cannot be given the specified access. This can happen, for example, if you
/// mmap(2) a file to which you have read-only access, then ask mprotect() to mark it
/// PROT_WRITE.
AccessDenied,
/// Changing the protection of a memory region would result in the total number of map
/// pings with distinct attributes (e.g., read versus read/write protection) exceeding the
/// allowed maximum. (For example, making the protection of a range PROT_READ in the mid
/// dle of a region currently protected as PROT_READ|PROT_WRITE would result in three map
/// pings: two read/write mappings at each end and a read-only mapping in the middle.)
OutOfMemory,
} || UnexpectedError;
pub fn mprotect(memory: []align(page_size_min) u8, protection: u32) MProtectError!void {
if (native_os == .windows) {
const win_prot: windows.DWORD = switch (@as(u3, @truncate(protection))) {
0b000 => windows.PAGE_NOACCESS,
0b001 => windows.PAGE_READONLY,
0b010 => unreachable, // +w -r not allowed
0b011 => windows.PAGE_READWRITE,
0b100 => windows.PAGE_EXECUTE,
0b101 => windows.PAGE_EXECUTE_READ,
0b110 => unreachable, // +w -r not allowed
0b111 => windows.PAGE_EXECUTE_READWRITE,
};
var old: windows.DWORD = undefined;
windows.VirtualProtect(memory.ptr, memory.len, win_prot, &old) catch |err| switch (err) {
error.InvalidAddress => return error.AccessDenied,
error.Unexpected => return error.Unexpected,
};
} else {
switch (errno(system.mprotect(memory.ptr, memory.len, protection))) {
.SUCCESS => return,
.INVAL => unreachable,
.ACCES => return error.AccessDenied,
.NOMEM => return error.OutOfMemory,
else => |err| return unexpectedErrno(err),
}
}
}
pub const MMapError = error{
/// The underlying filesystem of the specified file does not support memory mapping.
MemoryMappingNotSupported,
/// A file descriptor refers to a non-regular file. Or a file mapping was requested,
/// but the file descriptor is not open for reading. Or `MAP.SHARED` was requested
/// and `PROT_WRITE` is set, but the file descriptor is not open in `RDWR` mode.
/// Or `PROT_WRITE` is set, but the file is append-only.
AccessDenied,
/// The `prot` argument asks for `PROT_EXEC` but the mapped area belongs to a file on
/// a filesystem that was mounted no-exec.
PermissionDenied,
@ -1063,7 +949,6 @@ pub const MMapError = error{
ProcessFdQuotaExceeded,
SystemFdQuotaExceeded,
OutOfMemory,
/// Using FIXED_NOREPLACE flag and the process has already mapped memory at the given address
MappingAlreadyExists,
} || UnexpectedError;
@ -1076,8 +961,8 @@ pub const MMapError = error{
pub fn mmap(
ptr: ?[*]align(page_size_min) u8,
length: usize,
prot: u32,
flags: system.MAP,
prot: PROT,
flags: MAP,
fd: fd_t,
offset: u64,
) MMapError![]align(page_size_min) u8 {

View file

@ -172,7 +172,7 @@ test "mmap" {
const data = try posix.mmap(
null,
1234,
posix.PROT.READ | posix.PROT.WRITE,
.{ .READ = true, .WRITE = true },
.{ .TYPE = .PRIVATE, .ANONYMOUS = true },
-1,
0,
@ -214,7 +214,7 @@ test "mmap" {
const data = try posix.mmap(
null,
alloc_size,
posix.PROT.READ,
.{ .READ = true },
.{ .TYPE = .PRIVATE },
file.handle,
0,
@ -239,7 +239,7 @@ test "mmap" {
const data = try posix.mmap(
null,
alloc_size / 2,
posix.PROT.READ,
.{ .READ = true },
.{ .TYPE = .PRIVATE },
file.handle,
alloc_size / 2,

View file

@ -886,3 +886,195 @@ pub const SetCurrentDirError = error{
pub fn setCurrentDir(io: Io, dir: Io.Dir) !void {
return io.vtable.processSetCurrentDir(io.userdata, dir);
}
/// Errors returned by `lockMemory` and `lockMemoryAll`.
pub const LockMemoryError = error{
    /// The target OS exposes no memory-locking primitive, or the requested
    /// flag (e.g. on-fault locking) is unavailable on it.
    UnsupportedOperation,
    /// The caller lacks the privilege to lock memory (EPERM).
    PermissionDenied,
    /// Locking would exceed the per-process locked-memory limit (ENOMEM).
    LockedMemoryLimitExceeded,
    /// Some or all of the memory could not be locked right now (EAGAIN).
    SystemResources,
} || Io.UnexpectedError;
/// Options for `lockMemory`.
pub const LockMemoryOptions = struct {
    /// Lock pages that are currently resident and mark the entire range so
    /// that the remaining nonresident pages are locked when they are populated
    /// by a page fault.
    on_fault: bool = false,
};
/// Request part of the calling process's virtual address space to be in RAM,
/// preventing that memory from being paged to the swap area.
///
/// Corresponds to "mlock" or "mlock2" in libc.
///
/// `memory` must be page-aligned and within the address space; violating
/// that is a bug (EINVAL is treated as unreachable below).
///
/// See also:
/// * unlockMemory
pub fn lockMemory(memory: []align(std.heap.page_size_min) const u8, options: LockMemoryOptions) LockMemoryError!void {
    if (native_os == .windows) {
        // TODO call VirtualLock
        // NOTE(review): this branch falls through; on Windows neither mlock
        // symbol exists, so the function ends up returning
        // `UnsupportedOperation` — confirm that is the intended interim
        // behavior until VirtualLock is wired up.
    }
    // Prefer plain mlock when on-fault semantics are not requested; otherwise
    // fall back to mlock2, which carries the ONFAULT flag.
    if (!options.on_fault and @TypeOf(posix.system.mlock) != void) {
        switch (posix.errno(posix.system.mlock(memory.ptr, memory.len))) {
            .SUCCESS => return,
            .INVAL => |err| return std.Io.Threaded.errnoBug(err), // unaligned, negative, runs off end of addrspace
            .PERM => return error.PermissionDenied,
            .NOMEM => return error.LockedMemoryLimitExceeded,
            .AGAIN => return error.SystemResources,
            else => |err| return posix.unexpectedErrno(err),
        }
    }
    if (@TypeOf(posix.system.mlock2) != void) {
        const flags: posix.MLOCK = .{ .ONFAULT = options.on_fault };
        switch (posix.errno(posix.system.mlock2(memory.ptr, memory.len, flags))) {
            .SUCCESS => return,
            .INVAL => |err| return std.Io.Threaded.errnoBug(err), // unaligned, negative, runs off end of addrspace
            .PERM => return error.PermissionDenied,
            .NOMEM => return error.LockedMemoryLimitExceeded,
            .AGAIN => return error.SystemResources,
            else => |err| return posix.unexpectedErrno(err),
        }
    }
    return error.UnsupportedOperation;
}
/// Errors returned by `unlockMemory` and `unlockMemoryAll`.
pub const UnlockMemoryError = error{
    /// The caller lacks the required privilege (EPERM).
    PermissionDenied,
    /// ENOMEM from the kernel while changing lock state.
    OutOfMemory,
    /// The operation could not complete right now (EAGAIN).
    SystemResources,
} || Io.UnexpectedError;
/// Withdraw request for process's virtual address space to be in RAM.
///
/// Corresponds to "munlock" in libc. On systems that lack munlock this is a
/// no-op (on such systems `lockMemory` reports `UnsupportedOperation`, so
/// there is nothing to undo).
///
/// See also:
/// * `lockMemory`
pub fn unlockMemory(memory: []align(std.heap.page_size_min) const u8) UnlockMemoryError!void {
    if (@TypeOf(posix.system.munlock) == void) return;
    switch (posix.errno(posix.system.munlock(memory.ptr, memory.len))) {
        .SUCCESS => return,
        .INVAL => |err| return std.Io.Threaded.errnoBug(err), // unaligned or runs off end of addr space
        .PERM => return error.PermissionDenied,
        .NOMEM => return error.OutOfMemory,
        .AGAIN => return error.SystemResources,
        else => |err| return posix.unexpectedErrno(err),
    }
}
/// Options for `lockMemoryAll`; maps onto the MCL_* flags of mlockall.
pub const LockMemoryAllOptions = struct {
    /// Lock pages currently mapped into the process's address space.
    current: bool = false,
    /// Lock pages that become mapped into the process in the future.
    future: bool = false,
    /// Asserted to be used together with `current` or `future`, or both.
    on_fault: bool = false,
};
/// Lock the process's current and/or future address-space mappings in RAM.
///
/// Corresponds to "mlockall" in libc.
pub fn lockMemoryAll(options: LockMemoryAllOptions) LockMemoryError!void {
    if (@TypeOf(posix.system.mlockall) == void) return error.UnsupportedOperation;
    var flags: posix.MCL = .{
        .CURRENT = options.current,
        .FUTURE = options.future,
    };
    if (options.on_fault) {
        assert(options.current or options.future);
        // Not every OS defines MCL ONFAULT; report unsupported rather than
        // silently dropping the requested semantics.
        if (@hasField(posix.MCL, "ONFAULT")) {
            flags.ONFAULT = true;
        } else {
            return error.UnsupportedOperation;
        }
    }
    switch (posix.errno(posix.system.mlockall(flags))) {
        .SUCCESS => return,
        .INVAL => |err| return std.Io.Threaded.errnoBug(err),
        .PERM => return error.PermissionDenied,
        .NOMEM => return error.LockedMemoryLimitExceeded,
        .AGAIN => return error.SystemResources,
        else => |err| return posix.unexpectedErrno(err),
    }
}
/// Undo the effect of `lockMemoryAll`.
///
/// Corresponds to "munlockall" in libc. On systems that lack munlockall this
/// is a no-op.
pub fn unlockMemoryAll() UnlockMemoryError!void {
    if (@TypeOf(posix.system.munlockall) == void) return;
    switch (posix.errno(posix.system.munlockall())) {
        .SUCCESS => return,
        .PERM => return error.PermissionDenied,
        .NOMEM => return error.OutOfMemory,
        .AGAIN => return error.SystemResources,
        else => |err| return posix.unexpectedErrno(err),
    }
}
/// Errors returned by `protectMemory`.
pub const ProtectMemoryError = error{
    /// The target OS exposes no protection-changing primitive.
    UnsupportedOperation,
    /// The memory cannot be given the specified access. This can happen, for
    /// example, if you memory map a file to which you have read-only access,
    /// then use `protectMemory` to mark it writable.
    AccessDenied,
    /// Changing the protection of a memory region would result in the total
    /// number of mappings with distinct attributes exceeding the allowed
    /// maximum.
    OutOfMemory,
} || Io.UnexpectedError;
/// Requested page protections for `protectMemory`. The bit layout
/// (read = bit 0, write = bit 1, execute = bit 2) is relied upon by the
/// Windows branch of `protectMemory`, which `@bitCast`s this to a `u3`.
pub const ProtectMemoryOptions = packed struct(u3) {
    read: bool = false,
    write: bool = false,
    execute: bool = false,
};
/// Change the access protections of a page-aligned region of memory.
///
/// Corresponds to "mprotect" in libc, and to NtProtectVirtualMemory on
/// Windows.
pub fn protectMemory(
    memory: []align(std.heap.page_size_min) u8,
    options: ProtectMemoryOptions,
) ProtectMemoryError!void {
    if (native_os == .windows) {
        var addr = memory.ptr; // ntdll takes an extra level of indirection here
        var size = memory.len; // ntdll takes an extra level of indirection here
        var old: windows.PAGE = undefined;
        // Pseudo-handle for the current process: (HANDLE)-1.
        const current_process: windows.HANDLE = @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))));
        // Translate the read/write/execute bit set into the closest PAGE_*
        // protection; Windows has no write-without-read combination.
        const new: windows.PAGE = switch (@as(u3, @bitCast(options))) {
            0b000 => .{ .NOACCESS = true },
            0b001 => .{ .READONLY = true },
            0b010 => return error.AccessDenied, // +w -r not allowed
            0b011 => .{ .READWRITE = true },
            0b100 => .{ .EXECUTE = true },
            0b101 => .{ .EXECUTE_READ = true },
            0b110 => return error.AccessDenied, // +w -r not allowed
            0b111 => .{ .EXECUTE_READWRITE = true },
        };
        switch (windows.ntdll.NtProtectVirtualMemory(current_process, @ptrCast(&addr), &size, new, &old)) {
            .SUCCESS => return,
            .INVALID_ADDRESS => return error.AccessDenied,
            else => |st| return windows.unexpectedStatus(st),
        }
    } else if (posix.PROT != void) {
        const flags: posix.PROT = .{
            .READ = options.read,
            .WRITE = options.write,
            .EXEC = options.execute,
        };
        switch (posix.errno(posix.system.mprotect(memory.ptr, memory.len, flags))) {
            .SUCCESS => return,
            .INVAL => |err| return std.Io.Threaded.errnoBug(err),
            .ACCES => return error.AccessDenied,
            .NOMEM => return error.OutOfMemory,
            else => |err| return posix.unexpectedErrno(err),
        }
    }
    return error.UnsupportedOperation;
}
test lockMemory {
    var page: [std.heap.page_size_min]u8 align(std.heap.page_size_min) = undefined;
    // Memory locking may be unsupported or disallowed (e.g. RLIMIT_MEMLOCK)
    // in the test environment; skip rather than fail in that case.
    lockMemory(&page, .{}) catch return error.SkipZigTest;
    unlockMemory(&page) catch return error.SkipZigTest;
}
test lockMemoryAll {
    lockMemoryAll(.{ .current = true }) catch return error.SkipZigTest;
    unlockMemoryAll() catch return error.SkipZigTest;
}
test protectMemory {
    if (builtin.cpu.arch == .hexagon) return error.SkipZigTest; // TODO
    var page: [std.heap.page_size_min]u8 align(std.heap.page_size_min) = undefined;
    // Remove all access, then restore read/write so the page is usable again.
    protectMemory(&page, .{}) catch return error.SkipZigTest;
    protectMemory(&page, .{ .read = true, .write = true }) catch return error.SkipZigTest;
}

View file

@ -1758,10 +1758,10 @@ fn initSyntheticSections(self: *MachO) !void {
}
fn getSegmentProt(segname: []const u8) macho.vm_prot_t {
if (mem.eql(u8, segname, "__PAGEZERO")) return macho.PROT.NONE;
if (mem.eql(u8, segname, "__TEXT")) return macho.PROT.READ | macho.PROT.EXEC;
if (mem.eql(u8, segname, "__LINKEDIT")) return macho.PROT.READ;
return macho.PROT.READ | macho.PROT.WRITE;
if (mem.eql(u8, segname, "__PAGEZERO")) return .{};
if (mem.eql(u8, segname, "__TEXT")) return .{ .READ = true, .EXEC = true };
if (mem.eql(u8, segname, "__LINKEDIT")) return .{ .READ = true };
return .{ .READ = true, .WRITE = true };
}
fn getSegmentRank(segname: []const u8) u8 {
@ -3348,7 +3348,7 @@ fn initMetadata(self: *MachO, options: InitMetadataOptions) !void {
.filesize = filesize,
.vmaddr = base_vmaddr + 0x4000000,
.vmsize = filesize,
.prot = macho.PROT.READ | macho.PROT.EXEC,
.prot = .{ .READ = true, .EXEC = true },
});
}
@ -3360,7 +3360,7 @@ fn initMetadata(self: *MachO, options: InitMetadataOptions) !void {
.filesize = filesize,
.vmaddr = base_vmaddr + 0xc000000,
.vmsize = filesize,
.prot = macho.PROT.READ | macho.PROT.WRITE,
.prot = .{ .READ = true, .WRITE = true },
});
}
@ -3372,7 +3372,7 @@ fn initMetadata(self: *MachO, options: InitMetadataOptions) !void {
.filesize = filesize,
.vmaddr = base_vmaddr + 0x10000000,
.vmsize = filesize,
.prot = macho.PROT.READ | macho.PROT.WRITE,
.prot = .{ .READ = true, .WRITE = true },
});
}
@ -3381,7 +3381,7 @@ fn initMetadata(self: *MachO, options: InitMetadataOptions) !void {
self.zig_bss_seg_index = try self.addSegment("__BSS_ZIG", .{
.vmaddr = base_vmaddr + 0x14000000,
.vmsize = memsize,
.prot = macho.PROT.READ | macho.PROT.WRITE,
.prot = .{ .READ = true, .WRITE = true },
});
}
@ -3711,7 +3711,7 @@ pub fn addSegment(self: *MachO, name: []const u8, opts: struct {
vmsize: u64 = 0,
fileoff: u64 = 0,
filesize: u64 = 0,
prot: macho.vm_prot_t = macho.PROT.NONE,
prot: macho.vm_prot_t = .{},
}) error{OutOfMemory}!u8 {
const gpa = self.base.comp.gpa;
const index = @as(u8, @intCast(self.segments.items.len));
@ -4903,7 +4903,7 @@ pub const MachTask = extern struct {
try task.setCurrProtection(
address,
buf.len,
std.c.PROT.READ | std.c.PROT.WRITE | std.c.PROT.COPY,
.{ .READ = true, .WRITE = true, .COPY = true },
);
defer {
task.setCurrProtection(address, buf.len, curr_prot) catch {};

View file

@ -94,8 +94,8 @@ pub fn initMetadata(self: *DebugSymbols, macho_file: *MachO) !void {
self.linkedit_segment_cmd_index = @intCast(self.segments.items.len);
try self.segments.append(self.allocator, .{
.segname = makeStaticString("__LINKEDIT"),
.maxprot = macho.PROT.READ,
.initprot = macho.PROT.READ,
.maxprot = .{ .READ = true },
.initprot = .{ .READ = true },
.cmdsize = @sizeOf(macho.segment_command_64),
});
}

View file

@ -539,7 +539,7 @@ fn createSegment(macho_file: *MachO) !void {
const gpa = macho_file.base.comp.gpa;
// For relocatable, we only ever need a single segment so create it now.
const prot: macho.vm_prot_t = macho.PROT.READ | macho.PROT.WRITE | macho.PROT.EXEC;
const prot: macho.vm_prot_t = .{ .READ = true, .WRITE = true, .EXEC = true };
try macho_file.segments.append(gpa, .{
.cmdsize = @sizeOf(macho.segment_command_64),
.segname = MachO.makeStaticString(""),

View file

@ -1049,7 +1049,7 @@ pub fn ensureTotalCapacityPrecise(mf: *MappedFile, new_capacity: usize) !void {
} else mf.contents = try std.posix.mmap(
null,
aligned_capacity,
std.posix.PROT.READ | std.posix.PROT.WRITE,
.{ .READ = true, .WRITE = true },
.{ .TYPE = if (is_linux) .SHARED_VALIDATE else .SHARED },
mf.file.handle,
0,