std.heap: delete ThreadSafeAllocator

We can keep ourselves safe from those threads perfectly well without you, thanks!
This commit is contained in:
Justus Klausecker 2026-02-26 21:12:22 +01:00
parent 4e2cec265d
commit bbc77df3eb
2 changed files with 0 additions and 64 deletions

View file

@ -13,7 +13,6 @@ pub const ArenaAllocator = @import("heap/ArenaAllocator.zig");
pub const SmpAllocator = @import("heap/SmpAllocator.zig");
pub const FixedBufferAllocator = @import("heap/FixedBufferAllocator.zig");
pub const PageAllocator = @import("heap/PageAllocator.zig");
pub const ThreadSafeAllocator = @import("heap/ThreadSafeAllocator.zig");
pub const WasmAllocator = if (builtin.single_threaded) BrkAllocator else @compileError("unimplemented");
pub const BrkAllocator = @import("heap/BrkAllocator.zig");
@ -1009,7 +1008,6 @@ test {
_ = ArenaAllocator;
_ = GeneralPurposeAllocator;
_ = FixedBufferAllocator;
_ = ThreadSafeAllocator;
if (builtin.single_threaded) {
if (builtin.cpu.arch.isWasm() or (builtin.os.tag == .linux and !builtin.link_libc)) {
_ = brk_allocator;

View file

@ -1,62 +0,0 @@
//! Deprecated. Thread safety should be built into each Allocator instance
//! directly rather than trying to do this "composable allocators" thing.
//!
//! Wraps a child allocator and serializes every vtable operation behind an
//! `Io.Mutex`, so one wrapper instance may be shared across concurrent tasks.

const ThreadSafeAllocator = @This();

const std = @import("../std.zig");
const Io = std.Io;
const Allocator = std.mem.Allocator;

/// The underlying allocator; every call into it is guarded by `mutex`.
child_allocator: Allocator,
/// `Io` instance used to perform the mutex lock/unlock operations.
io: Io,
/// Guards all calls into `child_allocator`.
mutex: Io.Mutex = .init,

/// Returns an `Allocator` interface whose operations are serialized
/// through this wrapper's mutex.
pub fn allocator(self: *ThreadSafeAllocator) Allocator {
    return .{
        .ptr = self,
        .vtable = &.{
            .alloc = alloc,
            .resize = resize,
            .remap = remap,
            .free = free,
        },
    };
}

/// Vtable shim: locked forward to `child_allocator.rawAlloc`.
fn alloc(context: *anyopaque, len: usize, alignment: std.mem.Alignment, return_address: usize) ?[*]u8 {
    const tsa: *ThreadSafeAllocator = @ptrCast(@alignCast(context));
    const io = tsa.io;
    tsa.mutex.lockUncancelable(io);
    defer tsa.mutex.unlock(io);
    return tsa.child_allocator.rawAlloc(len, alignment, return_address);
}

/// Vtable shim: locked forward to `child_allocator.rawResize`.
fn resize(context: *anyopaque, memory: []u8, alignment: std.mem.Alignment, new_len: usize, return_address: usize) bool {
    const tsa: *ThreadSafeAllocator = @ptrCast(@alignCast(context));
    const io = tsa.io;
    tsa.mutex.lockUncancelable(io);
    defer tsa.mutex.unlock(io);
    return tsa.child_allocator.rawResize(memory, alignment, new_len, return_address);
}

/// Vtable shim: locked forward to `child_allocator.rawRemap`.
fn remap(context: *anyopaque, memory: []u8, alignment: std.mem.Alignment, new_len: usize, return_address: usize) ?[*]u8 {
    const tsa: *ThreadSafeAllocator = @ptrCast(@alignCast(context));
    const io = tsa.io;
    tsa.mutex.lockUncancelable(io);
    defer tsa.mutex.unlock(io);
    return tsa.child_allocator.rawRemap(memory, alignment, new_len, return_address);
}

/// Vtable shim: locked forward to `child_allocator.rawFree`.
fn free(context: *anyopaque, memory: []u8, alignment: std.mem.Alignment, return_address: usize) void {
    const tsa: *ThreadSafeAllocator = @ptrCast(@alignCast(context));
    const io = tsa.io;
    tsa.mutex.lockUncancelable(io);
    defer tsa.mutex.unlock(io);
    return tsa.child_allocator.rawFree(memory, alignment, return_address);
}