improvements & fixes for general purpose allocator integration

* std.Mutex API is improved to not have init() / deinit(). This API is designed
  to support static initialization and does not require any resource cleanup.
  This also happens to work around a stage1 behavior that wasn't letting the new
  allocator mutex code get compiled.
* the general purpose allocator now returns a bool from deinit() which tells
  whether there were any leaks. This value is used by the test runner to fail
  the tests if there are any.
* the self-hosted compiler is updated to use the general purpose allocator when
  not linking against libc.

branch master
parent cc17f84ccc
commit 0347df82e8
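Usage sketch (not part of the diff below): this is roughly the pattern the change enables, assuming the std lib API at this commit. `std.Mutex{}` is statically initialized with no deinit(), and the general purpose allocator's deinit() reports leaks as a bool. The names `shared_mutex` and `gpa` are illustrative only.

const std = @import("std");

// Statically initialized; no init()/deinit() pair is needed anymore.
var shared_mutex = std.Mutex{};

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer {
        // deinit() now returns true if any allocations were leaked.
        const leaked = gpa.deinit();
        if (leaked) std.debug.print("memory leaked\n", .{});
    }
    const allocator = &gpa.allocator;

    const buf = try allocator.alloc(u8, 16);
    defer allocator.free(buf);

    const held = shared_mutex.acquire();
    defer held.release();
}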
@@ -22,7 +22,7 @@ pub fn Queue(comptime T: type) type {
             return Self{
                 .head = null,
                 .tail = null,
-                .mutex = std.Mutex.init(),
+                .mutex = std.Mutex{},
             };
         }
@@ -47,7 +47,7 @@ pub const LineInfo = struct {
     }
 };

-var stderr_mutex = std.Mutex.init();
+var stderr_mutex = std.Mutex{};

 /// Deprecated. Use `std.log` functions for logging or `std.debug.print` for
 /// "printf debugging".
@@ -232,7 +232,7 @@ pub fn panic(comptime format: []const u8, args: anytype) noreturn {
 var panicking: u8 = 0;

 // Locked to avoid interleaving panic messages from multiple threads.
-var panic_mutex = std.Mutex.init();
+var panic_mutex = std.Mutex{};

 /// Counts how many times the panic handler is invoked by this thread.
 /// This is used to catch and handle panics triggered by the panic handler.
@@ -464,7 +464,13 @@ pub const HeapAllocator = switch (builtin.os.tag) {
             return buf;
         }

-        fn resize(allocator: *Allocator, buf: []u8, new_size: usize, len_align: u29) error{OutOfMemory}!usize {
+        fn resize(
+            allocator: *Allocator,
+            buf: []u8,
+            buf_align: u29,
+            new_size: usize,
+            len_align: u29,
+        ) error{OutOfMemory}!usize {
             const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
             if (new_size == 0) {
                 os.windows.HeapFree(self.heap_handle.?, 0, @intToPtr(*c_void, getRecordPtr(buf).*));
@@ -140,7 +140,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
     const total_requested_bytes_init = if (config.enable_memory_limit) @as(usize, 0) else {};
     const requested_memory_limit_init = if (config.enable_memory_limit) @as(usize, math.maxInt(usize)) else {};

-    const mutex_init = if (config.thread_safe) std.Mutex.init() else std.Mutex.Dummy.init();
+    const mutex_init = if (config.thread_safe) std.Mutex{} else std.mutex.Dummy{};

     const stack_n = config.stack_trace_frames;
     const one_trace_size = @sizeOf(usize) * stack_n;
@@ -250,7 +250,8 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             bucket: *BucketHeader,
             size_class: usize,
             used_bits_count: usize,
-        ) void {
+        ) bool {
+            var leaks = false;
             var used_bits_byte: usize = 0;
             while (used_bits_byte < used_bits_count) : (used_bits_byte += 1) {
                 const used_byte = bucket.usedBits(used_bits_byte).*;
@@ -268,22 +269,26 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                                 .alloc,
                             );
                             std.debug.dumpStackTrace(stack_trace);
+                            leaks = true;
                         }
                         if (bit_index == math.maxInt(u3))
                             break;
                     }
                 }
             }
+            return leaks;
         }

-        pub fn deinit(self: *Self) void {
+        /// Returns whether there were leaks.
+        pub fn deinit(self: *Self) bool {
+            var leaks = false;
             for (self.buckets) |optional_bucket, bucket_i| {
                 const first_bucket = optional_bucket orelse continue;
                 const size_class = @as(usize, 1) << @intCast(u6, bucket_i);
                 const used_bits_count = usedBitsCount(size_class);
                 var bucket = first_bucket;
                 while (true) {
-                    detectLeaksInBucket(bucket, size_class, used_bits_count);
+                    leaks = detectLeaksInBucket(bucket, size_class, used_bits_count) or leaks;
                     bucket = bucket.next;
                     if (bucket == first_bucket)
                         break;
@@ -292,9 +297,11 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
             for (self.large_allocations.items()) |*large_alloc| {
                 std.debug.print("\nMemory leak detected:\n", .{});
                 large_alloc.value.dumpStackTrace();
+                leaks = true;
             }
             self.large_allocations.deinit(self.backing_allocator);
             self.* = undefined;
+            return leaks;
         }

         fn collectStackTrace(first_trace_addr: usize, addresses: *[stack_n]usize) void {
@@ -600,7 +607,7 @@ const test_config = Config{};

 test "small allocations - free in same order" {
     var gpda = GeneralPurposeAllocator(test_config){};
-    defer gpda.deinit();
+    defer std.testing.expect(!gpda.deinit());
     const allocator = &gpda.allocator;

     var list = std.ArrayList(*u64).init(std.testing.allocator);
@@ -619,7 +626,7 @@ test "small allocations - free in same order" {

 test "small allocations - free in reverse order" {
     var gpda = GeneralPurposeAllocator(test_config){};
-    defer gpda.deinit();
+    defer std.testing.expect(!gpda.deinit());
     const allocator = &gpda.allocator;

     var list = std.ArrayList(*u64).init(std.testing.allocator);
@@ -638,7 +645,7 @@ test "small allocations - free in reverse order" {

 test "large allocations" {
     var gpda = GeneralPurposeAllocator(test_config){};
-    defer gpda.deinit();
+    defer std.testing.expect(!gpda.deinit());
     const allocator = &gpda.allocator;

     const ptr1 = try allocator.alloc(u64, 42768);
@@ -651,7 +658,7 @@ test "large allocations" {

 test "realloc" {
     var gpda = GeneralPurposeAllocator(test_config){};
-    defer gpda.deinit();
+    defer std.testing.expect(!gpda.deinit());
     const allocator = &gpda.allocator;

     var slice = try allocator.alignedAlloc(u8, @alignOf(u32), 1);
@@ -673,7 +680,7 @@ test "realloc" {

 test "shrink" {
     var gpda = GeneralPurposeAllocator(test_config){};
-    defer gpda.deinit();
+    defer std.testing.expect(!gpda.deinit());
     const allocator = &gpda.allocator;

     var slice = try allocator.alloc(u8, 20);
@@ -696,7 +703,7 @@ test "shrink" {

 test "large object - grow" {
     var gpda = GeneralPurposeAllocator(test_config){};
-    defer gpda.deinit();
+    defer std.testing.expect(!gpda.deinit());
     const allocator = &gpda.allocator;

     var slice1 = try allocator.alloc(u8, page_size * 2 - 20);
@@ -714,7 +721,7 @@ test "large object - grow" {

 test "realloc small object to large object" {
     var gpda = GeneralPurposeAllocator(test_config){};
-    defer gpda.deinit();
+    defer std.testing.expect(!gpda.deinit());
     const allocator = &gpda.allocator;

     var slice = try allocator.alloc(u8, 70);
@@ -731,7 +738,7 @@ test "realloc small object to large object" {

 test "shrink large object to large object" {
     var gpda = GeneralPurposeAllocator(test_config){};
-    defer gpda.deinit();
+    defer std.testing.expect(!gpda.deinit());
     const allocator = &gpda.allocator;

     var slice = try allocator.alloc(u8, page_size * 2 + 50);
@@ -754,7 +761,7 @@ test "shrink large object to large object" {

 test "shrink large object to large object with larger alignment" {
     var gpda = GeneralPurposeAllocator(test_config){};
-    defer gpda.deinit();
+    defer std.testing.expect(!gpda.deinit());
     const allocator = &gpda.allocator;

     var debug_buffer: [1000]u8 = undefined;
@@ -782,7 +789,7 @@ test "shrink large object to large object with larger alignment" {

 test "realloc large object to small object" {
     var gpda = GeneralPurposeAllocator(test_config){};
-    defer gpda.deinit();
+    defer std.testing.expect(!gpda.deinit());
     const allocator = &gpda.allocator;

     var slice = try allocator.alloc(u8, page_size * 2 + 50);
@@ -797,7 +804,7 @@ test "realloc large object to small object" {

 test "non-page-allocator backing allocator" {
     var gpda = GeneralPurposeAllocator(.{}){ .backing_allocator = std.testing.allocator };
-    defer gpda.deinit();
+    defer std.testing.expect(!gpda.deinit());
     const allocator = &gpda.allocator;

     const ptr = try allocator.create(i32);
@@ -806,7 +813,7 @@ test "non-page-allocator backing allocator" {

 test "realloc large object to larger alignment" {
     var gpda = GeneralPurposeAllocator(test_config){};
-    defer gpda.deinit();
+    defer std.testing.expect(!gpda.deinit());
     const allocator = &gpda.allocator;

     var debug_buffer: [1000]u8 = undefined;
@@ -866,7 +873,7 @@ test "isAligned works" {
 test "large object shrinks to small but allocation fails during shrink" {
     var failing_allocator = std.testing.FailingAllocator.init(std.heap.page_allocator, 3);
     var gpda = GeneralPurposeAllocator(.{}){ .backing_allocator = &failing_allocator.allocator };
-    defer gpda.deinit();
+    defer std.testing.expect(!gpda.deinit());
     const allocator = &gpda.allocator;

     var slice = try allocator.alloc(u8, page_size * 2 + 50);
@@ -883,7 +890,7 @@ test "large object shrinks to small but allocation fails during shrink" {

 test "objects of size 1024 and 2048" {
     var gpda = GeneralPurposeAllocator(test_config){};
-    defer gpda.deinit();
+    defer std.testing.expect(!gpda.deinit());
     const allocator = &gpda.allocator;

     const slice = try allocator.alloc(u8, 1025);
@@ -895,7 +902,7 @@ test "objects of size 1024 and 2048" {

 test "setting a memory cap" {
     var gpda = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){};
-    defer gpda.deinit();
+    defer std.testing.expect(!gpda.deinit());
     const allocator = &gpda.allocator;

     gpda.setRequestedMemoryLimit(1010);
@@ -15,8 +15,7 @@ const ResetEvent = std.ResetEvent;
 /// deadlock detection.
 ///
 /// Example usage:
-/// var m = Mutex.init();
-/// defer m.deinit();
+/// var m = Mutex{};
 ///
 /// const lock = m.acquire();
 /// defer lock.release();
@@ -32,101 +31,11 @@ const ResetEvent = std.ResetEvent;
 pub const Mutex = if (builtin.single_threaded)
     Dummy
 else if (builtin.os.tag == .windows)
-    // https://locklessinc.com/articles/keyed_events/
-    extern union {
-        locked: u8,
-        waiters: u32,
-
-        const WAKE = 1 << 8;
-        const WAIT = 1 << 9;
-
-        pub const Dummy = Dummy;
-
-        pub fn init() Mutex {
-            return Mutex{ .waiters = 0 };
-        }
-
-        pub fn deinit(self: *Mutex) void {
-            self.* = undefined;
-        }
-
-        pub fn tryAcquire(self: *Mutex) ?Held {
-            if (@atomicRmw(u8, &self.locked, .Xchg, 1, .Acquire) != 0)
-                return null;
-            return Held{ .mutex = self };
-        }
-
-        pub fn acquire(self: *Mutex) Held {
-            return self.tryAcquire() orelse self.acquireSlow();
-        }
-
-        fn acquireSpinning(self: *Mutex) Held {
-            @setCold(true);
-            while (true) : (SpinLock.yield()) {
-                return self.tryAcquire() orelse continue;
-            }
-        }
-
-        fn acquireSlow(self: *Mutex) Held {
-            // try to use NT keyed events for blocking, falling back to spinlock if unavailable
-            @setCold(true);
-            const handle = ResetEvent.OsEvent.Futex.getEventHandle() orelse return self.acquireSpinning();
-            const key = @ptrCast(*const c_void, &self.waiters);
-
-            while (true) : (SpinLock.loopHint(1)) {
-                const waiters = @atomicLoad(u32, &self.waiters, .Monotonic);
-
-                // try and take lock if unlocked
-                if ((waiters & 1) == 0) {
-                    if (@atomicRmw(u8, &self.locked, .Xchg, 1, .Acquire) == 0) {
-                        return Held{ .mutex = self };
-                    }
-
-                    // otherwise, try and update the waiting count.
-                    // then unset the WAKE bit so that another unlocker can wake up a thread.
-                } else if (@cmpxchgWeak(u32, &self.waiters, waiters, (waiters + WAIT) | 1, .Monotonic, .Monotonic) == null) {
-                    const rc = windows.ntdll.NtWaitForKeyedEvent(handle, key, windows.FALSE, null);
-                    assert(rc == .SUCCESS);
-                    _ = @atomicRmw(u32, &self.waiters, .Sub, WAKE, .Monotonic);
-                }
-            }
-        }
-
-        pub const Held = struct {
-            mutex: *Mutex,
-
-            pub fn release(self: Held) void {
-                // unlock without a rmw/cmpxchg instruction
-                @atomicStore(u8, @ptrCast(*u8, &self.mutex.locked), 0, .Release);
-                const handle = ResetEvent.OsEvent.Futex.getEventHandle() orelse return;
-                const key = @ptrCast(*const c_void, &self.mutex.waiters);
-
-                while (true) : (SpinLock.loopHint(1)) {
-                    const waiters = @atomicLoad(u32, &self.mutex.waiters, .Monotonic);
-
-                    // no one is waiting
-                    if (waiters < WAIT) return;
-                    // someone grabbed the lock and will do the wake instead
-                    if (waiters & 1 != 0) return;
-                    // someone else is currently waking up
-                    if (waiters & WAKE != 0) return;
-
-                    // try to decrease the waiter count & set the WAKE bit meaning a thread is waking up
-                    if (@cmpxchgWeak(u32, &self.mutex.waiters, waiters, waiters - WAIT + WAKE, .Release, .Monotonic) == null) {
-                        const rc = windows.ntdll.NtReleaseKeyedEvent(handle, key, windows.FALSE, null);
-                        assert(rc == .SUCCESS);
-                        return;
-                    }
-                }
-            }
-        };
-    }
+    WindowsMutex
 else if (builtin.link_libc or builtin.os.tag == .linux)
     // stack-based version of https://github.com/Amanieu/parking_lot/blob/master/core/src/word_lock.rs
     struct {
-        state: usize,
-
-        pub const Dummy = Dummy;
+        state: usize = 0,

         /// number of times to spin trying to acquire the lock.
         /// https://webkit.org/blog/6161/locking-in-webkit/
@@ -141,14 +50,6 @@ else if (builtin.link_libc or builtin.os.tag == .linux)
             event: ResetEvent,
         };

-        pub fn init() Mutex {
-            return Mutex{ .state = 0 };
-        }
-
-        pub fn deinit(self: *Mutex) void {
-            self.* = undefined;
-        }
-
         pub fn tryAcquire(self: *Mutex) ?Held {
             if (@cmpxchgWeak(usize, &self.state, 0, MUTEX_LOCK, .Acquire, .Monotonic) != null)
                 return null;
@@ -263,7 +164,7 @@ else
 /// This has the sematics as `Mutex`, however it does not actually do any
 /// synchronization. Operations are safety-checked no-ops.
 pub const Dummy = struct {
-    lock: @TypeOf(lock_init),
+    lock: @TypeOf(lock_init) = lock_init,

     const lock_init = if (std.debug.runtime_safety) false else {};
@@ -278,15 +179,7 @@ pub const Dummy = struct {
     };

-    /// Create a new mutex in unlocked state.
-    pub fn init() Dummy {
-        return Dummy{ .lock = lock_init };
-    }
-
-    /// Free a mutex created with init. Calling this while the
-    /// mutex is held is illegal behavior.
-    pub fn deinit(self: *Dummy) void {
-        self.* = undefined;
-    }
+    pub const init = Dummy{};

     /// Try to acquire the mutex without blocking. Returns null if
     /// the mutex is unavailable. Otherwise returns Held. Call
@@ -306,6 +199,90 @@ pub const Dummy = struct {
     }
 };

+// https://locklessinc.com/articles/keyed_events/
+const WindowsMutex = struct {
+    state: State = State{ .waiters = 0 },
+
+    const State = extern union {
+        locked: u8,
+        waiters: u32,
+    };
+
+    const WAKE = 1 << 8;
+    const WAIT = 1 << 9;
+
+    pub fn tryAcquire(self: *WindowsMutex) ?Held {
+        if (@atomicRmw(u8, &self.state.locked, .Xchg, 1, .Acquire) != 0)
+            return null;
+        return Held{ .mutex = self };
+    }
+
+    pub fn acquire(self: *WindowsMutex) Held {
+        return self.tryAcquire() orelse self.acquireSlow();
+    }
+
+    fn acquireSpinning(self: *WindowsMutex) Held {
+        @setCold(true);
+        while (true) : (SpinLock.yield()) {
+            return self.tryAcquire() orelse continue;
+        }
+    }
+
+    fn acquireSlow(self: *WindowsMutex) Held {
+        // try to use NT keyed events for blocking, falling back to spinlock if unavailable
+        @setCold(true);
+        const handle = ResetEvent.OsEvent.Futex.getEventHandle() orelse return self.acquireSpinning();
+        const key = @ptrCast(*const c_void, &self.state.waiters);
+
+        while (true) : (SpinLock.loopHint(1)) {
+            const waiters = @atomicLoad(u32, &self.state.waiters, .Monotonic);
+
+            // try and take lock if unlocked
+            if ((waiters & 1) == 0) {
+                if (@atomicRmw(u8, &self.state.locked, .Xchg, 1, .Acquire) == 0) {
+                    return Held{ .mutex = self };
+                }
+
+                // otherwise, try and update the waiting count.
+                // then unset the WAKE bit so that another unlocker can wake up a thread.
+            } else if (@cmpxchgWeak(u32, &self.state.waiters, waiters, (waiters + WAIT) | 1, .Monotonic, .Monotonic) == null) {
+                const rc = windows.ntdll.NtWaitForKeyedEvent(handle, key, windows.FALSE, null);
+                assert(rc == .SUCCESS);
+                _ = @atomicRmw(u32, &self.state.waiters, .Sub, WAKE, .Monotonic);
+            }
+        }
+    }
+
+    pub const Held = struct {
+        mutex: *WindowsMutex,
+
+        pub fn release(self: Held) void {
+            // unlock without a rmw/cmpxchg instruction
+            @atomicStore(u8, @ptrCast(*u8, &self.mutex.state.locked), 0, .Release);
+            const handle = ResetEvent.OsEvent.Futex.getEventHandle() orelse return;
+            const key = @ptrCast(*const c_void, &self.mutex.state.waiters);
+
+            while (true) : (SpinLock.loopHint(1)) {
+                const waiters = @atomicLoad(u32, &self.mutex.state.waiters, .Monotonic);
+
+                // no one is waiting
+                if (waiters < WAIT) return;
+                // someone grabbed the lock and will do the wake instead
+                if (waiters & 1 != 0) return;
+                // someone else is currently waking up
+                if (waiters & WAKE != 0) return;
+
+                // try to decrease the waiter count & set the WAKE bit meaning a thread is waking up
+                if (@cmpxchgWeak(u32, &self.mutex.state.waiters, waiters, waiters - WAIT + WAKE, .Release, .Monotonic) == null) {
+                    const rc = windows.ntdll.NtReleaseKeyedEvent(handle, key, windows.FALSE, null);
+                    assert(rc == .SUCCESS);
+                    return;
+                }
+            }
+        }
+    };
+};
+
 const TestContext = struct {
     mutex: *Mutex,
     data: i128,
@@ -314,8 +291,7 @@ const TestContext = struct {
 };

 test "std.Mutex" {
-    var mutex = Mutex.init();
-    defer mutex.deinit();
+    var mutex = Mutex{};

     var context = TestContext{
         .mutex = &mutex,
@@ -10,7 +10,7 @@ pub fn once(comptime f: fn () void) Once(f) {
 pub fn Once(comptime f: fn () void) type {
     return struct {
         done: bool = false,
-        mutex: std.Mutex = std.Mutex.init(),
+        mutex: std.Mutex = std.Mutex{},

         /// Call the function `f`.
         /// If `call` is invoked multiple times `f` will be executed only the
@@ -19,9 +19,14 @@ pub fn main() anyerror!void {
     // ignores the alignment of the slice.
     async_frame_buffer = &[_]u8{};

+    var leaks: usize = 0;
     for (test_fn_list) |test_fn, i| {
         std.testing.allocator_instance = std.heap.GeneralPurposeAllocator(.{}){};
-        defer std.testing.allocator_instance.deinit();
+        defer {
+            if (std.testing.allocator_instance.deinit()) {
+                leaks += 1;
+            }
+        }
         std.testing.log_level = .warn;

         var test_node = root_node.start(test_fn.name, null);
@@ -70,6 +75,10 @@ pub fn main() anyerror!void {
     } else {
         std.debug.print("{} passed; {} skipped.\n", .{ ok_count, skip_count });
     }
+    if (leaks != 0) {
+        std.debug.print("{} tests leaked memory\n", .{ok_count});
+        std.process.exit(1);
+    }
 }

 pub fn log(
@@ -13,7 +13,8 @@ pub const ComptimeStringMap = @import("comptime_string_map.zig").ComptimeStringM
 pub const DynLib = @import("dynamic_library.zig").DynLib;
 pub const HashMap = hash_map.HashMap;
 pub const HashMapUnmanaged = hash_map.HashMapUnmanaged;
-pub const Mutex = @import("mutex.zig").Mutex;
+pub const mutex = @import("mutex.zig");
+pub const Mutex = mutex.Mutex;
 pub const PackedIntArray = @import("packed_int_array.zig").PackedIntArray;
 pub const PackedIntArrayEndian = @import("packed_int_array.zig").PackedIntArrayEndian;
 pub const PackedIntSlice = @import("packed_int_array.zig").PackedIntSlice;
@@ -60,9 +60,10 @@ pub fn log(
     std.debug.print(prefix ++ format, args);
 }

+var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};
+
 pub fn main() !void {
-    // TODO general purpose allocator in the zig std lib
-    const gpa = if (std.builtin.link_libc) std.heap.c_allocator else std.heap.page_allocator;
+    const gpa = if (std.builtin.link_libc) std.heap.c_allocator else &general_purpose_allocator.allocator;
     var arena_instance = std.heap.ArenaAllocator.init(gpa);
     defer arena_instance.deinit();
     const arena = &arena_instance.allocator;
@@ -407,8 +407,6 @@ pub const TestContext = struct {
         defer root_node.end();

         for (self.cases.items) |case| {
-            std.testing.base_allocator_instance.reset();
-
             var prg_node = root_node.start(case.name, case.updates.items.len);
             prg_node.activate();
             defer prg_node.end();
@@ -419,7 +417,6 @@ pub const TestContext = struct {
             progress.refresh_rate_ns = 0;

             try self.runOneCase(std.testing.allocator, &prg_node, case);
-            try std.testing.allocator_instance.validate();
         }
     }