const std = @import("std.zig");
const root = @import("root");
const debug = std.debug;
const assert = debug.assert;
const testing = std.testing;
const mem = std.mem;
const os = std.os;
const builtin = @import("builtin");
const c = std.c;
const maxInt = std.math.maxInt;

pub const LoggingAllocator = @import("heap/logging_allocator.zig").LoggingAllocator;
pub const loggingAllocator = @import("heap/logging_allocator.zig").loggingAllocator;
pub const ArenaAllocator = @import("heap/arena_allocator.zig").ArenaAllocator;

const Allocator = mem.Allocator;
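
/// An `Allocator` backed by the libc `realloc` (see `cRealloc` and `cShrink` below);
/// it is only usable when linking libc, and `cRealloc` asserts that the requested
/// alignment does not exceed `@alignOf(c_longdouble)`.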
pub const c_allocator = &c_allocator_state;
var c_allocator_state = Allocator{
    .reallocFn = cRealloc,
    .shrinkFn = cShrink,
};

fn cRealloc(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
    assert(new_align <= @alignOf(c_longdouble));
    const old_ptr = if (old_mem.len == 0) null else @ptrCast(*c_void, old_mem.ptr);
    const buf = c.realloc(old_ptr, new_size) orelse return error.OutOfMemory;
    return @ptrCast([*]u8, buf)[0..new_size];
}

fn cShrink(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
    const old_ptr = @ptrCast(*c_void, old_mem.ptr);
    const buf = c.realloc(old_ptr, new_size) orelse return old_mem[0..new_size];
    return @ptrCast([*]u8, buf)[0..new_size];
}

/// This allocator makes a syscall directly for every allocation and free.
/// Thread-safe and lock-free.
pub const page_allocator = if (std.Target.current.isWasm())
    &wasm_page_allocator_state
else if (std.Target.current.os.tag == .freestanding)
    root.os.heap.page_allocator
else
    &page_allocator_state;

var page_allocator_state = Allocator{
    .reallocFn = PageAllocator.realloc,
    .shrinkFn = PageAllocator.shrink,
};
var wasm_page_allocator_state = Allocator{
    .reallocFn = WasmPageAllocator.realloc,
    .shrinkFn = WasmPageAllocator.shrink,
};

pub const direct_allocator = @compileError("deprecated; use std.heap.page_allocator");
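
// Usage sketch: `page_allocator` satisfies the standard `*Allocator` interface,
// so it can be passed anywhere an allocator is expected, for example:
//     var list = std.ArrayList(u8).init(std.heap.page_allocator);
// Since every allocation goes straight to the OS, prefer wrapping it in an
// `ArenaAllocator` (or similar) when making many small allocations.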

const PageAllocator = struct {
    fn alloc(allocator: *Allocator, n: usize, alignment: u29) error{OutOfMemory}![]u8 {
        if (n == 0) return &[0]u8{};

        if (builtin.os.tag == .windows) {
            const w = os.windows;

            // Although officially it's at least aligned to page boundary,
            // Windows is known to reserve pages on a 64K boundary. It's
            // even more likely that the requested alignment is <= 64K than
            // 4K, so we're just allocating blindly and hoping for the best.
            // see https://devblogs.microsoft.com/oldnewthing/?p=42223
            const addr = w.VirtualAlloc(
                null,
                n,
                w.MEM_COMMIT | w.MEM_RESERVE,
                w.PAGE_READWRITE,
            ) catch return error.OutOfMemory;

            // If the allocation is sufficiently aligned, use it.
            if (@ptrToInt(addr) & (alignment - 1) == 0) {
                return @ptrCast([*]u8, addr)[0..n];
            }

            // If it wasn't, actually do an explicitly aligned allocation.
            w.VirtualFree(addr, 0, w.MEM_RELEASE);
            const alloc_size = n + alignment;

            const final_addr = while (true) {
                // Reserve a range of memory large enough to find a sufficiently
                // aligned address.
                const reserved_addr = w.VirtualAlloc(
                    null,
                    alloc_size,
                    w.MEM_RESERVE,
                    w.PAGE_NOACCESS,
                ) catch return error.OutOfMemory;

                const aligned_addr = mem.alignForward(@ptrToInt(reserved_addr), alignment);

                // Release the reserved pages (not actually used).
                w.VirtualFree(reserved_addr, 0, w.MEM_RELEASE);

                // At this point, it is possible that another thread has
                // obtained some memory space that will cause the next
                // VirtualAlloc call to fail. To handle this, we will retry
                // until it succeeds.
                const ptr = w.VirtualAlloc(
                    @intToPtr(*c_void, aligned_addr),
                    n,
                    w.MEM_COMMIT | w.MEM_RESERVE,
                    w.PAGE_READWRITE,
                ) catch continue;

                return @ptrCast([*]u8, ptr)[0..n];
            };

            return @ptrCast([*]u8, final_addr)[0..n];
        }

        const alloc_size = if (alignment <= mem.page_size) n else n + alignment;
        const slice = os.mmap(
            null,
            mem.alignForward(alloc_size, mem.page_size),
            os.PROT_READ | os.PROT_WRITE,
            os.MAP_PRIVATE | os.MAP_ANONYMOUS,
            -1,
            0,
        ) catch return error.OutOfMemory;
        if (alloc_size == n) return slice[0..n];

        const aligned_addr = mem.alignForward(@ptrToInt(slice.ptr), alignment);

        // Unmap the extra bytes that were only requested in order to guarantee
        // that the range of memory we were provided had a proper alignment in
        // it somewhere. The extra bytes could be at the beginning, or end, or both.
        const unused_start_len = aligned_addr - @ptrToInt(slice.ptr);
        if (unused_start_len != 0) {
            os.munmap(slice[0..unused_start_len]);
        }
        const aligned_end_addr = mem.alignForward(aligned_addr + n, mem.page_size);
        const unused_end_len = @ptrToInt(slice.ptr) + slice.len - aligned_end_addr;
        if (unused_end_len != 0) {
            os.munmap(@intToPtr([*]align(mem.page_size) u8, aligned_end_addr)[0..unused_end_len]);
        }

        return @intToPtr([*]u8, aligned_addr)[0..n];
    }

    fn shrink(allocator: *Allocator, old_mem_unaligned: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
        const old_mem = @alignCast(mem.page_size, old_mem_unaligned);
        if (builtin.os.tag == .windows) {
            const w = os.windows;
            if (new_size == 0) {
                // From the docs:
                // "If the dwFreeType parameter is MEM_RELEASE, this parameter
                // must be 0 (zero). The function frees the entire region that
                // is reserved in the initial allocation call to VirtualAlloc."
                // So we can only use MEM_RELEASE when actually releasing the
                // whole allocation.
                w.VirtualFree(old_mem.ptr, 0, w.MEM_RELEASE);
            } else {
                const base_addr = @ptrToInt(old_mem.ptr);
                const old_addr_end = base_addr + old_mem.len;
                const new_addr_end = base_addr + new_size;
                const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.page_size);
                if (old_addr_end > new_addr_end_rounded) {
                    // For shrinking that is not releasing, we will only
                    // decommit the pages not needed anymore.
                    w.VirtualFree(
                        @intToPtr(*c_void, new_addr_end_rounded),
                        old_addr_end - new_addr_end_rounded,
                        w.MEM_DECOMMIT,
                    );
                }
            }
            return old_mem[0..new_size];
        }
        const base_addr = @ptrToInt(old_mem.ptr);
        const old_addr_end = base_addr + old_mem.len;
        const new_addr_end = base_addr + new_size;
        const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.page_size);
        if (old_addr_end > new_addr_end_rounded) {
            const ptr = @intToPtr([*]align(mem.page_size) u8, new_addr_end_rounded);
            os.munmap(ptr[0 .. old_addr_end - new_addr_end_rounded]);
        }
        return old_mem[0..new_size];
    }

    fn realloc(allocator: *Allocator, old_mem_unaligned: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
        const old_mem = @alignCast(mem.page_size, old_mem_unaligned);
        if (builtin.os.tag == .windows) {
            if (old_mem.len == 0) {
                return alloc(allocator, new_size, new_align);
            }

            if (new_size <= old_mem.len and new_align <= old_align) {
                return shrink(allocator, old_mem, old_align, new_size, new_align);
            }

            const w = os.windows;
            const base_addr = @ptrToInt(old_mem.ptr);

            if (new_align > old_align and base_addr & (new_align - 1) != 0) {
                // Current allocation doesn't satisfy the new alignment.
                // For now we'll do a new one no matter what, but maybe
                // there is something smarter to do instead.
                const result = try alloc(allocator, new_size, new_align);
                assert(old_mem.len != 0);
                @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
                w.VirtualFree(old_mem.ptr, 0, w.MEM_RELEASE);

                return result;
            }

            const old_addr_end = base_addr + old_mem.len;
            const old_addr_end_rounded = mem.alignForward(old_addr_end, mem.page_size);
            const new_addr_end = base_addr + new_size;
            const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.page_size);
            if (new_addr_end_rounded == old_addr_end_rounded) {
                // The reallocation fits in the already allocated pages.
                return @ptrCast([*]u8, old_mem.ptr)[0..new_size];
            }
            assert(new_addr_end_rounded > old_addr_end_rounded);

            // We need to commit new pages.
            const additional_size = new_addr_end - old_addr_end_rounded;
            const realloc_addr = w.kernel32.VirtualAlloc(
                @intToPtr(*c_void, old_addr_end_rounded),
                additional_size,
                w.MEM_COMMIT | w.MEM_RESERVE,
                w.PAGE_READWRITE,
            ) orelse {
                // Committing new pages at the end of the existing allocation
                // failed, we need to try a new one.
                const new_alloc_mem = try alloc(allocator, new_size, new_align);
                @memcpy(new_alloc_mem.ptr, old_mem.ptr, old_mem.len);
                w.VirtualFree(old_mem.ptr, 0, w.MEM_RELEASE);

                return new_alloc_mem;
            };

            assert(@ptrToInt(realloc_addr) == old_addr_end_rounded);
            return @ptrCast([*]u8, old_mem.ptr)[0..new_size];
        }
        if (new_size <= old_mem.len and new_align <= old_align) {
            return shrink(allocator, old_mem, old_align, new_size, new_align);
        }
        const result = try alloc(allocator, new_size, new_align);
        if (old_mem.len != 0) {
            @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
            os.munmap(old_mem);
        }
        return result;
    }
};

// TODO Exposed LLVM intrinsics is a bug
// See: https://github.com/ziglang/zig/issues/2291
extern fn @"llvm.wasm.memory.size.i32"(u32) u32;
extern fn @"llvm.wasm.memory.grow.i32"(u32, u32) i32;

const WasmPageAllocator = struct {
    comptime {
        if (!std.Target.current.isWasm()) {
            @compileError("WasmPageAllocator is only available for wasm32 arch");
        }
    }

    const PageStatus = enum(u1) {
        used = 0,
        free = 1,

        pub const none_free: u8 = 0;
    };
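
    // Note on the bookkeeping below: a `FreeBlock` is a bitmap over wasm pages.
    // Each `u128` in `data` tracks 128 pages, one `PageStatus` bit per page
    // (see `getBit`/`setBits`).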

    const FreeBlock = struct {
        data: []u128,

        const Io = std.packed_int_array.PackedIntIo(u1, .Little);

        fn totalPages(self: FreeBlock) usize {
            return self.data.len * 128;
        }

        fn isInitialized(self: FreeBlock) bool {
            return self.data.len > 0;
        }

        fn getBit(self: FreeBlock, idx: usize) PageStatus {
            const bit_offset = 0;
            return @intToEnum(PageStatus, Io.get(mem.sliceAsBytes(self.data), idx, bit_offset));
        }

        fn setBits(self: FreeBlock, start_idx: usize, len: usize, val: PageStatus) void {
            const bit_offset = 0;
            var i: usize = 0;
            while (i < len) : (i += 1) {
                Io.set(mem.sliceAsBytes(self.data), start_idx + i, bit_offset, @enumToInt(val));
            }
        }

        // Use '0xFFFFFFFF' as a _missing_ sentinel.
        // This saves ~50 bytes compared to returning a nullable.
        // We can guarantee that conventional memory never gets this big,
        // and wasm32 would not be able to address this memory (32 GB > usize).
        // Revisit if this is settled: https://github.com/ziglang/zig/issues/3806
        const not_found = std.math.maxInt(usize);

        fn useRecycled(self: FreeBlock, num_pages: usize) usize {
            @setCold(true);
            for (self.data) |segment, i| {
                const spills_into_next = @bitCast(i128, segment) < 0;
                const has_enough_bits = @popCount(u128, segment) >= num_pages;

                if (!spills_into_next and !has_enough_bits) continue;

                var j: usize = i * 128;
                while (j < (i + 1) * 128) : (j += 1) {
                    var count: usize = 0;
                    while (j + count < self.totalPages() and self.getBit(j + count) == .free) {
                        count += 1;
                        if (count >= num_pages) {
                            self.setBits(j, num_pages, .used);
                            return j;
                        }
                    }
                    j += count;
                }
            }
            return not_found;
        }

        fn recycle(self: FreeBlock, start_idx: usize, len: usize) void {
            self.setBits(start_idx, len, .free);
        }
    };

    var _conventional_data = [_]u128{0} ** 16;
    // Marking `conventional` as const saves ~40 bytes
    const conventional = FreeBlock{ .data = &_conventional_data };
    var extended = FreeBlock{ .data = &[_]u128{} };

    fn extendedOffset() usize {
        return conventional.totalPages();
    }

    fn nPages(memsize: usize) usize {
        return std.mem.alignForward(memsize, std.mem.page_size) / std.mem.page_size;
    }
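    // For example, nPages(0) == 0, nPages(1) == 1, and
    // nPages(std.mem.page_size + 1) == 2: sizes are rounded up to whole pages.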

    fn alloc(allocator: *Allocator, page_count: usize, alignment: u29) error{OutOfMemory}!usize {
        var idx = conventional.useRecycled(page_count);
        if (idx != FreeBlock.not_found) {
            return idx;
        }

        idx = extended.useRecycled(page_count);
        if (idx != FreeBlock.not_found) {
            return idx + extendedOffset();
        }

        const prev_page_count = @"llvm.wasm.memory.grow.i32"(0, @intCast(u32, page_count));
        if (prev_page_count <= 0) {
            return error.OutOfMemory;
        }

        return @intCast(usize, prev_page_count);
    }
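    // Note that `alloc` returns a page index rather than a pointer; `realloc`
    // below converts it to an address via `page_idx * std.mem.page_size`.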

    pub fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) Allocator.Error![]u8 {
        if (new_align > std.mem.page_size) {
            return error.OutOfMemory;
        }

        if (nPages(new_size) == nPages(old_mem.len)) {
            return old_mem.ptr[0..new_size];
        } else if (new_size < old_mem.len) {
            return shrink(allocator, old_mem, old_align, new_size, new_align);
        } else {
            const page_idx = try alloc(allocator, nPages(new_size), new_align);
            const new_mem = @intToPtr([*]u8, page_idx * std.mem.page_size)[0..new_size];
            std.mem.copy(u8, new_mem, old_mem);
            _ = shrink(allocator, old_mem, old_align, 0, 0);
            return new_mem;
        }
    }

    pub fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
        @setCold(true);
        const free_start = nPages(@ptrToInt(old_mem.ptr) + new_size);
        var free_end = nPages(@ptrToInt(old_mem.ptr) + old_mem.len);

        if (free_end > free_start) {
            if (free_start < extendedOffset()) {
                const clamped_end = std.math.min(extendedOffset(), free_end);
                conventional.recycle(free_start, clamped_end - free_start);
            }

            if (free_end > extendedOffset()) {
                if (!extended.isInitialized()) {
                    // Steal the last page from the memory currently being recycled
                    // TODO: would it be better if we use the first page instead?
                    free_end -= 1;

                    extended.data = @intToPtr([*]u128, free_end * std.mem.page_size)[0 .. std.mem.page_size / @sizeOf(u128)];
                    // Since this is the first page being freed and we consume it, assume *nothing* is free.
                    std.mem.set(u128, extended.data, PageStatus.none_free);
                }
                const clamped_start = std.math.max(extendedOffset(), free_start);
                extended.recycle(clamped_start - extendedOffset(), free_end - clamped_start);
            }
        }

        return old_mem[0..new_size];
    }
};

pub const HeapAllocator = switch (builtin.os.tag) {
    .windows => struct {
        allocator: Allocator,
        heap_handle: ?HeapHandle,

        const HeapHandle = os.windows.HANDLE;

        pub fn init() HeapAllocator {
            return HeapAllocator{
                .allocator = Allocator{
                    .reallocFn = realloc,
                    .shrinkFn = shrink,
                },
                .heap_handle = null,
            };
        }

        pub fn deinit(self: *HeapAllocator) void {
            if (self.heap_handle) |heap_handle| {
                os.windows.HeapDestroy(heap_handle);
            }
        }

        fn alloc(allocator: *Allocator, n: usize, alignment: u29) error{OutOfMemory}![]u8 {
            const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
            if (n == 0) return &[0]u8{};

            const amt = n + alignment + @sizeOf(usize);
            const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, builtin.AtomicOrder.SeqCst);
            const heap_handle = optional_heap_handle orelse blk: {
                const options = if (builtin.single_threaded) os.windows.HEAP_NO_SERIALIZE else 0;
                const hh = os.windows.kernel32.HeapCreate(options, amt, 0) orelse return error.OutOfMemory;
                const other_hh = @cmpxchgStrong(?HeapHandle, &self.heap_handle, null, hh, builtin.AtomicOrder.SeqCst, builtin.AtomicOrder.SeqCst) orelse break :blk hh;
                os.windows.HeapDestroy(hh);
                break :blk other_hh.?; // can't be null because of the cmpxchg
            };
            const ptr = os.windows.kernel32.HeapAlloc(heap_handle, 0, amt) orelse return error.OutOfMemory;
            const root_addr = @ptrToInt(ptr);
            const adjusted_addr = mem.alignForward(root_addr, alignment);
            const record_addr = adjusted_addr + n;
            @intToPtr(*align(1) usize, record_addr).* = root_addr;
            return @intToPtr([*]u8, adjusted_addr)[0..n];
        }

        fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
            return realloc(allocator, old_mem, old_align, new_size, new_align) catch {
                const old_adjusted_addr = @ptrToInt(old_mem.ptr);
                const old_record_addr = old_adjusted_addr + old_mem.len;
                const root_addr = @intToPtr(*align(1) usize, old_record_addr).*;
                const old_ptr = @intToPtr(*c_void, root_addr);
                const new_record_addr = old_record_addr - new_size + old_mem.len;
                @intToPtr(*align(1) usize, new_record_addr).* = root_addr;
                return old_mem[0..new_size];
            };
        }

        fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
            if (old_mem.len == 0) return alloc(allocator, new_size, new_align);

            const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
            const old_adjusted_addr = @ptrToInt(old_mem.ptr);
            const old_record_addr = old_adjusted_addr + old_mem.len;
            const root_addr = @intToPtr(*align(1) usize, old_record_addr).*;
            const old_ptr = @intToPtr(*c_void, root_addr);

            if (new_size == 0) {
                os.windows.HeapFree(self.heap_handle.?, 0, old_ptr);
                return old_mem[0..0];
            }

            const amt = new_size + new_align + @sizeOf(usize);
            const new_ptr = os.windows.kernel32.HeapReAlloc(
                self.heap_handle.?,
                0,
                old_ptr,
                amt,
            ) orelse return error.OutOfMemory;
            const offset = old_adjusted_addr - root_addr;
            const new_root_addr = @ptrToInt(new_ptr);
            var new_adjusted_addr = new_root_addr + offset;
            const offset_is_valid = new_adjusted_addr + new_size + @sizeOf(usize) <= new_root_addr + amt;
            const offset_is_aligned = new_adjusted_addr % new_align == 0;
            if (!offset_is_valid or !offset_is_aligned) {
                // If HeapReAlloc didn't happen to move the memory to the new alignment,
                // or the memory starting at the old offset would be outside of the new allocation,
                // then we need to copy the memory to a valid aligned address and use that
                const new_aligned_addr = mem.alignForward(new_root_addr, new_align);
                @memcpy(@intToPtr([*]u8, new_aligned_addr), @intToPtr([*]u8, new_adjusted_addr), std.math.min(old_mem.len, new_size));
                new_adjusted_addr = new_aligned_addr;
            }
            const new_record_addr = new_adjusted_addr + new_size;
            @intToPtr(*align(1) usize, new_record_addr).* = new_root_addr;
            return @intToPtr([*]u8, new_adjusted_addr)[0..new_size];
        }
    },
    else => @compileError("Unsupported OS"),
};
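
/// An allocator that carves allocations out of a caller-provided fixed buffer.
/// Growing an allocation in place only works for the most recent allocation;
/// freeing individual allocations does not reclaim space, use `reset` to
/// reclaim the whole buffer.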
pub const FixedBufferAllocator = struct {
    allocator: Allocator,
    end_index: usize,
    buffer: []u8,

    pub fn init(buffer: []u8) FixedBufferAllocator {
        return FixedBufferAllocator{
            .allocator = Allocator{
                .reallocFn = realloc,
                .shrinkFn = shrink,
            },
            .buffer = buffer,
            .end_index = 0,
        };
    }

    fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
        const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
        const addr = @ptrToInt(self.buffer.ptr) + self.end_index;
        const adjusted_addr = mem.alignForward(addr, alignment);
        const adjusted_index = self.end_index + (adjusted_addr - addr);
        const new_end_index = adjusted_index + n;
        if (new_end_index > self.buffer.len) {
            return error.OutOfMemory;
        }
        const result = self.buffer[adjusted_index..new_end_index];
        self.end_index = new_end_index;

        return result;
    }

    fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
        const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
        assert(old_mem.len <= self.end_index);
        if (old_mem.ptr == self.buffer.ptr + self.end_index - old_mem.len and
            mem.alignForward(@ptrToInt(old_mem.ptr), new_align) == @ptrToInt(old_mem.ptr))
        {
            const start_index = self.end_index - old_mem.len;
            const new_end_index = start_index + new_size;
            if (new_end_index > self.buffer.len) return error.OutOfMemory;
            const result = self.buffer[start_index..new_end_index];
            self.end_index = new_end_index;
            return result;
        } else if (new_size <= old_mem.len and new_align <= old_align) {
            // We can't do anything with the memory, so tell the client to keep it.
            return error.OutOfMemory;
        } else {
            const result = try alloc(allocator, new_size, new_align);
            @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
            return result;
        }
    }

    fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
        return old_mem[0..new_size];
    }

    pub fn reset(self: *FixedBufferAllocator) void {
        self.end_index = 0;
    }
};
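
/// Like `FixedBufferAllocator`, but the bump index is advanced with a lock-free
/// compare-and-swap so it is safe to allocate from multiple threads.
/// In single-threaded builds this is simply an alias for `FixedBufferAllocator`.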
pub const ThreadSafeFixedBufferAllocator = blk: {
    if (builtin.single_threaded) {
        break :blk FixedBufferAllocator;
    } else {
        // lock free
        break :blk struct {
            allocator: Allocator,
            end_index: usize,
            buffer: []u8,

            pub fn init(buffer: []u8) ThreadSafeFixedBufferAllocator {
                return ThreadSafeFixedBufferAllocator{
                    .allocator = Allocator{
                        .reallocFn = realloc,
                        .shrinkFn = shrink,
                    },
                    .buffer = buffer,
                    .end_index = 0,
                };
            }

            fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
                const self = @fieldParentPtr(ThreadSafeFixedBufferAllocator, "allocator", allocator);
                var end_index = @atomicLoad(usize, &self.end_index, builtin.AtomicOrder.SeqCst);
                while (true) {
                    const addr = @ptrToInt(self.buffer.ptr) + end_index;
                    const adjusted_addr = mem.alignForward(addr, alignment);
                    const adjusted_index = end_index + (adjusted_addr - addr);
                    const new_end_index = adjusted_index + n;
                    if (new_end_index > self.buffer.len) {
                        return error.OutOfMemory;
                    }
                    end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, builtin.AtomicOrder.SeqCst, builtin.AtomicOrder.SeqCst) orelse return self.buffer[adjusted_index..new_end_index];
                }
            }

            fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
                if (new_size <= old_mem.len and new_align <= old_align) {
                    // We can't do anything useful with the memory, tell the client to keep it.
                    return error.OutOfMemory;
                } else {
                    const result = try alloc(allocator, new_size, new_align);
                    @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
                    return result;
                }
            }

            fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
                return old_mem[0..new_size];
            }

            pub fn reset(self: *ThreadSafeFixedBufferAllocator) void {
                self.end_index = 0;
            }
        };
    }
};

pub fn stackFallback(comptime size: usize, fallback_allocator: *Allocator) StackFallbackAllocator(size) {
    return StackFallbackAllocator(size){
        .buffer = undefined,
        .fallback_allocator = fallback_allocator,
        .fixed_buffer_allocator = undefined,
        .allocator = Allocator{
            .reallocFn = StackFallbackAllocator(size).realloc,
            .shrinkFn = StackFallbackAllocator(size).shrink,
        },
    };
}

pub fn StackFallbackAllocator(comptime size: usize) type {
    return struct {
        const Self = @This();

        buffer: [size]u8,
        allocator: Allocator,
        fallback_allocator: *Allocator,
        fixed_buffer_allocator: FixedBufferAllocator,

        pub fn get(self: *Self) *Allocator {
            self.fixed_buffer_allocator = FixedBufferAllocator.init(self.buffer[0..]);
            return &self.allocator;
        }

        fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
            const self = @fieldParentPtr(Self, "allocator", allocator);
            const in_buffer = @ptrToInt(old_mem.ptr) >= @ptrToInt(&self.buffer) and
                @ptrToInt(old_mem.ptr) < @ptrToInt(&self.buffer) + self.buffer.len;
            if (in_buffer) {
                return FixedBufferAllocator.realloc(
                    &self.fixed_buffer_allocator.allocator,
                    old_mem,
                    old_align,
                    new_size,
                    new_align,
                ) catch {
                    const result = try self.fallback_allocator.reallocFn(
                        self.fallback_allocator,
                        &[0]u8{},
                        undefined,
                        new_size,
                        new_align,
                    );
                    mem.copy(u8, result, old_mem);
                    return result;
                };
            }
            return self.fallback_allocator.reallocFn(
                self.fallback_allocator,
                old_mem,
                old_align,
                new_size,
                new_align,
            );
        }

        fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
            const self = @fieldParentPtr(Self, "allocator", allocator);
            const in_buffer = @ptrToInt(old_mem.ptr) >= @ptrToInt(&self.buffer) and
                @ptrToInt(old_mem.ptr) < @ptrToInt(&self.buffer) + self.buffer.len;
            if (in_buffer) {
                return FixedBufferAllocator.shrink(
                    &self.fixed_buffer_allocator.allocator,
                    old_mem,
                    old_align,
                    new_size,
                    new_align,
                );
            }
            return self.fallback_allocator.shrinkFn(
                self.fallback_allocator,
                old_mem,
                old_align,
                new_size,
                new_align,
            );
        }
    };
}
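
// Usage sketch: `stackFallback` is intended to serve allocations from a
// `size`-byte buffer held inside the returned struct, deferring to
// `fallback_allocator` when they do not fit there. Call `get()` to obtain
// the `*Allocator`, e.g.:
//     var stack_alloc = stackFallback(256, std.heap.page_allocator);
//     const allocator = stack_alloc.get();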

test "c_allocator" {
    if (builtin.link_libc) {
        var slice = try c_allocator.alloc(u8, 50);
        defer c_allocator.free(slice);
        slice = try c_allocator.realloc(slice, 100);
    }
}

test "WasmPageAllocator internals" {
    if (comptime std.Target.current.isWasm()) {
        const conventional_memsize = WasmPageAllocator.conventional.totalPages() * std.mem.page_size;
        const initial = try page_allocator.alloc(u8, std.mem.page_size);
        std.debug.assert(@ptrToInt(initial.ptr) < conventional_memsize); // If this isn't conventional, the rest of these tests don't make sense. Also we have a serious memory leak in the test suite.

        var inplace = try page_allocator.realloc(initial, 1);
        testing.expectEqual(initial.ptr, inplace.ptr);
        inplace = try page_allocator.realloc(inplace, 4);
        testing.expectEqual(initial.ptr, inplace.ptr);
        page_allocator.free(inplace);

        const reuse = try page_allocator.alloc(u8, 1);
        testing.expectEqual(initial.ptr, reuse.ptr);
        page_allocator.free(reuse);

        // This segment may span conventional and extended which has really complex rules so we're just ignoring it for now.
        const padding = try page_allocator.alloc(u8, conventional_memsize);
        page_allocator.free(padding);

        const extended = try page_allocator.alloc(u8, conventional_memsize);
        testing.expect(@ptrToInt(extended.ptr) >= conventional_memsize);

        const use_small = try page_allocator.alloc(u8, 1);
        testing.expectEqual(initial.ptr, use_small.ptr);
        page_allocator.free(use_small);

        inplace = try page_allocator.realloc(extended, 1);
        testing.expectEqual(extended.ptr, inplace.ptr);
        page_allocator.free(inplace);

        const reuse_extended = try page_allocator.alloc(u8, conventional_memsize);
        testing.expectEqual(extended.ptr, reuse_extended.ptr);
        page_allocator.free(reuse_extended);
    }
}

test "PageAllocator" {
    const allocator = page_allocator;
    try testAllocator(allocator);
    try testAllocatorAligned(allocator, 16);
    if (!std.Target.current.isWasm()) {
        try testAllocatorLargeAlignment(allocator);
        try testAllocatorAlignedShrink(allocator);
    }

    if (builtin.os.tag == .windows) {
        // Trying really large alignment. As mentioned in the implementation,
        // VirtualAlloc returns 64K aligned addresses. We want to make sure
        // PageAllocator works beyond that, as it's not tested by
        // `testAllocatorLargeAlignment`.
        const slice = try allocator.alignedAlloc(u8, 1 << 20, 128);
        slice[0] = 0x12;
        slice[127] = 0x34;
        allocator.free(slice);
    }
}

test "HeapAllocator" {
    if (builtin.os.tag == .windows) {
        var heap_allocator = HeapAllocator.init();
        defer heap_allocator.deinit();

        const allocator = &heap_allocator.allocator;
        try testAllocator(allocator);
        try testAllocatorAligned(allocator, 16);
        try testAllocatorLargeAlignment(allocator);
        try testAllocatorAlignedShrink(allocator);
    }
}

test "ArenaAllocator" {
    var arena_allocator = ArenaAllocator.init(page_allocator);
    defer arena_allocator.deinit();

    try testAllocator(&arena_allocator.allocator);
    try testAllocatorAligned(&arena_allocator.allocator, 16);
    try testAllocatorLargeAlignment(&arena_allocator.allocator);
    try testAllocatorAlignedShrink(&arena_allocator.allocator);
}

var test_fixed_buffer_allocator_memory: [800000 * @sizeOf(u64)]u8 = undefined;
test "FixedBufferAllocator" {
    var fixed_buffer_allocator = FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);

    try testAllocator(&fixed_buffer_allocator.allocator);
    try testAllocatorAligned(&fixed_buffer_allocator.allocator, 16);
    try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator);
    try testAllocatorAlignedShrink(&fixed_buffer_allocator.allocator);
}

test "FixedBufferAllocator.reset" {
    var buf: [8]u8 align(@alignOf(u64)) = undefined;
    var fba = FixedBufferAllocator.init(buf[0..]);

    const X = 0xeeeeeeeeeeeeeeee;
    const Y = 0xffffffffffffffff;

    var x = try fba.allocator.create(u64);
    x.* = X;
    testing.expectError(error.OutOfMemory, fba.allocator.create(u64));

    fba.reset();
    var y = try fba.allocator.create(u64);
    y.* = Y;

    // we expect Y to have overwritten X.
    testing.expect(x.* == y.*);
    testing.expect(y.* == Y);
}

test "FixedBufferAllocator Reuse memory on realloc" {
    var small_fixed_buffer: [10]u8 = undefined;
    // check if we re-use the memory
    {
        var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);

        var slice0 = try fixed_buffer_allocator.allocator.alloc(u8, 5);
        testing.expect(slice0.len == 5);
        var slice1 = try fixed_buffer_allocator.allocator.realloc(slice0, 10);
        testing.expect(slice1.ptr == slice0.ptr);
        testing.expect(slice1.len == 10);
        testing.expectError(error.OutOfMemory, fixed_buffer_allocator.allocator.realloc(slice1, 11));
    }
    // check that we don't re-use the memory if it's not the most recent block
    {
        var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);

        var slice0 = try fixed_buffer_allocator.allocator.alloc(u8, 2);
        slice0[0] = 1;
        slice0[1] = 2;
        var slice1 = try fixed_buffer_allocator.allocator.alloc(u8, 2);
        var slice2 = try fixed_buffer_allocator.allocator.realloc(slice0, 4);
        testing.expect(slice0.ptr != slice2.ptr);
        testing.expect(slice1.ptr != slice2.ptr);
        testing.expect(slice2[0] == 1);
        testing.expect(slice2[1] == 2);
    }
}

test "ThreadSafeFixedBufferAllocator" {
    var fixed_buffer_allocator = ThreadSafeFixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);

    try testAllocator(&fixed_buffer_allocator.allocator);
    try testAllocatorAligned(&fixed_buffer_allocator.allocator, 16);
    try testAllocatorLargeAlignment(&fixed_buffer_allocator.allocator);
    try testAllocatorAlignedShrink(&fixed_buffer_allocator.allocator);
}

fn testAllocator(allocator: *mem.Allocator) !void {
    var slice = try allocator.alloc(*i32, 100);
    testing.expect(slice.len == 100);
    for (slice) |*item, i| {
        item.* = try allocator.create(i32);
        item.*.* = @intCast(i32, i);
    }

    slice = try allocator.realloc(slice, 20000);
    testing.expect(slice.len == 20000);

    for (slice[0..100]) |item, i| {
        testing.expect(item.* == @intCast(i32, i));
        allocator.destroy(item);
    }

    slice = allocator.shrink(slice, 50);
    testing.expect(slice.len == 50);
    slice = allocator.shrink(slice, 25);
    testing.expect(slice.len == 25);
    slice = allocator.shrink(slice, 0);
    testing.expect(slice.len == 0);
    slice = try allocator.realloc(slice, 10);
    testing.expect(slice.len == 10);

    allocator.free(slice);
}

fn testAllocatorAligned(allocator: *mem.Allocator, comptime alignment: u29) !void {
    // initial
    var slice = try allocator.alignedAlloc(u8, alignment, 10);
    testing.expect(slice.len == 10);
    // grow
    slice = try allocator.realloc(slice, 100);
    testing.expect(slice.len == 100);
    // shrink
    slice = allocator.shrink(slice, 10);
    testing.expect(slice.len == 10);
    // go to zero
    slice = allocator.shrink(slice, 0);
    testing.expect(slice.len == 0);
    // realloc from zero
    slice = try allocator.realloc(slice, 100);
    testing.expect(slice.len == 100);
    // shrink with shrink
    slice = allocator.shrink(slice, 10);
    testing.expect(slice.len == 10);
    // shrink to zero
    slice = allocator.shrink(slice, 0);
    testing.expect(slice.len == 0);
}

fn testAllocatorLargeAlignment(allocator: *mem.Allocator) mem.Allocator.Error!void {
    // Maybe a platform's page_size is actually the same as or
    // very near usize?
    if (mem.page_size << 2 > maxInt(usize)) return;

    const USizeShift = std.meta.Int(false, std.math.log2(usize.bit_count));
    const large_align = @as(u29, mem.page_size << 2);

    var align_mask: usize = undefined;
    _ = @shlWithOverflow(usize, ~@as(usize, 0), @as(USizeShift, @ctz(u29, large_align)), &align_mask);

    var slice = try allocator.alignedAlloc(u8, large_align, 500);
    testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));

    slice = allocator.shrink(slice, 100);
    testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));

    slice = try allocator.realloc(slice, 5000);
    testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));

    slice = allocator.shrink(slice, 10);
    testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));

    slice = try allocator.realloc(slice, 20000);
    testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));

    allocator.free(slice);
}

fn testAllocatorAlignedShrink(allocator: *mem.Allocator) mem.Allocator.Error!void {
    var debug_buffer: [1000]u8 = undefined;
    const debug_allocator = &FixedBufferAllocator.init(&debug_buffer).allocator;

    const alloc_size = mem.page_size * 2 + 50;
    var slice = try allocator.alignedAlloc(u8, 16, alloc_size);
    defer allocator.free(slice);

    var stuff_to_free = std.ArrayList([]align(16) u8).init(debug_allocator);
    // On Windows, VirtualAlloc returns addresses aligned to a 64K boundary,
    // which is 16 pages, hence the 32. This test may require increasing
    // the size of the allocations feeding the `allocator` parameter if they
    // fail, because of this high over-alignment we want to have.
    while (@ptrToInt(slice.ptr) == mem.alignForward(@ptrToInt(slice.ptr), mem.page_size * 32)) {
        try stuff_to_free.append(slice);
        slice = try allocator.alignedAlloc(u8, 16, alloc_size);
    }
    while (stuff_to_free.popOrNull()) |item| {
        allocator.free(item);
    }
    slice[0] = 0x12;
    slice[60] = 0x34;

    // realloc to a smaller size but with a larger alignment
    slice = try allocator.alignedRealloc(slice, mem.page_size * 32, alloc_size / 2);
    testing.expect(slice[0] == 0x12);
    testing.expect(slice[60] == 0x34);
}

test "heap" {
    _ = @import("heap/logging_allocator.zig");
}