Merge remote-tracking branch 'origin/master' into llvm6

master
Andrew Kelley 2018-02-12 10:48:02 -05:00
commit 491d818f17
14 changed files with 397 additions and 211 deletions

View File

@@ -13,10 +13,13 @@ const obj_ext = std.build.Target(std.build.Target.Native).oFileExt();
const tmp_dir_name = "docgen_tmp";
pub fn main() !void {
// TODO use a more general purpose allocator here
var inc_allocator = try std.heap.IncrementingAllocator.init(max_doc_file_size);
defer inc_allocator.deinit();
const allocator = &inc_allocator.allocator;
var direct_allocator = std.heap.DirectAllocator.init();
defer direct_allocator.deinit();
var arena = std.heap.ArenaAllocator.init(&direct_allocator.allocator);
defer arena.deinit();
const allocator = &arena.allocator;
var args_it = os.args();
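Several hunks in this commit repeat the same replacement, so here is the new setup pattern in isolation — a minimal sketch, not a line from the commit. Note that defer statements run in reverse order, so the arena is always torn down before the DirectAllocator backing it:

const std = @import("std");

pub fn main() !void {
    var direct_allocator = std.heap.DirectAllocator.init();
    defer direct_allocator.deinit(); // runs second: releases OS-level heap state

    var arena = std.heap.ArenaAllocator.init(&direct_allocator.allocator);
    defer arena.deinit(); // runs first: returns every arena buffer to direct_allocator

    const allocator = &arena.allocator;
    const scratch = try allocator.alloc(u8, 1024); // never individually freed
    _ = scratch;
}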

View File

@@ -575,7 +575,7 @@ fn fmtMain(allocator: &mem.Allocator, file_paths: []const []const u8) !void {
var parser = std.zig.Parser.init(&tokenizer, allocator, file_path);
defer parser.deinit();
const tree = try parser.parse();
var tree = try parser.parse();
defer tree.deinit();
const baf = try io.BufferedAtomicFile.create(allocator, file_path);

View File

@@ -241,7 +241,7 @@ pub const Module = struct {
var parser = Parser.init(&tokenizer, self.allocator, root_src_real_path);
defer parser.deinit();
const tree = try parser.parse();
var tree = try parser.parse();
defer tree.deinit();
var stderr_file = try std.io.getStdErr();

View File

@@ -45,6 +45,7 @@ pub extern "c" fn nanosleep(rqtp: &const timespec, rmtp: ?&timespec) c_int;
pub extern "c" fn setreuid(ruid: c_uint, euid: c_uint) c_int;
pub extern "c" fn setregid(rgid: c_uint, egid: c_uint) c_int;
pub extern "c" fn aligned_alloc(alignment: usize, size: usize) ?&c_void;
pub extern "c" fn malloc(usize) ?&c_void;
pub extern "c" fn realloc(&c_void, usize) ?&c_void;
pub extern "c" fn free(&c_void) void;

View File

@@ -1078,5 +1078,5 @@ fn readILeb128(in_stream: var) !i64 {
}
pub const global_allocator = &global_fixed_allocator.allocator;
var global_fixed_allocator = mem.FixedBufferAllocator.init(global_allocator_mem[0..]);
var global_fixed_allocator = std.heap.FixedBufferAllocator.init(global_allocator_mem[0..]);
var global_allocator_mem: [100 * 1024]u8 = undefined;

View File

@@ -17,7 +17,8 @@ var c_allocator_state = Allocator {
};
fn cAlloc(self: &Allocator, n: usize, alignment: u29) ![]u8 {
return if (c.malloc(usize(n))) |buf|
assert(alignment <= @alignOf(c_longdouble));
return if (c.malloc(n)) |buf|
@ptrCast(&u8, buf)[0..n]
else
error.OutOfMemory;
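The new assert records an assumption rather than enforcing new behavior: malloc only guarantees alignment suitable for the largest fundamental C type, approximated here as @alignOf(c_longdouble), so cAlloc has to reject larger alignment requests. A hypothetical test sketching what that allows and forbids:

test "c_allocator alignment assumption" {
    if (builtin.link_libc) {
        // fine: @alignOf(u64) is within malloc's fundamental alignment
        var slice = c_allocator.alignedAlloc(u64, @alignOf(u64), 4) catch return;
        c_allocator.free(slice);
        // would trip the new assert: malloc makes no page-alignment promise
        //var page = c_allocator.alignedAlloc(u8, os.page_size, 16) catch return;
    }
}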
@@ -39,82 +40,256 @@ fn cFree(self: &Allocator, old_mem: []u8) void {
c.free(old_ptr);
}
pub const IncrementingAllocator = struct {
/// This allocator makes a syscall directly for every allocation and free.
pub const DirectAllocator = struct {
allocator: Allocator,
bytes: []u8,
end_index: usize,
heap_handle: if (builtin.os == Os.windows) os.windows.HANDLE else void,
heap_handle: ?HeapHandle,
fn init(capacity: usize) !IncrementingAllocator {
const HeapHandle = if (builtin.os == Os.windows) os.windows.HANDLE else void;
//pub const canary_bytes = []u8 {48, 239, 128, 46, 18, 49, 147, 9, 195, 59, 203, 3, 245, 54, 9, 122};
//pub const want_safety = switch (builtin.mode) {
// builtin.Mode.Debug => true,
// builtin.Mode.ReleaseSafe => true,
// else => false,
//};
pub fn init() DirectAllocator {
return DirectAllocator {
.allocator = Allocator {
.allocFn = alloc,
.reallocFn = realloc,
.freeFn = free,
},
.heap_handle = if (builtin.os == Os.windows) null else {},
};
}
pub fn deinit(self: &DirectAllocator) void {
switch (builtin.os) {
Os.linux, Os.macosx, Os.ios => {
const p = os.posix;
const addr = p.mmap(null, capacity, p.PROT_READ|p.PROT_WRITE,
p.MAP_PRIVATE|p.MAP_ANONYMOUS|p.MAP_NORESERVE, -1, 0);
if (addr == p.MAP_FAILED) {
return error.OutOfMemory;
}
return IncrementingAllocator {
.allocator = Allocator {
.allocFn = alloc,
.reallocFn = realloc,
.freeFn = free,
},
.bytes = @intToPtr(&u8, addr)[0..capacity],
.end_index = 0,
.heap_handle = {},
};
Os.windows => if (self.heap_handle) |heap_handle| {
_ = os.windows.HeapDestroy(heap_handle);
},
Os.windows => {
const heap_handle = os.windows.GetProcessHeap() ?? return error.OutOfMemory;
const ptr = os.windows.HeapAlloc(heap_handle, 0, capacity) ?? return error.OutOfMemory;
return IncrementingAllocator {
.allocator = Allocator {
.allocFn = alloc,
.reallocFn = realloc,
.freeFn = free,
},
.bytes = @ptrCast(&u8, ptr)[0..capacity],
.end_index = 0,
.heap_handle = heap_handle,
};
},
else => @compileError("Unsupported OS"),
else => {},
}
}
fn deinit(self: &IncrementingAllocator) void {
switch (builtin.os) {
Os.linux, Os.macosx, Os.ios => {
_ = os.posix.munmap(self.bytes.ptr, self.bytes.len);
},
Os.windows => {
_ = os.windows.HeapFree(self.heap_handle, 0, @ptrCast(os.windows.LPVOID, self.bytes.ptr));
},
else => @compileError("Unsupported OS"),
}
}
fn reset(self: &IncrementingAllocator) void {
self.end_index = 0;
}
fn bytesLeft(self: &const IncrementingAllocator) usize {
return self.bytes.len - self.end_index;
}
fn alloc(allocator: &Allocator, n: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(IncrementingAllocator, "allocator", allocator);
const addr = @ptrToInt(&self.bytes[self.end_index]);
const self = @fieldParentPtr(DirectAllocator, "allocator", allocator);
switch (builtin.os) {
Os.linux, Os.macosx, Os.ios => {
assert(alignment <= os.page_size);
const p = os.posix;
const addr = p.mmap(null, n, p.PROT_READ|p.PROT_WRITE,
p.MAP_PRIVATE|p.MAP_ANONYMOUS, -1, 0);
if (addr == p.MAP_FAILED) {
return error.OutOfMemory;
}
return @intToPtr(&u8, addr)[0..n];
},
Os.windows => {
const amt = n + alignment + @sizeOf(usize);
const heap_handle = self.heap_handle ?? blk: {
const hh = os.windows.HeapCreate(os.windows.HEAP_NO_SERIALIZE, amt, 0) ?? return error.OutOfMemory;
self.heap_handle = hh;
break :blk hh;
};
const ptr = os.windows.HeapAlloc(heap_handle, 0, amt) ?? return error.OutOfMemory;
const root_addr = @ptrToInt(ptr);
const rem = @rem(root_addr, alignment);
const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
const adjusted_addr = root_addr + march_forward_bytes;
const record_addr = adjusted_addr + n;
*@intToPtr(&align(1) usize, record_addr) = root_addr;
return @intToPtr(&u8, adjusted_addr)[0..n];
},
else => @compileError("Unsupported OS"),
}
}
fn realloc(allocator: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(DirectAllocator, "allocator", allocator);
switch (builtin.os) {
Os.linux, Os.macosx, Os.ios => {
if (new_size <= old_mem.len) {
const base_addr = @ptrToInt(old_mem.ptr);
const old_addr_end = base_addr + old_mem.len;
const new_addr_end = base_addr + new_size;
const rem = @rem(new_addr_end, os.page_size);
const new_addr_end_rounded = new_addr_end + if (rem == 0) 0 else (os.page_size - rem);
if (old_addr_end > new_addr_end_rounded) {
_ = os.posix.munmap(@intToPtr(&u8, new_addr_end_rounded), old_addr_end - new_addr_end_rounded);
}
return old_mem[0..new_size];
}
const result = try alloc(allocator, new_size, alignment);
mem.copy(u8, result, old_mem);
return result;
},
Os.windows => {
const old_adjusted_addr = @ptrToInt(old_mem.ptr);
const old_record_addr = old_adjusted_addr + old_mem.len;
const root_addr = *@intToPtr(&align(1) usize, old_record_addr);
const old_ptr = @intToPtr(os.windows.LPVOID, root_addr);
const amt = new_size + alignment + @sizeOf(usize);
const new_ptr = os.windows.HeapReAlloc(??self.heap_handle, 0, old_ptr, amt) ?? blk: {
if (new_size > old_mem.len) return error.OutOfMemory;
const new_record_addr = old_record_addr - new_size + old_mem.len;
*@intToPtr(&align(1) usize, new_record_addr) = root_addr;
return old_mem[0..new_size];
};
const offset = old_adjusted_addr - root_addr;
const new_root_addr = @ptrToInt(new_ptr);
const new_adjusted_addr = new_root_addr + offset;
assert(new_adjusted_addr % alignment == 0);
const new_record_addr = new_adjusted_addr + new_size;
*@intToPtr(&align(1) usize, new_record_addr) = new_root_addr;
return @intToPtr(&u8, new_adjusted_addr)[0..new_size];
},
else => @compileError("Unsupported OS"),
}
}
fn free(allocator: &Allocator, bytes: []u8) void {
const self = @fieldParentPtr(DirectAllocator, "allocator", allocator);
switch (builtin.os) {
Os.linux, Os.macosx, Os.ios => {
_ = os.posix.munmap(bytes.ptr, bytes.len);
},
Os.windows => {
const record_addr = @ptrToInt(bytes.ptr) + bytes.len;
const root_addr = *@intToPtr(&align(1) usize, record_addr);
const ptr = @intToPtr(os.windows.LPVOID, root_addr);
_ = os.windows.HeapFree(??self.heap_handle, 0, ptr);
},
else => @compileError("Unsupported OS"),
}
}
};
/// This allocator takes an existing allocator, wraps it, and provides an interface
/// where you can allocate without freeing, and then free it all together.
pub const ArenaAllocator = struct {
pub allocator: Allocator,
child_allocator: &Allocator,
buffer_list: std.LinkedList([]u8),
end_index: usize,
const BufNode = std.LinkedList([]u8).Node;
pub fn init(child_allocator: &Allocator) ArenaAllocator {
return ArenaAllocator {
.allocator = Allocator {
.allocFn = alloc,
.reallocFn = realloc,
.freeFn = free,
},
.child_allocator = child_allocator,
.buffer_list = std.LinkedList([]u8).init(),
.end_index = 0,
};
}
pub fn deinit(self: &ArenaAllocator) void {
var it = self.buffer_list.first;
while (it) |node| {
// read node.next before the free, because freeing invalidates node
it = node.next;
self.child_allocator.free(node.data);
}
}
fn createNode(self: &ArenaAllocator, prev_len: usize, minimum_size: usize) !&BufNode {
const actual_min_size = minimum_size + @sizeOf(BufNode);
var len = prev_len;
while (true) {
len += len / 2;
len += os.page_size - @rem(len, os.page_size);
if (len >= actual_min_size) break;
}
const buf = try self.child_allocator.alignedAlloc(u8, @alignOf(BufNode), len);
const buf_node_slice = ([]BufNode)(buf[0..@sizeOf(BufNode)]);
const buf_node = &buf_node_slice[0];
*buf_node = BufNode {
.data = buf,
.prev = null,
.next = null,
};
self.buffer_list.append(buf_node);
self.end_index = 0;
return buf_node;
}
fn alloc(allocator: &Allocator, n: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);
var cur_node = if (self.buffer_list.last) |last_node| last_node else try self.createNode(0, n + alignment);
while (true) {
const cur_buf = cur_node.data[@sizeOf(BufNode)..];
const addr = @ptrToInt(cur_buf.ptr) + self.end_index;
const rem = @rem(addr, alignment);
const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
const adjusted_index = self.end_index + march_forward_bytes;
const new_end_index = adjusted_index + n;
if (new_end_index > cur_buf.len) {
cur_node = try self.createNode(cur_buf.len, n + alignment);
continue;
}
const result = cur_buf[adjusted_index .. new_end_index];
self.end_index = new_end_index;
return result;
}
}
fn realloc(allocator: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
if (new_size <= old_mem.len) {
return old_mem[0..new_size];
} else {
const result = try alloc(allocator, new_size, alignment);
mem.copy(u8, result, old_mem);
return result;
}
}
fn free(allocator: &Allocator, bytes: []u8) void { }
};
pub const FixedBufferAllocator = struct {
allocator: Allocator,
end_index: usize,
buffer: []u8,
pub fn init(buffer: []u8) FixedBufferAllocator {
return FixedBufferAllocator {
.allocator = Allocator {
.allocFn = alloc,
.reallocFn = realloc,
.freeFn = free,
},
.buffer = buffer,
.end_index = 0,
};
}
fn alloc(allocator: &Allocator, n: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
const addr = @ptrToInt(&self.buffer[self.end_index]);
const rem = @rem(addr, alignment);
const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
const adjusted_index = self.end_index + march_forward_bytes;
const new_end_index = adjusted_index + n;
if (new_end_index > self.bytes.len) {
if (new_end_index > self.buffer.len) {
return error.OutOfMemory;
}
const result = self.bytes[adjusted_index .. new_end_index];
const result = self.buffer[adjusted_index .. new_end_index];
self.end_index = new_end_index;
return result;
}
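HeapAlloc takes no alignment parameter, so the Windows path over-allocates by alignment + @sizeOf(usize), bumps the returned pointer forward to the next aligned address, and stashes the original root address in the usize slot just past the user block; free and realloc read that slot back. A worked pass through the arithmetic, with hypothetical numbers:

// Suppose n = 100, alignment = 64, and HeapAlloc returns root_addr = 0x10010:
//   amt                 = 100 + 64 + @sizeOf(usize)  (room to align plus one usize record)
//   rem                 = 0x10010 % 64   = 0x10
//   march_forward_bytes = 64 - 0x10      = 0x30
//   adjusted_addr       = 0x10010 + 0x30 = 0x10040   (64-byte aligned)
//   record_addr         = 0x10040 + 100  = 0x100a4   (holds root_addr for later)
// free() recomputes record_addr as bytes.ptr + bytes.len, loads root_addr from it,
// and hands root_addr back to HeapFree.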
@@ -128,11 +303,11 @@ pub const IncrementingAllocator = struct {
}
}
fn free(allocator: &Allocator, bytes: []u8) void {
// Do nothing. That's the point of an incrementing allocator.
}
fn free(allocator: &Allocator, bytes: []u8) void { }
};
test "c_allocator" {
if (builtin.link_libc) {
var slice = c_allocator.alloc(u8, 50) catch return;
@@ -141,23 +316,47 @@ test "c_allocator" {
}
}
test "IncrementingAllocator" {
const total_bytes = 100 * 1024 * 1024;
var inc_allocator = try IncrementingAllocator.init(total_bytes);
defer inc_allocator.deinit();
test "DirectAllocator" {
var direct_allocator = DirectAllocator.init();
defer direct_allocator.deinit();
const allocator = &inc_allocator.allocator;
const slice = try allocator.alloc(&i32, 100);
const allocator = &direct_allocator.allocator;
try testAllocator(allocator);
}
test "ArenaAllocator" {
var direct_allocator = DirectAllocator.init();
defer direct_allocator.deinit();
var arena_allocator = ArenaAllocator.init(&direct_allocator.allocator);
defer arena_allocator.deinit();
try testAllocator(&arena_allocator.allocator);
}
var test_fixed_buffer_allocator_memory: [30000 * @sizeOf(usize)]u8 = undefined;
test "FixedBufferAllocator" {
var fixed_buffer_allocator = FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);
try testAllocator(&fixed_buffer_allocator.allocator);
}
fn testAllocator(allocator: &mem.Allocator) !void {
var slice = try allocator.alloc(&i32, 100);
for (slice) |*item, i| {
*item = try allocator.create(i32);
**item = i32(i);
}
assert(inc_allocator.bytesLeft() == total_bytes - @sizeOf(i32) * 100 - @sizeOf(usize) * 100);
for (slice) |item, i| {
allocator.destroy(item);
}
inc_allocator.reset();
slice = try allocator.realloc(&i32, slice, 20000);
slice = try allocator.realloc(&i32, slice, 50);
slice = try allocator.realloc(&i32, slice, 25);
slice = try allocator.realloc(&i32, slice, 10);
assert(inc_allocator.bytesLeft() == total_bytes);
allocator.free(slice);
}
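For the growth policy in ArenaAllocator.createNode above, a worked iteration may help (assuming os.page_size is 4096; the numbers are illustrative):

// createNode(prev_len = 8192, minimum_size = 100):
//   actual_min_size = 100 + @sizeOf(BufNode)
//   len = 8192
//   pass 1: len += len / 2                 -> 12288
//           len += 4096 - @rem(len, 4096)  -> 16384 (rem is 0, so a whole page is added)
//           16384 >= actual_min_size       -> stop
// The 16 KiB buffer is allocated with @alignOf(BufNode), its first @sizeOf(BufNode)
// bytes hold the list node itself, and end_index restarts at 0.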

View File

@@ -44,6 +44,7 @@ pub const Allocator = struct {
{
const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
const byte_slice = try self.allocFn(self, byte_count, alignment);
assert(byte_slice.len == byte_count);
// This loop should get optimized out in ReleaseFast mode
for (byte_slice) |*byte| {
*byte = undefined;
@@ -65,9 +66,12 @@ pub const Allocator = struct {
const old_byte_slice = ([]u8)(old_mem);
const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
const byte_slice = try self.reallocFn(self, old_byte_slice, byte_count, alignment);
// This loop should get optimized out in ReleaseFast mode
for (byte_slice[old_byte_slice.len..]) |*byte| {
*byte = undefined;
assert(byte_slice.len == byte_count);
if (n > old_mem.len) {
// This loop should get optimized out in ReleaseFast mode
for (byte_slice[old_byte_slice.len..]) |*byte| {
*byte = undefined;
}
}
return ([]T)(@alignCast(alignment, byte_slice));
}
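The reworked realloc wrapper narrows where undefined is written: only the newly grown tail, and only when the request actually grows the slice, since a shrink must leave the surviving bytes intact. In terms of the public API, the contract looks like this (a sketch, not code from the diff):

// var slice = try allocator.alloc(u8, 4);       // all four bytes start undefined
// slice = try allocator.realloc(u8, slice, 8);  // slice[0..4] preserved, slice[4..8] undefined
// slice = try allocator.realloc(u8, slice, 2);  // shrink: slice[0..2] untouched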
@@ -94,6 +98,7 @@ pub const Allocator = struct {
const byte_count = @sizeOf(T) * n;
const byte_slice = self.reallocFn(self, ([]u8)(old_mem), byte_count, alignment) catch unreachable;
assert(byte_slice.len == byte_count);
return ([]align(alignment) T)(@alignCast(alignment, byte_slice));
}
@@ -106,52 +111,6 @@ pub const Allocator = struct {
}
};
pub const FixedBufferAllocator = struct {
allocator: Allocator,
end_index: usize,
buffer: []u8,
pub fn init(buffer: []u8) FixedBufferAllocator {
return FixedBufferAllocator {
.allocator = Allocator {
.allocFn = alloc,
.reallocFn = realloc,
.freeFn = free,
},
.buffer = buffer,
.end_index = 0,
};
}
fn alloc(allocator: &Allocator, n: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
const addr = @ptrToInt(&self.buffer[self.end_index]);
const rem = @rem(addr, alignment);
const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
const adjusted_index = self.end_index + march_forward_bytes;
const new_end_index = adjusted_index + n;
if (new_end_index > self.buffer.len) {
return error.OutOfMemory;
}
const result = self.buffer[adjusted_index .. new_end_index];
self.end_index = new_end_index;
return result;
}
fn realloc(allocator: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
if (new_size <= old_mem.len) {
return old_mem[0..new_size];
} else {
const result = try alloc(allocator, new_size, alignment);
copy(u8, result, old_mem);
return result;
}
}
fn free(allocator: &Allocator, bytes: []u8) void { }
};
/// Copy all of source into dest at position 0.
/// dest.len must be >= source.len.
pub fn copy(comptime T: type, dest: []T, source: []const T) void {

View File

@@ -363,7 +363,7 @@ pub const ChildProcess = struct {
const dev_null_fd = if (any_ignore) blk: {
const dev_null_path = "/dev/null";
var fixed_buffer_mem: [dev_null_path.len + 1]u8 = undefined;
var fixed_allocator = mem.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
break :blk try os.posixOpen(&fixed_allocator.allocator, "/dev/null", posix.O_RDWR, 0);
} else blk: {
break :blk undefined;
@@ -472,7 +472,7 @@ pub const ChildProcess = struct {
const nul_handle = if (any_ignore) blk: {
const nul_file_path = "NUL";
var fixed_buffer_mem: [nul_file_path.len + 1]u8 = undefined;
var fixed_allocator = mem.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
break :blk try os.windowsOpen(&fixed_allocator.allocator, "NUL", windows.GENERIC_READ, windows.FILE_SHARE_READ,
windows.OPEN_EXISTING, windows.FILE_ATTRIBUTE_NORMAL);
} else blk: {
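Both hunks in this file lean on the same idiom: a FixedBufferAllocator over a stack array sized exactly for a known path, so the null-terminated copy the open call needs never touches a general-purpose heap. The idiom in isolation — a sketch in which openDevNull is a hypothetical wrapper and posixOpen is assumed to yield a file descriptor as in the hunk above:

fn openDevNull() !i32 {
    const dev_null_path = "/dev/null";
    var fixed_buffer_mem: [dev_null_path.len + 1]u8 = undefined; // +1 for the null terminator
    var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
    return os.posixOpen(&fixed_allocator.allocator, dev_null_path, posix.O_RDWR, 0);
}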

View File

@@ -1702,12 +1702,12 @@ pub fn openSelfExe() !os.File {
Os.linux => {
const proc_file_path = "/proc/self/exe";
var fixed_buffer_mem: [proc_file_path.len + 1]u8 = undefined;
var fixed_allocator = mem.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
return os.File.openRead(&fixed_allocator.allocator, proc_file_path);
},
Os.macosx, Os.ios => {
var fixed_buffer_mem: [darwin.PATH_MAX * 2]u8 = undefined;
var fixed_allocator = mem.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
const self_exe_path = try selfExePath(&fixed_allocator.allocator);
return os.File.openRead(&fixed_allocator.allocator, self_exe_path);
},

View File

@@ -22,7 +22,7 @@ pub extern "kernel32" stdcallcc fn CreatePipe(hReadPipe: &HANDLE, hWritePipe: &H
pub extern "kernel32" stdcallcc fn CreateProcessA(lpApplicationName: ?LPCSTR, lpCommandLine: LPSTR,
lpProcessAttributes: ?&SECURITY_ATTRIBUTES, lpThreadAttributes: ?&SECURITY_ATTRIBUTES, bInheritHandles: BOOL,
dwCreationFlags: DWORD, lpEnvironment: ?LPVOID, lpCurrentDirectory: ?LPCSTR, lpStartupInfo: &STARTUPINFOA,
dwCreationFlags: DWORD, lpEnvironment: ?&c_void, lpCurrentDirectory: ?LPCSTR, lpStartupInfo: &STARTUPINFOA,
lpProcessInformation: &PROCESS_INFORMATION) BOOL;
pub extern "kernel32" stdcallcc fn CreateSymbolicLinkA(lpSymlinkFileName: LPCSTR, lpTargetFileName: LPCSTR,
@@ -61,16 +61,24 @@ pub extern "kernel32" stdcallcc fn GetFinalPathNameByHandleA(hFile: HANDLE, lpsz
pub extern "kernel32" stdcallcc fn GetProcessHeap() ?HANDLE;
pub extern "kernel32" stdcallcc fn HeapCreate(flOptions: DWORD, dwInitialSize: SIZE_T, dwMaximumSize: SIZE_T) ?HANDLE;
pub extern "kernel32" stdcallcc fn HeapDestroy(hHeap: HANDLE) BOOL;
pub extern "kernel32" stdcallcc fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: &c_void, dwBytes: SIZE_T) ?&c_void;
pub extern "kernel32" stdcallcc fn HeapSize(hHeap: HANDLE, dwFlags: DWORD, lpMem: &const c_void) SIZE_T;
pub extern "kernel32" stdcallcc fn HeapValidate(hHeap: HANDLE, dwFlags: DWORD, lpMem: &const c_void) BOOL;
pub extern "kernel32" stdcallcc fn HeapCompact(hHeap: HANDLE, dwFlags: DWORD) SIZE_T;
pub extern "kernel32" stdcallcc fn HeapSummary(hHeap: HANDLE, dwFlags: DWORD, lpSummary: LPHEAP_SUMMARY) BOOL;
pub extern "kernel32" stdcallcc fn GetStdHandle(in_nStdHandle: DWORD) ?HANDLE;
pub extern "kernel32" stdcallcc fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) ?LPVOID;
pub extern "kernel32" stdcallcc fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) ?&c_void;
pub extern "kernel32" stdcallcc fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID) BOOL;
pub extern "kernel32" stdcallcc fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: &c_void) BOOL;
pub extern "kernel32" stdcallcc fn MoveFileExA(lpExistingFileName: LPCSTR, lpNewFileName: LPCSTR,
dwFlags: DWORD) BOOL;
pub extern "kernel32" stdcallcc fn ReadFile(in_hFile: HANDLE, out_lpBuffer: LPVOID,
pub extern "kernel32" stdcallcc fn ReadFile(in_hFile: HANDLE, out_lpBuffer: &c_void,
in_nNumberOfBytesToRead: DWORD, out_lpNumberOfBytesRead: &DWORD,
in_out_lpOverlapped: ?&OVERLAPPED) BOOL;
@@ -201,7 +209,7 @@ pub const VOLUME_NAME_NT = 0x2;
pub const SECURITY_ATTRIBUTES = extern struct {
nLength: DWORD,
lpSecurityDescriptor: ?LPVOID,
lpSecurityDescriptor: ?&c_void,
bInheritHandle: BOOL,
};
pub const PSECURITY_ATTRIBUTES = &SECURITY_ATTRIBUTES;
@@ -296,3 +304,7 @@ pub const MOVEFILE_WRITE_THROUGH = 8;
pub const FILE_BEGIN = 0;
pub const FILE_CURRENT = 1;
pub const FILE_END = 2;
pub const HEAP_CREATE_ENABLE_EXECUTE = 0x00040000;
pub const HEAP_GENERATE_EXCEPTIONS = 0x00000004;
pub const HEAP_NO_SERIALIZE = 0x00000001;

View File

@@ -1094,7 +1094,7 @@ var fixed_buffer_mem: [100 * 1024]u8 = undefined;
fn fuzzTest(rng: &std.rand.Rand) void {
const array_size = rng.range(usize, 0, 1000);
var fixed_allocator = mem.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
var array = fixed_allocator.allocator.alloc(IdAndValue, array_size) catch unreachable;
// populate with random data
for (array) |*item, index| {

View File

@@ -12,11 +12,17 @@ const warn = std.debug.warn;
pub fn main() !void {
var arg_it = os.args();
// TODO use a more general purpose allocator here
var inc_allocator = try std.heap.IncrementingAllocator.init(40 * 1024 * 1024);
defer inc_allocator.deinit();
// Here we use an ArenaAllocator backed by a DirectAllocator because a build is a short-lived,
// one-shot program. We don't need to waste time freeing memory and finding places to squish
// bytes into. So we free everything all at once at the very end.
const allocator = &inc_allocator.allocator;
var direct_allocator = std.heap.DirectAllocator.init();
defer direct_allocator.deinit();
var arena = std.heap.ArenaAllocator.init(&direct_allocator.allocator);
defer arena.deinit();
const allocator = &arena.allocator;
// skip my own exe name
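The comment's claim cashes out because ArenaAllocator.free is a no-op (see heap.zig above): nothing allocated from the arena ever needs individual cleanup. A sketch of what that permits in a one-shot program like the build runner (the allocations shown are hypothetical):

// const args = try allocator.alloc([]const u8, 32); // hypothetical scratch allocations
// const path = try allocator.alloc(u8, 256);
// ... use them; no allocator.free(args) or allocator.free(path) anywhere ...
// arena.deinit() later returns every buffer to the DirectAllocator in one pass.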

View File

@@ -13,7 +13,7 @@ const io = std.io;
const warn = std.debug.warn;
pub const Parser = struct {
allocator: &mem.Allocator,
util_allocator: &mem.Allocator,
tokenizer: &Tokenizer,
put_back_tokens: [2]Token,
put_back_count: usize,
@@ -21,9 +21,10 @@ pub const Parser = struct {
pub const Tree = struct {
root_node: &ast.NodeRoot,
arena_allocator: std.heap.ArenaAllocator,
pub fn deinit(self: &const Tree) void {
// TODO free the whole arena
pub fn deinit(self: &Tree) void {
self.arena_allocator.deinit();
}
};
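The ownership shift in miniature: the Tree now carries the arena that every AST node lives in, which is why call sites elsewhere in this diff change from const tree to var tree. Usage after this commit — a sketch matching those call sites; the file name is a placeholder:

var parser = Parser.init(&tokenizer, allocator, "example.zig");
defer parser.deinit();         // frees only the parser's utility bytes
var tree = try parser.parse(); // must be var: deinit now takes &Tree, not &const Tree
defer tree.deinit();           // frees the whole AST by freeing its arena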
@@ -33,12 +34,10 @@ pub const Parser = struct {
const utility_bytes_align = @alignOf( union { a: RenderAstFrame, b: State, c: RenderState } );
utility_bytes: []align(utility_bytes_align) u8,
/// `allocator` should be an arena allocator. Parser never calls free on anything. After you're
/// done with a Parser, free the arena. After the arena is freed, no member functions of Parser
/// may be called.
/// allocator must outlive the returned Parser and all the parse trees you create with it.
pub fn init(tokenizer: &Tokenizer, allocator: &mem.Allocator, source_file_name: []const u8) Parser {
return Parser {
.allocator = allocator,
.util_allocator = allocator,
.tokenizer = tokenizer,
.put_back_tokens = undefined,
.put_back_count = 0,
@@ -48,7 +47,7 @@ }
}
pub fn deinit(self: &Parser) void {
self.allocator.free(self.utility_bytes);
self.util_allocator.free(self.utility_bytes);
}
const TopLevelDeclCtx = struct {
@@ -101,8 +100,11 @@ pub const Parser = struct {
var stack = self.initUtilityArrayList(State);
defer self.deinitUtilityArrayList(stack);
const root_node = try self.createRoot();
// TODO errdefer arena free root node
var arena_allocator = std.heap.ArenaAllocator.init(self.util_allocator);
errdefer arena_allocator.deinit();
const arena = &arena_allocator.allocator;
const root_node = try self.createRoot(arena);
try stack.append(State.TopLevel);
@@ -130,7 +132,7 @@ pub const Parser = struct {
stack.append(State { .TopLevelExtern = token }) catch unreachable;
continue;
},
Token.Id.Eof => return Tree {.root_node = root_node},
Token.Id.Eof => return Tree {.root_node = root_node, .arena_allocator = arena_allocator},
else => {
self.putBackToken(token);
stack.append(State { .TopLevelExtern = null }) catch unreachable;
@@ -164,7 +166,7 @@ pub const Parser = struct {
Token.Id.Keyword_var, Token.Id.Keyword_const => {
stack.append(State.TopLevel) catch unreachable;
// TODO shouldn't need these casts
const var_decl_node = try self.createAttachVarDecl(&root_node.decls, ctx.visib_token,
const var_decl_node = try self.createAttachVarDecl(arena, &root_node.decls, ctx.visib_token,
token, (?Token)(null), ctx.extern_token);
try stack.append(State { .VarDecl = var_decl_node });
continue;
@@ -172,7 +174,7 @@ pub const Parser = struct {
Token.Id.Keyword_fn => {
stack.append(State.TopLevel) catch unreachable;
// TODO shouldn't need these casts
const fn_proto = try self.createAttachFnProto(&root_node.decls, token,
const fn_proto = try self.createAttachFnProto(arena, &root_node.decls, token,
ctx.extern_token, (?Token)(null), (?Token)(null), (?Token)(null));
try stack.append(State { .FnDef = fn_proto });
try stack.append(State { .FnProto = fn_proto });
@@ -185,7 +187,7 @@ pub const Parser = struct {
stack.append(State.TopLevel) catch unreachable;
const fn_token = try self.eatToken(Token.Id.Keyword_fn);
// TODO shouldn't need this cast
const fn_proto = try self.createAttachFnProto(&root_node.decls, fn_token,
const fn_proto = try self.createAttachFnProto(arena, &root_node.decls, fn_token,
ctx.extern_token, (?Token)(token), (?Token)(null), (?Token)(null));
try stack.append(State { .FnDef = fn_proto });
try stack.append(State { .FnProto = fn_proto });
@@ -253,13 +255,13 @@ pub const Parser = struct {
const token = self.getNextToken();
switch (token.id) {
Token.Id.Keyword_return => {
try stack.append(State { .PrefixOp = try self.createPrefixOp(token,
try stack.append(State { .PrefixOp = try self.createPrefixOp(arena, token,
ast.NodePrefixOp.PrefixOp.Return) });
try stack.append(State.ExpectOperand);
continue;
},
Token.Id.Ampersand => {
const prefix_op = try self.createPrefixOp(token, ast.NodePrefixOp.PrefixOp{
const prefix_op = try self.createPrefixOp(arena, token, ast.NodePrefixOp.PrefixOp{
.AddrOf = ast.NodePrefixOp.AddrOfInfo {
.align_expr = null,
.bit_offset_start_token = null,
@@ -275,21 +277,21 @@ pub const Parser = struct {
},
Token.Id.Identifier => {
try stack.append(State {
.Operand = &(try self.createIdentifier(token)).base
.Operand = &(try self.createIdentifier(arena, token)).base
});
try stack.append(State.AfterOperand);
continue;
},
Token.Id.IntegerLiteral => {
try stack.append(State {
.Operand = &(try self.createIntegerLiteral(token)).base
.Operand = &(try self.createIntegerLiteral(arena, token)).base
});
try stack.append(State.AfterOperand);
continue;
},
Token.Id.FloatLiteral => {
try stack.append(State {
.Operand = &(try self.createFloatLiteral(token)).base
.Operand = &(try self.createFloatLiteral(arena, token)).base
});
try stack.append(State.AfterOperand);
continue;
@@ -306,14 +308,14 @@ pub const Parser = struct {
switch (token.id) {
Token.Id.EqualEqual => {
try stack.append(State {
.InfixOp = try self.createInfixOp(token, ast.NodeInfixOp.InfixOp.EqualEqual)
.InfixOp = try self.createInfixOp(arena, token, ast.NodeInfixOp.InfixOp.EqualEqual)
});
try stack.append(State.ExpectOperand);
continue;
},
Token.Id.BangEqual => {
try stack.append(State {
.InfixOp = try self.createInfixOp(token, ast.NodeInfixOp.InfixOp.BangEqual)
.InfixOp = try self.createInfixOp(arena, token, ast.NodeInfixOp.InfixOp.BangEqual)
});
try stack.append(State.ExpectOperand);
continue;
@@ -421,7 +423,7 @@ pub const Parser = struct {
if (token.id == Token.Id.RParen) {
continue;
}
const param_decl = try self.createAttachParamDecl(&fn_proto.params);
const param_decl = try self.createAttachParamDecl(arena, &fn_proto.params);
if (token.id == Token.Id.Keyword_comptime) {
param_decl.comptime_token = token;
token = self.getNextToken();
@@ -470,7 +472,7 @@ pub const Parser = struct {
const token = self.getNextToken();
switch(token.id) {
Token.Id.LBrace => {
const block = try self.createBlock(token);
const block = try self.createBlock(arena, token);
fn_proto.body_node = &block.base;
stack.append(State { .Block = block }) catch unreachable;
continue;
@@ -504,7 +506,7 @@ pub const Parser = struct {
const mut_token = self.getNextToken();
if (mut_token.id == Token.Id.Keyword_var or mut_token.id == Token.Id.Keyword_const) {
// TODO shouldn't need these casts
const var_decl = try self.createAttachVarDecl(&block.statements, (?Token)(null),
const var_decl = try self.createAttachVarDecl(arena, &block.statements, (?Token)(null),
mut_token, (?Token)(comptime_token), (?Token)(null));
try stack.append(State { .VarDecl = var_decl });
continue;
@@ -518,7 +520,7 @@ pub const Parser = struct {
const mut_token = self.getNextToken();
if (mut_token.id == Token.Id.Keyword_var or mut_token.id == Token.Id.Keyword_const) {
// TODO shouldn't need these casts
const var_decl = try self.createAttachVarDecl(&block.statements, (?Token)(null),
const var_decl = try self.createAttachVarDecl(arena, &block.statements, (?Token)(null),
mut_token, (?Token)(null), (?Token)(null));
try stack.append(State { .VarDecl = var_decl });
continue;
@@ -541,20 +543,20 @@ pub const Parser = struct {
}
}
fn createRoot(self: &Parser) !&ast.NodeRoot {
const node = try self.allocator.create(ast.NodeRoot);
fn createRoot(self: &Parser, arena: &mem.Allocator) !&ast.NodeRoot {
const node = try arena.create(ast.NodeRoot);
*node = ast.NodeRoot {
.base = ast.Node {.id = ast.Node.Id.Root},
.decls = ArrayList(&ast.Node).init(self.allocator),
.decls = ArrayList(&ast.Node).init(arena),
};
return node;
}
fn createVarDecl(self: &Parser, visib_token: &const ?Token, mut_token: &const Token, comptime_token: &const ?Token,
extern_token: &const ?Token) !&ast.NodeVarDecl
fn createVarDecl(self: &Parser, arena: &mem.Allocator, visib_token: &const ?Token, mut_token: &const Token,
comptime_token: &const ?Token, extern_token: &const ?Token) !&ast.NodeVarDecl
{
const node = try self.allocator.create(ast.NodeVarDecl);
const node = try arena.create(ast.NodeVarDecl);
*node = ast.NodeVarDecl {
.base = ast.Node {.id = ast.Node.Id.VarDecl},
@@ -573,17 +575,17 @@ pub const Parser = struct {
return node;
}
fn createFnProto(self: &Parser, fn_token: &const Token, extern_token: &const ?Token,
fn createFnProto(self: &Parser, arena: &mem.Allocator, fn_token: &const Token, extern_token: &const ?Token,
cc_token: &const ?Token, visib_token: &const ?Token, inline_token: &const ?Token) !&ast.NodeFnProto
{
const node = try self.allocator.create(ast.NodeFnProto);
const node = try arena.create(ast.NodeFnProto);
*node = ast.NodeFnProto {
.base = ast.Node {.id = ast.Node.Id.FnProto},
.visib_token = *visib_token,
.name_token = null,
.fn_token = *fn_token,
.params = ArrayList(&ast.Node).init(self.allocator),
.params = ArrayList(&ast.Node).init(arena),
.return_type = undefined,
.var_args_token = null,
.extern_token = *extern_token,
@@ -596,8 +598,8 @@ pub const Parser = struct {
return node;
}
fn createParamDecl(self: &Parser) !&ast.NodeParamDecl {
const node = try self.allocator.create(ast.NodeParamDecl);
fn createParamDecl(self: &Parser, arena: &mem.Allocator) !&ast.NodeParamDecl {
const node = try arena.create(ast.NodeParamDecl);
*node = ast.NodeParamDecl {
.base = ast.Node {.id = ast.Node.Id.ParamDecl},
@@ -610,20 +612,20 @@ pub const Parser = struct {
return node;
}
fn createBlock(self: &Parser, begin_token: &const Token) !&ast.NodeBlock {
const node = try self.allocator.create(ast.NodeBlock);
fn createBlock(self: &Parser, arena: &mem.Allocator, begin_token: &const Token) !&ast.NodeBlock {
const node = try arena.create(ast.NodeBlock);
*node = ast.NodeBlock {
.base = ast.Node {.id = ast.Node.Id.Block},
.begin_token = *begin_token,
.end_token = undefined,
.statements = ArrayList(&ast.Node).init(self.allocator),
.statements = ArrayList(&ast.Node).init(arena),
};
return node;
}
fn createInfixOp(self: &Parser, op_token: &const Token, op: &const ast.NodeInfixOp.InfixOp) !&ast.NodeInfixOp {
const node = try self.allocator.create(ast.NodeInfixOp);
fn createInfixOp(self: &Parser, arena: &mem.Allocator, op_token: &const Token, op: &const ast.NodeInfixOp.InfixOp) !&ast.NodeInfixOp {
const node = try arena.create(ast.NodeInfixOp);
*node = ast.NodeInfixOp {
.base = ast.Node {.id = ast.Node.Id.InfixOp},
@@ -635,8 +637,8 @@ pub const Parser = struct {
return node;
}
fn createPrefixOp(self: &Parser, op_token: &const Token, op: &const ast.NodePrefixOp.PrefixOp) !&ast.NodePrefixOp {
const node = try self.allocator.create(ast.NodePrefixOp);
fn createPrefixOp(self: &Parser, arena: &mem.Allocator, op_token: &const Token, op: &const ast.NodePrefixOp.PrefixOp) !&ast.NodePrefixOp {
const node = try arena.create(ast.NodePrefixOp);
*node = ast.NodePrefixOp {
.base = ast.Node {.id = ast.Node.Id.PrefixOp},
@@ -647,8 +649,8 @@ pub const Parser = struct {
return node;
}
fn createIdentifier(self: &Parser, name_token: &const Token) !&ast.NodeIdentifier {
const node = try self.allocator.create(ast.NodeIdentifier);
fn createIdentifier(self: &Parser, arena: &mem.Allocator, name_token: &const Token) !&ast.NodeIdentifier {
const node = try arena.create(ast.NodeIdentifier);
*node = ast.NodeIdentifier {
.base = ast.Node {.id = ast.Node.Id.Identifier},
@@ -657,8 +659,8 @@ pub const Parser = struct {
return node;
}
fn createIntegerLiteral(self: &Parser, token: &const Token) !&ast.NodeIntegerLiteral {
const node = try self.allocator.create(ast.NodeIntegerLiteral);
fn createIntegerLiteral(self: &Parser, arena: &mem.Allocator, token: &const Token) !&ast.NodeIntegerLiteral {
const node = try arena.create(ast.NodeIntegerLiteral);
*node = ast.NodeIntegerLiteral {
.base = ast.Node {.id = ast.Node.Id.IntegerLiteral},
@@ -667,8 +669,8 @@ pub const Parser = struct {
return node;
}
fn createFloatLiteral(self: &Parser, token: &const Token) !&ast.NodeFloatLiteral {
const node = try self.allocator.create(ast.NodeFloatLiteral);
fn createFloatLiteral(self: &Parser, arena: &mem.Allocator, token: &const Token) !&ast.NodeFloatLiteral {
const node = try arena.create(ast.NodeFloatLiteral);
*node = ast.NodeFloatLiteral {
.base = ast.Node {.id = ast.Node.Id.FloatLiteral},
@@ -677,31 +679,32 @@ pub const Parser = struct {
return node;
}
fn createAttachIdentifier(self: &Parser, dest_ptr: &const DestPtr, name_token: &const Token) !&ast.NodeIdentifier {
const node = try self.createIdentifier(name_token);
fn createAttachIdentifier(self: &Parser, arena: &mem.Allocator, dest_ptr: &const DestPtr, name_token: &const Token) !&ast.NodeIdentifier {
const node = try self.createIdentifier(arena, name_token);
try dest_ptr.store(&node.base);
return node;
}
fn createAttachParamDecl(self: &Parser, list: &ArrayList(&ast.Node)) !&ast.NodeParamDecl {
const node = try self.createParamDecl();
fn createAttachParamDecl(self: &Parser, arena: &mem.Allocator, list: &ArrayList(&ast.Node)) !&ast.NodeParamDecl {
const node = try self.createParamDecl(arena);
try list.append(&node.base);
return node;
}
fn createAttachFnProto(self: &Parser, list: &ArrayList(&ast.Node), fn_token: &const Token,
fn createAttachFnProto(self: &Parser, arena: &mem.Allocator, list: &ArrayList(&ast.Node), fn_token: &const Token,
extern_token: &const ?Token, cc_token: &const ?Token, visib_token: &const ?Token,
inline_token: &const ?Token) !&ast.NodeFnProto
{
const node = try self.createFnProto(fn_token, extern_token, cc_token, visib_token, inline_token);
const node = try self.createFnProto(arena, fn_token, extern_token, cc_token, visib_token, inline_token);
try list.append(&node.base);
return node;
}
fn createAttachVarDecl(self: &Parser, list: &ArrayList(&ast.Node), visib_token: &const ?Token,
mut_token: &const Token, comptime_token: &const ?Token, extern_token: &const ?Token) !&ast.NodeVarDecl
fn createAttachVarDecl(self: &Parser, arena: &mem.Allocator, list: &ArrayList(&ast.Node),
visib_token: &const ?Token, mut_token: &const Token, comptime_token: &const ?Token,
extern_token: &const ?Token) !&ast.NodeVarDecl
{
const node = try self.createVarDecl(visib_token, mut_token, comptime_token, extern_token);
const node = try self.createVarDecl(arena, visib_token, mut_token, comptime_token, extern_token);
try list.append(&node.base);
return node;
}
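Every create*/createAttach* helper in this hunk gets the same mechanical rewrite: accept the arena as a parameter and allocate the node, plus any ArrayList it owns, from the arena instead of self.allocator. The shared shape, shown with a hypothetical node type:

fn createFoo(self: &Parser, arena: &mem.Allocator, token: &const Token) !&ast.NodeFoo {
    const node = try arena.create(ast.NodeFoo); // node now lives exactly as long as the Tree
    *node = ast.NodeFoo {
        .base = ast.Node {.id = ast.Node.Id.Foo},
        .token = *token,
    };
    return node;
}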
@@ -1018,10 +1021,10 @@ pub const Parser = struct {
fn initUtilityArrayList(self: &Parser, comptime T: type) ArrayList(T) {
const new_byte_count = self.utility_bytes.len - self.utility_bytes.len % @sizeOf(T);
self.utility_bytes = self.allocator.alignedShrink(u8, utility_bytes_align, self.utility_bytes, new_byte_count);
self.utility_bytes = self.util_allocator.alignedShrink(u8, utility_bytes_align, self.utility_bytes, new_byte_count);
const typed_slice = ([]T)(self.utility_bytes);
return ArrayList(T) {
.allocator = self.allocator,
.allocator = self.util_allocator,
.items = typed_slice,
.len = 0,
};
@@ -1043,7 +1046,7 @@ fn testParse(source: []const u8, allocator: &mem.Allocator) ![]u8 {
var parser = Parser.init(&tokenizer, allocator, "(memory buffer)");
defer parser.deinit();
const tree = try parser.parse();
var tree = try parser.parse();
defer tree.deinit();
var buffer = try std.Buffer.initSize(allocator, 0);
@@ -1057,7 +1060,7 @@ fn testParse(source: []const u8, allocator: &mem.Allocator) ![]u8 {
fn testCanonical(source: []const u8) !void {
const needed_alloc_count = x: {
// Try it once with unlimited memory, make sure it works
var fixed_allocator = mem.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
var failing_allocator = std.debug.FailingAllocator.init(&fixed_allocator.allocator, @maxValue(usize));
const result_source = try testParse(source, &failing_allocator.allocator);
if (!mem.eql(u8, result_source, source)) {
@@ -1074,7 +1077,7 @@ fn testCanonical(source: []const u8) !void {
var fail_index: usize = 0;
while (fail_index < needed_alloc_count) : (fail_index += 1) {
var fixed_allocator = mem.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
var failing_allocator = std.debug.FailingAllocator.init(&fixed_allocator.allocator, fail_index);
if (testParse(source, &failing_allocator.allocator)) |_| {
return error.NondeterministicMemoryUsage;
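The harness above is a deterministic out-of-memory test: one unrestricted run counts how many allocations a successful parse makes, then the loop replays the parse failing at each allocation index in turn, so every OutOfMemory path gets exercised. This relies on FailingAllocator behaving roughly as follows (a sketch of the assumed semantics, not its source):

// var failing = std.debug.FailingAllocator.init(child_allocator, 3);
// allocations 1 through 3 are forwarded to child_allocator and succeed;
// allocation 4 and every one after it return error.OutOfMemory.
// Passing @maxValue(usize) means the wrapper never fails and simply counts,
// which is how needed_alloc_count is measured in the first run.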

View File

@@ -182,10 +182,13 @@ pub fn main() !void {
var stdin_file = try io.getStdIn();
var stdout_file = try io.getStdOut();
var inc_allocator = try std.heap.IncrementingAllocator.init(2 * 1024 * 1024);
defer inc_allocator.deinit();
var direct_allocator = std.heap.DirectAllocator.init();
defer direct_allocator.deinit();
global_allocator = &inc_allocator.allocator;
var arena = std.heap.ArenaAllocator.init(&direct_allocator.allocator);
defer arena.deinit();
global_allocator = &arena.allocator;
var stdin_buf = try Buffer.initSize(global_allocator, 0);
defer stdin_buf.deinit();