rework self-hosted compiler for incremental builds
* introduce std.ArrayListUnmanaged for when you have the allocator stored elsewhere
* move std.heap.ArenaAllocator implementation to its own file. extract the main state into std.heap.ArenaAllocator.State, which can be stored as an alternative to storing the entire ArenaAllocator, saving 24 bytes per ArenaAllocator on 64 bit targets.
* std.LinkedList.Node pointer field now defaults to being null initialized.
* Rework self-hosted compiler Package API
* Delete almost all the bitrotted self-hosted compiler code. The only bitrotted code left is in main.zig and compilation.zig.
* Add call instruction to ZIR
* self-hosted compiler ir API and link API are reworked to support a long-running compiler that incrementally updates declarations
* Introduce the concept of scopes to ZIR semantic analysis
* ZIR text format supports referencing named decls that are declared later in the file
* Figure out how memory management works for the long-running compiler and incremental compilation (see the sketch below). The main roots are top level declarations. There is a table of decls. The key is a cryptographic hash of the fully qualified decl name. Each decl has an arena allocator where all of the memory related to that decl is stored. Each code block has its own arena allocator for the lifetime of the block. Values that want to survive when going out of scope in a block must get copied into the outer block. Finally, values must get copied into the Decl arena to be long-lived.
* Delete the unused MemoryCell struct. Instead, comptime pointers are based on references to Decl structs.
* Figure out how caching works. Each Decl will store a set of other Decls which must be recompiled when it changes.

This branch is still work-in-progress; this commit breaks the build.
parent ae080b5c21
commit a32d3a85d2
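The memory-management bullet above is the heart of the design. As a rough sketch only (the type and field names here are illustrative assumptions, not this commit's actual API), the long-lived state per declaration could look like this:

const std = @import("std");

/// Illustrative sketch: one entry in the table of top level declarations.
const Decl = struct {
    /// Table key: a cryptographic hash of the fully qualified decl name.
    name_hash: [32]u8,
    /// All memory belonging to this decl lives in this arena, so re-analyzing
    /// a changed decl can free everything it owned in one shot.
    arena: std.heap.ArenaAllocator.State,
    /// For caching: the set of decls that must be recompiled when this one changes.
    dependants: std.AutoHashMap(*Decl, void),
};

Block-scoped values survive a scope only by being copied into the enclosing block's arena, and ultimately into the Decl arena to become long-lived.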
@@ -8,13 +8,13 @@ const Allocator = mem.Allocator;
/// A contiguous, growable list of items in memory.
/// This is a wrapper around an array of T values. Initialize with `init`.
pub fn ArrayList(comptime T: type) type {
return AlignedArrayList(T, null);
return ArrayListAligned(T, null);
}

pub fn AlignedArrayList(comptime T: type, comptime alignment: ?u29) type {
pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
if (alignment) |a| {
if (a == @alignOf(T)) {
return AlignedArrayList(T, null);
return ArrayListAligned(T, null);
}
}
return struct {
@@ -76,6 +76,10 @@ pub fn AlignedArrayList(comptime T: type, comptime alignment: ?u29) type {
};
}

pub fn toUnmanaged(self: Self) ArrayListAlignedUnmanaged(T, alignment) {
return .{ .items = self.items, .capacity = self.capacity };
}

/// The caller owns the returned memory. ArrayList becomes empty.
pub fn toOwnedSlice(self: *Self) Slice {
const allocator = self.allocator;
@@ -84,8 +88,8 @@ pub fn AlignedArrayList(comptime T: type, comptime alignment: ?u29) type {
return result;
}

/// Insert `item` at index `n`. Moves `list[n .. list.len]`
/// to make room.
/// Insert `item` at index `n` by moving `list[n .. list.len]` to make room.
/// This operation is O(N).
pub fn insert(self: *Self, n: usize, item: T) !void {
try self.ensureCapacity(self.items.len + 1);
self.items.len += 1;
@@ -94,8 +98,7 @@ pub fn AlignedArrayList(comptime T: type, comptime alignment: ?u29) type {
self.items[n] = item;
}

/// Insert slice `items` at index `i`. Moves
/// `list[i .. list.len]` to make room.
/// Insert slice `items` at index `i` by moving `list[i .. list.len]` to make room.
/// This operation is O(N).
pub fn insertSlice(self: *Self, i: usize, items: SliceConst) !void {
try self.ensureCapacity(self.items.len + items.len);
@@ -259,6 +262,232 @@ pub fn AlignedArrayList(comptime T: type, comptime alignment: ?u29) type {
};
}

/// Bring-your-own allocator with every function call.
/// Initialize directly and deinitialize with `deinit` or use `toOwnedSlice`.
pub fn init() Self {
return .{
.items = &[_]T{},
.capacity = 0,
};
}

pub fn ArrayListUnmanaged(comptime T: type) type {
return ArrayListAlignedUnmanaged(T, null);
}

pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) type {
if (alignment) |a| {
if (a == @alignOf(T)) {
return ArrayListAlignedUnmanaged(T, null);
}
}
return struct {
const Self = @This();

/// Content of the ArrayList.
items: Slice = &[_]T{},
capacity: usize = 0,

pub const Slice = if (alignment) |a| ([]align(a) T) else []T;
pub const SliceConst = if (alignment) |a| ([]align(a) const T) else []const T;

/// Initialize with capacity to hold at least num elements.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
pub fn initCapacity(allocator: *Allocator, num: usize) !Self {
var self = Self.init(allocator);
try self.ensureCapacity(allocator, num);
return self;
}

/// Release all allocated memory.
pub fn deinit(self: *Self, allocator: *Allocator) void {
allocator.free(self.allocatedSlice());
self.* = undefined;
}

pub fn toManaged(self: *Self, allocator: *Allocator) ArrayListAligned(T, alignment) {
return .{ .items = self.items, .capacity = self.capacity, .allocator = allocator };
}

/// The caller owns the returned memory. ArrayList becomes empty.
pub fn toOwnedSlice(self: *Self, allocator: *Allocator) Slice {
const result = allocator.shrink(self.allocatedSlice(), self.items.len);
self.* = init(allocator);
return result;
}

/// Insert `item` at index `n`. Moves `list[n .. list.len]`
/// to make room.
pub fn insert(self: *Self, allocator: *Allocator, n: usize, item: T) !void {
try self.ensureCapacity(allocator, self.items.len + 1);
self.items.len += 1;

mem.copyBackwards(T, self.items[n + 1 .. self.items.len], self.items[n .. self.items.len - 1]);
self.items[n] = item;
}

/// Insert slice `items` at index `i`. Moves
/// `list[i .. list.len]` to make room.
/// This operation is O(N).
pub fn insertSlice(self: *Self, allocator: *Allocator, i: usize, items: SliceConst) !void {
try self.ensureCapacity(allocator, self.items.len + items.len);
self.items.len += items.len;

mem.copyBackwards(T, self.items[i + items.len .. self.items.len], self.items[i .. self.items.len - items.len]);
mem.copy(T, self.items[i .. i + items.len], items);
}

/// Extend the list by 1 element. Allocates more memory as necessary.
pub fn append(self: *Self, allocator: *Allocator, item: T) !void {
const new_item_ptr = try self.addOne(allocator);
new_item_ptr.* = item;
}

/// Extend the list by 1 element, but asserting `self.capacity`
/// is sufficient to hold an additional item.
pub fn appendAssumeCapacity(self: *Self, item: T) void {
const new_item_ptr = self.addOneAssumeCapacity();
new_item_ptr.* = item;
}

/// Remove the element at index `i` from the list and return its value.
/// Asserts the array has at least one item.
/// This operation is O(N).
pub fn orderedRemove(self: *Self, i: usize) T {
const newlen = self.items.len - 1;
if (newlen == i) return self.pop();

const old_item = self.items[i];
for (self.items[i..newlen]) |*b, j| b.* = self.items[i + 1 + j];
self.items[newlen] = undefined;
self.items.len = newlen;
return old_item;
}

/// Removes the element at the specified index and returns it.
/// The empty slot is filled from the end of the list.
/// This operation is O(1).
pub fn swapRemove(self: *Self, i: usize) T {
if (self.items.len - 1 == i) return self.pop();

const old_item = self.items[i];
self.items[i] = self.pop();
return old_item;
}

/// Append the slice of items to the list. Allocates more
/// memory as necessary.
pub fn appendSlice(self: *Self, allocator: *Allocator, items: SliceConst) !void {
const oldlen = self.items.len;
const newlen = self.items.len + items.len;

try self.ensureCapacity(allocator, newlen);
self.items.len = newlen;
mem.copy(T, self.items[oldlen..], items);
}

/// Same as `append` except it returns the number of bytes written, which is always the same
/// as `m.len`. The purpose of this function existing is to match `std.io.OutStream` API.
/// This function may be called only when `T` is `u8`.
fn appendWrite(self: *Self, allocator: *Allocator, m: []const u8) !usize {
try self.appendSlice(allocator, m);
return m.len;
}

/// Append a value to the list `n` times.
/// Allocates more memory as necessary.
pub fn appendNTimes(self: *Self, allocator: *Allocator, value: T, n: usize) !void {
const old_len = self.items.len;
try self.resize(self.items.len + n);
mem.set(T, self.items[old_len..self.items.len], value);
}

/// Adjust the list's length to `new_len`.
/// Does not initialize added items if any.
pub fn resize(self: *Self, allocator: *Allocator, new_len: usize) !void {
try self.ensureCapacity(allocator, new_len);
self.items.len = new_len;
}

/// Reduce allocated capacity to `new_len`.
/// Invalidates element pointers.
pub fn shrink(self: *Self, allocator: *Allocator, new_len: usize) void {
assert(new_len <= self.items.len);

self.items = allocator.realloc(self.allocatedSlice(), new_len) catch |e| switch (e) {
error.OutOfMemory => { // no problem, capacity is still correct then.
self.items.len = new_len;
return;
},
};
self.capacity = new_len;
}

pub fn ensureCapacity(self: *Self, allocator: *Allocator, new_capacity: usize) !void {
var better_capacity = self.capacity;
if (better_capacity >= new_capacity) return;

while (true) {
better_capacity += better_capacity / 2 + 8;
if (better_capacity >= new_capacity) break;
}

const new_memory = try allocator.realloc(self.allocatedSlice(), better_capacity);
self.items.ptr = new_memory.ptr;
self.capacity = new_memory.len;
}

/// Increases the array's length to match the full capacity that is already allocated.
/// The new elements have `undefined` values.
/// This operation does not invalidate any element pointers.
pub fn expandToCapacity(self: *Self) void {
self.items.len = self.capacity;
}

/// Increase length by 1, returning pointer to the new item.
/// The returned pointer becomes invalid when the list is resized.
pub fn addOne(self: *Self, allocator: *Allocator) !*T {
const newlen = self.items.len + 1;
try self.ensureCapacity(allocator, newlen);
return self.addOneAssumeCapacity();
}

/// Increase length by 1, returning pointer to the new item.
/// Asserts that there is already space for the new item without allocating more.
/// The returned pointer becomes invalid when the list is resized.
/// This operation does not invalidate any element pointers.
pub fn addOneAssumeCapacity(self: *Self) *T {
assert(self.items.len < self.capacity);

self.items.len += 1;
return &self.items[self.items.len - 1];
}

/// Remove and return the last element from the list.
/// Asserts the list has at least one item.
/// This operation does not invalidate any element pointers.
pub fn pop(self: *Self) T {
const val = self.items[self.items.len - 1];
self.items.len -= 1;
return val;
}

/// Remove and return the last element from the list.
/// If the list is empty, returns `null`.
/// This operation does not invalidate any element pointers.
pub fn popOrNull(self: *Self) ?T {
if (self.items.len == 0) return null;
return self.pop();
}

/// For a nicer API, `items.len` is the length, not the capacity.
/// This requires "unsafe" slicing.
fn allocatedSlice(self: Self) Slice {
return self.items.ptr[0..self.capacity];
}
};
}

test "std.ArrayList.init" {
var list = ArrayList(i32).init(testing.allocator);
defer list.deinit();
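The array_list.zig changes above add the unmanaged variant alongside the managed one. A minimal usage sketch, assuming the API exactly as shown in the diff (the unmanaged list stores no allocator, so every allocating call takes one as a parameter):

const std = @import("std");

test "ArrayListUnmanaged sketch" {
    const gpa = std.testing.allocator;
    // Default initialization works because both fields have defaults.
    var list: std.ArrayListUnmanaged(u32) = .{};
    defer list.deinit(gpa);
    try list.append(gpa, 1);
    try list.appendSlice(gpa, &[_]u32{ 2, 3 });
    std.testing.expectEqual(@as(usize, 3), list.items.len);
}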
@@ -11,6 +11,7 @@ const maxInt = std.math.maxInt;

pub const LoggingAllocator = @import("heap/logging_allocator.zig").LoggingAllocator;
pub const loggingAllocator = @import("heap/logging_allocator.zig").loggingAllocator;
pub const ArenaAllocator = @import("heap/arena_allocator.zig").ArenaAllocator;

const Allocator = mem.Allocator;

@@ -510,95 +511,6 @@ pub const HeapAllocator = switch (builtin.os.tag) {
else => @compileError("Unsupported OS"),
};

/// This allocator takes an existing allocator, wraps it, and provides an interface
/// where you can allocate without freeing, and then free it all together.
pub const ArenaAllocator = struct {
allocator: Allocator,

child_allocator: *Allocator,
buffer_list: std.SinglyLinkedList([]u8),
end_index: usize,

const BufNode = std.SinglyLinkedList([]u8).Node;

pub fn init(child_allocator: *Allocator) ArenaAllocator {
return ArenaAllocator{
.allocator = Allocator{
.reallocFn = realloc,
.shrinkFn = shrink,
},
.child_allocator = child_allocator,
.buffer_list = std.SinglyLinkedList([]u8).init(),
.end_index = 0,
};
}

pub fn deinit(self: ArenaAllocator) void {
var it = self.buffer_list.first;
while (it) |node| {
// this has to occur before the free because the free frees node
const next_it = node.next;
self.child_allocator.free(node.data);
it = next_it;
}
}

fn createNode(self: *ArenaAllocator, prev_len: usize, minimum_size: usize) !*BufNode {
const actual_min_size = minimum_size + @sizeOf(BufNode);
var len = prev_len;
while (true) {
len += len / 2;
len += mem.page_size - @rem(len, mem.page_size);
if (len >= actual_min_size) break;
}
const buf = try self.child_allocator.alignedAlloc(u8, @alignOf(BufNode), len);
const buf_node_slice = mem.bytesAsSlice(BufNode, buf[0..@sizeOf(BufNode)]);
const buf_node = &buf_node_slice[0];
buf_node.* = BufNode{
.data = buf,
.next = null,
};
self.buffer_list.prepend(buf_node);
self.end_index = 0;
return buf_node;
}

fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);

var cur_node = if (self.buffer_list.first) |first_node| first_node else try self.createNode(0, n + alignment);
while (true) {
const cur_buf = cur_node.data[@sizeOf(BufNode)..];
const addr = @ptrToInt(cur_buf.ptr) + self.end_index;
const adjusted_addr = mem.alignForward(addr, alignment);
const adjusted_index = self.end_index + (adjusted_addr - addr);
const new_end_index = adjusted_index + n;
if (new_end_index > cur_buf.len) {
cur_node = try self.createNode(cur_buf.len, n + alignment);
continue;
}
const result = cur_buf[adjusted_index..new_end_index];
self.end_index = new_end_index;
return result;
}
}

fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
if (new_size <= old_mem.len and new_align <= new_size) {
// We can't do anything with the memory, so tell the client to keep it.
return error.OutOfMemory;
} else {
const result = try alloc(allocator, new_size, new_align);
@memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
return result;
}
}

fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
return old_mem[0..new_size];
}
};

pub const FixedBufferAllocator = struct {
allocator: Allocator,
end_index: usize,
lib/std/heap/arena_allocator.zig (new file, 102 lines)
@@ -0,0 +1,102 @@
const std = @import("../std.zig");
const assert = std.debug.assert;
const mem = std.mem;
const Allocator = std.mem.Allocator;

/// This allocator takes an existing allocator, wraps it, and provides an interface
/// where you can allocate without freeing, and then free it all together.
pub const ArenaAllocator = struct {
allocator: Allocator,

child_allocator: *Allocator,
state: State,

/// Inner state of ArenaAllocator. Can be stored rather than the entire ArenaAllocator
/// as a memory-saving optimization.
pub const State = struct {
buffer_list: std.SinglyLinkedList([]u8) = @as(std.SinglyLinkedList([]u8), .{}),
end_index: usize = 0,

pub fn promote(self: State, child_allocator: *Allocator) ArenaAllocator {
return .{
.allocator = Allocator{
.reallocFn = realloc,
.shrinkFn = shrink,
},
.child_allocator = child_allocator,
.state = self,
};
}
};

const BufNode = std.SinglyLinkedList([]u8).Node;

pub fn init(child_allocator: *Allocator) ArenaAllocator {
return (State{}).promote(child_allocator);
}

pub fn deinit(self: ArenaAllocator) void {
var it = self.state.buffer_list.first;
while (it) |node| {
// this has to occur before the free because the free frees node
const next_it = node.next;
self.child_allocator.free(node.data);
it = next_it;
}
}

fn createNode(self: *ArenaAllocator, prev_len: usize, minimum_size: usize) !*BufNode {
const actual_min_size = minimum_size + @sizeOf(BufNode);
var len = prev_len;
while (true) {
len += len / 2;
len += mem.page_size - @rem(len, mem.page_size);
if (len >= actual_min_size) break;
}
const buf = try self.child_allocator.alignedAlloc(u8, @alignOf(BufNode), len);
const buf_node_slice = mem.bytesAsSlice(BufNode, buf[0..@sizeOf(BufNode)]);
const buf_node = &buf_node_slice[0];
buf_node.* = BufNode{
.data = buf,
.next = null,
};
self.state.buffer_list.prepend(buf_node);
self.state.end_index = 0;
return buf_node;
}

fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);

var cur_node = if (self.state.buffer_list.first) |first_node| first_node else try self.createNode(0, n + alignment);
while (true) {
const cur_buf = cur_node.data[@sizeOf(BufNode)..];
const addr = @ptrToInt(cur_buf.ptr) + self.state.end_index;
const adjusted_addr = mem.alignForward(addr, alignment);
const adjusted_index = self.state.end_index + (adjusted_addr - addr);
const new_end_index = adjusted_index + n;
if (new_end_index > cur_buf.len) {
cur_node = try self.createNode(cur_buf.len, n + alignment);
continue;
}
const result = cur_buf[adjusted_index..new_end_index];
self.state.end_index = new_end_index;
return result;
}
}

fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
if (new_size <= old_mem.len and new_align <= new_size) {
// We can't do anything with the memory, so tell the client to keep it.
return error.OutOfMemory;
} else {
const result = try alloc(allocator, new_size, new_align);
@memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
return result;
}
}

fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
return old_mem[0..new_size];
}
};
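The point of `State` is that long-lived objects can embed just the two-field state and re-attach the child allocator on demand, saving the 24 bytes per arena that the commit message cites. A sketch of the intended pattern, mirroring how `ir.Module.deinit` uses it later in this diff:

const std = @import("std");

fn sketch(gpa: *std.mem.Allocator) !void {
    // Store only the state...
    var state = std.heap.ArenaAllocator.State{};
    {
        // ...and promote it to a full ArenaAllocator only while allocating.
        var arena = state.promote(gpa);
        defer state = arena.state; // capture the updated state
        _ = try arena.allocator.alloc(u8, 100);
    }
    // Later: free everything owned by the arena in one call.
    state.promote(gpa).deinit();
}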
@@ -49,7 +49,7 @@ pub fn SinglyLinkedList(comptime T: type) type {
}
};

first: ?*Node,
first: ?*Node = null,

/// Initialize a linked list.
///
@@ -1,6 +1,8 @@
pub const AlignedArrayList = @import("array_list.zig").AlignedArrayList;
pub const ArrayList = @import("array_list.zig").ArrayList;
pub const ArrayListAligned = @import("array_list.zig").ArrayListAligned;
pub const ArrayListAlignedUnmanaged = @import("array_list.zig").ArrayListAlignedUnmanaged;
pub const ArrayListSentineled = @import("array_list_sentineled.zig").ArrayListSentineled;
pub const ArrayListUnmanaged = @import("array_list.zig").ArrayListUnmanaged;
pub const AutoHashMap = @import("hash_map.zig").AutoHashMap;
pub const BloomFilter = @import("bloom_filter.zig").BloomFilter;
pub const BufMap = @import("buf_map.zig").BufMap;
src-self-hosted/Package.zig (new file, 52 lines)
@@ -0,0 +1,52 @@
pub const Table = std.StringHashMap(*Package);

root_src_dir: std.fs.Dir,
/// Relative to `root_src_dir`.
root_src_path: []const u8,
table: Table,

/// No references to `root_src_dir` and `root_src_path` are kept.
pub fn create(
allocator: *mem.Allocator,
base_dir: std.fs.Dir,
/// Relative to `base_dir`.
root_src_dir: []const u8,
/// Relative to `root_src_dir`.
root_src_path: []const u8,
) !*Package {
const ptr = try allocator.create(Package);
errdefer allocator.destroy(ptr);
const root_src_path_dupe = try mem.dupe(allocator, u8, root_src_path);
errdefer allocator.free(root_src_path_dupe);
ptr.* = .{
.root_src_dir = try base_dir.openDir(root_src_dir, .{}),
.root_src_path = root_src_path_dupe,
.table = Table.init(allocator),
};
return ptr;
}

pub fn destroy(self: *Package) void {
const allocator = self.table.allocator;
self.root_src_dir.close();
allocator.free(self.root_src_path);
{
var it = self.table.iterator();
while (it.next()) |kv| {
allocator.free(kv.key);
}
}
self.table.deinit();
allocator.destroy(self);
}

pub fn add(self: *Package, name: []const u8, package: *Package) !void {
const name_dupe = try mem.dupe(self.table.allocator, u8, name);
errdefer self.table.allocator.deinit(name_dupe);
const entry = try self.table.put(name_dupe, package);
assert(entry == null);
}

const std = @import("std");
const mem = std.mem;
const assert = std.debug.assert;
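A hedged usage sketch of the new Package API above (the paths are invented for illustration; Package.zig is a file-as-struct, so importing the file yields the type):

const std = @import("std");
const Package = @import("Package.zig");

fn setupPackages(gpa: *std.mem.Allocator) !*Package {
    const root = try Package.create(gpa, std.fs.cwd(), "src", "main.zig");
    errdefer root.destroy();
    const other = try Package.create(gpa, std.fs.cwd(), "lib", "other.zig");
    // `add` dupes the name, so the caller keeps ownership of "other".
    try root.add("other", other);
    return root;
}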
@@ -1,7 +0,0 @@
pub usingnamespace @cImport({
@cDefine("__STDC_CONSTANT_MACROS", "");
@cDefine("__STDC_LIMIT_MACROS", "");
@cInclude("inttypes.h");
@cInclude("config.h");
@cInclude("zig_llvm.h");
});
@@ -6,38 +6,24 @@ const Type = @import("type.zig").Type;
const Value = @import("value.zig").Value;
const Target = std.Target;

pub const ErrorMsg = struct {
byte_offset: usize,
msg: []const u8,
};

pub const Symbol = struct {
errors: []ErrorMsg,

pub fn deinit(self: *Symbol, allocator: *mem.Allocator) void {
for (self.errors) |err| {
allocator.free(err.msg);
}
allocator.free(self.errors);
self.* = undefined;
}
};

pub fn generateSymbol(typed_value: ir.TypedValue, module: ir.Module, code: *std.ArrayList(u8)) !Symbol {
pub fn generateSymbol(
typed_value: ir.TypedValue,
module: ir.Module,
code: *std.ArrayList(u8),
errors: *std.ArrayList(ir.ErrorMsg),
) !void {
switch (typed_value.ty.zigTypeTag()) {
.Fn => {
const index = typed_value.val.cast(Value.Payload.Function).?.index;
const module_fn = module.fns[index];
const module_fn = typed_value.val.cast(Value.Payload.Function).?.func;

var function = Function{
.module = &module,
.mod_fn = &module_fn,
.mod_fn = module_fn,
.code = code,
.inst_table = std.AutoHashMap(*ir.Inst, Function.MCValue).init(code.allocator),
.errors = std.ArrayList(ErrorMsg).init(code.allocator),
.errors = errors,
};
defer function.inst_table.deinit();
defer function.errors.deinit();

for (module_fn.body.instructions) |inst| {
const new_inst = function.genFuncInst(inst) catch |err| switch (err) {
@@ -52,7 +38,7 @@ pub fn generateSymbol(typed_value: ir.TypedValue, module: ir.Module, code: *std.

return Symbol{ .errors = function.errors.toOwnedSlice() };
},
else => @panic("TODO implement generateSymbol for non-function types"),
else => @panic("TODO implement generateSymbol for non-function decls"),
}
}

@@ -61,7 +47,7 @@ const Function = struct {
mod_fn: *const ir.Module.Fn,
code: *std.ArrayList(u8),
inst_table: std.AutoHashMap(*ir.Inst, MCValue),
errors: std.ArrayList(ErrorMsg),
errors: *std.ArrayList(ir.ErrorMsg),

const MCValue = union(enum) {
none,
@@ -78,6 +64,7 @@ const Function = struct {
fn genFuncInst(self: *Function, inst: *ir.Inst) !MCValue {
switch (inst.tag) {
.breakpoint => return self.genBreakpoint(inst.src),
.call => return self.genCall(inst.cast(ir.Inst.Call).?),
.unreach => return MCValue{ .unreach = {} },
.constant => unreachable, // excluded from function bodies
.assembly => return self.genAsm(inst.cast(ir.Inst.Assembly).?),
@@ -101,6 +88,13 @@ const Function = struct {
return .unreach;
}

fn genCall(self: *Function, inst: *ir.Inst.Call) !MCValue {
switch (self.module.target.cpu.arch) {
else => return self.fail(inst.base.src, "TODO implement call for {}", .{self.module.target.cpu.arch}),
}
return .unreach;
}

fn genRet(self: *Function, inst: *ir.Inst.Ret) !MCValue {
switch (self.module.target.cpu.arch) {
.i386, .x86_64 => {
@@ -140,6 +134,7 @@ const Function = struct {
fn genRelativeFwdJump(self: *Function, src: usize, amount: u32) !void {
switch (self.module.target.cpu.arch) {
.i386, .x86_64 => {
// TODO x86 treats the operands as signed
if (amount <= std.math.maxInt(u8)) {
try self.code.resize(self.code.items.len + 2);
self.code.items[self.code.items.len - 2] = 0xeb;
@@ -433,14 +428,11 @@ const Function = struct {

fn fail(self: *Function, src: usize, comptime format: []const u8, args: var) error{ CodegenFail, OutOfMemory } {
@setCold(true);
const msg = try std.fmt.allocPrint(self.errors.allocator, format, args);
{
errdefer self.errors.allocator.free(msg);
(try self.errors.addOne()).* = .{
.byte_offset = src,
.msg = msg,
};
}
try self.errors.ensureCapacity(self.errors.items.len + 1);
self.errors.appendAssumeCapacity(.{
.byte_offset = src,
.msg = try std.fmt.allocPrint(self.errors.allocator, format, args),
});
return error.CodegenFail;
}
};
@@ -19,7 +19,6 @@ const AtomicOrder = builtin.AtomicOrder;
const Scope = @import("scope.zig").Scope;
const Decl = @import("decl.zig").Decl;
const ir = @import("ir.zig");
const Visib = @import("visib.zig").Visib;
const Value = @import("value.zig").Value;
const Type = Value.Type;
const Span = errmsg.Span;
@@ -30,7 +29,11 @@ const link = @import("link.zig").link;
const LibCInstallation = @import("libc_installation.zig").LibCInstallation;
const CInt = @import("c_int.zig").CInt;
const fs = std.fs;
const util = @import("util.zig");

pub const Visib = enum {
Private,
Pub,
};

const max_src_size = 2 * 1024 * 1024 * 1024; // 2 GiB

@@ -45,7 +48,7 @@ pub const ZigCompiler = struct {

native_libc: event.Future(LibCInstallation),

var lazy_init_targets = std.once(util.initializeAllTargets);
var lazy_init_targets = std.once(initializeAllTargets);

pub fn init(allocator: *Allocator) !ZigCompiler {
lazy_init_targets.call();
@@ -119,6 +122,8 @@ pub const LlvmHandle = struct {
};

pub const Compilation = struct {
pub const FnLinkSet = std.TailQueue(?*Value.Fn);

zig_compiler: *ZigCompiler,
name: ArrayListSentineled(u8, 0),
llvm_triple: ArrayListSentineled(u8, 0),
@@ -152,8 +157,6 @@ pub const Compilation = struct {
/// it uses an optional pointer so that tombstone removals are possible
fn_link_set: event.Locked(FnLinkSet) = event.Locked(FnLinkSet).init(FnLinkSet.init()),

pub const FnLinkSet = std.TailQueue(?*Value.Fn);

link_libs_list: ArrayList(*LinkLib),
libc_link_lib: ?*LinkLib = null,

@@ -361,8 +364,7 @@ pub const Compilation = struct {
return comp;
} else if (await frame) |_| unreachable else |err| return err;
}

async fn createAsync(
fn createAsync(
out_comp: *?*Compilation,
zig_compiler: *ZigCompiler,
name: []const u8,
@@ -372,7 +374,7 @@ pub const Compilation = struct {
build_mode: builtin.Mode,
is_static: bool,
zig_lib_dir: []const u8,
) !void {
) callconv(.Async) !void {
const allocator = zig_compiler.allocator;

// TODO merge this line with stage2.zig crossTargetToTarget
@@ -442,8 +444,8 @@ pub const Compilation = struct {
}

comp.name = try ArrayListSentineled(u8, 0).init(comp.arena(), name);
comp.llvm_triple = try util.getLLVMTriple(comp.arena(), target);
comp.llvm_target = try util.llvmTargetFromTriple(comp.llvm_triple);
comp.llvm_triple = try getLLVMTriple(comp.arena(), target);
comp.llvm_target = try llvmTargetFromTriple(comp.llvm_triple);
comp.zig_std_dir = try fs.path.join(comp.arena(), &[_][]const u8{ zig_lib_dir, "std" });

const opt_level = switch (build_mode) {
@@ -726,8 +728,7 @@ pub const Compilation = struct {
fn start(self: *Compilation) void {
self.main_loop_future.resolve();
}

async fn mainLoop(self: *Compilation) void {
fn mainLoop(self: *Compilation) callconv(.Async) void {
// wait until start() is called
_ = self.main_loop_future.get();

@@ -790,8 +791,7 @@ pub const Compilation = struct {
build_result = group.wait();
}
}

async fn rebuildFile(self: *Compilation, root_scope: *Scope.Root) BuildError!void {
fn rebuildFile(self: *Compilation, root_scope: *Scope.Root) callconv(.Async) BuildError!void {
const tree_scope = blk: {
const source_code = fs.cwd().readFileAlloc(
self.gpa(),
@@ -964,15 +964,14 @@ pub const Compilation = struct {
try link(self);
}
}

/// caller takes ownership of resulting Code
async fn genAndAnalyzeCode(
fn genAndAnalyzeCode(
comp: *Compilation,
tree_scope: *Scope.AstTree,
scope: *Scope,
node: *ast.Node,
expected_type: ?*Type,
) !*ir.Code {
) callconv(.Async) !*ir.Code {
const unanalyzed_code = try ir.gen(
comp,
node,
@@ -1000,13 +999,12 @@ pub const Compilation = struct {

return analyzed_code;
}

async fn addCompTimeBlock(
fn addCompTimeBlock(
comp: *Compilation,
tree_scope: *Scope.AstTree,
scope: *Scope,
comptime_node: *ast.Node.Comptime,
) BuildError!void {
) callconv(.Async) BuildError!void {
const void_type = Type.Void.get(comp);
defer void_type.base.base.deref(comp);

@@ -1024,12 +1022,11 @@ pub const Compilation = struct {
};
analyzed_code.destroy(comp.gpa());
}

async fn addTopLevelDecl(
fn addTopLevelDecl(
self: *Compilation,
decl: *Decl,
locked_table: *Decl.Table,
) BuildError!void {
) callconv(.Async) BuildError!void {
const is_export = decl.isExported(decl.tree_scope.tree);

if (is_export) {
@@ -1065,11 +1062,10 @@ pub const Compilation = struct {

try self.prelink_group.call(addCompileErrorAsync, .{ self, msg });
}

async fn addCompileErrorAsync(
fn addCompileErrorAsync(
self: *Compilation,
msg: *Msg,
) BuildError!void {
) callconv(.Async) BuildError!void {
errdefer msg.destroy();

const compile_errors = self.compile_errors.acquire();
@@ -1077,8 +1073,7 @@ pub const Compilation = struct {

try compile_errors.value.append(msg);
}

async fn verifyUniqueSymbol(self: *Compilation, decl: *Decl) BuildError!void {
fn verifyUniqueSymbol(self: *Compilation, decl: *Decl) callconv(.Async) BuildError!void {
const exported_symbol_names = self.exported_symbol_names.acquire();
defer exported_symbol_names.release();

@@ -1129,8 +1124,7 @@ pub const Compilation = struct {
}
return link_lib;
}

async fn startFindingNativeLibC(self: *Compilation) void {
fn startFindingNativeLibC(self: *Compilation) callconv(.Async) void {
event.Loop.startCpuBoundOperation();
// we don't care if it fails, we're just trying to kick off the future resolution
_ = self.zig_compiler.getNativeLibC() catch return;
@@ -1234,7 +1228,7 @@ pub const Compilation = struct {
}

/// This declaration has been blessed as going into the final code generation.
pub async fn resolveDecl(comp: *Compilation, decl: *Decl) BuildError!void {
pub fn resolveDecl(comp: *Compilation, decl: *Decl) callconv(.Async) BuildError!void {
if (decl.resolution.start()) |ptr| return ptr.*;

decl.resolution.data = try generateDecl(comp, decl);
@@ -1335,8 +1329,7 @@ fn generateDeclFn(comp: *Compilation, fn_decl: *Decl.Fn) !void {
try comp.prelink_group.call(codegen.renderToLlvm, .{ comp, fn_val, analyzed_code });
try comp.prelink_group.call(addFnToLinkSet, .{ comp, fn_val });
}

async fn addFnToLinkSet(comp: *Compilation, fn_val: *Value.Fn) Compilation.BuildError!void {
fn addFnToLinkSet(comp: *Compilation, fn_val: *Value.Fn) callconv(.Async) Compilation.BuildError!void {
fn_val.base.ref();
defer fn_val.base.deref(comp);

@@ -1432,3 +1425,33 @@ fn generateDeclFnProto(comp: *Compilation, fn_decl: *Decl.Fn) !void {
fn_decl.value = .{ .FnProto = fn_proto_val };
symbol_name_consumed = true;
}

pub fn llvmTargetFromTriple(triple: [:0]const u8) !*llvm.Target {
var result: *llvm.Target = undefined;
var err_msg: [*:0]u8 = undefined;
if (llvm.GetTargetFromTriple(triple, &result, &err_msg) != 0) {
std.debug.warn("triple: {s} error: {s}\n", .{ triple, err_msg });
return error.UnsupportedTarget;
}
return result;
}

pub fn initializeAllTargets() void {
llvm.InitializeAllTargets();
llvm.InitializeAllTargetInfos();
llvm.InitializeAllTargetMCs();
llvm.InitializeAllAsmPrinters();
llvm.InitializeAllAsmParsers();
}

pub fn getLLVMTriple(allocator: *std.mem.Allocator, target: std.Target) ![:0]u8 {
var result = try std.ArrayListSentineled(u8, 0).initSize(allocator, 0);
defer result.deinit();

try result.outStream().print(
"{}-unknown-{}-{}",
.{ @tagName(target.cpu.arch), @tagName(target.os.tag), @tagName(target.abi) },
);

return result.toOwnedSlice();
}
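The compilation.zig churn above is mostly mechanical: the old `async fn` declarations become plain functions with an explicit async calling convention. A minimal standalone sketch of the equivalence:

const std = @import("std");

// Before: `async fn tick() void { suspend; }`
// After: the same semantics with an explicit calling convention.
fn tick() callconv(.Async) void {
    suspend;
}

test "callconv(.Async) sketch" {
    var frame = async tick(); // obtain the async frame
    resume frame; // drive it to completion
}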
@@ -1,102 +0,0 @@
const std = @import("std");
const Allocator = mem.Allocator;
const mem = std.mem;
const ast = std.zig.ast;
const Visib = @import("visib.zig").Visib;
const event = std.event;
const Value = @import("value.zig").Value;
const Token = std.zig.Token;
const errmsg = @import("errmsg.zig");
const Scope = @import("scope.zig").Scope;
const Compilation = @import("compilation.zig").Compilation;

pub const Decl = struct {
id: Id,
name: []const u8,
visib: Visib,
resolution: event.Future(Compilation.BuildError!void),
parent_scope: *Scope,

// TODO when we destroy the decl, deref the tree scope
tree_scope: *Scope.AstTree,

pub const Table = std.StringHashMap(*Decl);

pub fn cast(base: *Decl, comptime T: type) ?*T {
if (base.id != @field(Id, @typeName(T))) return null;
return @fieldParentPtr(T, "base", base);
}

pub fn isExported(base: *const Decl, tree: *ast.Tree) bool {
switch (base.id) {
.Fn => {
const fn_decl = @fieldParentPtr(Fn, "base", base);
return fn_decl.isExported(tree);
},
else => return false,
}
}

pub fn getSpan(base: *const Decl) errmsg.Span {
switch (base.id) {
.Fn => {
const fn_decl = @fieldParentPtr(Fn, "base", base);
const fn_proto = fn_decl.fn_proto;
const start = fn_proto.fn_token;
const end = fn_proto.name_token orelse start;
return errmsg.Span{
.first = start,
.last = end + 1,
};
},
else => @panic("TODO"),
}
}

pub fn findRootScope(base: *const Decl) *Scope.Root {
return base.parent_scope.findRoot();
}

pub const Id = enum {
Var,
Fn,
CompTime,
};

pub const Var = struct {
base: Decl,
};

pub const Fn = struct {
base: Decl,
value: union(enum) {
Unresolved,
Fn: *Value.Fn,
FnProto: *Value.FnProto,
},
fn_proto: *ast.Node.FnProto,

pub fn externLibName(self: Fn, tree: *ast.Tree) ?[]const u8 {
return if (self.fn_proto.extern_export_inline_token) |tok_index| x: {
const token = tree.tokens.at(tok_index);
break :x switch (token.id) {
.Extern => tree.tokenSlicePtr(token),
else => null,
};
} else null;
}

pub fn isExported(self: Fn, tree: *ast.Tree) bool {
if (self.fn_proto.extern_export_inline_token) |tok_index| {
const token = tree.tokens.at(tok_index);
return token.id == .Keyword_export;
} else {
return false;
}
}
};

pub const CompTime = struct {
base: Decl,
};
};
File diff suppressed because it is too large
@@ -16,10 +16,16 @@ pub const Inst = struct {
tag: Tag,
/// Byte offset into the source.
src: usize,
name: []const u8,

/// These names are used directly as the instruction names in the text format.
pub const Tag = enum {
breakpoint,
call,
/// Represents a reference to a global decl by name.
/// Canonicalized ZIR will not have any of these. The
/// syntax `@foo` is equivalent to `declref("foo")`.
declref,
str,
int,
ptrtoint,
@@ -46,6 +52,8 @@
pub fn TagToType(tag: Tag) type {
return switch (tag) {
.breakpoint => Breakpoint,
.call => Call,
.declref => DeclRef,
.str => Str,
.int => Int,
.ptrtoint => PtrToInt,
@@ -85,6 +93,29 @@
kw_args: struct {},
};

pub const Call = struct {
pub const base_tag = Tag.call;
base: Inst,

positionals: struct {
func: *Inst,
args: []*Inst,
},
kw_args: struct {
modifier: std.builtin.CallOptions.Modifier = .auto,
},
};

pub const DeclRef = struct {
pub const base_tag = Tag.declref;
base: Inst,

positionals: struct {
name: *Inst,
},
kw_args: struct {},
};

pub const Str = struct {
pub const base_tag = Tag.str;
base: Inst,
@@ -212,55 +243,55 @@
kw_args: struct {},

pub const BuiltinType = enum {
@"isize",
@"usize",
@"c_short",
@"c_ushort",
@"c_int",
@"c_uint",
@"c_long",
@"c_ulong",
@"c_longlong",
@"c_ulonglong",
@"c_longdouble",
@"c_void",
@"f16",
@"f32",
@"f64",
@"f128",
@"bool",
@"void",
@"noreturn",
@"type",
@"anyerror",
@"comptime_int",
@"comptime_float",
isize,
usize,
c_short,
c_ushort,
c_int,
c_uint,
c_long,
c_ulong,
c_longlong,
c_ulonglong,
c_longdouble,
c_void,
f16,
f32,
f64,
f128,
bool,
void,
noreturn,
type,
anyerror,
comptime_int,
comptime_float,

fn toType(self: BuiltinType) Type {
return switch (self) {
.@"isize" => Type.initTag(.@"isize"),
.@"usize" => Type.initTag(.@"usize"),
.@"c_short" => Type.initTag(.@"c_short"),
.@"c_ushort" => Type.initTag(.@"c_ushort"),
.@"c_int" => Type.initTag(.@"c_int"),
.@"c_uint" => Type.initTag(.@"c_uint"),
.@"c_long" => Type.initTag(.@"c_long"),
.@"c_ulong" => Type.initTag(.@"c_ulong"),
.@"c_longlong" => Type.initTag(.@"c_longlong"),
.@"c_ulonglong" => Type.initTag(.@"c_ulonglong"),
.@"c_longdouble" => Type.initTag(.@"c_longdouble"),
.@"c_void" => Type.initTag(.@"c_void"),
.@"f16" => Type.initTag(.@"f16"),
.@"f32" => Type.initTag(.@"f32"),
.@"f64" => Type.initTag(.@"f64"),
.@"f128" => Type.initTag(.@"f128"),
.@"bool" => Type.initTag(.@"bool"),
.@"void" => Type.initTag(.@"void"),
.@"noreturn" => Type.initTag(.@"noreturn"),
.@"type" => Type.initTag(.@"type"),
.@"anyerror" => Type.initTag(.@"anyerror"),
.@"comptime_int" => Type.initTag(.@"comptime_int"),
.@"comptime_float" => Type.initTag(.@"comptime_float"),
.isize => Type.initTag(.isize),
.usize => Type.initTag(.usize),
.c_short => Type.initTag(.c_short),
.c_ushort => Type.initTag(.c_ushort),
.c_int => Type.initTag(.c_int),
.c_uint => Type.initTag(.c_uint),
.c_long => Type.initTag(.c_long),
.c_ulong => Type.initTag(.c_ulong),
.c_longlong => Type.initTag(.c_longlong),
.c_ulonglong => Type.initTag(.c_ulonglong),
.c_longdouble => Type.initTag(.c_longdouble),
.c_void => Type.initTag(.c_void),
.f16 => Type.initTag(.f16),
.f32 => Type.initTag(.f32),
.f64 => Type.initTag(.f64),
.f128 => Type.initTag(.f128),
.bool => Type.initTag(.bool),
.void => Type.initTag(.void),
.noreturn => Type.initTag(.noreturn),
.type => Type.initTag(.type),
.anyerror => Type.initTag(.anyerror),
.comptime_int => Type.initTag(.comptime_int),
.comptime_float => Type.initTag(.comptime_float),
};
}
};
@@ -376,7 +407,7 @@
pub const Module = struct {
decls: []*Inst,
errors: []ErrorMsg,
arena: std.heap.ArenaAllocator,
arena: std.heap.ArenaAllocator.State,

pub const Body = struct {
instructions: []*Inst,
@@ -385,7 +416,7 @@
pub fn deinit(self: *Module, allocator: *Allocator) void {
allocator.free(self.decls);
allocator.free(self.errors);
self.arena.deinit();
self.arena.promote(allocator).deinit();
self.* = undefined;
}

@@ -431,6 +462,7 @@
// TODO I tried implementing this with an inline for loop and hit a compiler bug
switch (decl.tag) {
.breakpoint => return self.writeInstToStreamGeneric(stream, .breakpoint, decl, inst_table),
.call => return self.writeInstToStreamGeneric(stream, .call, decl, inst_table),
.str => return self.writeInstToStreamGeneric(stream, .str, decl, inst_table),
.int => return self.writeInstToStreamGeneric(stream, .int, decl, inst_table),
.ptrtoint => return self.writeInstToStreamGeneric(stream, .ptrtoint, decl, inst_table),
@@ -543,9 +575,9 @@ pub fn parse(allocator: *Allocator, source: [:0]const u8) Allocator.Error!Module
.arena = std.heap.ArenaAllocator.init(allocator),
.i = 0,
.source = source,
.decls = std.ArrayList(*Inst).init(allocator),
.errors = std.ArrayList(ErrorMsg).init(allocator),
.global_name_map = &global_name_map,
.errors = .{},
.decls = .{},
};
errdefer parser.arena.deinit();

@@ -555,10 +587,11 @@
},
else => |e| return e,
};

return Module{
.decls = parser.decls.toOwnedSlice(),
.errors = parser.errors.toOwnedSlice(),
.arena = parser.arena,
.decls = parser.decls.toOwnedSlice(allocator),
.errors = parser.errors.toOwnedSlice(allocator),
.arena = parser.arena.state,
};
}

@@ -567,8 +600,8 @@ const Parser = struct {
arena: std.heap.ArenaAllocator,
i: usize,
source: [:0]const u8,
errors: std.ArrayList(ErrorMsg),
decls: std.ArrayList(*Inst),
errors: std.ArrayListUnmanaged(ErrorMsg),
decls: std.ArrayListUnmanaged(*Inst),
global_name_map: *std.StringHashMap(usize),

const Body = struct {
@@ -893,8 +926,25 @@
const ident = self.source[name_start..self.i];
const kv = map.get(ident) orelse {
const bad_name = self.source[name_start - 1 .. self.i];
self.i = name_start - 1;
return self.fail("unrecognized identifier: {}", .{bad_name});
const src = name_start - 1;
if (local_ref) {
self.i = src;
return self.fail("unrecognized identifier: {}", .{bad_name});
} else {
const name = try self.arena.allocator.create(Inst.Str);
name.* = .{
.base = .{ .src = src, .tag = Inst.Str.base_tag },
.positionals = .{ .bytes = ident },
.kw_args = .{},
};
const declref = try self.arena.allocator.create(Inst.DeclRef);
declref.* = .{
.base = .{ .src = src, .tag = Inst.DeclRef.base_tag },
.positionals = .{ .name = &name.base },
.kw_args = .{},
};
return &declref.base;
}
};
if (local_ref) {
return body_ctx.?.instructions.items[kv.value];
@@ -1065,6 +1115,24 @@ const EmitZIR = struct {
for (body.instructions) |inst| {
const new_inst = switch (inst.tag) {
.breakpoint => try self.emitTrivial(inst.src, Inst.Breakpoint),
.call => blk: {
const old_inst = inst.cast(ir.Inst.Call).?;
const new_inst = try self.arena.allocator.create(Inst.Call);

const args = try self.arena.allocator.alloc(*Inst, old_inst.args.args.len);
for (args) |*elem, i| {
elem.* = try self.resolveInst(inst_table, old_inst.args.args[i]);
}
new_inst.* = .{
.base = .{ .src = inst.src, .tag = Inst.Call.base_tag },
.positionals = .{
.func = try self.resolveInst(inst_table, old_inst.args.func),
.args = args,
},
.kw_args = .{},
};
break :blk &new_inst.base;
},
.unreach => try self.emitTrivial(inst.src, Inst.Unreachable),
.ret => try self.emitTrivial(inst.src, Inst.Return),
.constant => unreachable, // excluded from function bodies
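Putting the ZIR pieces above together: with `declref` and the `@foo` shorthand, the text format can reference a named decl even before it is declared. The following is only a hand-written illustration of what such input could look like; the exact surface syntax is defined by the parser above, not by this sketch:

@entry = fn(@fn_ty, {
  %0 = call(@helper, [])
})
@helper = fn(@fn_ty, {
  %0 = return()
})

Here `@helper` is used before its declaration; the parser resolves it by emitting a `declref("helper")` that canonicalization later replaces with a direct reference.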
@@ -1,6 +1,5 @@
const std = @import("std");
const builtin = @import("builtin");
const util = @import("util.zig");
const Target = std.Target;
const fs = std.fs;
const Allocator = std.mem.Allocator;
@ -9,50 +9,65 @@ const codegen = @import("codegen.zig");
|
||||
|
||||
const default_entry_addr = 0x8000000;
|
||||
|
||||
pub const ErrorMsg = struct {
|
||||
byte_offset: usize,
|
||||
msg: []const u8,
|
||||
};
|
||||
|
||||
pub const Result = struct {
|
||||
errors: []ErrorMsg,
|
||||
|
||||
pub fn deinit(self: *Result, allocator: *mem.Allocator) void {
|
||||
for (self.errors) |err| {
|
||||
allocator.free(err.msg);
|
||||
}
|
||||
allocator.free(self.errors);
|
||||
self.* = undefined;
|
||||
}
|
||||
pub const Options = struct {
|
||||
target: std.Target,
|
||||
output_mode: std.builtin.OutputMode,
|
||||
link_mode: std.builtin.LinkMode,
|
||||
object_format: std.builtin.ObjectFormat,
|
||||
/// Used for calculating how much space to reserve for symbols in case the binary file
|
||||
/// does not already have a symbol table.
|
||||
symbol_count_hint: u64 = 32,
|
||||
/// Used for calculating how much space to reserve for executable program code in case
|
||||
/// the binary file deos not already have such a section.
|
||||
program_code_size_hint: u64 = 256 * 1024,
|
||||
};
|
||||
|
||||
/// Attempts incremental linking, if the file already exists.
|
||||
/// If incremental linking fails, falls back to truncating the file and rewriting it.
|
||||
/// A malicious file is detected as incremental link failure and does not cause Illegal Behavior.
|
||||
/// This operation is not atomic.
|
||||
pub fn updateFilePath(
|
||||
pub fn openBinFilePath(
|
||||
allocator: *Allocator,
|
||||
module: ir.Module,
|
||||
dir: fs.Dir,
|
||||
sub_path: []const u8,
|
||||
) !Result {
|
||||
const file = try dir.createFile(sub_path, .{ .truncate = false, .read = true, .mode = determineMode(module) });
|
||||
options: Options,
|
||||
) !ElfFile {
|
||||
const file = try dir.createFile(sub_path, .{ .truncate = false, .read = true, .mode = determineMode(options) });
|
||||
defer file.close();
|
||||
|
||||
return updateFile(allocator, module, file);
|
||||
return openBinFile(allocator, file, options);
|
||||
}
|
||||
|
||||
/// Atomically overwrites the old file, if present.
|
||||
pub fn writeFilePath(
|
||||
allocator: *Allocator,
|
||||
module: ir.Module,
|
||||
dir: fs.Dir,
|
||||
sub_path: []const u8,
|
||||
) !Result {
|
||||
const af = try dir.atomicFile(sub_path, .{ .mode = determineMode(module) });
|
||||
module: ir.Module,
|
||||
errors: *std.ArrayList(ir.ErrorMsg),
|
||||
) !void {
|
||||
const options: Options = .{
|
||||
.target = module.target,
|
||||
.output_mode = module.output_mode,
|
||||
.link_mode = module.link_mode,
|
||||
.object_format = module.object_format,
|
||||
.symbol_count_hint = module.decls.items.len,
|
||||
};
|
||||
const af = try dir.atomicFile(sub_path, .{ .mode = determineMode(options) });
|
||||
defer af.deinit();
|
||||
|
||||
const result = try writeFile(allocator, module, af.file);
|
||||
const elf_file = try createElfFile(allocator, af.file, options);
|
||||
for (module.decls.items) |decl| {
|
||||
try elf_file.updateDecl(module, decl, errors);
|
||||
}
|
||||
try elf_file.flush();
|
||||
if (elf_file.error_flags.no_entry_point_found) {
|
||||
try errors.ensureCapacity(errors.items.len + 1);
|
||||
errors.appendAssumeCapacity(.{
|
||||
.byte_offset = 0,
|
||||
.msg = try std.fmt.allocPrint(errors.allocator, "no entry point found", .{}),
|
||||
});
|
||||
}
|
||||
try af.finish();
|
||||
return result;
|
||||
}
|
||||
@ -62,49 +77,65 @@ pub fn writeFilePath(
|
||||
/// Returns an error if `file` is not already open with +read +write +seek abilities.
|
||||
/// A malicious file is detected as incremental link failure and does not cause Illegal Behavior.
|
||||
/// This operation is not atomic.
|
||||
pub fn updateFile(allocator: *Allocator, module: ir.Module, file: fs.File) !Result {
|
||||
return updateFileInner(allocator, module, file) catch |err| switch (err) {
|
||||
pub fn openBinFile(allocator: *Allocator, file: fs.File, options: Options) !ElfFile {
|
||||
return openBinFileInner(allocator, file, options) catch |err| switch (err) {
|
||||
error.IncrFailed => {
|
||||
return writeFile(allocator, module, file);
|
||||
return createElfFile(allocator, file, options);
|
||||
},
|
||||
else => |e| return e,
|
||||
};
|
||||
}
|
||||
|
||||
const Update = struct {
pub const ElfFile = struct {
allocator: *Allocator,
file: fs.File,
module: *const ir.Module,
options: Options,
ptr_width: enum { p32, p64 },

/// Stored in native-endian format, depending on target endianness needs to be bswapped on read/write.
/// Same order as in the file.
sections: std.ArrayList(elf.Elf64_Shdr),
shdr_table_offset: ?u64,
sections: std.ArrayListUnmanaged(elf.Elf64_Shdr) = .{},
shdr_table_offset: ?u64 = null,

/// Stored in native-endian format, depending on target endianness needs to be bswapped on read/write.
/// Same order as in the file.
program_headers: std.ArrayList(elf.Elf64_Phdr),
phdr_table_offset: ?u64,
program_headers: std.ArrayListUnmanaged(elf.Elf64_Phdr) = .{},
phdr_table_offset: ?u64 = null,
/// The index into the program headers of a PT_LOAD program header with Read and Execute flags
phdr_load_re_index: ?u16,
entry_addr: ?u64,
phdr_load_re_index: ?u16 = null,
entry_addr: ?u64 = null,

shstrtab: std.ArrayList(u8),
shstrtab_index: ?u16,
shstrtab: std.ArrayListUnmanaged(u8) = .{},
shstrtab_index: ?u16 = null,

text_section_index: ?u16,
symtab_section_index: ?u16,
text_section_index: ?u16 = null,
symtab_section_index: ?u16 = null,

/// The same order as in the file
symbols: std.ArrayList(elf.Elf64_Sym),
symbols: std.ArrayListUnmanaged(elf.Elf64_Sym) = .{},

errors: std.ArrayList(ErrorMsg),
/// Same order as in the file.
offset_table: std.ArrayListUnmanaged(u64) = .{},

fn deinit(self: *Update) void {
self.sections.deinit();
self.program_headers.deinit();
self.shstrtab.deinit();
self.symbols.deinit();
self.errors.deinit();
/// This means the entire read-only executable program code needs to be rewritten.
phdr_load_re_dirty: bool = false,
phdr_table_dirty: bool = false,
shdr_table_dirty: bool = false,
shstrtab_dirty: bool = false,
symtab_dirty: bool = false,

error_flags: ErrorFlags = ErrorFlags{},

pub const ErrorFlags = struct {
no_entry_point_found: bool = false,
};

pub fn deinit(self: *ElfFile) void {
self.sections.deinit(self.allocator);
self.program_headers.deinit(self.allocator);
self.shstrtab.deinit(self.allocator);
self.symbols.deinit(self.allocator);
self.offset_table.deinit(self.allocator);
}

// `expand_num / expand_den` is the factor of padding when allocation
@ -112,8 +143,8 @@ const Update = struct {
const alloc_den = 3;

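// Editor's worked example (the numerator is elided by the hunk above;
// alloc_num = 4 is assumed here): with alloc_num / alloc_den = 4 / 3, a
// 96-byte allocation reserves 96 * 4 / 3 = 128 bytes, leaving ~33% slack
// so a recompiled Decl can grow in place without being moved.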
/// Returns end pos of collision, if any.
fn detectAllocCollision(self: *Update, start: u64, size: u64) ?u64 {
const small_ptr = self.module.target.cpu.arch.ptrBitWidth() == 32;
fn detectAllocCollision(self: *ElfFile, start: u64, size: u64) ?u64 {
const small_ptr = self.options.target.cpu.arch.ptrBitWidth() == 32;
const ehdr_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Ehdr) else @sizeOf(elf.Elf64_Ehdr);
if (start < ehdr_size)
return ehdr_size;
@ -157,7 +188,7 @@ const Update = struct {
return null;
}

fn allocatedSize(self: *Update, start: u64) u64 {
fn allocatedSize(self: *ElfFile, start: u64) u64 {
var min_pos: u64 = std.math.maxInt(u64);
if (self.shdr_table_offset) |off| {
if (off > start and off < min_pos) min_pos = off;
@ -176,7 +207,7 @@ const Update = struct {
return min_pos - start;
}

fn findFreeSpace(self: *Update, object_size: u64, min_alignment: u16) u64 {
fn findFreeSpace(self: *ElfFile, object_size: u64, min_alignment: u16) u64 {
var start: u64 = 0;
while (self.detectAllocCollision(start, object_size)) |item_end| {
start = mem.alignForwardGeneric(u64, item_end, min_alignment);
@ -184,33 +215,21 @@ const Update = struct {
return start;
}

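// Editor's note on the first-fit walk above: starting at offset 0, each
// collision reports where the colliding object ends, and the candidate
// offset is bumped to the next aligned position until a gap fits, e.g.:
//
// const off = self.findFreeSpace(needed_size, 0x1000); // hypothetical call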
fn makeString(self: *Update, bytes: []const u8) !u32 {
fn makeString(self: *ElfFile, bytes: []const u8) !u32 {
const result = self.shstrtab.items.len;
try self.shstrtab.appendSlice(bytes);
try self.shstrtab.append(0);
return @intCast(u32, result);
}

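// Editor's example of the string table makeString builds: appending ".text"
// then ".shstrtab" produces the bytes ".text\x00.shstrtab\x00" and returns
// offsets 0 and 6; ELF section headers refer to names by these offsets.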
fn perform(self: *Update) !void {
const ptr_width: enum { p32, p64 } = switch (self.module.target.cpu.arch.ptrBitWidth()) {
32 => .p32,
64 => .p64,
else => return error.UnsupportedArchitecture,
};
const small_ptr = switch (ptr_width) {
pub fn populateMissingMetadata(self: *ElfFile) !void {
const small_ptr = switch (self.ptr_width) {
.p32 => true,
.p64 => false,
};
// This means the entire read-only executable program code needs to be rewritten.
var phdr_load_re_dirty = false;
var phdr_table_dirty = false;
var shdr_table_dirty = false;
var shstrtab_dirty = false;
var symtab_dirty = false;

if (self.phdr_load_re_index == null) {
self.phdr_load_re_index = @intCast(u16, self.program_headers.items.len);
const file_size = 256 * 1024;
const file_size = self.options.program_code_size_hint;
const p_align = 0x1000;
const off = self.findFreeSpace(file_size, p_align);
//std.debug.warn("found PT_LOAD free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
@ -225,24 +244,8 @@ const Update = struct {
.p_flags = elf.PF_X | elf.PF_R,
});
self.entry_addr = null;
phdr_load_re_dirty = true;
phdr_table_dirty = true;
}
if (self.sections.items.len == 0) {
// There must always be a null section in index 0
try self.sections.append(.{
.sh_name = 0,
.sh_type = elf.SHT_NULL,
.sh_flags = 0,
.sh_addr = 0,
.sh_offset = 0,
.sh_size = 0,
.sh_link = 0,
.sh_info = 0,
.sh_addralign = 0,
.sh_entsize = 0,
});
shdr_table_dirty = true;
self.phdr_load_re_dirty = true;
self.phdr_table_dirty = true;
}
if (self.shstrtab_index == null) {
self.shstrtab_index = @intCast(u16, self.sections.items.len);
@ -262,8 +265,8 @@ const Update = struct {
.sh_addralign = 1,
.sh_entsize = 0,
});
shstrtab_dirty = true;
shdr_table_dirty = true;
self.shstrtab_dirty = true;
self.shdr_table_dirty = true;
}
if (self.text_section_index == null) {
self.text_section_index = @intCast(u16, self.sections.items.len);
@ -281,13 +284,13 @@ const Update = struct {
.sh_addralign = phdr.p_align,
.sh_entsize = 0,
});
shdr_table_dirty = true;
self.shdr_table_dirty = true;
}
if (self.symtab_section_index == null) {
self.symtab_section_index = @intCast(u16, self.sections.items.len);
const min_align: u16 = if (small_ptr) @alignOf(elf.Elf32_Sym) else @alignOf(elf.Elf64_Sym);
const each_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Sym) else @sizeOf(elf.Elf64_Sym);
const file_size = self.module.exports.len * each_size;
const file_size = self.options.symbol_count_hint * each_size;
const off = self.findFreeSpace(file_size, min_align);
//std.debug.warn("found symtab free space 0x{x} to 0x{x}\n", .{ off, off + file_size });

@ -300,12 +303,12 @@ const Update = struct {
.sh_size = file_size,
// The section header index of the associated string table.
.sh_link = self.shstrtab_index.?,
.sh_info = @intCast(u32, self.module.exports.len),
.sh_info = @intCast(u32, self.symbols.items.len),
.sh_addralign = min_align,
.sh_entsize = each_size,
});
symtab_dirty = true;
shdr_table_dirty = true;
self.symtab_dirty = true;
self.shdr_table_dirty = true;
}
const shsize: u64 = switch (ptr_width) {
.p32 => @sizeOf(elf.Elf32_Shdr),
@ -317,7 +320,7 @@ const Update = struct {
};
if (self.shdr_table_offset == null) {
self.shdr_table_offset = self.findFreeSpace(self.sections.items.len * shsize, shalign);
shdr_table_dirty = true;
self.shdr_table_dirty = true;
}
const phsize: u64 = switch (ptr_width) {
.p32 => @sizeOf(elf.Elf32_Phdr),
@ -329,13 +332,15 @@ const Update = struct {
};
if (self.phdr_table_offset == null) {
self.phdr_table_offset = self.findFreeSpace(self.program_headers.items.len * phsize, phalign);
phdr_table_dirty = true;
self.phdr_table_dirty = true;
}
const foreign_endian = self.module.target.cpu.arch.endian() != std.Target.current.cpu.arch.endian();
}

try self.writeCodeAndSymbols(phdr_table_dirty, shdr_table_dirty);
/// Commit pending changes and write headers.
pub fn flush(self: *ElfFile) !void {
const foreign_endian = self.options.target.cpu.arch.endian() != std.Target.current.cpu.arch.endian();

if (phdr_table_dirty) {
if (self.phdr_table_dirty) {
const allocated_size = self.allocatedSize(self.phdr_table_offset.?);
const needed_size = self.program_headers.items.len * phsize;

@ -345,7 +350,7 @@ const Update = struct {
}

const allocator = self.program_headers.allocator;
switch (ptr_width) {
switch (self.ptr_width) {
.p32 => {
const buf = try allocator.alloc(elf.Elf32_Phdr, self.program_headers.items.len);
defer allocator.free(buf);
@ -371,11 +376,12 @@ const Update = struct {
try self.file.pwriteAll(mem.sliceAsBytes(buf), self.phdr_table_offset.?);
},
}
self.phdr_table_dirty = false;
}

{
const shstrtab_sect = &self.sections.items[self.shstrtab_index.?];
if (shstrtab_dirty or self.shstrtab.items.len != shstrtab_sect.sh_size) {
if (self.shstrtab_dirty or self.shstrtab.items.len != shstrtab_sect.sh_size) {
const allocated_size = self.allocatedSize(shstrtab_sect.sh_offset);
const needed_size = self.shstrtab.items.len;

@ -387,13 +393,14 @@ const Update = struct {
//std.debug.warn("shstrtab start=0x{x} end=0x{x}\n", .{ shstrtab_sect.sh_offset, shstrtab_sect.sh_offset + needed_size });

try self.file.pwriteAll(self.shstrtab.items, shstrtab_sect.sh_offset);
if (!shdr_table_dirty) {
if (!self.shdr_table_dirty) {
// Then it won't get written with the others and we need to do it.
try self.writeSectHeader(self.shstrtab_index.?);
}
self.shstrtab_dirty = false;
}
}
if (shdr_table_dirty) {
if (self.shdr_table_dirty) {
const allocated_size = self.allocatedSize(self.shdr_table_offset.?);
const needed_size = self.sections.items.len * shsize;

@ -403,7 +410,7 @@ const Update = struct {
}

const allocator = self.sections.allocator;
switch (ptr_width) {
switch (self.ptr_width) {
.p32 => {
const buf = try allocator.alloc(elf.Elf32_Shdr, self.sections.items.len);
defer allocator.free(buf);
@ -431,38 +438,36 @@ const Update = struct {
},
}
}
if (self.entry_addr == null and self.module.output_mode == .Exe) {
const msg = try std.fmt.allocPrint(self.errors.allocator, "no entry point found", .{});
errdefer self.errors.allocator.free(msg);
try self.errors.append(.{
.byte_offset = 0,
.msg = msg,
});
if (self.entry_addr == null and self.options.output_mode == .Exe) {
self.error_flags.no_entry_point_found = true;
} else {
self.error_flags.no_entry_point_found = false;
try self.writeElfHeader();
}
// TODO find end pos and truncate

// The point of flush() is to commit changes, so nothing should be dirty after this.
assert(!self.phdr_load_re_dirty);
assert(!self.phdr_table_dirty);
assert(!self.shdr_table_dirty);
assert(!self.shstrtab_dirty);
assert(!self.symtab_dirty);
}

fn writeElfHeader(self: *Update) !void {
fn writeElfHeader(self: *ElfFile) !void {
var hdr_buf: [@sizeOf(elf.Elf64_Ehdr)]u8 = undefined;

var index: usize = 0;
hdr_buf[0..4].* = "\x7fELF".*;
index += 4;

const ptr_width: enum { p32, p64 } = switch (self.module.target.cpu.arch.ptrBitWidth()) {
32 => .p32,
64 => .p64,
else => return error.UnsupportedArchitecture,
};
hdr_buf[index] = switch (ptr_width) {
hdr_buf[index] = switch (self.ptr_width) {
.p32 => elf.ELFCLASS32,
.p64 => elf.ELFCLASS64,
};
index += 1;

const endian = self.module.target.cpu.arch.endian();
const endian = self.options.target.cpu.arch.endian();
hdr_buf[index] = switch (endian) {
.Little => elf.ELFDATA2LSB,
.Big => elf.ELFDATA2MSB,
@ -480,10 +485,10 @@ const Update = struct {

assert(index == 16);

const elf_type = switch (self.module.output_mode) {
const elf_type = switch (self.options.output_mode) {
.Exe => elf.ET.EXEC,
.Obj => elf.ET.REL,
.Lib => switch (self.module.link_mode) {
.Lib => switch (self.options.link_mode) {
.Static => elf.ET.REL,
.Dynamic => elf.ET.DYN,
},
@ -491,7 +496,7 @@ const Update = struct {
mem.writeInt(u16, hdr_buf[index..][0..2], @enumToInt(elf_type), endian);
index += 2;

const machine = self.module.target.cpu.arch.toElfMachine();
const machine = self.options.target.cpu.arch.toElfMachine();
mem.writeInt(u16, hdr_buf[index..][0..2], @enumToInt(machine), endian);
index += 2;

@ -501,7 +506,7 @@ const Update = struct {

const e_entry = if (elf_type == .REL) 0 else self.entry_addr.?;

switch (ptr_width) {
switch (self.ptr_width) {
.p32 => {
mem.writeInt(u32, hdr_buf[index..][0..4], @intCast(u32, e_entry), endian);
index += 4;
@ -533,14 +538,14 @@ const Update = struct {
mem.writeInt(u32, hdr_buf[index..][0..4], e_flags, endian);
index += 4;

const e_ehsize: u16 = switch (ptr_width) {
const e_ehsize: u16 = switch (self.ptr_width) {
.p32 => @sizeOf(elf.Elf32_Ehdr),
.p64 => @sizeOf(elf.Elf64_Ehdr),
};
mem.writeInt(u16, hdr_buf[index..][0..2], e_ehsize, endian);
index += 2;

const e_phentsize: u16 = switch (ptr_width) {
const e_phentsize: u16 = switch (self.ptr_width) {
.p32 => @sizeOf(elf.Elf32_Phdr),
.p64 => @sizeOf(elf.Elf64_Phdr),
};
@ -551,7 +556,7 @@ const Update = struct {
mem.writeInt(u16, hdr_buf[index..][0..2], e_phnum, endian);
index += 2;

const e_shentsize: u16 = switch (ptr_width) {
const e_shentsize: u16 = switch (self.ptr_width) {
.p32 => @sizeOf(elf.Elf32_Shdr),
.p64 => @sizeOf(elf.Elf64_Shdr),
};
@ -570,81 +575,172 @@ const Update = struct {
try self.file.pwriteAll(hdr_buf[0..index], 0);
}

fn writeCodeAndSymbols(self: *Update, phdr_table_dirty: bool, shdr_table_dirty: bool) !void {
// index 0 is always a null symbol
try self.symbols.resize(1);
self.symbols.items[0] = .{
.st_name = 0,
.st_info = 0,
.st_other = 0,
.st_shndx = 0,
.st_value = 0,
.st_size = 0,
};
/// TODO Look into making this smaller to save memory.
/// Lots of redundant info here with the data stored in symbol structs.
const DeclSymbol = struct {
symbol_indexes: []usize,
vaddr: u64,
file_offset: u64,
size: u64,
};

const AllocatedBlock = struct {
vaddr: u64,
file_offset: u64,
size_capacity: u64,
};

fn allocateDeclSymbol(self: *ElfFile, size: u64) AllocatedBlock {
const phdr = &self.program_headers.items[self.phdr_load_re_index.?];
var vaddr: u64 = phdr.p_vaddr;
var file_off: u64 = phdr.p_offset;
todo();
//{
// // Now that we know the code size, we need to update the program header for executable code
// phdr.p_memsz = vaddr - phdr.p_vaddr;
// phdr.p_filesz = phdr.p_memsz;

var code = std.ArrayList(u8).init(self.sections.allocator);
defer code.deinit();
// const shdr = &self.sections.items[self.text_section_index.?];
// shdr.sh_size = phdr.p_filesz;

for (self.module.exports) |exp| {
code.shrink(0);
var symbol = try codegen.generateSymbol(exp.typed_value, self.module.*, &code);
defer symbol.deinit(code.allocator);
if (symbol.errors.len != 0) {
for (symbol.errors) |err| {
const msg = try mem.dupe(self.errors.allocator, u8, err.msg);
errdefer self.errors.allocator.free(msg);
try self.errors.append(.{
.byte_offset = err.byte_offset,
.msg = msg,
});
}
continue;
}
try self.file.pwriteAll(code.items, file_off);
// self.phdr_table_dirty = true; // TODO look into making only the one program header dirty
// self.shdr_table_dirty = true; // TODO look into making only the one section dirty
//}

if (mem.eql(u8, exp.name, "_start")) {
self.entry_addr = vaddr;
}
(try self.symbols.addOne()).* = .{
.st_name = try self.makeString(exp.name),
.st_info = (elf.STB_LOCAL << 4) | elf.STT_FUNC,
.st_other = 0,
.st_shndx = self.text_section_index.?,
.st_value = vaddr,
.st_size = code.items.len,
};
vaddr += code.items.len;
}

{
// Now that we know the code size, we need to update the program header for executable code
phdr.p_memsz = vaddr - phdr.p_vaddr;
phdr.p_filesz = phdr.p_memsz;

const shdr = &self.sections.items[self.text_section_index.?];
shdr.sh_size = phdr.p_filesz;

if (!phdr_table_dirty) {
// Then it won't get written with the others and we need to do it.
try self.writeProgHeader(self.phdr_load_re_index.?);
}
if (!shdr_table_dirty) {
// Then it won't get written with the others and we need to do it.
try self.writeSectHeader(self.text_section_index.?);
}
}

return self.writeSymbols();
//return self.writeSymbols();
}

fn writeProgHeader(self: *Update, index: usize) !void {
const foreign_endian = self.module.target.cpu.arch.endian() != std.Target.current.cpu.arch.endian();
fn findAllocatedBlock(self: *ElfFile, vaddr: u64) AllocatedBlock {
todo();
}

pub fn updateDecl(
self: *ElfFile,
module: ir.Module,
typed_value: ir.TypedValue,
decl_export_node: ?*std.LinkedList(std.builtin.ExportOptions).Node,
hash: ir.Module.Decl.Hash,
err_msg_allocator: *Allocator,
) !?ir.ErrorMsg {
var code = std.ArrayList(u8).init(self.allocator);
defer code.deinit();

const err_msg = try codegen.generateSymbol(typed_value, module, &code, err_msg_allocator);
if (err_msg) |em| return em;

const export_count = blk: {
var export_node = decl_export_node;
var i: usize = 0;
while (export_node) |node| : (export_node = node.next) i += 1;
break :blk i;
};

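// Editor's note on the block above: only the head export node is passed in,
// so the export count is found by walking the decl's chain of export nodes
// once; that count then sizes `symbol_indexes` (one ELF symbol per export).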
// Find or create a symbol from the decl
var valid_sym_index_len: usize = 0;
const decl_symbol = blk: {
if (self.decl_table.getValue(hash)) |decl_symbol| {
valid_sym_index_len = decl_symbol.symbol_indexes.len;
decl_symbol.symbol_indexes = try self.allocator.realloc(decl_symbol.symbol_indexes, export_count);

const existing_block = self.findAllocatedBlock(decl_symbol.vaddr);
if (code.items.len > existing_block.size_capacity) {
const new_block = self.allocateDeclSymbol(code.items.len);
decl_symbol.vaddr = new_block.vaddr;
decl_symbol.file_offset = new_block.file_offset;
decl_symbol.size = code.items.len;
}
break :blk decl_symbol;
} else {
const new_block = self.allocateDeclSymbol(code.items.len);

const decl_symbol = try self.allocator.create(DeclSymbol);
errdefer self.allocator.destroy(decl_symbol);

decl_symbol.* = .{
.symbol_indexes = try self.allocator.alloc(usize, export_count),
.vaddr = new_block.vaddr,
.file_offset = new_block.file_offset,
.size = code.items.len,
};
errdefer self.allocator.free(decl_symbol.symbol_indexes);

try self.decl_table.put(hash, decl_symbol);
break :blk decl_symbol;
}
};

// Allocate new symbols.
{
var i: usize = valid_sym_index_len;
const old_len = self.symbols.items.len;
try self.symbols.resize(old_len + (decl_symbol.symbol_indexes.len - i));
while (i < decl_symbol.symbol_indexes.len) : (i += 1) {
decl_symbol.symbol_indexes[i] = old_len + i;
}
}

var export_node = decl_export_node;
var export_index: usize = 0;
while (export_node) |node| : ({
export_node = node.next;
export_index += 1;
}) {
if (node.data.section) |section_name| {
if (!mem.eql(u8, section_name, ".text")) {
try errors.ensureCapacity(errors.items.len + 1);
errors.appendAssumeCapacity(.{
.byte_offset = 0,
.msg = try std.fmt.allocPrint(errors.allocator, "Unimplemented: ExportOptions.section", .{}),
});
}
}
const stb_bits = switch (node.data.linkage) {
.Internal => elf.STB_LOCAL,
.Strong => blk: {
if (mem.eql(u8, node.data.name, "_start")) {
self.entry_addr = decl_symbol.vaddr;
}
break :blk elf.STB_GLOBAL;
},
.Weak => elf.STB_WEAK,
.LinkOnce => {
try errors.ensureCapacity(errors.items.len + 1);
errors.appendAssumeCapacity(.{
.byte_offset = 0,
.msg = try std.fmt.allocPrint(errors.allocator, "Unimplemented: GlobalLinkage.LinkOnce", .{}),
});
},
};
const stt_bits = switch (typed_value.ty.zigTypeTag()) {
.Fn => elf.STT_FUNC,
else => elf.STT_OBJECT,
};
const sym_index = decl_symbol.symbol_indexes[export_index];
const name = blk: {
if (export_index < valid_sym_index_len) {
const name_stroff = self.symbols.items[sym_index].st_name;
const existing_name = self.getString(name_stroff);
if (mem.eql(u8, existing_name, node.data.name)) {
break :blk name_stroff;
}
}
break :blk try self.makeString(node.data.name);
};
self.symbols.items[sym_index] = .{
.st_name = name,
.st_info = (stb_bits << 4) | stt_bits,
.st_other = 0,
.st_shndx = self.text_section_index.?,
.st_value = decl_symbol.vaddr,
.st_size = code.items.len,
};
}

try self.file.pwriteAll(code.items, decl_symbol.file_offset);
}

fn writeProgHeader(self: *ElfFile, index: usize) !void {
const foreign_endian = self.options.target.cpu.arch.endian() != std.Target.current.cpu.arch.endian();
const offset = self.program_headers.items[index].p_offset;
switch (self.module.target.cpu.arch.ptrBitWidth()) {
switch (self.options.target.cpu.arch.ptrBitWidth()) {
32 => {
var phdr = [1]elf.Elf32_Phdr{progHeaderTo32(self.program_headers.items[index])};
if (foreign_endian) {
@ -663,10 +759,10 @@ const Update = struct {
}
}

fn writeSectHeader(self: *Update, index: usize) !void {
const foreign_endian = self.module.target.cpu.arch.endian() != std.Target.current.cpu.arch.endian();
fn writeSectHeader(self: *ElfFile, index: usize) !void {
const foreign_endian = self.options.target.cpu.arch.endian() != std.Target.current.cpu.arch.endian();
const offset = self.sections.items[index].sh_offset;
switch (self.module.target.cpu.arch.ptrBitWidth()) {
switch (self.options.target.cpu.arch.ptrBitWidth()) {
32 => {
var shdr: [1]elf.Elf32_Shdr = undefined;
shdr[0] = sectHeaderTo32(self.sections.items[index]);
@ -686,13 +782,8 @@ const Update = struct {
}
}

fn writeSymbols(self: *Update) !void {
const ptr_width: enum { p32, p64 } = switch (self.module.target.cpu.arch.ptrBitWidth()) {
32 => .p32,
64 => .p64,
else => return error.UnsupportedArchitecture,
};
const small_ptr = ptr_width == .p32;
fn writeSymbols(self: *ElfFile) !void {
const small_ptr = self.ptr_width == .p32;
const syms_sect = &self.sections.items[self.symtab_section_index.?];
const sym_align: u16 = if (small_ptr) @alignOf(elf.Elf32_Sym) else @alignOf(elf.Elf64_Sym);
const sym_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Sym) else @sizeOf(elf.Elf64_Sym);
@ -708,8 +799,8 @@ const Update = struct {
syms_sect.sh_size = needed_size;
syms_sect.sh_info = @intCast(u32, self.symbols.items.len);
const allocator = self.symbols.allocator;
const foreign_endian = self.module.target.cpu.arch.endian() != std.Target.current.cpu.arch.endian();
switch (ptr_width) {
const foreign_endian = self.options.target.cpu.arch.endian() != std.Target.current.cpu.arch.endian();
switch (self.ptr_width) {
.p32 => {
const buf = try allocator.alloc(elf.Elf32_Sym, self.symbols.items.len);
defer allocator.free(buf);
@ -754,13 +845,13 @@ const Update = struct {

/// Truncates the existing file and overwrites its contents.
/// Returns an error if `file` is not already open with +read +write +seek abilities.
pub fn writeFile(allocator: *Allocator, module: ir.Module, file: fs.File) !Result {
switch (module.output_mode) {
pub fn createElfFile(allocator: *Allocator, file: fs.File, options: Options) !ElfFile {
switch (options.output_mode) {
.Exe => {},
.Obj => {},
.Lib => return error.TODOImplementWritingLibFiles,
}
switch (module.object_format) {
switch (options.object_format) {
.unknown => unreachable, // TODO remove this tag from the enum
.coff => return error.TODOImplementWritingCOFF,
.elf => {},
@ -768,38 +859,79 @@ pub fn writeFile(allocator: *Allocator, module: ir.Module, file: fs.File) !Resul
.wasm => return error.TODOImplementWritingWasmObjects,
}

var update = Update{
var self: ElfFile = .{
.allocator = allocator,
.file = file,
.module = &module,
.sections = std.ArrayList(elf.Elf64_Shdr).init(allocator),
.shdr_table_offset = null,
.program_headers = std.ArrayList(elf.Elf64_Phdr).init(allocator),
.phdr_table_offset = null,
.phdr_load_re_index = null,
.entry_addr = null,
.shstrtab = std.ArrayList(u8).init(allocator),
.shstrtab_index = null,
.text_section_index = null,
.symtab_section_index = null,

.symbols = std.ArrayList(elf.Elf64_Sym).init(allocator),

.errors = std.ArrayList(ErrorMsg).init(allocator),
.options = options,
.ptr_width = switch (options.target.cpu.arch.ptrBitWidth()) {
32 => .p32,
64 => .p64,
else => return error.UnsupportedELFArchitecture,
},
.symtab_dirty = true,
.shdr_table_dirty = true,
};
defer update.deinit();
errdefer self.deinit();

try update.perform();
return Result{
.errors = update.errors.toOwnedSlice(),
};
// Index 0 is always a null symbol.
try self.symbols.append(allocator, .{
.st_name = 0,
.st_info = 0,
.st_other = 0,
.st_shndx = 0,
.st_value = 0,
.st_size = 0,
});

// There must always be a null section in index 0
try self.sections.append(allocator, .{
.sh_name = 0,
.sh_type = elf.SHT_NULL,
.sh_flags = 0,
.sh_addr = 0,
.sh_offset = 0,
.sh_size = 0,
.sh_link = 0,
.sh_info = 0,
.sh_addralign = 0,
.sh_entsize = 0,
});

try self.populateMissingMetadata();

return self;
}

/// Returns error.IncrFailed if incremental update could not be performed.
fn updateFileInner(allocator: *Allocator, module: ir.Module, file: fs.File) !Result {
//var ehdr_buf: [@sizeOf(elf.Elf64_Ehdr)]u8 = undefined;
fn openBinFileInner(allocator: *Allocator, file: fs.File, options: Options) !ElfFile {
switch (options.output_mode) {
.Exe => {},
.Obj => {},
.Lib => return error.IncrFailed,
}
switch (options.object_format) {
.unknown => unreachable, // TODO remove this tag from the enum
.coff => return error.IncrFailed,
.elf => {},
.macho => return error.IncrFailed,
.wasm => return error.IncrFailed,
}
var self: ElfFile = .{
.allocator = allocator,
.file = file,
.options = options,
.ptr_width = switch (options.target.cpu.arch.ptrBitWidth()) {
32 => .p32,
64 => .p64,
else => return error.UnsupportedELFArchitecture,
},
};
errdefer self.deinit();

// TODO implement incremental linking
// TODO implement reading the elf file
return error.IncrFailed;
//try self.populateMissingMetadata();
//return self;
}

/// Saturating multiplication
@ -840,14 +972,14 @@ fn sectHeaderTo32(shdr: elf.Elf64_Shdr) elf.Elf32_Shdr {
};
}

fn determineMode(module: ir.Module) fs.File.Mode {
fn determineMode(options: Options) fs.File.Mode {
// On common systems with a 0o022 umask, 0o777 will still result in a file created
// with 0o755 permissions, but it works appropriately if the system is configured
// more leniently. As another data point, C's fopen seems to open files with the
// 666 mode.
const executable_mode = if (std.Target.current.os.tag == .windows) 0 else 0o777;
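// Editor's worked example of the comment above: with umask 0o022, a
// requested mode of 0o777 yields 0o777 & ~0o022 = 0o755 (rwxr-xr-x),
// while fs.File.default_mode (0o666) yields 0o644 (rw-r--r--).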
switch (module.output_mode) {
.Lib => return switch (module.link_mode) {
switch (options.output_mode) {
.Lib => return switch (options.link_mode) {
.Dynamic => executable_mode,
.Static => fs.File.default_mode,
},

@ -1,31 +0,0 @@
const std = @import("std");
const mem = std.mem;
const assert = std.debug.assert;
const ArrayListSentineled = std.ArrayListSentineled;

pub const Package = struct {
root_src_dir: ArrayListSentineled(u8, 0),
root_src_path: ArrayListSentineled(u8, 0),

/// relative to root_src_dir
table: Table,

pub const Table = std.StringHashMap(*Package);

/// makes internal copies of root_src_dir and root_src_path
/// allocator should be an arena allocator because Package never frees anything
pub fn create(allocator: *mem.Allocator, root_src_dir: []const u8, root_src_path: []const u8) !*Package {
const ptr = try allocator.create(Package);
ptr.* = Package{
.root_src_dir = try ArrayListSentineled(u8, 0).init(allocator, root_src_dir),
.root_src_path = try ArrayListSentineled(u8, 0).init(allocator, root_src_path),
.table = Table.init(allocator),
};
return ptr;
}

pub fn add(self: *Package, name: []const u8, package: *Package) !void {
const entry = try self.table.put(try mem.dupe(self.table.allocator, u8, name), package);
assert(entry == null);
}
};

@ -1,418 +0,0 @@
const std = @import("std");
const Allocator = mem.Allocator;
const Decl = @import("decl.zig").Decl;
const Compilation = @import("compilation.zig").Compilation;
const mem = std.mem;
const ast = std.zig.ast;
const Value = @import("value.zig").Value;
const Type = @import("type.zig").Type;
const ir = @import("ir.zig");
const Span = @import("errmsg.zig").Span;
const assert = std.debug.assert;
const event = std.event;
const llvm = @import("llvm.zig");

pub const Scope = struct {
id: Id,
parent: ?*Scope,
ref_count: std.atomic.Int(usize),

/// Thread-safe
pub fn ref(base: *Scope) void {
_ = base.ref_count.incr();
}

/// Thread-safe
pub fn deref(base: *Scope, comp: *Compilation) void {
if (base.ref_count.decr() == 1) {
if (base.parent) |parent| parent.deref(comp);
switch (base.id) {
.Root => @fieldParentPtr(Root, "base", base).destroy(comp),
.Decls => @fieldParentPtr(Decls, "base", base).destroy(comp),
.Block => @fieldParentPtr(Block, "base", base).destroy(comp),
.FnDef => @fieldParentPtr(FnDef, "base", base).destroy(comp),
.CompTime => @fieldParentPtr(CompTime, "base", base).destroy(comp),
.Defer => @fieldParentPtr(Defer, "base", base).destroy(comp),
.DeferExpr => @fieldParentPtr(DeferExpr, "base", base).destroy(comp),
.Var => @fieldParentPtr(Var, "base", base).destroy(comp),
.AstTree => @fieldParentPtr(AstTree, "base", base).destroy(comp),
}
}
}

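// Editor's note on the intrusive downcast used by deref() above: every
// concrete scope embeds a `base: Scope` field, so the tag stored in `id`
// plus @fieldParentPtr recovers the concrete type without a vtable, e.g.:
//
// const block: *Block = @fieldParentPtr(Block, "base", base); // base.id == .Block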
pub fn findRoot(base: *Scope) *Root {
var scope = base;
while (scope.parent) |parent| {
scope = parent;
}
assert(scope.id == .Root);
return @fieldParentPtr(Root, "base", scope);
}

pub fn findFnDef(base: *Scope) ?*FnDef {
var scope = base;
while (true) {
switch (scope.id) {
.FnDef => return @fieldParentPtr(FnDef, "base", scope),
.Root, .Decls => return null,

.Block,
.Defer,
.DeferExpr,
.CompTime,
.Var,
=> scope = scope.parent.?,

.AstTree => unreachable,
}
}
}

pub fn findDeferExpr(base: *Scope) ?*DeferExpr {
var scope = base;
while (true) {
switch (scope.id) {
.DeferExpr => return @fieldParentPtr(DeferExpr, "base", scope),

.FnDef,
.Decls,
=> return null,

.Block,
.Defer,
.CompTime,
.Root,
.Var,
=> scope = scope.parent orelse return null,

.AstTree => unreachable,
}
}
}

fn init(base: *Scope, id: Id, parent: *Scope) void {
base.* = Scope{
.id = id,
.parent = parent,
.ref_count = std.atomic.Int(usize).init(1),
};
parent.ref();
}

pub const Id = enum {
Root,
AstTree,
Decls,
Block,
FnDef,
CompTime,
Defer,
DeferExpr,
Var,
};

pub const Root = struct {
base: Scope,
realpath: []const u8,
decls: *Decls,

/// Creates a Root scope with 1 reference
/// Takes ownership of realpath
pub fn create(comp: *Compilation, realpath: []u8) !*Root {
const self = try comp.gpa().create(Root);
self.* = Root{
.base = Scope{
.id = .Root,
.parent = null,
.ref_count = std.atomic.Int(usize).init(1),
},
.realpath = realpath,
.decls = undefined,
};
errdefer comp.gpa().destroy(self);
self.decls = try Decls.create(comp, &self.base);
return self;
}

pub fn destroy(self: *Root, comp: *Compilation) void {
// TODO comp.fs_watch.removeFile(self.realpath);
self.decls.base.deref(comp);
comp.gpa().free(self.realpath);
comp.gpa().destroy(self);
}
};

pub const AstTree = struct {
base: Scope,
tree: *ast.Tree,

/// Creates a scope with 1 reference
/// Takes ownership of tree, will deinit and destroy when done.
pub fn create(comp: *Compilation, tree: *ast.Tree, root_scope: *Root) !*AstTree {
const self = try comp.gpa().create(AstTree);
self.* = AstTree{
.base = undefined,
.tree = tree,
};
self.base.init(.AstTree, &root_scope.base);

return self;
}

pub fn destroy(self: *AstTree, comp: *Compilation) void {
comp.gpa().free(self.tree.source);
self.tree.deinit();
comp.gpa().destroy(self);
}

pub fn root(self: *AstTree) *Root {
return self.base.findRoot();
}
};

pub const Decls = struct {
base: Scope,

/// This table remains Write Locked when the names are incomplete or possibly outdated.
/// So if a reader manages to grab a lock, it can be sure that the set of names is complete
/// and correct.
table: event.RwLocked(Decl.Table),

/// Creates a Decls scope with 1 reference
pub fn create(comp: *Compilation, parent: *Scope) !*Decls {
const self = try comp.gpa().create(Decls);
self.* = Decls{
.base = undefined,
.table = event.RwLocked(Decl.Table).init(Decl.Table.init(comp.gpa())),
};
self.base.init(.Decls, parent);
return self;
}

pub fn destroy(self: *Decls, comp: *Compilation) void {
self.table.deinit();
comp.gpa().destroy(self);
}
};

pub const Block = struct {
base: Scope,
incoming_values: std.ArrayList(*ir.Inst),
incoming_blocks: std.ArrayList(*ir.BasicBlock),
end_block: *ir.BasicBlock,
is_comptime: *ir.Inst,

safety: Safety,

const Safety = union(enum) {
Auto,
Manual: Manual,

const Manual = struct {
/// the source span that disabled the safety value
span: Span,

/// whether safety is enabled
enabled: bool,
};

fn get(self: Safety, comp: *Compilation) bool {
return switch (self) {
.Auto => switch (comp.build_mode) {
.Debug,
.ReleaseSafe,
=> true,
.ReleaseFast,
.ReleaseSmall,
=> false,
},
.Manual => |man| man.enabled,
};
}
};

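// Editor's note on Safety.get() above: .Auto follows the build mode
// (safety checks on in Debug and ReleaseSafe, off in ReleaseFast and
// ReleaseSmall), while .Manual records both the override and the source
// span that made it, e.g. a @setRuntimeSafety(false) at that location.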
/// Creates a Block scope with 1 reference
pub fn create(comp: *Compilation, parent: *Scope) !*Block {
const self = try comp.gpa().create(Block);
self.* = Block{
.base = undefined,
.incoming_values = undefined,
.incoming_blocks = undefined,
.end_block = undefined,
.is_comptime = undefined,
.safety = Safety.Auto,
};
self.base.init(.Block, parent);
return self;
}

pub fn destroy(self: *Block, comp: *Compilation) void {
comp.gpa().destroy(self);
}
};

pub const FnDef = struct {
base: Scope,

/// This reference is not counted so that the scope can get destroyed with the function
fn_val: ?*Value.Fn,

/// Creates a FnDef scope with 1 reference
/// Must set the fn_val later
pub fn create(comp: *Compilation, parent: *Scope) !*FnDef {
const self = try comp.gpa().create(FnDef);
self.* = FnDef{
.base = undefined,
.fn_val = null,
};
self.base.init(.FnDef, parent);
return self;
}

pub fn destroy(self: *FnDef, comp: *Compilation) void {
comp.gpa().destroy(self);
}
};

pub const CompTime = struct {
base: Scope,

/// Creates a CompTime scope with 1 reference
pub fn create(comp: *Compilation, parent: *Scope) !*CompTime {
const self = try comp.gpa().create(CompTime);
self.* = CompTime{ .base = undefined };
self.base.init(.CompTime, parent);
return self;
}

pub fn destroy(self: *CompTime, comp: *Compilation) void {
comp.gpa().destroy(self);
}
};

pub const Defer = struct {
base: Scope,
defer_expr_scope: *DeferExpr,
kind: Kind,

pub const Kind = enum {
ScopeExit,
ErrorExit,
};

/// Creates a Defer scope with 1 reference
pub fn create(
comp: *Compilation,
parent: *Scope,
kind: Kind,
defer_expr_scope: *DeferExpr,
) !*Defer {
const self = try comp.gpa().create(Defer);
self.* = Defer{
.base = undefined,
.defer_expr_scope = defer_expr_scope,
.kind = kind,
};
self.base.init(.Defer, parent);
defer_expr_scope.base.ref();
return self;
}

pub fn destroy(self: *Defer, comp: *Compilation) void {
self.defer_expr_scope.base.deref(comp);
comp.gpa().destroy(self);
}
};

pub const DeferExpr = struct {
base: Scope,
expr_node: *ast.Node,
reported_err: bool,

/// Creates a DeferExpr scope with 1 reference
pub fn create(comp: *Compilation, parent: *Scope, expr_node: *ast.Node) !*DeferExpr {
const self = try comp.gpa().create(DeferExpr);
self.* = DeferExpr{
.base = undefined,
.expr_node = expr_node,
.reported_err = false,
};
self.base.init(.DeferExpr, parent);
return self;
}

pub fn destroy(self: *DeferExpr, comp: *Compilation) void {
comp.gpa().destroy(self);
}
};

pub const Var = struct {
base: Scope,
name: []const u8,
src_node: *ast.Node,
data: Data,

pub const Data = union(enum) {
Param: Param,
Const: *Value,
};

pub const Param = struct {
index: usize,
typ: *Type,
llvm_value: *llvm.Value,
};

pub fn createParam(
comp: *Compilation,
parent: *Scope,
name: []const u8,
src_node: *ast.Node,
param_index: usize,
param_type: *Type,
) !*Var {
const self = try create(comp, parent, name, src_node);
self.data = Data{
.Param = Param{
.index = param_index,
.typ = param_type,
.llvm_value = undefined,
},
};
return self;
}

pub fn createConst(
comp: *Compilation,
parent: *Scope,
name: []const u8,
src_node: *ast.Node,
value: *Value,
) !*Var {
const self = try create(comp, parent, name, src_node);
self.data = Data{ .Const = value };
value.ref();
return self;
}

fn create(comp: *Compilation, parent: *Scope, name: []const u8, src_node: *ast.Node) !*Var {
const self = try comp.gpa().create(Var);
self.* = Var{
.base = undefined,
.name = name,
.src_node = src_node,
.data = undefined,
};
self.base.init(.Var, parent);
return self;
}

pub fn destroy(self: *Var, comp: *Compilation) void {
switch (self.data) {
.Param => {},
.Const => |value| value.deref(comp),
}
comp.gpa().destroy(self);
}
};
};

@ -3,15 +3,14 @@ const link = @import("link.zig");
const ir = @import("ir.zig");
const Allocator = std.mem.Allocator;

var global_ctx: TestContext = undefined;

test "self-hosted" {
try global_ctx.init();
defer global_ctx.deinit();
var ctx: TestContext = undefined;
try ctx.init();
defer ctx.deinit();

try @import("stage2_tests").addCases(&global_ctx);
try @import("stage2_tests").addCases(&ctx);

try global_ctx.run();
try ctx.run();
}

pub const TestContext = struct {
||||
|
@ -52,6 +52,7 @@ pub const Type = extern union {
|
||||
.comptime_float => return .ComptimeFloat,
|
||||
.noreturn => return .NoReturn,
|
||||
|
||||
.fn_noreturn_no_args => return .Fn,
|
||||
.fn_naked_noreturn_no_args => return .Fn,
|
||||
.fn_ccc_void_no_args => return .Fn,
|
||||
|
||||
@ -184,6 +185,7 @@ pub const Type = extern union {
|
||||
=> return out_stream.writeAll(@tagName(t)),
|
||||
|
||||
.const_slice_u8 => return out_stream.writeAll("[]const u8"),
|
||||
.fn_noreturn_no_args => return out_stream.writeAll("fn() noreturn"),
|
||||
.fn_naked_noreturn_no_args => return out_stream.writeAll("fn() callconv(.Naked) noreturn"),
|
||||
.fn_ccc_void_no_args => return out_stream.writeAll("fn() callconv(.C) void"),
|
||||
.single_const_pointer_to_comptime_int => return out_stream.writeAll("*const comptime_int"),
|
||||
@ -244,6 +246,7 @@ pub const Type = extern union {
|
||||
.comptime_int => return Value.initTag(.comptime_int_type),
|
||||
.comptime_float => return Value.initTag(.comptime_float_type),
|
||||
.noreturn => return Value.initTag(.noreturn_type),
|
||||
.fn_noreturn_no_args => return Value.initTag(.fn_noreturn_no_args_type),
|
||||
.fn_naked_noreturn_no_args => return Value.initTag(.fn_naked_noreturn_no_args_type),
|
||||
.fn_ccc_void_no_args => return Value.initTag(.fn_ccc_void_no_args_type),
|
||||
.single_const_pointer_to_comptime_int => return Value.initTag(.single_const_pointer_to_comptime_int_type),
|
||||
@ -286,6 +289,7 @@ pub const Type = extern union {
|
||||
.array,
|
||||
.array_u8_sentinel_0,
|
||||
.const_slice_u8,
|
||||
.fn_noreturn_no_args,
|
||||
.fn_naked_noreturn_no_args,
|
||||
.fn_ccc_void_no_args,
|
||||
.int_unsigned,
|
||||
@ -329,6 +333,7 @@ pub const Type = extern union {
|
||||
.array_u8_sentinel_0,
|
||||
.single_const_pointer,
|
||||
.single_const_pointer_to_comptime_int,
|
||||
.fn_noreturn_no_args,
|
||||
.fn_naked_noreturn_no_args,
|
||||
.fn_ccc_void_no_args,
|
||||
.int_unsigned,
|
||||
@ -369,6 +374,7 @@ pub const Type = extern union {
|
||||
.noreturn,
|
||||
.array,
|
||||
.array_u8_sentinel_0,
|
||||
.fn_noreturn_no_args,
|
||||
.fn_naked_noreturn_no_args,
|
||||
.fn_ccc_void_no_args,
|
||||
.int_unsigned,
|
||||
@ -410,6 +416,7 @@ pub const Type = extern union {
|
||||
.comptime_int,
|
||||
.comptime_float,
|
||||
.noreturn,
|
||||
.fn_noreturn_no_args,
|
||||
.fn_naked_noreturn_no_args,
|
||||
.fn_ccc_void_no_args,
|
||||
.int_unsigned,
|
||||
@ -451,6 +458,7 @@ pub const Type = extern union {
|
||||
.comptime_int,
|
||||
.comptime_float,
|
||||
.noreturn,
|
||||
.fn_noreturn_no_args,
|
||||
.fn_naked_noreturn_no_args,
|
||||
.fn_ccc_void_no_args,
|
||||
.single_const_pointer,
|
||||
@ -481,6 +489,7 @@ pub const Type = extern union {
|
||||
.comptime_int,
|
||||
.comptime_float,
|
||||
.noreturn,
|
||||
.fn_noreturn_no_args,
|
||||
.fn_naked_noreturn_no_args,
|
||||
.fn_ccc_void_no_args,
|
||||
.array,
|
||||
@ -524,6 +533,7 @@ pub const Type = extern union {
|
||||
.comptime_int,
|
||||
.comptime_float,
|
||||
.noreturn,
|
||||
.fn_noreturn_no_args,
|
||||
.fn_naked_noreturn_no_args,
|
||||
.fn_ccc_void_no_args,
|
||||
.array,
|
||||
@ -579,6 +589,7 @@ pub const Type = extern union {
|
||||
/// Asserts the type is a function.
|
||||
pub fn fnParamLen(self: Type) usize {
|
||||
return switch (self.tag()) {
|
||||
.fn_noreturn_no_args => 0,
|
||||
.fn_naked_noreturn_no_args => 0,
|
||||
.fn_ccc_void_no_args => 0,
|
||||
|
||||
@ -622,6 +633,7 @@ pub const Type = extern union {
|
||||
/// given by `fnParamLen`.
|
||||
pub fn fnParamTypes(self: Type, types: []Type) void {
|
||||
switch (self.tag()) {
|
||||
.fn_noreturn_no_args => return,
|
||||
.fn_naked_noreturn_no_args => return,
|
||||
.fn_ccc_void_no_args => return,
|
||||
|
||||
@ -664,6 +676,7 @@ pub const Type = extern union {
|
||||
/// Asserts the type is a function.
|
||||
pub fn fnReturnType(self: Type) Type {
|
||||
return switch (self.tag()) {
|
||||
.fn_noreturn_no_args => Type.initTag(.noreturn),
|
||||
.fn_naked_noreturn_no_args => Type.initTag(.noreturn),
|
||||
.fn_ccc_void_no_args => Type.initTag(.void),
|
||||
|
||||
@ -706,6 +719,7 @@ pub const Type = extern union {
|
||||
/// Asserts the type is a function.
|
||||
pub fn fnCallingConvention(self: Type) std.builtin.CallingConvention {
|
||||
return switch (self.tag()) {
|
||||
.fn_noreturn_no_args => .Unspecified,
|
||||
.fn_naked_noreturn_no_args => .Naked,
|
||||
.fn_ccc_void_no_args => .C,
|
||||
|
||||
@ -745,6 +759,49 @@ pub const Type = extern union {
|
||||
};
|
||||
}
|
||||
|
||||
/// Asserts the type is a function.
|
||||
pub fn fnIsVarArgs(self: Type) bool {
|
||||
return switch (self.tag()) {
|
||||
.fn_noreturn_no_args => false,
|
||||
.fn_naked_noreturn_no_args => false,
|
||||
.fn_ccc_void_no_args => false,
|
||||
|
||||
.f16,
|
||||
.f32,
|
||||
.f64,
|
||||
.f128,
|
||||
.c_longdouble,
|
||||
.c_void,
|
||||
.bool,
|
||||
.void,
|
||||
.type,
|
||||
.anyerror,
|
||||
.comptime_int,
|
||||
.comptime_float,
|
||||
.noreturn,
|
||||
.array,
|
||||
.single_const_pointer,
|
||||
.single_const_pointer_to_comptime_int,
|
||||
.array_u8_sentinel_0,
|
||||
.const_slice_u8,
|
||||
.u8,
|
||||
.i8,
|
||||
.usize,
|
||||
.isize,
|
||||
.c_short,
|
||||
.c_ushort,
|
||||
.c_int,
|
||||
.c_uint,
|
||||
.c_long,
|
||||
.c_ulong,
|
||||
.c_longlong,
|
||||
.c_ulonglong,
|
||||
.int_unsigned,
|
||||
.int_signed,
|
||||
=> unreachable,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn isNumeric(self: Type) bool {
|
||||
return switch (self.tag()) {
|
||||
.f16,
|
||||
@ -776,6 +833,7 @@ pub const Type = extern union {
|
||||
.type,
|
||||
.anyerror,
|
||||
.noreturn,
|
||||
.fn_noreturn_no_args,
|
||||
.fn_naked_noreturn_no_args,
|
||||
.fn_ccc_void_no_args,
|
||||
.array,
|
||||
@ -812,6 +870,7 @@ pub const Type = extern union {
|
||||
.bool,
|
||||
.type,
|
||||
.anyerror,
|
||||
.fn_noreturn_no_args,
|
||||
.fn_naked_noreturn_no_args,
|
||||
.fn_ccc_void_no_args,
|
||||
.single_const_pointer_to_comptime_int,
|
||||
@ -865,6 +924,7 @@ pub const Type = extern union {
|
||||
.bool,
|
||||
.type,
|
||||
.anyerror,
|
||||
.fn_noreturn_no_args,
|
||||
.fn_naked_noreturn_no_args,
|
||||
.fn_ccc_void_no_args,
|
||||
.single_const_pointer_to_comptime_int,
|
||||
@ -902,11 +962,11 @@ pub const Type = extern union {
|
||||
c_longlong,
|
||||
c_ulonglong,
|
||||
c_longdouble,
|
||||
c_void,
|
||||
f16,
|
||||
f32,
|
||||
f64,
|
||||
f128,
|
||||
c_void,
|
||||
bool,
|
||||
void,
|
||||
type,
|
||||
@ -914,6 +974,7 @@ pub const Type = extern union {
|
||||
comptime_int,
|
||||
comptime_float,
|
||||
noreturn,
|
||||
fn_noreturn_no_args,
|
||||
fn_naked_noreturn_no_args,
|
||||
fn_ccc_void_no_args,
|
||||
single_const_pointer_to_comptime_int,
|
||||
|
@ -1,47 +0,0 @@
|
||||
const std = @import("std");
|
||||
const Target = std.Target;
|
||||
const llvm = @import("llvm.zig");
|
||||
|
||||
pub fn getDarwinArchString(self: Target) [:0]const u8 {
|
||||
switch (self.cpu.arch) {
|
||||
.aarch64 => return "arm64",
|
||||
.thumb,
|
||||
.arm,
|
||||
=> return "arm",
|
||||
.powerpc => return "ppc",
|
||||
.powerpc64 => return "ppc64",
|
||||
.powerpc64le => return "ppc64le",
|
||||
// @tagName should be able to return sentinel terminated slice
|
||||
else => @panic("TODO https://github.com/ziglang/zig/issues/3779"), //return @tagName(arch),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn llvmTargetFromTriple(triple: [:0]const u8) !*llvm.Target {
|
||||
var result: *llvm.Target = undefined;
|
||||
var err_msg: [*:0]u8 = undefined;
|
||||
if (llvm.GetTargetFromTriple(triple, &result, &err_msg) != 0) {
|
||||
std.debug.warn("triple: {s} error: {s}\n", .{ triple, err_msg });
|
||||
return error.UnsupportedTarget;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
pub fn initializeAllTargets() void {
|
||||
llvm.InitializeAllTargets();
|
||||
llvm.InitializeAllTargetInfos();
|
||||
llvm.InitializeAllTargetMCs();
|
||||
llvm.InitializeAllAsmPrinters();
|
||||
llvm.InitializeAllAsmParsers();
|
||||
}
|
||||
|
||||
pub fn getLLVMTriple(allocator: *std.mem.Allocator, target: std.Target) ![:0]u8 {
|
||||
var result = try std.ArrayListSentineled(u8, 0).initSize(allocator, 0);
|
||||
defer result.deinit();
|
||||
|
||||
try result.outStream().print(
|
||||
"{}-unknown-{}-{}",
|
||||
.{ @tagName(target.cpu.arch), @tagName(target.os.tag), @tagName(target.abi) },
|
||||
);
|
||||
|
||||
return result.toOwnedSlice();
|
||||
}
|
@ -6,6 +6,7 @@ const BigIntConst = std.math.big.int.Const;
|
||||
const BigIntMutable = std.math.big.int.Mutable;
|
||||
const Target = std.Target;
|
||||
const Allocator = std.mem.Allocator;
|
||||
const ir = @import("ir.zig");
|
||||
|
||||
/// This is the raw data, with no bookkeeping, no memory awareness,
|
||||
/// no de-duplication, and no type system awareness.
|
||||
@ -45,6 +46,7 @@ pub const Value = extern union {
|
||||
comptime_int_type,
|
||||
comptime_float_type,
|
||||
noreturn_type,
|
||||
fn_noreturn_no_args_type,
|
||||
fn_naked_noreturn_no_args_type,
|
||||
fn_ccc_void_no_args_type,
|
||||
single_const_pointer_to_comptime_int_type,
|
||||
@ -64,8 +66,8 @@ pub const Value = extern union {
|
||||
int_big_positive,
|
||||
int_big_negative,
|
||||
function,
|
||||
ref,
|
||||
ref_val,
|
||||
decl_ref,
|
||||
elem_ptr,
|
||||
bytes,
|
||||
repeated, // the value is a value repeated some number of times
|
||||
|
||||
@ -136,6 +138,7 @@ pub const Value = extern union {
|
||||
.comptime_int_type => return out_stream.writeAll("comptime_int"),
|
||||
.comptime_float_type => return out_stream.writeAll("comptime_float"),
|
||||
.noreturn_type => return out_stream.writeAll("noreturn"),
|
||||
.fn_noreturn_no_args_type => return out_stream.writeAll("fn() noreturn"),
|
||||
.fn_naked_noreturn_no_args_type => return out_stream.writeAll("fn() callconv(.Naked) noreturn"),
|
||||
.fn_ccc_void_no_args_type => return out_stream.writeAll("fn() callconv(.C) void"),
|
||||
.single_const_pointer_to_comptime_int_type => return out_stream.writeAll("*const comptime_int"),
|
||||
@ -153,11 +156,11 @@ pub const Value = extern union {
|
||||
.int_big_positive => return out_stream.print("{}", .{val.cast(Payload.IntBigPositive).?.asBigInt()}),
|
||||
.int_big_negative => return out_stream.print("{}", .{val.cast(Payload.IntBigNegative).?.asBigInt()}),
|
||||
.function => return out_stream.writeAll("(function)"),
|
||||
.ref => return out_stream.writeAll("(ref)"),
|
||||
.ref_val => {
|
||||
try out_stream.writeAll("*const ");
|
||||
val = val.cast(Payload.RefVal).?.val;
|
||||
continue;
|
||||
.decl_ref => return out_stream.writeAll("(decl ref)"),
|
||||
.elem_ptr => {
|
||||
const elem_ptr = val.cast(Payload.Int_u64).?;
|
||||
try out_stream.print("&[{}] ", .{elem_ptr.index});
|
||||
val = elem_ptr.array_ptr;
|
||||
},
|
||||
.bytes => return std.zig.renderStringLiteral(self.cast(Payload.Bytes).?.data, out_stream),
|
||||
.repeated => {
|
||||
@ -181,31 +184,32 @@ pub const Value = extern union {
|
||||
return switch (self.tag()) {
|
||||
.ty => self.cast(Payload.Ty).?.ty,
|
||||
|
||||
.u8_type => Type.initTag(.@"u8"),
|
||||
.i8_type => Type.initTag(.@"i8"),
|
||||
.isize_type => Type.initTag(.@"isize"),
|
||||
.usize_type => Type.initTag(.@"usize"),
|
||||
.c_short_type => Type.initTag(.@"c_short"),
|
||||
.c_ushort_type => Type.initTag(.@"c_ushort"),
|
||||
.c_int_type => Type.initTag(.@"c_int"),
|
||||
.c_uint_type => Type.initTag(.@"c_uint"),
|
||||
.c_long_type => Type.initTag(.@"c_long"),
|
||||
.c_ulong_type => Type.initTag(.@"c_ulong"),
|
||||
.c_longlong_type => Type.initTag(.@"c_longlong"),
|
||||
.c_ulonglong_type => Type.initTag(.@"c_ulonglong"),
|
||||
.c_longdouble_type => Type.initTag(.@"c_longdouble"),
|
||||
.f16_type => Type.initTag(.@"f16"),
|
||||
.f32_type => Type.initTag(.@"f32"),
|
||||
.f64_type => Type.initTag(.@"f64"),
|
||||
.f128_type => Type.initTag(.@"f128"),
|
||||
.c_void_type => Type.initTag(.@"c_void"),
|
||||
.bool_type => Type.initTag(.@"bool"),
|
||||
.void_type => Type.initTag(.@"void"),
|
||||
.type_type => Type.initTag(.@"type"),
|
||||
.anyerror_type => Type.initTag(.@"anyerror"),
|
||||
.comptime_int_type => Type.initTag(.@"comptime_int"),
|
||||
.comptime_float_type => Type.initTag(.@"comptime_float"),
|
||||
.noreturn_type => Type.initTag(.@"noreturn"),
|
||||
.u8_type => Type.initTag(.u8),
|
||||
.i8_type => Type.initTag(.i8),
|
||||
.isize_type => Type.initTag(.isize),
|
||||
.usize_type => Type.initTag(.usize),
|
||||
.c_short_type => Type.initTag(.c_short),
|
||||
.c_ushort_type => Type.initTag(.c_ushort),
|
||||
.c_int_type => Type.initTag(.c_int),
|
||||
.c_uint_type => Type.initTag(.c_uint),
|
||||
.c_long_type => Type.initTag(.c_long),
|
||||
.c_ulong_type => Type.initTag(.c_ulong),
|
||||
.c_longlong_type => Type.initTag(.c_longlong),
|
||||
.c_ulonglong_type => Type.initTag(.c_ulonglong),
|
||||
.c_longdouble_type => Type.initTag(.c_longdouble),
|
||||
.f16_type => Type.initTag(.f16),
|
||||
.f32_type => Type.initTag(.f32),
|
||||
.f64_type => Type.initTag(.f64),
|
||||
.f128_type => Type.initTag(.f128),
|
||||
.c_void_type => Type.initTag(.c_void),
|
||||
.bool_type => Type.initTag(.bool),
|
||||
.void_type => Type.initTag(.void),
|
||||
.type_type => Type.initTag(.type),
|
||||
.anyerror_type => Type.initTag(.anyerror),
|
||||
.comptime_int_type => Type.initTag(.comptime_int),
|
||||
.comptime_float_type => Type.initTag(.comptime_float),
|
||||
.noreturn_type => Type.initTag(.noreturn),
|
||||
.fn_noreturn_no_args_type => Type.initTag(.fn_noreturn_no_args),
|
||||
.fn_naked_noreturn_no_args_type => Type.initTag(.fn_naked_noreturn_no_args),
|
||||
.fn_ccc_void_no_args_type => Type.initTag(.fn_ccc_void_no_args),
|
||||
.single_const_pointer_to_comptime_int_type => Type.initTag(.single_const_pointer_to_comptime_int),
|
||||
@ -222,8 +226,8 @@ pub const Value = extern union {
.int_big_positive,
.int_big_negative,
.function,
.ref,
.ref_val,
.decl_ref,
.elem_ptr,
.bytes,
.repeated,
=> unreachable,
@ -259,6 +263,7 @@ pub const Value = extern union {
.comptime_int_type,
.comptime_float_type,
.noreturn_type,
.fn_noreturn_no_args_type,
.fn_naked_noreturn_no_args_type,
.fn_ccc_void_no_args_type,
.single_const_pointer_to_comptime_int_type,
@ -267,8 +272,8 @@ pub const Value = extern union {
.bool_false,
.null_value,
.function,
.ref,
.ref_val,
.decl_ref,
.elem_ptr,
.bytes,
.undef,
.repeated,
@ -314,6 +319,7 @@ pub const Value = extern union {
.comptime_int_type,
.comptime_float_type,
.noreturn_type,
.fn_noreturn_no_args_type,
.fn_naked_noreturn_no_args_type,
.fn_ccc_void_no_args_type,
.single_const_pointer_to_comptime_int_type,
@ -322,8 +328,8 @@ pub const Value = extern union {
.bool_false,
.null_value,
.function,
.ref,
.ref_val,
.decl_ref,
.elem_ptr,
.bytes,
.undef,
.repeated,
@ -370,6 +376,7 @@ pub const Value = extern union {
.comptime_int_type,
.comptime_float_type,
.noreturn_type,
.fn_noreturn_no_args_type,
.fn_naked_noreturn_no_args_type,
.fn_ccc_void_no_args_type,
.single_const_pointer_to_comptime_int_type,
@ -378,8 +385,8 @@ pub const Value = extern union {
.bool_false,
.null_value,
.function,
.ref,
.ref_val,
.decl_ref,
.elem_ptr,
.bytes,
.undef,
.repeated,
@ -431,6 +438,7 @@ pub const Value = extern union {
.comptime_int_type,
.comptime_float_type,
.noreturn_type,
.fn_noreturn_no_args_type,
.fn_naked_noreturn_no_args_type,
.fn_ccc_void_no_args_type,
.single_const_pointer_to_comptime_int_type,
@ -439,8 +447,8 @@ pub const Value = extern union {
.bool_false,
.null_value,
.function,
.ref,
.ref_val,
.decl_ref,
.elem_ptr,
.bytes,
.repeated,
=> unreachable,
@ -521,6 +529,7 @@ pub const Value = extern union {
.comptime_int_type,
.comptime_float_type,
.noreturn_type,
.fn_noreturn_no_args_type,
.fn_naked_noreturn_no_args_type,
.fn_ccc_void_no_args_type,
.single_const_pointer_to_comptime_int_type,
@ -529,8 +538,8 @@ pub const Value = extern union {
.bool_false,
.null_value,
.function,
.ref,
.ref_val,
.decl_ref,
.elem_ptr,
.bytes,
.repeated,
.undef,
@ -573,6 +582,7 @@ pub const Value = extern union {
.comptime_int_type,
.comptime_float_type,
.noreturn_type,
.fn_noreturn_no_args_type,
.fn_naked_noreturn_no_args_type,
.fn_ccc_void_no_args_type,
.single_const_pointer_to_comptime_int_type,
@ -581,8 +591,8 @@ pub const Value = extern union {
.bool_false,
.null_value,
.function,
.ref,
.ref_val,
.decl_ref,
.elem_ptr,
.bytes,
.repeated,
.undef,
@ -636,7 +646,7 @@ pub const Value = extern union {
}

/// Asserts the value is a pointer and dereferences it.
pub fn pointerDeref(self: Value) Value {
pub fn pointerDeref(self: Value, module: *ir.Module) !Value {
return switch (self.tag()) {
.ty,
.u8_type,
@ -664,6 +674,7 @@ pub const Value = extern union {
.comptime_int_type,
.comptime_float_type,
.noreturn_type,
.fn_noreturn_no_args_type,
.fn_naked_noreturn_no_args_type,
.fn_ccc_void_no_args_type,
.single_const_pointer_to_comptime_int_type,
@ -683,14 +694,21 @@ pub const Value = extern union {
=> unreachable,

.the_one_possible_value => Value.initTag(.the_one_possible_value),
.ref => self.cast(Payload.Ref).?.cell.contents,
.ref_val => self.cast(Payload.RefVal).?.val,
.decl_ref => {
const index = self.cast(Payload.DeclRef).?.index;
return module.getDeclValue(index);
},
.elem_ptr => {
const elem_ptr = self.cast(Payload.ElemPtr).?;
const array_val = try elem_ptr.array_ptr.pointerDeref(module);
return array_val.elemValue(elem_ptr.index);
},
};
}

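`pointerDeref` now takes the `ir.Module` and can fail: a `decl_ref` resolves through the module's decl table, and an `elem_ptr` recursively dereferences its array pointer before indexing. A sketch of a call site; only `pointerDeref` and `getDeclValue` come from the hunk above, the rest is illustrative:

// Sketch: resolving a comptime pointer value during semantic analysis.
fn resolvePtr(module: *ir.Module, ptr_val: Value) !Value {
    // For .decl_ref this consults the module's decl table; for .elem_ptr
    // it derefs the array pointer first, then indexes into the result.
    return ptr_val.pointerDeref(module);
}
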
/// Asserts the value is a single-item pointer to an array, or an array,
/// or an unknown-length pointer, and returns the element value at the index.
pub fn elemValueAt(self: Value, allocator: *Allocator, index: usize) Allocator.Error!Value {
pub fn elemValue(self: Value, index: usize) Value {
switch (self.tag()) {
.ty,
.u8_type,
@ -718,6 +736,7 @@ pub const Value = extern union {
.comptime_int_type,
.comptime_float_type,
.noreturn_type,
.fn_noreturn_no_args_type,
.fn_naked_noreturn_no_args_type,
.fn_ccc_void_no_args_type,
.single_const_pointer_to_comptime_int_type,
@ -733,13 +752,12 @@ pub const Value = extern union {
.int_big_positive,
.int_big_negative,
.undef,
.elem_ptr,
.decl_ref,
=> unreachable,

.ref => @panic("TODO figure out how MemoryCell works"),
.ref_val => @panic("TODO figure out how MemoryCell works"),

.bytes => {
const int_payload = try allocator.create(Value.Payload.Int_u64);
const int_payload = try allocator.create(Payload.Int_u64);
int_payload.* = .{ .int = self.cast(Payload.Bytes).?.data[index] };
return Value.initPayload(&int_payload.base);
},
@ -749,6 +767,17 @@ pub const Value = extern union {
}
}

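The `.bytes` arm boxes a single byte as an `Int_u64` payload. Note the tension in this hunk: the new `elemValue` signature drops the allocator parameter, yet the arm still calls `allocator.create`, so the final shape is unclear from this hunk alone. A sketch against the old `elemValueAt` form, which is internally consistent:

// Sketch: element 0 of a `.bytes` value "hi" is the integer value 'h' (0x68).
fn firstByte(allocator: *Allocator, str_val: Value) !Value {
    return str_val.elemValueAt(allocator, 0);
}
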
/// Returns a pointer to the element value at the index.
pub fn elemPtr(self: Value, allocator: *Allocator, index: usize) !Value {
const payload = try allocator.create(Payload.ElemPtr);
if (self.cast(Payload.ElemPtr)) |elem_ptr| {
payload.* = .{ .array_ptr = elem_ptr.array_ptr, .index = elem_ptr.index + index };
} else {
payload.* = .{ .array_ptr = self, .index = index };
}
return Value.initPayload(&payload.base);
}

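Note the design choice in `elemPtr`: taking the element pointer of a value that is already an `elem_ptr` folds into one payload with the indexes summed, so pointer chains stay one level deep. A sketch; `arr_val` and `allocator` are illustrative:

// Sketch: &arr[2], then element 3 of that pointer, becomes a single
// elem_ptr with index 5 rather than a two-level chain.
fn elemPtrFolds(allocator: *Allocator, arr_val: Value) !void {
    const p2 = try arr_val.elemPtr(allocator, 2);
    const p5 = try p2.elemPtr(allocator, 3);
    std.debug.assert(p5.cast(Value.Payload.ElemPtr).?.index == 5);
}
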
pub fn isUndef(self: Value) bool {
return self.tag() == .undef;
}
@ -783,6 +812,7 @@ pub const Value = extern union {
.comptime_int_type,
.comptime_float_type,
.noreturn_type,
.fn_noreturn_no_args_type,
.fn_naked_noreturn_no_args_type,
.fn_ccc_void_no_args_type,
.single_const_pointer_to_comptime_int_type,
@ -796,8 +826,8 @@ pub const Value = extern union {
.int_i64,
.int_big_positive,
.int_big_negative,
.ref,
.ref_val,
.decl_ref,
.elem_ptr,
.bytes,
.repeated,
=> false,
@ -841,8 +871,7 @@ pub const Value = extern union {

pub const Function = struct {
base: Payload = Payload{ .tag = .function },
/// Index into the `fns` array of the `ir.Module`
index: usize,
func: *ir.Module.Fn,
};

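The `Function` payload switches from an index into a module-level `fns` array to a direct `*ir.Module.Fn`, presumably so a function Value stays valid while other functions come and go. A construction sketch; the allocation pattern mirrors `elemPtr` above and the names are illustrative:

// Sketch: wrap a module function in a Value via its payload.
fn functionValue(allocator: *Allocator, func: *ir.Module.Fn) !Value {
    const payload = try allocator.create(Value.Payload.Function);
    payload.* = .{ .func = func };
    return Value.initPayload(&payload.base);
}
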
pub const ArraySentinel0_u8_Type = struct {
@ -855,14 +884,17 @@ pub const Value = extern union {
elem_type: *Type,
};

pub const Ref = struct {
base: Payload = Payload{ .tag = .ref },
cell: *MemoryCell,
/// Represents a pointer to a decl, not the value of the decl.
pub const DeclRef = struct {
base: Payload = Payload{ .tag = .decl_ref },
/// Index into the Module's decls list
index: usize,
};

pub const RefVal = struct {
base: Payload = Payload{ .tag = .ref_val },
val: Value,
pub const ElemPtr = struct {
base: Payload = Payload{ .tag = .elem_ptr },
array_ptr: Value,
index: usize,
};

pub const Bytes = struct {
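
With `Ref` and `RefVal` replaced, comptime pointers are expressed by these payloads: `DeclRef` indexes the Module's decls list, and `ElemPtr` points into an aggregate through another pointer Value. A minimal construction sketch; `decl_index` is illustrative, and a long-lived allocation keeps the payload valid for as long as the Value is referenced:

// Sketch: build a pointer-to-decl Value by hand.
fn declPtr(allocator: *Allocator, decl_index: usize) !Value {
    const payload = try allocator.create(Value.Payload.DeclRef);
    payload.* = .{ .index = decl_index };
    // Resolving it later goes through the module: ptr_val.pointerDeref(module)
    return Value.initPayload(&payload.base);
}
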
@ -890,29 +922,3 @@ pub const Value = extern union {
limbs: [(@sizeOf(u64) / @sizeOf(std.math.big.Limb)) + 1]std.math.big.Limb,
};
};

/// This is the heart of resource management of the Zig compiler. The Zig compiler uses
/// stop-the-world mark-and-sweep garbage collection during compilation to manage the resources
/// associated with evaluating compile-time code and semantic analysis. Each `MemoryCell` represents
/// a root.
pub const MemoryCell = struct {
parent: Parent,
contents: Value,

pub const Parent = union(enum) {
none,
struct_field: struct {
struct_base: *MemoryCell,
field_index: usize,
},
array_elem: struct {
array_base: *MemoryCell,
elem_index: usize,
},
union_field: *MemoryCell,
err_union_code: *MemoryCell,
err_union_payload: *MemoryCell,
optional_payload: *MemoryCell,
optional_flag: *MemoryCell,
};
};

@ -1,4 +0,0 @@
pub const Visib = enum {
Private,
Pub,
};