self-hosted: fix the rest of the compile errors

Andrew Kelley 2020-05-13 22:12:38 -04:00
parent 080022f6c6
commit 6a2425c38c
3 changed files with 57 additions and 50 deletions

lib/std/fs/file.zig

@@ -527,19 +527,21 @@ pub const File = struct {
         }
     }

-    pub fn copyRange(in: File, in_offset: u64, out: File, out_offset: u64, len: usize) PWriteError!usize {
+    pub const CopyRangeError = PWriteError || PReadError;
+
+    pub fn copyRange(in: File, in_offset: u64, out: File, out_offset: u64, len: usize) CopyRangeError!usize {
         // TODO take advantage of copy_file_range OS APIs
         var buf: [8 * 4096]u8 = undefined;
         const adjusted_count = math.min(buf.len, len);
         const amt_read = try in.pread(buf[0..adjusted_count], in_offset);
-        if (amt_read == 0) return 0;
+        if (amt_read == 0) return @as(usize, 0);
         return out.pwrite(buf[0..amt_read], out_offset);
     }

     /// Returns the number of bytes copied. If the number read is smaller than `buffer.len`, it
     /// means the in file reached the end. Reaching the end of a file is not an error condition.
-    pub fn copyRangeAll(in: File, in_offset: u64, out: File, out_offset: u64, len: usize) PWriteError!usize {
-        var total_bytes_copied = 0;
+    pub fn copyRangeAll(in: File, in_offset: u64, out: File, out_offset: u64, len: usize) CopyRangeError!usize {
+        var total_bytes_copied: usize = 0;
         var in_off = in_offset;
         var out_off = out_offset;
         while (total_bytes_copied < len) {
@@ -549,6 +551,7 @@ pub const File = struct {
             in_off += amt_copied;
             out_off += amt_copied;
         }
+        return total_bytes_copied;
     }

     pub const WriteFileOptions = struct {
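
Editor's note (not part of the commit): the new `CopyRangeError = PWriteError || PReadError` merges the two error sets, since `copyRange` can surface failures from both the `pread` and the `pwrite` side. A minimal usage sketch of the updated `copyRangeAll`, assuming a writable cwd and the signatures exactly as introduced above:

const std = @import("std");

test "copyRangeAll copies len bytes between two files" {
    const cwd = std.fs.cwd();
    var src = try cwd.createFile("copy_range_src.bin", .{ .read = true });
    defer src.close();
    defer cwd.deleteFile("copy_range_src.bin") catch {};
    var dst = try cwd.createFile("copy_range_dst.bin", .{ .read = true });
    defer dst.close();
    defer cwd.deleteFile("copy_range_dst.bin") catch {};

    const payload = "hello copy_file_range";
    try src.pwriteAll(payload, 0);

    // The error set of this call is CopyRangeError = PWriteError || PReadError.
    const copied = try src.copyRangeAll(0, dst, 0, payload.len);
    std.debug.assert(copied == payload.len);
}

Because `copyRangeAll` stops early at end of file, callers that need a full copy compare the result against `len`, exactly as link.zig does further down.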

src-self-hosted/codegen.zig

@ -5,16 +5,17 @@ const ir = @import("ir.zig");
const Type = @import("type.zig").Type;
const Value = @import("value.zig").Value;
const TypedValue = @import("TypedValue.zig");
const link = @import("link.zig");
const Target = std.Target;
const Allocator = mem.Allocator;
pub fn generateSymbol(typed_value: TypedValue, module: ir.Module, code: *std.ArrayList(u8)) !?*ir.ErrorMsg {
pub fn generateSymbol(bin_file: *link.ElfFile, typed_value: TypedValue, code: *std.ArrayList(u8)) !?*ir.ErrorMsg {
switch (typed_value.ty.zigTypeTag()) {
.Fn => {
const module_fn = typed_value.val.cast(Value.Payload.Function).?.func;
var function = Function{
.module = &module,
.target = &bin_file.options.target,
.mod_fn = module_fn,
.code = code,
.inst_table = std.AutoHashMap(*ir.Inst, Function.MCValue).init(code.allocator),
@@ -22,7 +23,7 @@ pub fn generateSymbol(typed_value: TypedValue, module: ir.Module, code: *std.Arr
             };
             defer function.inst_table.deinit();

-            for (module_fn.body.instructions) |inst| {
+            for (module_fn.analysis.success.instructions) |inst| {
                 const new_inst = function.genFuncInst(inst) catch |err| switch (err) {
                     error.CodegenFail => {
                         assert(function.err_msg != null);
@@ -40,7 +41,7 @@ pub fn generateSymbol(typed_value: TypedValue, module: ir.Module, code: *std.Arr
 }

 const Function = struct {
-    module: *const ir.Module,
+    target: *const std.Target,
     mod_fn: *const ir.Module.Fn,
     code: *std.ArrayList(u8),
     inst_table: std.AutoHashMap(*ir.Inst, MCValue),
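
Editor's note: the struct change above narrows `Function`'s dependency from the whole `ir.Module` down to the `std.Target` it actually consults. A hedged sketch of the same pattern (names are illustrative, not from the commit), using the `cpu.arch.ptrBitWidth()` call that appears in this diff:

const std = @import("std");

const Emitter = struct {
    // Borrow only what the helper needs, not the whole compilation state.
    target: *const std.Target,

    fn supports64Bit(self: Emitter) bool {
        // Field access auto-dereferences the pointer.
        return self.target.cpu.arch.ptrBitWidth() >= 64;
    }
};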
@@ -76,60 +77,60 @@ const Function = struct {
     }

     fn genBreakpoint(self: *Function, src: usize) !MCValue {
-        switch (self.module.target.cpu.arch) {
+        switch (self.target.cpu.arch) {
             .i386, .x86_64 => {
                 try self.code.append(0xcc); // int3
             },
-            else => return self.fail(src, "TODO implement @breakpoint() for {}", .{self.module.target.cpu.arch}),
+            else => return self.fail(src, "TODO implement @breakpoint() for {}", .{self.target.cpu.arch}),
         }
         return .unreach;
     }

     fn genCall(self: *Function, inst: *ir.Inst.Call) !MCValue {
-        switch (self.module.target.cpu.arch) {
-            else => return self.fail(inst.base.src, "TODO implement call for {}", .{self.module.target.cpu.arch}),
+        switch (self.target.cpu.arch) {
+            else => return self.fail(inst.base.src, "TODO implement call for {}", .{self.target.cpu.arch}),
         }
         return .unreach;
     }

     fn genRet(self: *Function, inst: *ir.Inst.Ret) !MCValue {
-        switch (self.module.target.cpu.arch) {
+        switch (self.target.cpu.arch) {
             .i386, .x86_64 => {
                 try self.code.append(0xc3); // ret
             },
-            else => return self.fail(inst.base.src, "TODO implement return for {}", .{self.module.target.cpu.arch}),
+            else => return self.fail(inst.base.src, "TODO implement return for {}", .{self.target.cpu.arch}),
         }
         return .unreach;
     }

     fn genCmp(self: *Function, inst: *ir.Inst.Cmp) !MCValue {
-        switch (self.module.target.cpu.arch) {
-            else => return self.fail(inst.base.src, "TODO implement cmp for {}", .{self.module.target.cpu.arch}),
+        switch (self.target.cpu.arch) {
+            else => return self.fail(inst.base.src, "TODO implement cmp for {}", .{self.target.cpu.arch}),
         }
     }

     fn genCondBr(self: *Function, inst: *ir.Inst.CondBr) !MCValue {
-        switch (self.module.target.cpu.arch) {
-            else => return self.fail(inst.base.src, "TODO implement condbr for {}", .{self.module.target.cpu.arch}),
+        switch (self.target.cpu.arch) {
+            else => return self.fail(inst.base.src, "TODO implement condbr for {}", .{self.target.cpu.arch}),
         }
     }

     fn genIsNull(self: *Function, inst: *ir.Inst.IsNull) !MCValue {
-        switch (self.module.target.cpu.arch) {
-            else => return self.fail(inst.base.src, "TODO implement isnull for {}", .{self.module.target.cpu.arch}),
+        switch (self.target.cpu.arch) {
+            else => return self.fail(inst.base.src, "TODO implement isnull for {}", .{self.target.cpu.arch}),
         }
     }

     fn genIsNonNull(self: *Function, inst: *ir.Inst.IsNonNull) !MCValue {
         // Here you can specialize this instruction if it makes sense to, otherwise the default
         // will call genIsNull and invert the result.
-        switch (self.module.target.cpu.arch) {
+        switch (self.target.cpu.arch) {
             else => return self.fail(inst.base.src, "TODO call genIsNull and invert the result ", .{}),
         }
     }

     fn genRelativeFwdJump(self: *Function, src: usize, amount: u32) !void {
-        switch (self.module.target.cpu.arch) {
+        switch (self.target.cpu.arch) {
             .i386, .x86_64 => {
                 // TODO x86 treats the operands as signed
                 if (amount <= std.math.maxInt(u8)) {
@@ -143,13 +144,13 @@ const Function = struct {
                     mem.writeIntLittle(u32, imm_ptr, amount);
                 }
             },
-            else => return self.fail(src, "TODO implement relative forward jump for {}", .{self.module.target.cpu.arch}),
+            else => return self.fail(src, "TODO implement relative forward jump for {}", .{self.target.cpu.arch}),
         }
     }

     fn genAsm(self: *Function, inst: *ir.Inst.Assembly) !MCValue {
         // TODO convert to inline function
-        switch (self.module.target.cpu.arch) {
+        switch (self.target.cpu.arch) {
             .arm => return self.genAsmArch(.arm, inst),
             .armeb => return self.genAsmArch(.armeb, inst),
             .aarch64 => return self.genAsmArch(.aarch64, inst),
@@ -388,7 +389,7 @@ const Function = struct {
         }
     }

-    fn genTypedValue(self: *Function, src: usize, typed_value: ir.TypedValue) !MCValue {
+    fn genTypedValue(self: *Function, src: usize, typed_value: TypedValue) !MCValue {
         switch (typed_value.ty.zigTypeTag()) {
             .Pointer => {
                 const ptr_elem_type = typed_value.ty.elemType();
@@ -410,8 +411,8 @@ const Function = struct {
                 }
             },
             .Int => {
-                const info = typed_value.ty.intInfo(self.module.target);
-                const ptr_bits = self.module.target.cpu.arch.ptrBitWidth();
+                const info = typed_value.ty.intInfo(self.target.*);
+                const ptr_bits = self.target.cpu.arch.ptrBitWidth();
                 if (info.bits > ptr_bits or info.signed) {
                     return self.fail(src, "TODO const int bigger than ptr and signed int", .{});
                 }
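
Editor's note on the `.Int` hunk: `self.target` is now a `*const std.Target`, and `intInfo` takes a `Target` by value, hence the `self.target.*` dereference. A small sketch of the same move, with hypothetical names:

const std = @import("std");

// Hypothetical helper: does an integer of `bits` bits fit in a pointer-sized
// register on `target`? Mirrors the `info.bits > ptr_bits` check above.
fn intFitsInPtr(bits: u16, target: *const std.Target) bool {
    const t: std.Target = target.*; // `.*` copies the value, as in intInfo(self.target.*)
    return bits <= t.cpu.arch.ptrBitWidth();
}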

src-self-hosted/link.zig

@@ -685,23 +685,20 @@ pub const ElfFile = struct {
         // TODO Also detect virtual address collisions.
         const text_capacity = self.allocatedSize(shdr.sh_offset);
         // TODO instead of looping here, maintain a free list and a pointer to the end.
-        const end_vaddr = blk: {
-            var start: u64 = 0;
-            var size: u64 = 0;
-            for (self.symbols.items) |sym| {
-                if (sym.st_value > start) {
-                    start = sm.st_value;
-                    size = sym.st_size;
-                }
-            }
-            break :blk start + (size * alloc_num / alloc_den);
-        };
-        const text_size = end_vaddr - phdr.p_vaddr;
-        const needed_size = text_size + new_block_size;
+        var last_start: u64 = 0;
+        var last_size: u64 = 0;
+        for (self.symbols.items) |sym| {
+            if (sym.st_value > last_start) {
+                last_start = sym.st_value;
+                last_size = sym.st_size;
+            }
+        }
+        const end_vaddr = last_start + (last_size * alloc_num / alloc_den);
+        const needed_size = (end_vaddr + new_block_size) - phdr.p_vaddr;
         if (needed_size > text_capacity) {
             // Must move the entire text section.
             const new_offset = self.findFreeSpace(needed_size, 0x1000);
+            const text_size = (last_start + last_size) - phdr.p_vaddr;
             const amt = try self.file.copyRangeAll(shdr.sh_offset, self.file, new_offset, text_size);
             if (amt != text_size) return error.InputOutput;
             shdr.sh_offset = new_offset;
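
Editor's note: the rewritten block drops the labeled-block `break :blk` form (whose stray `sm` identifier was one of the compile errors this commit fixes) for straight-line code: scan for the highest-placed symbol, then pad its size by `alloc_num / alloc_den` to leave the text block surplus room to grow. The same logic as a standalone sketch (simplified symbol type; not part of the commit):

const Sym = struct { st_value: u64, st_size: u64 };

fn endVaddr(symbols: []const Sym, alloc_num: u64, alloc_den: u64) u64 {
    // Find the symbol placed highest in virtual address space.
    var last_start: u64 = 0;
    var last_size: u64 = 0;
    for (symbols) |sym| {
        if (sym.st_value > last_start) {
            last_start = sym.st_value;
            last_size = sym.st_size;
        }
    }
    // Over-allocate past the last symbol by the num/den growth factor.
    return last_start + (last_size * alloc_num / alloc_den);
}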
@@ -713,6 +710,12 @@ pub const ElfFile = struct {
         self.phdr_table_dirty = true; // TODO look into making only the one program header dirty
         self.shdr_table_dirty = true; // TODO look into making only the one section dirty

+        return AllocatedBlock{
+            .vaddr = end_vaddr,
+            .file_offset = shdr.sh_offset + (end_vaddr - phdr.p_vaddr),
+            .size_capacity = text_capacity - end_vaddr,
+        };
     }

     fn findAllocatedTextBlock(self: *ElfFile, sym: elf.Elf64_Sym) AllocatedBlock {
@@ -739,8 +742,8 @@ pub const ElfFile = struct {
         defer code.deinit();

         const typed_value = decl.typed_value.most_recent.typed_value;
-        const err_msg = try codegen.generateSymbol(typed_value, module.*, &code);
-        if (err_msg != null) |em| {
+        const err_msg = try codegen.generateSymbol(self, typed_value, &code);
+        if (err_msg) |em| {
             decl.analysis = .codegen_failure;
             _ = try module.failed_decls.put(decl, em);
             return;
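
Editor's note: `if (err_msg != null) |em|` was another of the compile errors this commit fixes. Payload capture must be applied to the optional itself; `!= null` produces a plain `bool`, which carries no payload. A minimal sketch (present-day syntax, illustrative names):

const std = @import("std");

fn report(maybe_msg: ?[]const u8) void {
    if (maybe_msg) |msg| { // ok: unwraps ?[]const u8 to []const u8
        std.debug.print("error: {s}\n", .{msg});
    }
    // `if (maybe_msg != null) |msg| { ... }` does not compile: a bool has no payload.
}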
@@ -755,7 +758,7 @@ pub const ElfFile = struct {
         if (decl.link.local_sym_index != 0) {
             const local_sym = &self.symbols.items[decl.link.local_sym_index];
-            const existing_block = self.findAllocatedTextBlock(local_sym);
+            const existing_block = self.findAllocatedTextBlock(local_sym.*);
             const file_offset = if (code_size > existing_block.size_capacity) fo: {
                 const new_block = try self.allocateTextBlock(code_size);
                 local_sym.st_value = new_block.vaddr;
@@ -765,7 +768,7 @@ pub const ElfFile = struct {
                 break :fo new_block.file_offset;
             } else existing_block.file_offset;

-            local_sym.st_name = try self.updateString(local_sym.st_name, mem.spanZ(u8, decl.name));
+            local_sym.st_name = try self.updateString(local_sym.st_name, mem.spanZ(decl.name));
             local_sym.st_info = (elf.STB_LOCAL << 4) | stt_bits;
             // TODO this write could be avoided if no fields of the symbol were changed.
             try self.writeSymbol(decl.link.local_sym_index);
@@ -773,7 +776,7 @@ pub const ElfFile = struct {
         } else {
             try self.symbols.ensureCapacity(self.allocator, self.symbols.items.len + 1);
             try self.offset_table.ensureCapacity(self.allocator, self.offset_table.items.len + 1);
-            const decl_name = mem.spanZ(u8, decl.name);
+            const decl_name = mem.spanZ(decl.name);
             const name_str_index = try self.makeString(decl_name);
             const new_block = try self.allocateTextBlock(code_size);
             const local_sym_index = self.symbols.items.len;
@@ -796,8 +799,8 @@ pub const ElfFile = struct {
             self.symbol_count_dirty = true;
             self.offset_table_count_dirty = true;
             decl.link = .{
-                .local_sym_index = local_sym_index,
-                .offset_table_index = offset_table_index,
+                .local_sym_index = @intCast(u32, local_sym_index),
+                .offset_table_index = @intCast(u32, offset_table_index),
             };

             break :blk new_block.file_offset;
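
Editor's note: `self.symbols.items.len` is a `usize`, while the link state stores `u32` indexes, so the commit narrows explicitly. A sketch in the same 2020-era builtin form, `@intCast(type, value)`:

fn toU32(len: usize) u32 {
    // Safety-checked narrowing: panics in Debug/ReleaseSafe if len > maxInt(u32).
    return @intCast(u32, len);
}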
@@ -807,7 +810,7 @@ pub const ElfFile = struct {
         try self.file.pwriteAll(code.items, file_offset);

         // Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated.
-        const decl_exports = module.decl_exports.get(decl) orelse &[0]*ir.Module.Export{};
+        const decl_exports = module.decl_exports.getValue(decl) orelse &[0]*ir.Module.Export{};
         return self.updateDeclExports(module, decl, decl_exports);
     }
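
Editor's note: the `get` to `getValue` change matches the std hash-map API of this vintage, where `getValue` returns an optional value. Combined with `orelse` and an empty slice literal, a missed lookup degrades to a no-op loop. A sketch with hypothetical key and value types, using that era's API:

const std = @import("std");

fn exportsOf(map: *std.AutoHashMap(u32, []const u32), key: u32) []const u32 {
    // A missing key yields an empty slice, so callers can iterate unconditionally.
    return map.getValue(key) orelse &[0]u32{};
}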
@@ -938,9 +941,9 @@ pub const ElfFile = struct {
         const needed_size = self.symbols.items.len * shdr.sh_entsize;
         if (needed_size > allocated_size) {
             // Must move the entire got section.
-            const new_offset = self.findFreeSpace(needed_size, shdr.sh_entsize);
+            const new_offset = self.findFreeSpace(needed_size, @intCast(u16, shdr.sh_entsize));
             const amt = try self.file.copyRangeAll(shdr.sh_offset, self.file, new_offset, shdr.sh_size);
-            if (amt != text_size) return error.InputOutput;
+            if (amt != shdr.sh_size) return error.InputOutput;
             shdr.sh_offset = new_offset;
         }
         shdr.sh_size = needed_size;
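
Editor's note: both relocation paths in this file follow the same contract. `copyRangeAll` reports how many bytes it actually moved (short on end of file), and the caller treats a short copy as `error.InputOutput`; the old code here compared against `text_size`, an identifier that no longer exists in this function, which was the last of the compile errors. The pattern in isolation, assuming the `File.copyRangeAll` introduced in the first file of this commit:

const std = @import("std");

fn copyExactly(in: std.fs.File, in_off: u64, out: std.fs.File, out_off: u64, len: usize) !void {
    const amt = try in.copyRangeAll(in_off, out, out_off, len);
    if (amt != len) return error.InputOutput; // short copy: EOF before len bytes
}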