Merge pull request #5158 from ziglang/zir-to-elf

beginnings of (non-LLVM) self-hosted machine code generation and linking
Andrew Kelley, 2020-04-24 15:37:21 -04:00 (committed by GitHub)
commit 7634e67ba5
8 changed files with 1457 additions and 999 deletions


@@ -1345,8 +1345,10 @@ pub const Dir = struct {
        mode: File.Mode = File.default_mode,
    };

-   /// `dest_path` must remain valid for the lifetime of `AtomicFile`.
-   /// Call `AtomicFile.finish` to atomically replace `dest_path` with contents.
+   /// Directly access the `.file` field, and then call `AtomicFile.finish`
+   /// to atomically replace `dest_path` with contents.
+   /// Always call `AtomicFile.deinit` to clean up, regardless of whether `AtomicFile.finish` succeeded.
+   /// `dest_path` must remain valid until `AtomicFile.deinit` is called.
    pub fn atomicFile(self: Dir, dest_path: []const u8, options: AtomicFileOptions) !AtomicFile {
        if (path.dirname(dest_path)) |dirname| {
            const dir = try self.openDir(dirname, .{});
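Editor's note: a minimal usage sketch of the workflow the revised doc comment describes — write through `.file`, call `finish()` to commit, and always `deinit()`. The helper name `writeFileAtomically` is hypothetical, and the snippet assumes the std API of this era.

    fn writeFileAtomically(dir: Dir, dest_path: []const u8, bytes: []const u8) !void {
        var af = try dir.atomicFile(dest_path, .{});
        // Always clean up, regardless of whether finish() succeeded.
        defer af.deinit();
        try af.file.writeAll(bytes);
        // Atomically replaces dest_path with the written contents.
        try af.finish();
    }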


@@ -93,7 +93,7 @@ pub const File = struct {
    /// This means that a process that does not respect the locking API can still get access
    /// to the file, despite the lock.
    ///
-   /// Windows' file locks are mandatory, and any process attempting to access the file will
+   /// Windows's file locks are mandatory, and any process attempting to access the file will
    /// receive an error.
    ///
    /// [1]: https://www.kernel.org/doc/Documentation/filesystems/mandatory-locking.txt


@@ -2027,7 +2027,13 @@ test "sliceAsBytes and bytesAsSlice back" {
/// Round an address up to the nearest aligned address
/// The alignment must be a power of 2 and greater than 0.
pub fn alignForward(addr: usize, alignment: usize) usize {
-    return alignBackward(addr + (alignment - 1), alignment);
+    return alignForwardGeneric(usize, addr, alignment);
+}
+
+/// Round an address up to the nearest aligned address
+/// The alignment must be a power of 2 and greater than 0.
+pub fn alignForwardGeneric(comptime T: type, addr: T, alignment: T) T {
+    return alignBackwardGeneric(T, addr + (alignment - 1), alignment);
}

test "alignForward" {
@@ -2048,8 +2054,14 @@ test "alignForward" {
/// Round an address up to the previous aligned address
/// The alignment must be a power of 2 and greater than 0.
pub fn alignBackward(addr: usize, alignment: usize) usize {
-    assert(@popCount(usize, alignment) == 1);
-    // 000010000 // example addr
+    return alignBackwardGeneric(usize, addr, alignment);
+}
+
+/// Round an address up to the previous aligned address
+/// The alignment must be a power of 2 and greater than 0.
+pub fn alignBackwardGeneric(comptime T: type, addr: T, alignment: T) T {
+    assert(@popCount(T, alignment) == 1);
+    // 000010000 // example alignment
    // 000001111 // subtract 1
    // 111110000 // binary not
    return addr & ~(alignment - 1);
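Editor's note: a small illustrative test for the new generic variants (not part of the diff); the expected values follow directly from the bit trick shown above, and the snippet assumes mem.zig's usual `testing` import.

test "alignForwardGeneric and alignBackwardGeneric" {
    // 13 rounded to an 8-byte boundary: (13 + 7) & ~7 == 16, and 13 & ~7 == 8.
    testing.expect(alignForwardGeneric(u32, 13, 8) == 16);
    testing.expect(alignBackwardGeneric(u32, 13, 8) == 8);
    // Already-aligned addresses are returned unchanged.
    testing.expect(alignForwardGeneric(u64, 16, 8) == 16);
    testing.expect(alignBackwardGeneric(u64, 16, 8) == 16);
}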


@@ -1,447 +1,524 @@
(This file is rewritten from scratch: the previous LLVM-based implementation — renderToLlvm, ObjectFile, renderToLlvmModule, the addLLVMAttr/addLLVMAttrStr/addLLVMAttrInt/addLLVMFnAttr helpers, renderLoadUntyped/renderLoad, renderStoreUntyped/renderStore, renderAlloca, getHandleValue, and resolveAlign — is deleted. The new contents of the file:)

const std = @import("std");
const mem = std.mem;
const assert = std.debug.assert;
const ir = @import("ir.zig");
const Type = @import("type.zig").Type;
const Value = @import("value.zig").Value;
const Target = std.Target;

pub const ErrorMsg = struct {
    byte_offset: usize,
    msg: []const u8,
};

pub const Symbol = struct {
    errors: []ErrorMsg,

    pub fn deinit(self: *Symbol, allocator: *mem.Allocator) void {
        for (self.errors) |err| {
            allocator.free(err.msg);
        }
        allocator.free(self.errors);
        self.* = undefined;
    }
};

pub fn generateSymbol(typed_value: ir.TypedValue, module: ir.Module, code: *std.ArrayList(u8)) !Symbol {
    switch (typed_value.ty.zigTypeTag()) {
        .Fn => {
            const index = typed_value.val.cast(Value.Payload.Function).?.index;
            const module_fn = module.fns[index];

            var function = Function{
                .module = &module,
                .mod_fn = &module_fn,
                .code = code,
                .inst_table = std.AutoHashMap(*ir.Inst, Function.MCValue).init(code.allocator),
                .errors = std.ArrayList(ErrorMsg).init(code.allocator),
            };
            defer function.inst_table.deinit();
            defer function.errors.deinit();

            for (module_fn.body) |inst| {
                const new_inst = function.genFuncInst(inst) catch |err| switch (err) {
                    error.CodegenFail => {
                        assert(function.errors.items.len != 0);
                        break;
                    },
                    else => |e| return e,
                };
                try function.inst_table.putNoClobber(inst, new_inst);
            }

            return Symbol{ .errors = function.errors.toOwnedSlice() };
        },
        else => @panic("TODO implement generateSymbol for non-function types"),
    }
}

const Function = struct {
    module: *const ir.Module,
    mod_fn: *const ir.Module.Fn,
    code: *std.ArrayList(u8),
    inst_table: std.AutoHashMap(*ir.Inst, MCValue),
    errors: std.ArrayList(ErrorMsg),

    const MCValue = union(enum) {
        none,
        unreach,
        /// A pointer-sized integer that fits in a register.
        immediate: u64,
        /// The constant was emitted into the code, at this offset.
        embedded_in_code: usize,
        /// The value is in a target-specific register. The value can
        /// be @intToEnum casted to the respective Reg enum.
        register: usize,
    };

    fn genFuncInst(self: *Function, inst: *ir.Inst) !MCValue {
        switch (inst.tag) {
            .unreach => return self.genPanic(inst.src),
            .constant => unreachable, // excluded from function bodies
            .assembly => return self.genAsm(inst.cast(ir.Inst.Assembly).?),
            .ptrtoint => return self.genPtrToInt(inst.cast(ir.Inst.PtrToInt).?),
            .bitcast => return self.genBitCast(inst.cast(ir.Inst.BitCast).?),
        }
    }

    fn genPanic(self: *Function, src: usize) !MCValue {
        // TODO change this to call the panic function
        switch (self.module.target.cpu.arch) {
            .i386, .x86_64 => {
                try self.code.append(0xcc); // int3
            },
            else => return self.fail(src, "TODO implement panic for {}", .{self.module.target.cpu.arch}),
        }
        return .unreach;
    }

    fn genRet(self: *Function, src: usize) !void {
        // TODO change this to call the panic function
        switch (self.module.target.cpu.arch) {
            .i386, .x86_64 => {
                try self.code.append(0xc3); // ret
            },
            else => return self.fail(src, "TODO implement ret for {}", .{self.module.target.cpu.arch}),
        }
    }

    fn genRelativeFwdJump(self: *Function, src: usize, amount: u32) !void {
        switch (self.module.target.cpu.arch) {
            .i386, .x86_64 => {
                if (amount <= std.math.maxInt(u8)) {
                    try self.code.resize(self.code.items.len + 2);
                    self.code.items[self.code.items.len - 2] = 0xeb;
                    self.code.items[self.code.items.len - 1] = @intCast(u8, amount);
                } else {
                    try self.code.resize(self.code.items.len + 5);
                    self.code.items[self.code.items.len - 5] = 0xe9; // jmp rel32
                    const imm_ptr = self.code.items[self.code.items.len - 4 ..][0..4];
                    mem.writeIntLittle(u32, imm_ptr, amount);
                }
            },
            else => return self.fail(src, "TODO implement relative forward jump for {}", .{self.module.target.cpu.arch}),
        }
    }

    fn genAsm(self: *Function, inst: *ir.Inst.Assembly) !MCValue {
        // TODO convert to inline function
        switch (self.module.target.cpu.arch) {
            .arm => return self.genAsmArch(.arm, inst),
            .armeb => return self.genAsmArch(.armeb, inst),
            .aarch64 => return self.genAsmArch(.aarch64, inst),
            .aarch64_be => return self.genAsmArch(.aarch64_be, inst),
            .aarch64_32 => return self.genAsmArch(.aarch64_32, inst),
            .arc => return self.genAsmArch(.arc, inst),
            .avr => return self.genAsmArch(.avr, inst),
            .bpfel => return self.genAsmArch(.bpfel, inst),
            .bpfeb => return self.genAsmArch(.bpfeb, inst),
            .hexagon => return self.genAsmArch(.hexagon, inst),
            .mips => return self.genAsmArch(.mips, inst),
            .mipsel => return self.genAsmArch(.mipsel, inst),
            .mips64 => return self.genAsmArch(.mips64, inst),
            .mips64el => return self.genAsmArch(.mips64el, inst),
            .msp430 => return self.genAsmArch(.msp430, inst),
            .powerpc => return self.genAsmArch(.powerpc, inst),
            .powerpc64 => return self.genAsmArch(.powerpc64, inst),
            .powerpc64le => return self.genAsmArch(.powerpc64le, inst),
            .r600 => return self.genAsmArch(.r600, inst),
            .amdgcn => return self.genAsmArch(.amdgcn, inst),
            .riscv32 => return self.genAsmArch(.riscv32, inst),
            .riscv64 => return self.genAsmArch(.riscv64, inst),
            .sparc => return self.genAsmArch(.sparc, inst),
            .sparcv9 => return self.genAsmArch(.sparcv9, inst),
            .sparcel => return self.genAsmArch(.sparcel, inst),
            .s390x => return self.genAsmArch(.s390x, inst),
            .tce => return self.genAsmArch(.tce, inst),
            .tcele => return self.genAsmArch(.tcele, inst),
            .thumb => return self.genAsmArch(.thumb, inst),
            .thumbeb => return self.genAsmArch(.thumbeb, inst),
            .i386 => return self.genAsmArch(.i386, inst),
            .x86_64 => return self.genAsmArch(.x86_64, inst),
            .xcore => return self.genAsmArch(.xcore, inst),
            .nvptx => return self.genAsmArch(.nvptx, inst),
            .nvptx64 => return self.genAsmArch(.nvptx64, inst),
            .le32 => return self.genAsmArch(.le32, inst),
            .le64 => return self.genAsmArch(.le64, inst),
            .amdil => return self.genAsmArch(.amdil, inst),
            .amdil64 => return self.genAsmArch(.amdil64, inst),
            .hsail => return self.genAsmArch(.hsail, inst),
            .hsail64 => return self.genAsmArch(.hsail64, inst),
            .spir => return self.genAsmArch(.spir, inst),
            .spir64 => return self.genAsmArch(.spir64, inst),
            .kalimba => return self.genAsmArch(.kalimba, inst),
            .shave => return self.genAsmArch(.shave, inst),
            .lanai => return self.genAsmArch(.lanai, inst),
            .wasm32 => return self.genAsmArch(.wasm32, inst),
            .wasm64 => return self.genAsmArch(.wasm64, inst),
            .renderscript32 => return self.genAsmArch(.renderscript32, inst),
            .renderscript64 => return self.genAsmArch(.renderscript64, inst),
            .ve => return self.genAsmArch(.ve, inst),
        }
    }

    fn genAsmArch(self: *Function, comptime arch: Target.Cpu.Arch, inst: *ir.Inst.Assembly) !MCValue {
        if (arch != .x86_64 and arch != .i386) {
            return self.fail(inst.base.src, "TODO implement inline asm support for more architectures", .{});
        }
        for (inst.args.inputs) |input, i| {
            if (input.len < 3 or input[0] != '{' or input[input.len - 1] != '}') {
                return self.fail(inst.base.src, "unrecognized asm input constraint: '{}'", .{input});
            }
            const reg_name = input[1 .. input.len - 1];
            const reg = parseRegName(arch, reg_name) orelse
                return self.fail(inst.base.src, "unrecognized register: '{}'", .{reg_name});
            const arg = try self.resolveInst(inst.args.args[i]);
            try self.genSetReg(inst.base.src, arch, reg, arg);
        }

        if (mem.eql(u8, inst.args.asm_source, "syscall")) {
            try self.code.appendSlice(&[_]u8{ 0x0f, 0x05 });
        } else {
            return self.fail(inst.base.src, "TODO implement support for more x86 assembly instructions", .{});
        }

        if (inst.args.output) |output| {
            if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') {
                return self.fail(inst.base.src, "unrecognized asm output constraint: '{}'", .{output});
            }
            const reg_name = output[2 .. output.len - 1];
            const reg = parseRegName(arch, reg_name) orelse
                return self.fail(inst.base.src, "unrecognized register: '{}'", .{reg_name});
            return MCValue{ .register = @enumToInt(reg) };
        } else {
            return MCValue.none;
        }
    }

    fn genSetReg(self: *Function, src: usize, comptime arch: Target.Cpu.Arch, reg: Reg(arch), mcv: MCValue) !void {
        switch (arch) {
            .x86_64 => switch (reg) {
                .rax => switch (mcv) {
                    .none, .unreach => unreachable,
                    .immediate => |x| {
                        // Setting the eax register zeroes the upper part of rax, so if the number is small
                        // enough, that is preferable.
                        // Best case: zero
                        // 31 c0                 xor    eax,eax
                        if (x == 0) {
                            return self.code.appendSlice(&[_]u8{ 0x31, 0xc0 });
                        }
                        // Next best case: set eax with 4 bytes
                        // b8 04 03 02 01        mov    eax,0x01020304
                        if (x <= std.math.maxInt(u32)) {
                            try self.code.resize(self.code.items.len + 5);
                            self.code.items[self.code.items.len - 5] = 0xb8;
                            const imm_ptr = self.code.items[self.code.items.len - 4 ..][0..4];
                            mem.writeIntLittle(u32, imm_ptr, @intCast(u32, x));
                            return;
                        }
                        // Worst case: set rax with 8 bytes
                        // 48 b8 08 07 06 05 04 03 02 01 movabs rax,0x0102030405060708
                        try self.code.resize(self.code.items.len + 10);
                        self.code.items[self.code.items.len - 10] = 0x48;
                        self.code.items[self.code.items.len - 9] = 0xb8;
                        const imm_ptr = self.code.items[self.code.items.len - 8 ..][0..8];
                        mem.writeIntLittle(u64, imm_ptr, x);
                        return;
                    },
                    .embedded_in_code => return self.fail(src, "TODO implement x86_64 genSetReg %rax = embedded_in_code", .{}),
                    .register => return self.fail(src, "TODO implement x86_64 genSetReg %rax = register", .{}),
                },
                .rdx => switch (mcv) {
                    .none, .unreach => unreachable,
                    .immediate => |x| {
                        // Setting the edx register zeroes the upper part of rdx, so if the number is small
                        // enough, that is preferable.
                        // Best case: zero
                        // 31 d2                 xor    edx,edx
                        if (x == 0) {
                            return self.code.appendSlice(&[_]u8{ 0x31, 0xd2 });
                        }
                        // Next best case: set edx with 4 bytes
                        // ba 04 03 02 01        mov    edx,0x1020304
                        if (x <= std.math.maxInt(u32)) {
                            try self.code.resize(self.code.items.len + 5);
                            self.code.items[self.code.items.len - 5] = 0xba;
                            const imm_ptr = self.code.items[self.code.items.len - 4 ..][0..4];
                            mem.writeIntLittle(u32, imm_ptr, @intCast(u32, x));
                            return;
                        }
                        // Worst case: set rdx with 8 bytes
                        // 48 ba 08 07 06 05 04 03 02 01 movabs rdx,0x0102030405060708
                        try self.code.resize(self.code.items.len + 10);
                        self.code.items[self.code.items.len - 10] = 0x48;
                        self.code.items[self.code.items.len - 9] = 0xba;
                        const imm_ptr = self.code.items[self.code.items.len - 8 ..][0..8];
                        mem.writeIntLittle(u64, imm_ptr, x);
                        return;
                    },
                    .embedded_in_code => return self.fail(src, "TODO implement x86_64 genSetReg %rdx = embedded_in_code", .{}),
                    .register => return self.fail(src, "TODO implement x86_64 genSetReg %rdx = register", .{}),
                },
                .rdi => switch (mcv) {
                    .none, .unreach => unreachable,
                    .immediate => |x| {
                        // Setting the edi register zeroes the upper part of rdi, so if the number is small
                        // enough, that is preferable.
                        // Best case: zero
                        // 31 ff                 xor    edi,edi
                        if (x == 0) {
                            return self.code.appendSlice(&[_]u8{ 0x31, 0xff });
                        }
                        // Next best case: set edi with 4 bytes
                        // bf 04 03 02 01        mov    edi,0x1020304
                        if (x <= std.math.maxInt(u32)) {
                            try self.code.resize(self.code.items.len + 5);
                            self.code.items[self.code.items.len - 5] = 0xbf;
                            const imm_ptr = self.code.items[self.code.items.len - 4 ..][0..4];
                            mem.writeIntLittle(u32, imm_ptr, @intCast(u32, x));
                            return;
                        }
                        // Worst case: set rdi with 8 bytes
                        // 48 bf 08 07 06 05 04 03 02 01 movabs rax,0x0102030405060708
                        try self.code.resize(self.code.items.len + 10);
                        self.code.items[self.code.items.len - 10] = 0x48;
                        self.code.items[self.code.items.len - 9] = 0xbf;
                        const imm_ptr = self.code.items[self.code.items.len - 8 ..][0..8];
                        mem.writeIntLittle(u64, imm_ptr, x);
                        return;
                    },
                    .embedded_in_code => return self.fail(src, "TODO implement x86_64 genSetReg %rdi = embedded_in_code", .{}),
                    .register => return self.fail(src, "TODO implement x86_64 genSetReg %rdi = register", .{}),
                },
                .rsi => switch (mcv) {
                    .none, .unreach => unreachable,
                    .immediate => return self.fail(src, "TODO implement x86_64 genSetReg %rsi = immediate", .{}),
                    .embedded_in_code => |code_offset| {
                        // Examples:
                        // lea rsi, [rip + 0x01020304]
                        // lea rsi, [rip - 7]
                        //  f: 48 8d 35 04 03 02 01   lea rsi,[rip+0x1020304] # 102031a <_start+0x102031a>
                        // 16: 48 8d 35 f9 ff ff ff   lea rsi,[rip+0xfffffffffffffff9] # 16 <_start+0x16>
                        //
                        // We need the offset from RIP in a signed i32 twos complement.
                        // The instruction is 7 bytes long and RIP points to the next instruction.
                        try self.code.resize(self.code.items.len + 7);
                        const rip = self.code.items.len;
                        const big_offset = @intCast(i64, code_offset) - @intCast(i64, rip);
                        const offset = @intCast(i32, big_offset);
                        self.code.items[self.code.items.len - 7] = 0x48;
                        self.code.items[self.code.items.len - 6] = 0x8d;
                        self.code.items[self.code.items.len - 5] = 0x35;
                        const imm_ptr = self.code.items[self.code.items.len - 4 ..][0..4];
                        mem.writeIntLittle(i32, imm_ptr, offset);
                        return;
                    },
                    .register => return self.fail(src, "TODO implement x86_64 genSetReg %rsi = register", .{}),
                },
                else => return self.fail(src, "TODO implement genSetReg for x86_64 '{}'", .{@tagName(reg)}),
            },
            else => return self.fail(src, "TODO implement genSetReg for more architectures", .{}),
        }
    }

    fn genPtrToInt(self: *Function, inst: *ir.Inst.PtrToInt) !MCValue {
        // no-op
        return self.resolveInst(inst.args.ptr);
    }

    fn genBitCast(self: *Function, inst: *ir.Inst.BitCast) !MCValue {
        const operand = try self.resolveInst(inst.args.operand);
        return operand;
    }

    fn resolveInst(self: *Function, inst: *ir.Inst) !MCValue {
        if (self.inst_table.getValue(inst)) |mcv| {
            return mcv;
        }
        if (inst.cast(ir.Inst.Constant)) |const_inst| {
            const mcvalue = try self.genTypedValue(inst.src, .{ .ty = inst.ty, .val = const_inst.val });
            try self.inst_table.putNoClobber(inst, mcvalue);
            return mcvalue;
        } else {
            return self.inst_table.getValue(inst).?;
        }
    }

    fn genTypedValue(self: *Function, src: usize, typed_value: ir.TypedValue) !MCValue {
        switch (typed_value.ty.zigTypeTag()) {
            .Pointer => {
                const ptr_elem_type = typed_value.ty.elemType();
                switch (ptr_elem_type.zigTypeTag()) {
                    .Array => {
                        // TODO more checks to make sure this can be emitted as a string literal
                        const bytes = try typed_value.val.toAllocatedBytes(self.code.allocator);
                        defer self.code.allocator.free(bytes);
                        const smaller_len = std.math.cast(u32, bytes.len) catch
                            return self.fail(src, "TODO handle a larger string constant", .{});

                        // Emit the string literal directly into the code; jump over it.
                        try self.genRelativeFwdJump(src, smaller_len);
                        const offset = self.code.items.len;
                        try self.code.appendSlice(bytes);
                        return MCValue{ .embedded_in_code = offset };
                    },
                    else => |t| return self.fail(src, "TODO implement emitTypedValue for pointer to '{}'", .{@tagName(t)}),
                }
            },
            .Int => {
                const info = typed_value.ty.intInfo(self.module.target);
                const ptr_bits = self.module.target.cpu.arch.ptrBitWidth();
                if (info.bits > ptr_bits or info.signed) {
                    return self.fail(src, "TODO const int bigger than ptr and signed int", .{});
                }
                return MCValue{ .immediate = typed_value.val.toUnsignedInt() };
            },
            .ComptimeInt => unreachable, // semantic analysis prevents this
            .ComptimeFloat => unreachable, // semantic analysis prevents this
            else => return self.fail(src, "TODO implement const of type '{}'", .{typed_value.ty}),
        }
    }

    fn fail(self: *Function, src: usize, comptime format: []const u8, args: var) error{ CodegenFail, OutOfMemory } {
        @setCold(true);
        const msg = try std.fmt.allocPrint(self.errors.allocator, format, args);
        {
            errdefer self.errors.allocator.free(msg);
            (try self.errors.addOne()).* = .{
                .byte_offset = src,
                .msg = msg,
            };
        }
        return error.CodegenFail;
    }
};

fn Reg(comptime arch: Target.Cpu.Arch) type {
    return switch (arch) {
        .i386 => enum {
            eax, ebx, ecx, edx, ebp, esp, esi, edi,
            ax, bx, cx, dx, bp, sp, si, di,
            ah, bh, ch, dh, al, bl, cl, dl,
        },
        .x86_64 => enum {
            rax, rbx, rcx, rdx, rbp, rsp, rsi, rdi,
            r8, r9, r10, r11, r12, r13, r14, r15,
            eax, ebx, ecx, edx, ebp, esp, esi, edi,
            r8d, r9d, r10d, r11d, r12d, r13d, r14d, r15d,
            ax, bx, cx, dx, bp, sp, si, di,
            r8w, r9w, r10w, r11w, r12w, r13w, r14w, r15w,
            ah, bh, ch, dh, al, bl, cl, dl,
            r8b, r9b, r10b, r11b, r12b, r13b, r14b, r15b,
        },
        else => @compileError("TODO add more register enums"),
    };
}

fn parseRegName(comptime arch: Target.Cpu.Arch, name: []const u8) ?Reg(arch) {
return std.meta.stringToEnum(Reg(arch), name);
}
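Editor's note: a self-contained sketch (not part of the commit) of the immediate-size strategy genSetReg applies for %rax above — xor eax,eax for zero, mov eax,imm32 when the value fits in 32 bits, and movabs rax,imm64 otherwise. The helper name movRaxImm is hypothetical, and the snippet is written against the std API of this era.

const std = @import("std");
const mem = std.mem;

fn movRaxImm(code: *std.ArrayList(u8), x: u64) !void {
    if (x == 0) {
        // 31 c0          xor eax,eax (also zeroes the upper half of rax)
        try code.appendSlice(&[_]u8{ 0x31, 0xc0 });
    } else if (x <= std.math.maxInt(u32)) {
        // b8 imm32       mov eax,imm32
        try code.append(0xb8);
        var imm: [4]u8 = undefined;
        mem.writeIntLittle(u32, &imm, @intCast(u32, x));
        try code.appendSlice(&imm);
    } else {
        // 48 b8 imm64    movabs rax,imm64
        try code.appendSlice(&[_]u8{ 0x48, 0xb8 });
        var imm: [8]u8 = undefined;
        mem.writeIntLittle(u64, &imm, x);
        try code.appendSlice(&imm);
    }
}

test "movRaxImm picks the shortest encoding" {
    var code = std.ArrayList(u8).init(std.testing.allocator);
    defer code.deinit();
    try movRaxImm(&code, 0); // 2 bytes
    try movRaxImm(&code, 60); // 5 bytes
    std.testing.expect(code.items.len == 7);
}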


@@ -24,6 +24,7 @@ pub const Inst = struct {
        constant,
        assembly,
        ptrtoint,
+        bitcast,
    };

    pub fn cast(base: *Inst, comptime T: type) ?*T {
@@ -45,6 +46,7 @@ pub const Inst = struct {
            .assembly,
            .ptrtoint,
+            .bitcast,
            => null,
        };
    }
@@ -84,6 +86,15 @@ pub const Inst = struct {
            ptr: *Inst,
        },
    };
+
+    pub const BitCast = struct {
+        pub const base_tag = Tag.bitcast;
+        base: Inst,
+
+        args: struct {
+            operand: *Inst,
+        },
+    };
};

pub const TypedValue = struct {
@@ -96,6 +107,7 @@ pub const Module = struct {
    errors: []ErrorMsg,
    arena: std.heap.ArenaAllocator,
    fns: []Fn,
+    target: Target,

    pub const Export = struct {
        name: []const u8,
@@ -122,9 +134,7 @@ pub const ErrorMsg = struct {
    msg: []const u8,
};

-pub fn analyze(allocator: *Allocator, old_module: text.Module) !Module {
-    const native_info = try std.zig.system.NativeTargetInfo.detect(allocator, .{});
-
+pub fn analyze(allocator: *Allocator, old_module: text.Module, target: Target) !Module {
    var ctx = Analyze{
        .allocator = allocator,
        .arena = std.heap.ArenaAllocator.init(allocator),
@@ -133,7 +143,7 @@ pub fn analyze(allocator: *Allocator, old_module: text.Module) !Module {
        .decl_table = std.AutoHashMap(*text.Inst, Analyze.NewDecl).init(allocator),
        .exports = std.ArrayList(Module.Export).init(allocator),
        .fns = std.ArrayList(Module.Fn).init(allocator),
-        .target = native_info.target,
+        .target = target,
    };
    defer ctx.errors.deinit();
    defer ctx.decl_table.deinit();
@@ -152,6 +162,7 @@ pub fn analyze(allocator: *Allocator, old_module: text.Module) !Module {
        .errors = ctx.errors.toOwnedSlice(),
        .fns = ctx.fns.toOwnedSlice(),
        .arena = ctx.arena,
+        .target = target,
    };
}
@@ -234,7 +245,7 @@ const Analyze = struct {
    fn resolveConstString(self: *Analyze, func: ?*Fn, old_inst: *text.Inst) ![]u8 {
        const new_inst = try self.resolveInst(func, old_inst);
        const wanted_type = Type.initTag(.const_slice_u8);
-        const coerced_inst = try self.coerce(wanted_type, new_inst);
+        const coerced_inst = try self.coerce(func, wanted_type, new_inst);
        const val = try self.resolveConstValue(coerced_inst);
        return val.toAllocatedBytes(&self.arena.allocator);
    }
@@ -242,7 +253,7 @@ const Analyze = struct {
    fn resolveType(self: *Analyze, func: ?*Fn, old_inst: *text.Inst) !Type {
        const new_inst = try self.resolveInst(func, old_inst);
        const wanted_type = Type.initTag(.@"type");
-        const coerced_inst = try self.coerce(wanted_type, new_inst);
+        const coerced_inst = try self.coerce(func, wanted_type, new_inst);
        const val = try self.resolveConstValue(coerced_inst);
        return val.toType();
    }
@@ -409,6 +420,7 @@ const Analyze = struct {
            .primitive => return self.analyzeInstPrimitive(func, old_inst.cast(text.Inst.Primitive).?),
            .fntype => return self.analyzeInstFnType(func, old_inst.cast(text.Inst.FnType).?),
            .intcast => return self.analyzeInstIntCast(func, old_inst.cast(text.Inst.IntCast).?),
+            .bitcast => return self.analyzeInstBitCast(func, old_inst.cast(text.Inst.BitCast).?),
        }
    }
@@ -472,7 +484,7 @@ const Analyze = struct {
    fn analyzeInstAs(self: *Analyze, func: ?*Fn, as: *text.Inst.As) InnerError!*Inst {
        const dest_type = try self.resolveType(func, as.positionals.dest_type);
        const new_inst = try self.resolveInst(func, as.positionals.value);
-        return self.coerce(dest_type, new_inst);
+        return self.coerce(func, dest_type, new_inst);
    }

    fn analyzeInstPtrToInt(self: *Analyze, func: ?*Fn, ptrtoint: *text.Inst.PtrToInt) InnerError!*Inst {
@@ -545,12 +557,18 @@ const Analyze = struct {
        }

        if (dest_is_comptime_int or new_inst.value() != null) {
-            return self.coerce(dest_type, new_inst);
+            return self.coerce(func, dest_type, new_inst);
        }

        return self.fail(intcast.base.src, "TODO implement analyze widen or shorten int", .{});
    }

+    fn analyzeInstBitCast(self: *Analyze, func: ?*Fn, inst: *text.Inst.BitCast) InnerError!*Inst {
+        const dest_type = try self.resolveType(func, inst.positionals.dest_type);
+        const operand = try self.resolveInst(func, inst.positionals.operand);
+        return self.bitcast(func, dest_type, operand);
+    }
+
    fn analyzeInstDeref(self: *Analyze, func: ?*Fn, deref: *text.Inst.Deref) InnerError!*Inst {
        const ptr = try self.resolveInst(func, deref.positionals.ptr);
        const elem_ty = switch (ptr.ty.zigTypeTag()) {
@@ -583,7 +601,8 @@ const Analyze = struct {
            elem.* = try self.resolveConstString(func, assembly.kw_args.clobbers[i]);
        }
        for (args) |*elem, i| {
-            elem.* = try self.resolveInst(func, assembly.kw_args.args[i]);
+            const arg = try self.resolveInst(func, assembly.kw_args.args[i]);
+            elem.* = try self.coerce(func, Type.initTag(.usize), arg);
        }

        const f = try self.requireFunctionBody(func, assembly.base.src);
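Editor's note: for context, this is the Zig-level shape of assembly that the analyzer above and the new backend's genAsmArch can now handle end to end — register-constrained inputs coerced to usize, plus the literal "syscall" template. The syscall number assumes Linux x86_64 (60 = exit) and the function is illustrative only.

fn exit(status: usize) noreturn {
    asm volatile ("syscall"
        :
        : [number] "{rax}" (@as(usize, 60)),
          [arg1] "{rdi}" (status)
        : "rcx", "r11"
    );
    unreachable;
}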
@@ -602,10 +621,14 @@ const Analyze = struct {
        return self.addNewInstArgs(f, unreach.base.src, Type.initTag(.noreturn), Inst.Unreach, {});
    }

-    fn coerce(self: *Analyze, dest_type: Type, inst: *Inst) !*Inst {
+    fn coerce(self: *Analyze, func: ?*Fn, dest_type: Type, inst: *Inst) !*Inst {
+        // If the types are the same, we can return the operand.
+        if (dest_type.eql(inst.ty))
+            return inst;
+
        const in_memory_result = coerceInMemoryAllowed(dest_type, inst.ty);
        if (in_memory_result == .ok) {
-            return self.bitcast(dest_type, inst);
+            return self.bitcast(func, dest_type, inst);
        }

        // *[N]T to []T
@@ -634,12 +657,14 @@ const Analyze = struct {
        return self.fail(inst.src, "TODO implement type coercion", .{});
    }

-    fn bitcast(self: *Analyze, dest_type: Type, inst: *Inst) !*Inst {
+    fn bitcast(self: *Analyze, func: ?*Fn, dest_type: Type, inst: *Inst) !*Inst {
        if (inst.value()) |val| {
            // Keep the comptime Value representation; take the new type.
            return self.constInst(inst.src, .{ .ty = dest_type, .val = val });
        }
-        return self.fail(inst.src, "TODO implement runtime bitcast", .{});
+        // TODO validate the type size and other compile errors
+        const f = try self.requireFunctionBody(func, inst.src);
+        return self.addNewInstArgs(f, inst.src, dest_type, Inst.BitCast, Inst.Args(Inst.BitCast){ .operand = inst });
    }

    fn coerceArrayPtrToSlice(self: *Analyze, dest_type: Type, inst: *Inst) !*Inst {
@@ -699,7 +724,9 @@ pub fn main() anyerror!void {
        std.process.exit(1);
    }

-    var analyzed_module = try analyze(allocator, zir_module);
+    const native_info = try std.zig.system.NativeTargetInfo.detect(allocator, .{});
+
+    var analyzed_module = try analyze(allocator, zir_module, native_info.target);
    defer analyzed_module.deinit(allocator);

    if (analyzed_module.errors.len != 0) {
@@ -711,12 +738,27 @@ pub fn main() anyerror!void {
        std.process.exit(1);
    }

-    var new_zir_module = try text.emit_zir(allocator, analyzed_module);
-    defer new_zir_module.deinit(allocator);
+    const output_zir = true;
+    if (output_zir) {
+        var new_zir_module = try text.emit_zir(allocator, analyzed_module);
+        defer new_zir_module.deinit(allocator);

-    var bos = std.io.bufferedOutStream(std.io.getStdOut().outStream());
-    try new_zir_module.writeToStream(allocator, bos.outStream());
-    try bos.flush();
+        var bos = std.io.bufferedOutStream(std.io.getStdOut().outStream());
+        try new_zir_module.writeToStream(allocator, bos.outStream());
+        try bos.flush();
+    }
+
+    const link = @import("link.zig");
+    var result = try link.updateExecutableFilePath(allocator, analyzed_module, std.fs.cwd(), "a.out");
+    defer result.deinit(allocator);
+    if (result.errors.len != 0) {
+        for (result.errors) |err_msg| {
+            const loc = findLineColumn(source, err_msg.byte_offset);
+            std.debug.warn("{}:{}:{}: error: {}\n", .{ src_path, loc.line + 1, loc.column + 1, err_msg.msg });
+        }
+        if (debug_error_trace) return error.ParseFailure;
+        std.process.exit(1);
+    }
}

fn findLineColumn(source: []const u8, byte_offset: usize) struct { line: usize, column: usize } {


@@ -31,6 +31,7 @@ pub const Inst = struct {
        primitive,
        fntype,
        intcast,
+        bitcast,
    };

    pub fn TagToType(tag: Tag) type {
@@ -48,6 +49,7 @@ pub const Inst = struct {
            .primitive => Primitive,
            .fntype => FnType,
            .intcast => IntCast,
+            .bitcast => BitCast,
        };
    }
@@ -258,6 +260,17 @@ pub const Inst = struct {
        },
        kw_args: struct {},
    };
+
+    pub const BitCast = struct {
+        pub const base_tag = Tag.bitcast;
+        base: Inst,
+
+        positionals: struct {
+            dest_type: *Inst,
+            operand: *Inst,
+        },
+        kw_args: struct {},
+    };
};

pub const ErrorMsg = struct {
@@ -331,6 +344,7 @@ pub const Module = struct {
            .primitive => return self.writeInstToStreamGeneric(stream, .primitive, decl, inst_table),
            .fntype => return self.writeInstToStreamGeneric(stream, .fntype, decl, inst_table),
            .intcast => return self.writeInstToStreamGeneric(stream, .intcast, decl, inst_table),
+            .bitcast => return self.writeInstToStreamGeneric(stream, .bitcast, decl, inst_table),
        }
    }
@@ -957,6 +971,19 @@ const EmitZIR = struct {
                };
                break :blk &new_inst.base;
            },
+            .bitcast => blk: {
+                const old_inst = inst.cast(ir.Inst.BitCast).?;
+                const new_inst = try self.arena.allocator.create(Inst.BitCast);
+                new_inst.* = .{
+                    .base = .{ .src = inst.src, .tag = Inst.BitCast.base_tag },
+                    .positionals = .{
+                        .dest_type = try self.emitType(inst.src, inst.ty),
+                        .operand = try self.resolveInst(&inst_table, old_inst.args.operand),
+                    },
+                    .kw_args = .{},
+                };
+                break :blk &new_inst.base;
+            },
        };
        try instructions.append(new_inst);
        try inst_table.putNoClobber(inst, new_inst);

File diff suppressed because it is too large

View File

@@ -264,6 +264,56 @@ pub const Value = extern union {
        }
    }

+    /// Asserts the value is an integer and it fits in a u64
+    pub fn toUnsignedInt(self: Value) u64 {
+        switch (self.tag()) {
+            .ty,
+            .u8_type,
+            .i8_type,
+            .isize_type,
+            .usize_type,
+            .c_short_type,
+            .c_ushort_type,
+            .c_int_type,
+            .c_uint_type,
+            .c_long_type,
+            .c_ulong_type,
+            .c_longlong_type,
+            .c_ulonglong_type,
+            .c_longdouble_type,
+            .f16_type,
+            .f32_type,
+            .f64_type,
+            .f128_type,
+            .c_void_type,
+            .bool_type,
+            .void_type,
+            .type_type,
+            .anyerror_type,
+            .comptime_int_type,
+            .comptime_float_type,
+            .noreturn_type,
+            .fn_naked_noreturn_no_args_type,
+            .single_const_pointer_to_comptime_int_type,
+            .const_slice_u8_type,
+            .void_value,
+            .noreturn_value,
+            .bool_true,
+            .bool_false,
+            .function,
+            .ref,
+            .ref_val,
+            .bytes,
+            => unreachable,
+
+            .zero => return 0,
+
+            .int_u64 => return self.cast(Payload.Int_u64).?.int,
+            .int_i64 => return @intCast(u64, self.cast(Payload.Int_u64).?.int),
+            .int_big => return self.cast(Payload.IntBig).?.big_int.to(u64) catch unreachable,
+        }
+    }
+
    /// Asserts the value is an integer, and the destination type is ComptimeInt or Int.
    pub fn intFitsInType(self: Value, ty: Type, target: Target) bool {
        switch (self.tag()) {
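Editor's note: a tiny illustration of the new helper, assuming Value.initTag mirrors the Type.initTag pattern used elsewhere in this PR; comptime integer values that fit in a u64 (tags like .zero and .int_u64) can now be read back directly, which is what the backend's genTypedValue relies on for the .immediate case.

const zero = Value.initTag(.zero);
assert(zero.toUnsignedInt() == 0);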