zir-to-elf skeleton

Andrew Kelley 2020-04-22 23:42:58 -04:00
parent e8545db9d4
commit a3dfe36ca1
5 changed files with 596 additions and 561 deletions

View File

@@ -1345,8 +1345,10 @@ pub const Dir = struct {
         mode: File.Mode = File.default_mode,
     };
 
-    /// `dest_path` must remain valid for the lifetime of `AtomicFile`.
-    /// Call `AtomicFile.finish` to atomically replace `dest_path` with contents.
+    /// Directly access the `.file` field, and then call `AtomicFile.finish`
+    /// to atomically replace `dest_path` with contents.
+    /// Always call `AtomicFile.deinit` to clean up, regardless of whether `AtomicFile.finish` succeeded.
+    /// `dest_path` must remain valid until `AtomicFile.deinit` is called.
    pub fn atomicFile(self: Dir, dest_path: []const u8, options: AtomicFileOptions) !AtomicFile {
        if (path.dirname(dest_path)) |dirname| {
            const dir = try self.openDir(dirname, .{});
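
The new doc comment describes the intended call sequence for `AtomicFile`. As a minimal usage sketch (hypothetical caller code, not part of this commit), assuming the `AtomicFileOptions.mode` field shown in the context above and the stream API used elsewhere in this diff:

const std = @import("std");

/// Hypothetical helper: atomically replace `dest_path` in `dir` with `bytes`.
fn writeFileAtomic(dir: std.fs.Dir, dest_path: []const u8, bytes: []const u8) !void {
    // 0o755 because this commit ends up linking an executable; POSIX permission bits.
    var af = try dir.atomicFile(dest_path, .{ .mode = 0o755 });
    // Always clean up, regardless of whether finish() succeeded.
    defer af.deinit();
    // Directly access the `.file` field to write the contents.
    try af.file.outStream().writeAll(bytes);
    // Atomically replace `dest_path` with the contents written so far.
    try af.finish();
}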

View File

@@ -93,7 +93,7 @@ pub const File = struct {
    /// This means that a process that does not respect the locking API can still get access
    /// to the file, despite the lock.
    ///
-    /// Windows' file locks are mandatory, and any process attempting to access the file will
+    /// Windows's file locks are mandatory, and any process attempting to access the file will
    /// receive an error.
    ///
    /// [1]: https://www.kernel.org/doc/Documentation/filesystems/mandatory-locking.txt

View File

@@ -2027,7 +2027,13 @@ test "sliceAsBytes and bytesAsSlice back" {
 /// Round an address up to the nearest aligned address
 /// The alignment must be a power of 2 and greater than 0.
 pub fn alignForward(addr: usize, alignment: usize) usize {
-    return alignBackward(addr + (alignment - 1), alignment);
+    return alignForwardGeneric(usize, addr, alignment);
+}
+
+/// Round an address up to the nearest aligned address
+/// The alignment must be a power of 2 and greater than 0.
+pub fn alignForwardGeneric(comptime T: type, addr: T, alignment: T) T {
+    return alignBackwardGeneric(T, addr + (alignment - 1), alignment);
 }
 
 test "alignForward" {
@@ -2048,7 +2054,13 @@ test "alignForward" {
 /// Round an address up to the previous aligned address
 /// The alignment must be a power of 2 and greater than 0.
 pub fn alignBackward(addr: usize, alignment: usize) usize {
-    assert(@popCount(usize, alignment) == 1);
+    return alignBackwardGeneric(usize, addr, alignment);
+}
+
+/// Round an address up to the previous aligned address
+/// The alignment must be a power of 2 and greater than 0.
+pub fn alignBackwardGeneric(comptime T: type, addr: T, alignment: T) T {
+    assert(@popCount(T, alignment) == 1);
     // 000010000 // example addr
     // 000001111 // subtract 1
     // 111110000 // binary not
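
The new `alignForwardGeneric` and `alignBackwardGeneric` behave exactly like the existing `usize` versions, just over a caller-chosen integer type. A small worked example (a hypothetical test, written in the style of the existing `alignForward` test and reusing this file's `testing` import):

test "alignForwardGeneric and alignBackwardGeneric" {
    // 17 rounded up to a multiple of 8 is 24; rounded down it is 16.
    testing.expect(alignForwardGeneric(u64, 17, 8) == 24);
    testing.expect(alignBackwardGeneric(u64, 17, 8) == 16);
    // Already-aligned addresses are returned unchanged.
    testing.expect(alignForwardGeneric(u32, 32, 16) == 32);
    testing.expect(alignBackwardGeneric(u32, 32, 16) == 32);
}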

View File

@@ -96,6 +96,7 @@ pub const Module = struct {
     errors: []ErrorMsg,
     arena: std.heap.ArenaAllocator,
     fns: []Fn,
+    target: Target,
 
     pub const Export = struct {
         name: []const u8,
@@ -122,9 +123,7 @@ pub const ErrorMsg = struct {
     msg: []const u8,
 };
 
-pub fn analyze(allocator: *Allocator, old_module: text.Module) !Module {
-    const native_info = try std.zig.system.NativeTargetInfo.detect(allocator, .{});
-
+pub fn analyze(allocator: *Allocator, old_module: text.Module, target: Target) !Module {
     var ctx = Analyze{
         .allocator = allocator,
         .arena = std.heap.ArenaAllocator.init(allocator),
@@ -133,7 +132,7 @@ pub fn analyze(allocator: *Allocator, old_module: text.Module) !Module {
         .decl_table = std.AutoHashMap(*text.Inst, Analyze.NewDecl).init(allocator),
         .exports = std.ArrayList(Module.Export).init(allocator),
         .fns = std.ArrayList(Module.Fn).init(allocator),
-        .target = native_info.target,
+        .target = target,
     };
     defer ctx.errors.deinit();
     defer ctx.decl_table.deinit();
@@ -152,6 +151,7 @@ pub fn analyze(allocator: *Allocator, old_module: text.Module) !Module {
         .errors = ctx.errors.toOwnedSlice(),
         .fns = ctx.fns.toOwnedSlice(),
         .arena = ctx.arena,
+        .target = target,
     };
 }
 
@@ -699,7 +699,9 @@ pub fn main() anyerror!void {
         std.process.exit(1);
     }
 
-    var analyzed_module = try analyze(allocator, zir_module);
+    const native_info = try std.zig.system.NativeTargetInfo.detect(allocator, .{});
+
+    var analyzed_module = try analyze(allocator, zir_module, native_info.target);
     defer analyzed_module.deinit(allocator);
 
     if (analyzed_module.errors.len != 0) {
@@ -711,12 +713,18 @@ pub fn main() anyerror!void {
         std.process.exit(1);
     }
 
-    var new_zir_module = try text.emit_zir(allocator, analyzed_module);
-    defer new_zir_module.deinit(allocator);
+    const output_zir = false;
+    if (output_zir) {
+        var new_zir_module = try text.emit_zir(allocator, analyzed_module);
+        defer new_zir_module.deinit(allocator);
 
-    var bos = std.io.bufferedOutStream(std.io.getStdOut().outStream());
-    try new_zir_module.writeToStream(allocator, bos.outStream());
-    try bos.flush();
+        var bos = std.io.bufferedOutStream(std.io.getStdOut().outStream());
+        try new_zir_module.writeToStream(allocator, bos.outStream());
+        try bos.flush();
+    }
+
+    const link = @import("link.zig");
+    try link.updateExecutableFilePath(allocator, analyzed_module, std.fs.cwd(), "a.out");
 }
 
 fn findLineColumn(source: []const u8, byte_offset: usize) struct { line: usize, column: usize } {
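
Threading `target` through `analyze` makes the compilation target explicit at the call site instead of always detecting the native machine inside `analyze`. A hypothetical variant of the call above that analyzes for a fixed cross target (assuming the `std.zig.CrossTarget.parse` API of this std version):

// Same flow as main() above, but with an explicit target triple instead of the native one.
const cross_target = try std.zig.CrossTarget.parse(.{ .arch_os_abi = "x86_64-linux-musl" });
const info = try std.zig.system.NativeTargetInfo.detect(allocator, cross_target);
var analyzed_module = try analyze(allocator, zir_module, info.target);
defer analyzed_module.deinit(allocator);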

File diff suppressed because it is too large
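
The suppressed diff is the new linker code that `main` now imports; it is not reproduced here. As a rough illustration only (a hypothetical sketch, not the contents of that file), emitting an ELF skeleton starts with writing the fixed 64-byte ELF64 header; the function name and field values below are illustrative assumptions, using the same stream API seen earlier in this diff:

const std = @import("std");

/// Hypothetical sketch: write a minimal ELF64 header (little-endian, x86_64, ET_EXEC)
/// with no program or section headers yet. Offsets and the entry point are placeholders.
fn writeElfHeader(file: std.fs.File) !void {
    var bos = std.io.bufferedOutStream(file.outStream());
    const out = bos.outStream();

    try out.writeAll("\x7fELF"); // e_ident[0..4]: magic
    try out.writeByte(2); // EI_CLASS: ELFCLASS64
    try out.writeByte(1); // EI_DATA: ELFDATA2LSB (little-endian)
    try out.writeByte(1); // EI_VERSION: EV_CURRENT
    try out.writeByteNTimes(0, 9); // OS/ABI, ABI version, and padding up to 16 bytes of e_ident

    try out.writeIntLittle(u16, 2); // e_type: ET_EXEC
    try out.writeIntLittle(u16, 0x3e); // e_machine: EM_X86_64
    try out.writeIntLittle(u32, 1); // e_version
    try out.writeIntLittle(u64, 0); // e_entry: entry point, patched once code is laid out
    try out.writeIntLittle(u64, 0); // e_phoff: no program headers yet
    try out.writeIntLittle(u64, 0); // e_shoff: no section headers yet
    try out.writeIntLittle(u32, 0); // e_flags
    try out.writeIntLittle(u16, 64); // e_ehsize: size of this header
    try out.writeIntLittle(u16, 56); // e_phentsize
    try out.writeIntLittle(u16, 0); // e_phnum
    try out.writeIntLittle(u16, 64); // e_shentsize
    try out.writeIntLittle(u16, 0); // e_shnum
    try out.writeIntLittle(u16, 0); // e_shstrndx

    try bos.flush();
}

A full `updateExecutableFilePath` would presumably also emit program headers, section headers, and the compiled machine code, and could route its output through `Dir.atomicFile` as sketched near the top of this diff.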