Merge branch 'stage2-vaddr-alloc'
commit dadc4327e1

@@ -55,9 +55,20 @@ failed_files: std.AutoHashMap(*Scope.ZIRModule, *ErrorMsg),
/// The ErrorMsg memory is owned by the `Export`, using Module's allocator.
failed_exports: std.AutoHashMap(*Export, *ErrorMsg),

/// Incrementing integer used to compare against the corresponding Decl
/// field to determine whether a Decl's status applies to an ongoing update, or a
/// previous analysis.
generation: u32 = 0,

/// Candidates for deletion. After a semantic analysis update completes, this list
/// contains Decls that need to be deleted if they end up having no references to them.
deletion_set: std.ArrayListUnmanaged(*Decl) = std.ArrayListUnmanaged(*Decl){},

pub const WorkItem = union(enum) {
/// Write the machine code for a Decl to the output file.
codegen_decl: *Decl,
/// Decl has been determined to be outdated; perform semantic analysis again.
re_analyze_decl: *Decl,
};

pub const Export = struct {
@@ -68,6 +79,8 @@ pub const Export = struct {
link: link.ElfFile.Export,
/// The Decl that performs the export. Note that this is *not* the Decl being exported.
owner_decl: *Decl,
/// The Decl being exported. Note this is *not* the Decl performing the export.
exported_decl: *Decl,
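// Illustrative example (not part of the original commit): in the ZIR test form used
// later in this diff, `@11 = export(@10, @start)`, the Decl containing the export
// instruction is the owner_decl, while `@start` is the exported_decl. Deleting or
// re-analyzing the owner removes the export; the exported Decl itself is untouched.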
status: enum {
in_progress,
failed,
@@ -94,8 +107,7 @@ pub const Decl = struct {
/// This is the base offset that src offsets within this Decl are relative to.
src: usize,
/// The most recent value of the Decl after a successful semantic analysis.
/// The tag for this union is determined by the tag value of the analysis field.
typed_value: union {
typed_value: union(enum) {
never_succeeded: void,
most_recent: TypedValue.Managed,
},
@@ -104,50 +116,56 @@ pub const Decl = struct {
/// analysis of the function body is performed with this value set to `success`. Functions
/// have their own analysis status field.
analysis: enum {
initial_in_progress,
/// Semantic analysis for this Decl is running right now. This state detects dependency loops.
in_progress,
/// This Decl might be OK but it depends on another one which did not successfully complete
/// semantic analysis. This Decl never had a value computed.
initial_dependency_failure,
/// Semantic analysis failure. This Decl never had a value computed.
/// semantic analysis.
dependency_failure,
/// Semantic analysis failure.
/// There will be a corresponding ErrorMsg in Module.failed_decls.
initial_sema_failure,
/// In this case the `typed_value.most_recent` can still be accessed.
sema_failure,
/// There will be a corresponding ErrorMsg in Module.failed_decls.
codegen_failure,
/// In this case the `typed_value.most_recent` can still be accessed.
/// There will be a corresponding ErrorMsg in Module.failed_decls.
/// This indicates the failure was something like running out of disk space,
/// and attempting codegen again may succeed.
codegen_failure_retryable,
/// This Decl might be OK but it depends on another one which did not successfully complete
/// semantic analysis. There is a most recent value available.
repeat_dependency_failure,
/// Semantic analysis failure, but the `typed_value.most_recent` can be accessed.
/// There will be a corresponding ErrorMsg in Module.failed_decls.
repeat_sema_failure,
/// Completed successfully before; the `typed_value.most_recent` can be accessed, and
/// new semantic analysis is in progress.
repeat_in_progress,
/// Everything is done and updated.
/// Everything is done. During an update, this Decl may be out of date, depending
/// on its dependencies. The `generation` field can be used to determine if this
/// completion status occurred before or after a given update.
complete,
/// A Module update is in progress, and this Decl has been flagged as being known
/// to require re-analysis.
outdated,
},
/// This flag is set when this Decl is added to a check_for_deletion set, and cleared
/// when removed.
deletion_flag: bool,
/// An integer that can be checked against the corresponding incrementing
/// generation field of Module. This is used to determine whether `complete` status
/// represents pre- or post- re-analysis.
generation: u32,
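// Illustrative sketch (not from the original source): after Module.update() increments
// Module.generation, a Decl whose analysis is .complete but whose generation is older
// than the module's completed during a previous update and may still be stale, e.g.
//     if (dep.analysis == .complete and dep.generation != self.generation) {
//         dep.analysis = .outdated; // must be re-analyzed as part of this update
//     }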

/// Represents the position of the code in the output file.
/// This is populated regardless of semantic analysis and code generation.
link: link.ElfFile.Decl = link.ElfFile.Decl.empty,
link: link.ElfFile.TextBlock = link.ElfFile.TextBlock.empty,

contents_hash: Hash,

/// The shallow set of other decls whose typed_value could possibly change if this Decl's
/// typed_value is modified.
/// TODO look into using a lightweight map/set data structure rather than a linear array.
dependants: ArrayListUnmanaged(*Decl) = ArrayListUnmanaged(*Decl){},

contents_hash: Hash,
/// The shallow set of other decls whose typed_value changing indicates that this Decl's
/// typed_value may need to be regenerated.
dependencies: ArrayListUnmanaged(*Decl) = ArrayListUnmanaged(*Decl){},
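// Illustrative sketch (not from the original source): if Decl A's value is computed
// from Decl B, then declareDeclDependency(A, B) records B in A.dependencies and A in
// B.dependants, so that a change to B marks A as outdated on the next update.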

pub fn destroy(self: *Decl, allocator: *Allocator) void {
allocator.free(mem.spanZ(self.name));
if (self.typedValueManaged()) |tvm| {
tvm.deinit(allocator);
}
self.dependants.deinit(allocator);
self.dependencies.deinit(allocator);
allocator.destroy(self);
}
@@ -172,7 +190,7 @@ pub const Decl = struct {
pub fn fullyQualifiedNameHash(self: Decl) Hash {
// Right now we only have ZIRModule as the source. So this is simply the
// relative name of the decl.
return hashSimpleName(mem.spanZ(u8, self.name));
return hashSimpleName(mem.spanZ(self.name));
}

pub fn typedValue(self: *Decl) error{AnalysisFail}!TypedValue {
@@ -200,20 +218,31 @@ pub const Decl = struct {
}

fn typedValueManaged(self: *Decl) ?*TypedValue.Managed {
switch (self.analysis) {
.initial_in_progress,
.initial_dependency_failure,
.initial_sema_failure,
=> return null,
.codegen_failure,
.codegen_failure_retryable,
.repeat_dependency_failure,
.repeat_sema_failure,
.repeat_in_progress,
.complete,
=> return &self.typed_value.most_recent,
switch (self.typed_value) {
.most_recent => |*x| return x,
.never_succeeded => return null,
}
}

fn removeDependant(self: *Decl, other: *Decl) void {
for (self.dependants.items) |item, i| {
if (item == other) {
_ = self.dependants.swapRemove(i);
return;
}
}
unreachable;
}

fn removeDependency(self: *Decl, other: *Decl) void {
for (self.dependencies.items) |item, i| {
if (item == other) {
_ = self.dependencies.swapRemove(i);
return;
}
}
unreachable;
}
};

/// Fn struct memory is owned by the Decl's TypedValue.Managed arena allocator.
@@ -266,12 +295,12 @@ pub const Scope = struct {

/// Asserts the scope has a parent which is a DeclAnalysis and
/// returns the Decl.
pub fn decl(self: *Scope) *Decl {
switch (self.tag) {
.block => return self.cast(Block).?.decl,
.decl => return self.cast(DeclAnalysis).?.decl,
.zir_module => unreachable,
}
pub fn decl(self: *Scope) ?*Decl {
return switch (self.tag) {
.block => self.cast(Block).?.decl,
.decl => self.cast(DeclAnalysis).?.decl,
.zir_module => null,
};
}

/// Asserts the scope has a parent which is a ZIRModule and
@@ -477,6 +506,7 @@ pub fn init(gpa: *Allocator, options: InitOptions) !Module {
pub fn deinit(self: *Module) void {
self.bin_file.deinit();
const allocator = self.allocator;
self.deletion_set.deinit(allocator);
self.work_queue.deinit();
{
var it = self.decl_table.iterator();
@@ -517,11 +547,7 @@ pub fn deinit(self: *Module) void {
{
var it = self.export_owners.iterator();
while (it.next()) |kv| {
const export_list = kv.value;
for (export_list) |exp| {
allocator.destroy(exp);
}
allocator.free(export_list);
freeExportList(allocator, kv.value);
}
self.export_owners.deinit();
}
@@ -532,12 +558,21 @@ pub fn deinit(self: *Module) void {
self.* = undefined;
}

fn freeExportList(allocator: *Allocator, export_list: []*Export) void {
for (export_list) |exp| {
allocator.destroy(exp);
}
allocator.free(export_list);
}

pub fn target(self: Module) std.Target {
return self.bin_file.options.target;
}

/// Detect changes to source files, perform semantic analysis, and update the output files.
pub fn update(self: *Module) !void {
self.generation += 1;

// TODO Use the cache hash file system to detect which source files changed.
// Here we simulate a full cache miss.
// Analyze the root source file now.
@@ -550,6 +585,15 @@ pub fn update(self: *Module) !void {

try self.performAllTheWork();

// Process the deletion set.
while (self.deletion_set.popOrNull()) |decl| {
if (decl.dependants.items.len != 0) {
decl.deletion_flag = false;
continue;
}
try self.deleteDecl(decl);
}

// Unload all the source files from memory.
self.root_scope.unload(self.allocator);
@@ -634,15 +678,12 @@ const InnerError = error{ OutOfMemory, AnalysisFail };
pub fn performAllTheWork(self: *Module) error{OutOfMemory}!void {
while (self.work_queue.readItem()) |work_item| switch (work_item) {
.codegen_decl => |decl| switch (decl.analysis) {
.initial_in_progress,
.repeat_in_progress,
=> unreachable,
.in_progress => unreachable,
.outdated => unreachable,

.initial_sema_failure,
.repeat_sema_failure,
.sema_failure,
.codegen_failure,
.initial_dependency_failure,
.repeat_dependency_failure,
.dependency_failure,
=> continue,

.complete, .codegen_failure_retryable => {
@@ -668,7 +709,7 @@ pub fn performAllTheWork(self: *Module) error{OutOfMemory}!void {
self.bin_file.updateDecl(self, decl) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {
decl.analysis = .repeat_dependency_failure;
decl.analysis = .dependency_failure;
},
else => {
try self.failed_decls.ensureCapacity(self.failed_decls.size + 1);
@@ -683,9 +724,60 @@ pub fn performAllTheWork(self: *Module) error{OutOfMemory}!void {
};
},
},
.re_analyze_decl => |decl| switch (decl.analysis) {
.in_progress => unreachable,

.sema_failure,
.codegen_failure,
.dependency_failure,
.complete,
.codegen_failure_retryable,
=> continue,

.outdated => {
const zir_module = self.getSrcModule(decl.scope) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => {
try self.failed_decls.ensureCapacity(self.failed_decls.size + 1);
self.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create(
self.allocator,
decl.src,
"unable to load source file '{}': {}",
.{decl.scope.sub_file_path, @errorName(err)},
));
decl.analysis = .codegen_failure_retryable;
continue;
},
};
const decl_name = mem.spanZ(decl.name);
// We already detected deletions, so we know this will be found.
const src_decl = zir_module.findDecl(decl_name).?;
self.reAnalyzeDecl(decl, src_decl) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => continue,
};
}
},
};
}

fn declareDeclDependency(self: *Module, depender: *Decl, dependee: *Decl) !void {
try depender.dependencies.ensureCapacity(self.allocator, depender.dependencies.items.len + 1);
try dependee.dependants.ensureCapacity(self.allocator, dependee.dependants.items.len + 1);

for (depender.dependencies.items) |item| {
if (item == dependee) break; // Already in the set.
} else {
depender.dependencies.appendAssumeCapacity(dependee);
}

for (dependee.dependants.items) |item| {
if (item == depender) break; // Already in the set.
} else {
dependee.dependants.appendAssumeCapacity(depender);
}
}

fn getSource(self: *Module, root_scope: *Scope.ZIRModule) ![:0]const u8 {
switch (root_scope.source) {
.unloaded => {
@@ -742,13 +834,6 @@ fn getSrcModule(self: *Module, root_scope: *Scope.ZIRModule) !*zir.Module {
}

fn analyzeRoot(self: *Module, root_scope: *Scope.ZIRModule) !void {
// TODO use the cache to identify, from the modified source files, the decls which have
// changed based on the span of memory that represents the decl in the re-parsed source file.
// Use the cached dependency graph to recursively determine the set of decls which need
// regeneration.
// Here we simulate adding a source file which was previously not part of the compilation,
// which means scanning the decls looking for exports.
// TODO also identify decls that need to be deleted.
switch (root_scope.status) {
.never_loaded => {
const src_module = try self.getSrcModule(root_scope);
@@ -759,7 +844,7 @@ fn analyzeRoot(self: *Module, root_scope: *Scope.ZIRModule) !void {

for (src_module.decls) |decl| {
if (decl.cast(zir.Inst.Export)) |export_inst| {
_ = try self.resolveDecl(&root_scope.base, &export_inst.base, link.ElfFile.Decl.empty);
_ = try self.resolveDecl(&root_scope.base, &export_inst.base);
}
}
},
@@ -772,41 +857,112 @@ fn analyzeRoot(self: *Module, root_scope: *Scope.ZIRModule) !void {
=> {
const src_module = try self.getSrcModule(root_scope);

// Look for changed decls.
var exports_to_resolve = std.ArrayList(*zir.Inst).init(self.allocator);
defer exports_to_resolve.deinit();

// Keep track of the decls that we expect to see in this file so that
// we know which ones have been deleted.
var deleted_decls = std.AutoHashMap(*Decl, void).init(self.allocator);
defer deleted_decls.deinit();
try deleted_decls.ensureCapacity(self.decl_table.size);
{
var it = self.decl_table.iterator();
while (it.next()) |kv| {
deleted_decls.putAssumeCapacityNoClobber(kv.value, {});
}
}

for (src_module.decls) |src_decl| {
const name_hash = Decl.hashSimpleName(src_decl.name);
if (self.decl_table.get(name_hash)) |kv| {
const decl = kv.value;
deleted_decls.removeAssertDiscard(decl);
const new_contents_hash = Decl.hashSimpleName(src_decl.contents);
if (!mem.eql(u8, &new_contents_hash, &decl.contents_hash)) {
// TODO recursive dependency management
//std.debug.warn("noticed that '{}' changed\n", .{src_decl.name});
self.decl_table.removeAssertDiscard(name_hash);
const saved_link = decl.link;
decl.destroy(self.allocator);
if (self.export_owners.getValue(decl)) |exports| {
@panic("TODO handle updating a decl that does an export");
}
const new_decl = self.resolveDecl(
&root_scope.base,
src_decl,
saved_link,
) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => continue,
};
if (self.decl_exports.remove(decl)) |entry| {
self.decl_exports.putAssumeCapacityNoClobber(new_decl, entry.value);
}
//std.debug.warn("noticed '{}' source changed\n", .{src_decl.name});
decl.analysis = .outdated;
decl.contents_hash = new_contents_hash;
try self.work_queue.writeItem(.{ .re_analyze_decl = decl });
}
} else if (src_decl.cast(zir.Inst.Export)) |export_inst| {
_ = try self.resolveDecl(&root_scope.base, &export_inst.base, link.ElfFile.Decl.empty);
try exports_to_resolve.append(&export_inst.base);
}
}
{
// Handle explicitly deleted decls from the source code. Not to be confused
// with when we delete decls because they are no longer referenced.
var it = deleted_decls.iterator();
while (it.next()) |kv| {
//std.debug.warn("noticed '{}' deleted from source\n", .{kv.key.name});
try self.deleteDecl(kv.key);
}
}
for (exports_to_resolve.items) |export_inst| {
_ = try self.resolveDecl(&root_scope.base, export_inst);
}
},
}
}
fn deleteDecl(self: *Module, decl: *Decl) !void {
//std.debug.warn("deleting decl '{}'\n", .{decl.name});
const name_hash = decl.fullyQualifiedNameHash();
self.decl_table.removeAssertDiscard(name_hash);
// Remove itself from its dependencies, because we are about to destroy the decl pointer.
for (decl.dependencies.items) |dep| {
dep.removeDependant(decl);
if (dep.dependants.items.len == 0) {
// We don't recursively perform a deletion here, because during the update,
// another reference to it may turn up.
assert(!dep.deletion_flag);
dep.deletion_flag = true;
try self.deletion_set.append(self.allocator, dep);
}
}
// Anything that depends on this deleted decl certainly needs to be re-analyzed.
for (decl.dependants.items) |dep| {
dep.removeDependency(decl);
if (dep.analysis != .outdated) {
dep.analysis = .outdated;
try self.work_queue.writeItem(.{ .re_analyze_decl = dep });
}
}
self.deleteDeclExports(decl);
self.bin_file.freeDecl(decl);
decl.destroy(self.allocator);
}

/// Delete all the Export objects that are caused by this Decl. Re-analysis of
/// this Decl will cause them to be re-created (or not).
fn deleteDeclExports(self: *Module, decl: *Decl) void {
const kv = self.export_owners.remove(decl) orelse return;

for (kv.value) |exp| {
if (self.decl_exports.get(exp.exported_decl)) |decl_exports_kv| {
// Remove exports with owner_decl matching the regenerating decl.
const list = decl_exports_kv.value;
var i: usize = 0;
var new_len = list.len;
while (i < new_len) {
if (list[i].owner_decl == decl) {
mem.copyBackwards(*Export, list[i..], list[i + 1..new_len]);
new_len -= 1;
} else {
i += 1;
}
}
decl_exports_kv.value = self.allocator.shrink(list, new_len);
if (new_len == 0) {
self.decl_exports.removeAssertDiscard(exp.exported_decl);
}
}

self.bin_file.deleteExport(exp.link);
self.allocator.destroy(exp);
}
self.allocator.free(kv.value);
}

fn analyzeFnBody(self: *Module, decl: *Decl, func: *Fn) !void {
// Use the Decl's arena for function memory.
var arena = decl.typed_value.most_recent.arena.?.promote(self.allocator);
@@ -836,15 +992,111 @@ fn analyzeFnBody(self: *Module, decl: *Decl, func: *Fn) !void {
};
}

fn resolveDecl(
self: *Module,
scope: *Scope,
old_inst: *zir.Inst,
bin_file_link: link.ElfFile.Decl,
) InnerError!*Decl {
fn reAnalyzeDecl(self: *Module, decl: *Decl, old_inst: *zir.Inst) InnerError!void {
switch (decl.analysis) {
.in_progress => unreachable,
.dependency_failure,
.sema_failure,
.codegen_failure,
.codegen_failure_retryable,
.complete,
=> return,

.outdated => {}, // Decl re-analysis
}
//std.debug.warn("re-analyzing {}\n", .{decl.name});
decl.src = old_inst.src;

// The exports this Decl performs will be re-discovered, so we remove them here
// prior to re-analysis.
self.deleteDeclExports(decl);
// Dependencies will be re-discovered, so we remove them here prior to re-analysis.
for (decl.dependencies.items) |dep| {
dep.removeDependant(decl);
if (dep.dependants.items.len == 0) {
// We don't perform a deletion here, because this Decl or another one
// may end up referencing it before the update is complete.
assert(!dep.deletion_flag);
dep.deletion_flag = true;
try self.deletion_set.append(self.allocator, dep);
}
}
decl.dependencies.shrink(self.allocator, 0);
var decl_scope: Scope.DeclAnalysis = .{
.decl = decl,
.arena = std.heap.ArenaAllocator.init(self.allocator),
};
errdefer decl_scope.arena.deinit();

const typed_value = self.analyzeInstConst(&decl_scope.base, old_inst) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {
switch (decl.analysis) {
.in_progress => decl.analysis = .dependency_failure,
else => {},
}
decl.generation = self.generation;
return error.AnalysisFail;
},
};
const arena_state = try decl_scope.arena.allocator.create(std.heap.ArenaAllocator.State);
arena_state.* = decl_scope.arena.state;

var prev_type_has_bits = false;
var type_changed = true;

if (decl.typedValueManaged()) |tvm| {
prev_type_has_bits = tvm.typed_value.ty.hasCodeGenBits();
type_changed = !tvm.typed_value.ty.eql(typed_value.ty);

tvm.deinit(self.allocator);
}
decl.typed_value = .{
.most_recent = .{
.typed_value = typed_value,
.arena = arena_state,
},
};
decl.analysis = .complete;
decl.generation = self.generation;
if (typed_value.ty.hasCodeGenBits()) {
// We don't fully codegen the decl until later, but we do need to reserve a global
// offset table index for it. This allows us to codegen decls out of dependency order,
// increasing how many computations can be done in parallel.
try self.bin_file.allocateDeclIndexes(decl);
try self.work_queue.writeItem(.{ .codegen_decl = decl });
} else if (prev_type_has_bits) {
self.bin_file.freeDecl(decl);
}

// If the decl is a function, and the type is the same, we do not need
// to chase the dependants.
if (type_changed or typed_value.val.tag() != .function) {
for (decl.dependants.items) |dep| {
switch (dep.analysis) {
.in_progress => unreachable,
.outdated => continue, // already queued for update

.dependency_failure,
.sema_failure,
.codegen_failure,
.codegen_failure_retryable,
.complete,
=> if (dep.generation != self.generation) {
dep.analysis = .outdated;
try self.work_queue.writeItem(.{ .re_analyze_decl = dep });
},
}
}
}
}

fn resolveDecl(self: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!*Decl {
const hash = Decl.hashSimpleName(old_inst.name);
if (self.decl_table.get(hash)) |kv| {
return kv.value;
const decl = kv.value;
try self.reAnalyzeDecl(decl, old_inst);
return decl;
} else {
const new_decl = blk: {
try self.decl_table.ensureCapacity(self.decl_table.size + 1);
@@ -857,9 +1109,11 @@ fn resolveDecl(
.scope = scope.namespace(),
.src = old_inst.src,
.typed_value = .{ .never_succeeded = {} },
.analysis = .initial_in_progress,
.analysis = .in_progress,
.deletion_flag = false,
.contents_hash = Decl.hashSimpleName(old_inst.contents),
.link = bin_file_link,
.link = link.ElfFile.TextBlock.empty,
.generation = 0,
};
self.decl_table.putAssumeCapacityNoClobber(hash, new_decl);
break :blk new_decl;
@@ -875,10 +1129,10 @@ fn resolveDecl(
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {
switch (new_decl.analysis) {
.initial_in_progress => new_decl.analysis = .initial_dependency_failure,
.repeat_in_progress => new_decl.analysis = .repeat_dependency_failure,
.in_progress => new_decl.analysis = .dependency_failure,
else => {},
}
new_decl.generation = self.generation;
return error.AnalysisFail;
},
};
@@ -893,34 +1147,37 @@ fn resolveDecl(
},
};
new_decl.analysis = .complete;
new_decl.generation = self.generation;
if (typed_value.ty.hasCodeGenBits()) {
// We don't fully codegen the decl until later, but we do need to reserve a global
// offset table index for it. This allows us to codegen decls out of dependency order,
// increasing how many computations can be done in parallel.
try self.bin_file.allocateDeclIndexes(new_decl);

// We ensureCapacity when scanning for decls.
self.work_queue.writeItemAssumeCapacity(.{ .codegen_decl = new_decl });
try self.work_queue.writeItem(.{ .codegen_decl = new_decl });
}
return new_decl;
}
}

/// Declares a dependency on the decl.
fn resolveCompleteDecl(self: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!*Decl {
const decl = try self.resolveDecl(scope, old_inst, link.ElfFile.Decl.empty);
const decl = try self.resolveDecl(scope, old_inst);
switch (decl.analysis) {
.initial_in_progress => unreachable,
.repeat_in_progress => unreachable,
.initial_dependency_failure,
.repeat_dependency_failure,
.initial_sema_failure,
.repeat_sema_failure,
.in_progress => unreachable,
.outdated => unreachable,

.dependency_failure,
.sema_failure,
.codegen_failure,
.codegen_failure_retryable,
=> return error.AnalysisFail,

.complete => return decl,
.complete => {},
}
if (scope.decl()) |scope_decl| {
try self.declareDeclDependency(scope_decl, decl);
}
return decl;
}

fn resolveInst(self: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!*Inst {
@@ -998,13 +1255,14 @@ fn analyzeExport(self: *Module, scope: *Scope, export_inst: *zir.Inst.Export) In
const new_export = try self.allocator.create(Export);
errdefer self.allocator.destroy(new_export);

const owner_decl = scope.decl();
const owner_decl = scope.decl().?;

new_export.* = .{
.options = .{ .name = symbol_name },
.src = export_inst.base.src,
.link = .{},
.owner_decl = owner_decl,
.exported_decl = exported_decl,
.status = .in_progress,
};
@@ -1327,7 +1585,7 @@ fn analyzeInstFn(self: *Module, scope: *Scope, fn_inst: *zir.Inst.Fn) InnerError
new_func.* = .{
.fn_type = fn_type,
.analysis = .{ .queued = fn_inst },
.owner_decl = scope.decl(),
.owner_decl = scope.decl().?,
};
const fn_payload = try scope.arena().create(Value.Payload.Function);
fn_payload.* = .{ .func = new_func };
@@ -2024,11 +2282,7 @@ fn failWithOwnedErrorMsg(self: *Module, scope: *Scope, src: usize, err_msg: *Err
switch (scope.tag) {
.decl => {
const decl = scope.cast(Scope.DeclAnalysis).?.decl;
switch (decl.analysis) {
.initial_in_progress => decl.analysis = .initial_sema_failure,
.repeat_in_progress => decl.analysis = .repeat_sema_failure,
else => unreachable,
}
decl.analysis = .sema_failure;
self.failed_decls.putAssumeCapacityNoClobber(decl, err_msg);
},
.block => {

@@ -126,6 +126,10 @@ pub const ElfFile = struct {
local_symbols: std.ArrayListUnmanaged(elf.Elf64_Sym) = std.ArrayListUnmanaged(elf.Elf64_Sym){},
global_symbols: std.ArrayListUnmanaged(elf.Elf64_Sym) = std.ArrayListUnmanaged(elf.Elf64_Sym){},

local_symbol_free_list: std.ArrayListUnmanaged(u32) = std.ArrayListUnmanaged(u32){},
global_symbol_free_list: std.ArrayListUnmanaged(u32) = std.ArrayListUnmanaged(u32){},
offset_table_free_list: std.ArrayListUnmanaged(u32) = std.ArrayListUnmanaged(u32){},

/// Same order as in the file. The value is the absolute vaddr value.
/// If the vaddr of the executable program header changes, the entire
/// offset table needs to be rewritten.
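// Illustrative note (not from the original source): the offset table behaves like a small
// global offset table. updateDecl stores each Decl's absolute vaddr at
// offset_table.items[decl.link.offset_table_index], so when a text block is moved only
// that one entry (plus the symbol) has to be rewritten, not the code that references it.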
@@ -138,11 +142,39 @@ pub const ElfFile = struct {

error_flags: ErrorFlags = ErrorFlags{},

/// A list of text blocks that have surplus capacity. This list can have false
/// positives, as functions grow and shrink over time, only sometimes being added
/// or removed from the freelist.
///
/// A text block has surplus capacity when its overcapacity value is greater than
/// minimum_text_block_size * alloc_num / alloc_den. That is, when it has so
/// much extra capacity that we could fit a small new symbol in it, itself with
/// ideal_capacity or more.
///
/// Ideal capacity is defined by size * alloc_num / alloc_den.
///
/// Overcapacity is measured by actual_capacity - ideal_capacity. Note that
/// overcapacity can be negative. A simple way to have negative overcapacity is to
/// allocate a fresh text block, which will have ideal capacity, and then grow it
/// by 1 byte. It will then have -1 overcapacity.
text_block_free_list: std.ArrayListUnmanaged(*TextBlock) = std.ArrayListUnmanaged(*TextBlock){},
last_text_block: ?*TextBlock = null,

/// `alloc_num / alloc_den` is the factor of padding when allocating.
const alloc_num = 4;
const alloc_den = 3;

/// In order for a slice of bytes to be considered eligible to keep metadata pointing at
/// it as a possible place to put new symbols, it must have enough room for this many bytes
/// (plus extra for reserved capacity).
const minimum_text_block_size = 64;
const min_text_capacity = minimum_text_block_size * alloc_num / alloc_den;
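
// Worked example (illustrative, not part of the original commit), using the constants above:
// min_text_capacity = 64 * 4 / 3 = 85. A symbol of size 60 has ideal capacity
// 60 * 4 / 3 = 80; if the next symbol starts 200 bytes later, the actual capacity is 200,
// so the surplus is 200 - 80 = 120 >= 85 and that block is eligible for the free list.
comptime {
    assert(minimum_text_block_size * alloc_num / alloc_den == 85);
    assert(60 * alloc_num / alloc_den == 80);
}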

pub const ErrorFlags = struct {
no_entry_point_found: bool = false,
};

pub const Decl = struct {
pub const TextBlock = struct {
/// Each decl always gets a local symbol with the fully qualified name.
/// The vaddr and size are found here directly.
/// The file offset is found by computing the vaddr offset from the section vaddr
@@ -152,11 +184,43 @@ pub const ElfFile = struct {
local_sym_index: u32,
/// This field is undefined for symbols with size = 0.
offset_table_index: u32,
/// Points to the previous and next neighbors, based on the `text_offset`.
/// This can be used to find, for example, the capacity of this `TextBlock`.
prev: ?*TextBlock,
next: ?*TextBlock,

pub const empty = Decl{
pub const empty = TextBlock{
.local_sym_index = 0,
.offset_table_index = undefined,
.prev = null,
.next = null,
};

/// Returns how much room there is to grow in virtual address space.
/// File offset relocation happens transparently, so it is not included in
/// this calculation.
fn capacity(self: TextBlock, elf_file: ElfFile) u64 {
const self_sym = elf_file.local_symbols.items[self.local_sym_index];
if (self.next) |next| {
const next_sym = elf_file.local_symbols.items[next.local_sym_index];
return next_sym.st_value - self_sym.st_value;
} else {
// We are the last block. The capacity is limited only by virtual address space.
return std.math.maxInt(u32) - self_sym.st_value;
}
}

fn freeListEligible(self: TextBlock, elf_file: ElfFile) bool {
// No need to keep a free list node for the last block.
const next = self.next orelse return false;
const self_sym = elf_file.local_symbols.items[self.local_sym_index];
const next_sym = elf_file.local_symbols.items[next.local_sym_index];
const cap = next_sym.st_value - self_sym.st_value;
const ideal_cap = self_sym.st_size * alloc_num / alloc_den;
if (cap <= ideal_cap) return false;
const surplus = cap - ideal_cap;
return surplus >= min_text_capacity;
}
};

pub const Export = struct {
@@ -169,6 +233,10 @@ pub const ElfFile = struct {
self.shstrtab.deinit(self.allocator);
self.local_symbols.deinit(self.allocator);
self.global_symbols.deinit(self.allocator);
self.global_symbol_free_list.deinit(self.allocator);
self.local_symbol_free_list.deinit(self.allocator);
self.offset_table_free_list.deinit(self.allocator);
self.text_block_free_list.deinit(self.allocator);
self.offset_table.deinit(self.allocator);
if (self.owns_file_handle) {
if (self.file) |f| f.close();
@@ -193,10 +261,6 @@ pub const ElfFile = struct {
});
}

// `alloc_num / alloc_den` is the factor of padding when allocation
const alloc_num = 4;
const alloc_den = 3;

/// Returns end pos of collision, if any.
fn detectAllocCollision(self: *ElfFile, start: u64, size: u64) ?u64 {
const small_ptr = self.options.target.cpu.arch.ptrBitWidth() == 32;
@@ -448,6 +512,13 @@ pub const ElfFile = struct {
self.phdr_table_offset = self.findFreeSpace(self.program_headers.items.len * phsize, phalign);
self.phdr_table_dirty = true;
}
{
// Iterate over symbols, populating free_list and last_text_block.
if (self.local_symbols.items.len != 1) {
@panic("TODO implement setting up free_list and last_text_block from existing ELF file");
}
// We are starting with an empty file. The default values are correct, null and empty list.
}
}

/// Commit pending changes and write headers.
@@ -577,7 +648,6 @@ pub const ElfFile = struct {
self.error_flags.no_entry_point_found = false;
try self.writeElfHeader();
}
// TODO find end pos and truncate

// The point of flush() is to commit changes, so nothing should be dirty after this.
assert(!self.phdr_table_dirty);
@@ -709,100 +779,222 @@ pub const ElfFile = struct {
try self.file.?.pwriteAll(hdr_buf[0..index], 0);
}

const AllocatedBlock = struct {
vaddr: u64,
file_offset: u64,
size_capacity: u64,
};

fn allocateTextBlock(self: *ElfFile, new_block_size: u64, alignment: u64) !AllocatedBlock {
const phdr = &self.program_headers.items[self.phdr_load_re_index.?];
const shdr = &self.sections.items[self.text_section_index.?];

// TODO Also detect virtual address collisions.
const text_capacity = self.allocatedSize(shdr.sh_offset);
// TODO instead of looping here, maintain a free list and a pointer to the end.
var last_start: u64 = phdr.p_vaddr;
var last_size: u64 = 0;
for (self.local_symbols.items) |sym| {
if (sym.st_value + sym.st_size > last_start + last_size) {
last_start = sym.st_value;
last_size = sym.st_size;
fn freeTextBlock(self: *ElfFile, text_block: *TextBlock) void {
var already_have_free_list_node = false;
{
var i: usize = 0;
while (i < self.text_block_free_list.items.len) {
if (self.text_block_free_list.items[i] == text_block) {
_ = self.text_block_free_list.swapRemove(i);
continue;
}
if (self.text_block_free_list.items[i] == text_block.prev) {
already_have_free_list_node = true;
}
i += 1;
}
}
const end_vaddr = last_start + (last_size * alloc_num / alloc_den);
const aligned_start_vaddr = mem.alignForwardGeneric(u64, end_vaddr, alignment);
const needed_size = (aligned_start_vaddr + new_block_size) - phdr.p_vaddr;
if (needed_size > text_capacity) {
// Must move the entire text section.
const new_offset = self.findFreeSpace(needed_size, 0x1000);
const text_size = (last_start + last_size) - phdr.p_vaddr;
const amt = try self.file.?.copyRangeAll(shdr.sh_offset, self.file.?, new_offset, text_size);
if (amt != text_size) return error.InputOutput;
shdr.sh_offset = new_offset;
phdr.p_offset = new_offset;

if (self.last_text_block == text_block) {
// TODO shrink the .text section size here
self.last_text_block = text_block.prev;
}
// Now that we know the code size, we need to update the program header for executable code
shdr.sh_size = needed_size;
phdr.p_memsz = needed_size;
phdr.p_filesz = needed_size;

self.phdr_table_dirty = true; // TODO look into making only the one program header dirty
self.shdr_table_dirty = true; // TODO look into making only the one section dirty
if (text_block.prev) |prev| {
prev.next = text_block.next;

return AllocatedBlock{
.vaddr = aligned_start_vaddr,
.file_offset = shdr.sh_offset + (aligned_start_vaddr - phdr.p_vaddr),
.size_capacity = text_capacity - needed_size,
};
if (!already_have_free_list_node and prev.freeListEligible(self.*)) {
// The free list is a heuristic; it doesn't have to be perfect, so we can
// ignore the OOM here.
self.text_block_free_list.append(self.allocator, prev) catch {};
}
} else {
text_block.prev = null;
}

if (text_block.next) |next| {
next.prev = text_block.prev;
} else {
text_block.next = null;
}
}

fn findAllocatedTextBlock(self: *ElfFile, sym: elf.Elf64_Sym) AllocatedBlock {
fn shrinkTextBlock(self: *ElfFile, text_block: *TextBlock, new_block_size: u64) void {
// TODO check the new capacity, and if it crosses the size threshold into a big enough
// capacity, insert a free list node for it.
}

fn growTextBlock(self: *ElfFile, text_block: *TextBlock, new_block_size: u64, alignment: u64) !u64 {
const sym = self.local_symbols.items[text_block.local_sym_index];
const align_ok = mem.alignBackwardGeneric(u64, sym.st_value, alignment) == sym.st_value;
const need_realloc = !align_ok or new_block_size > text_block.capacity(self.*);
if (!need_realloc) return sym.st_value;
return self.allocateTextBlock(text_block, new_block_size, alignment);
}

fn allocateTextBlock(self: *ElfFile, text_block: *TextBlock, new_block_size: u64, alignment: u64) !u64 {
const phdr = &self.program_headers.items[self.phdr_load_re_index.?];
const shdr = &self.sections.items[self.text_section_index.?];
const new_block_ideal_capacity = new_block_size * alloc_num / alloc_den;

// Find the next sym after this one.
// TODO look into using a hash map to speed up perf.
const text_capacity = self.allocatedSize(shdr.sh_offset);
var next_vaddr_start = phdr.p_vaddr + text_capacity;
for (self.local_symbols.items) |elem| {
if (elem.st_value < sym.st_value) continue;
if (elem.st_value < next_vaddr_start) next_vaddr_start = elem.st_value;
}
return .{
.vaddr = sym.st_value,
.file_offset = shdr.sh_offset + (sym.st_value - phdr.p_vaddr),
.size_capacity = next_vaddr_start - sym.st_value,
// We use these to indicate our intention to update metadata, placing the new block,
// and possibly removing a free list node.
// It would be simpler to do it inside the for loop below, but that would cause a
// problem if an error was returned later in the function. So this action
// is actually carried out at the end of the function, when errors are no longer possible.
var block_placement: ?*TextBlock = null;
var free_list_removal: ?usize = null;

// First we look for an appropriately sized free list node.
// The list is unordered. We'll just take the first thing that works.
const vaddr = blk: {
var i: usize = 0;
while (i < self.text_block_free_list.items.len) {
const big_block = self.text_block_free_list.items[i];
// We now have a pointer to a live text block that has too much capacity.
// Is it enough that we could fit this new text block?
const sym = self.local_symbols.items[big_block.local_sym_index];
const capacity = big_block.capacity(self.*);
const ideal_capacity = capacity * alloc_num / alloc_den;
const ideal_capacity_end_vaddr = sym.st_value + ideal_capacity;
const capacity_end_vaddr = sym.st_value + capacity;
const new_start_vaddr_unaligned = capacity_end_vaddr - new_block_ideal_capacity;
const new_start_vaddr = mem.alignBackwardGeneric(u64, new_start_vaddr_unaligned, alignment);
if (new_start_vaddr < ideal_capacity_end_vaddr) {
// Additional bookkeeping here to notice if this free list node
// should be deleted because the block that it points to has grown to take up
// more of the extra capacity.
if (!big_block.freeListEligible(self.*)) {
_ = self.text_block_free_list.swapRemove(i);
} else {
i += 1;
}
continue;
}
// At this point we know that we will place the new block here. But the
// remaining question is whether there is still yet enough capacity left
// over for there to still be a free list node.
const remaining_capacity = new_start_vaddr - ideal_capacity_end_vaddr;
const keep_free_list_node = remaining_capacity >= min_text_capacity;

// Set up the metadata to be updated, after errors are no longer possible.
block_placement = big_block;
if (!keep_free_list_node) {
free_list_removal = i;
}
break :blk new_start_vaddr;
} else if (self.last_text_block) |last| {
const sym = self.local_symbols.items[last.local_sym_index];
const ideal_capacity = sym.st_size * alloc_num / alloc_den;
const ideal_capacity_end_vaddr = sym.st_value + ideal_capacity;
const new_start_vaddr = mem.alignForwardGeneric(u64, ideal_capacity_end_vaddr, alignment);
// Set up the metadata to be updated, after errors are no longer possible.
block_placement = last;
break :blk new_start_vaddr;
} else {
break :blk phdr.p_vaddr;
}
};

const expand_text_section = block_placement == null or block_placement.?.next == null;
if (expand_text_section) {
const text_capacity = self.allocatedSize(shdr.sh_offset);
const needed_size = (vaddr + new_block_size) - phdr.p_vaddr;
if (needed_size > text_capacity) {
// Must move the entire text section.
const new_offset = self.findFreeSpace(needed_size, 0x1000);
const text_size = if (self.last_text_block) |last| blk: {
const sym = self.local_symbols.items[last.local_sym_index];
break :blk (sym.st_value + sym.st_size) - phdr.p_vaddr;
} else 0;
const amt = try self.file.?.copyRangeAll(shdr.sh_offset, self.file.?, new_offset, text_size);
if (amt != text_size) return error.InputOutput;
shdr.sh_offset = new_offset;
phdr.p_offset = new_offset;
}
self.last_text_block = text_block;

shdr.sh_size = needed_size;
phdr.p_memsz = needed_size;
phdr.p_filesz = needed_size;

self.phdr_table_dirty = true; // TODO look into making only the one program header dirty
self.shdr_table_dirty = true; // TODO look into making only the one section dirty
}

// This function can also reallocate a text block.
// In this case we need to "unplug" it from its previous location before
// plugging it in to its new location.
if (text_block.prev) |prev| {
prev.next = text_block.next;
}
if (text_block.next) |next| {
next.prev = text_block.prev;
}

if (block_placement) |big_block| {
text_block.prev = big_block;
text_block.next = big_block.next;
big_block.next = text_block;
} else {
text_block.prev = null;
text_block.next = null;
}
if (free_list_removal) |i| {
_ = self.text_block_free_list.swapRemove(i);
}
return vaddr;
}

pub fn allocateDeclIndexes(self: *ElfFile, decl: *Module.Decl) !void {
if (decl.link.local_sym_index != 0) return;

// Here we also ensure capacity for the free lists so that they can be appended to without fail.
try self.local_symbols.ensureCapacity(self.allocator, self.local_symbols.items.len + 1);
try self.local_symbol_free_list.ensureCapacity(self.allocator, self.local_symbols.items.len);
try self.offset_table.ensureCapacity(self.allocator, self.offset_table.items.len + 1);
const local_sym_index = self.local_symbols.items.len;
const offset_table_index = self.offset_table.items.len;
try self.offset_table_free_list.ensureCapacity(self.allocator, self.local_symbols.items.len);

if (self.local_symbol_free_list.popOrNull()) |i| {
//std.debug.warn("reusing symbol index {} for {}\n", .{i, decl.name});
decl.link.local_sym_index = i;
} else {
//std.debug.warn("allocating symbol index {} for {}\n", .{self.local_symbols.items.len, decl.name});
decl.link.local_sym_index = @intCast(u32, self.local_symbols.items.len);
_ = self.local_symbols.addOneAssumeCapacity();
}

if (self.offset_table_free_list.popOrNull()) |i| {
decl.link.offset_table_index = i;
} else {
decl.link.offset_table_index = @intCast(u32, self.offset_table.items.len);
_ = self.offset_table.addOneAssumeCapacity();
self.offset_table_count_dirty = true;
}

const phdr = &self.program_headers.items[self.phdr_load_re_index.?];

self.local_symbols.appendAssumeCapacity(.{
self.local_symbols.items[decl.link.local_sym_index] = .{
.st_name = 0,
.st_info = 0,
.st_other = 0,
.st_shndx = 0,
.st_value = phdr.p_vaddr,
.st_size = 0,
});
errdefer self.local_symbols.shrink(self.allocator, self.local_symbols.items.len - 1);
self.offset_table.appendAssumeCapacity(0);
errdefer self.offset_table.shrink(self.allocator, self.offset_table.items.len - 1);

self.offset_table_count_dirty = true;

decl.link = .{
.local_sym_index = @intCast(u32, local_sym_index),
.offset_table_index = @intCast(u32, offset_table_index),
};
self.offset_table.items[decl.link.offset_table_index] = 0;
}

pub fn freeDecl(self: *ElfFile, decl: *Module.Decl) void {
self.freeTextBlock(&decl.link);
if (decl.link.local_sym_index != 0) {
self.local_symbol_free_list.appendAssumeCapacity(decl.link.local_sym_index);
self.offset_table_free_list.appendAssumeCapacity(decl.link.offset_table_index);

self.local_symbols.items[decl.link.local_sym_index].st_info = 0;

decl.link.local_sym_index = 0;
}
}

pub fn updateDecl(self: *ElfFile, module: *Module, decl: *Module.Decl) !void {
@@ -822,80 +1014,60 @@ pub const ElfFile = struct {

const required_alignment = typed_value.ty.abiAlignment(self.options.target);

const file_offset = blk: {
const stt_bits: u8 = switch (typed_value.ty.zigTypeTag()) {
.Fn => elf.STT_FUNC,
else => elf.STT_OBJECT,
};

if (decl.link.local_sym_index != 0) {
const local_sym = &self.local_symbols.items[decl.link.local_sym_index];
const existing_block = self.findAllocatedTextBlock(local_sym.*);
const need_realloc = local_sym.st_size == 0 or
code.len > existing_block.size_capacity or
!mem.isAlignedGeneric(u64, local_sym.st_value, required_alignment);
// TODO check for collision with another symbol
const file_offset = if (need_realloc) fo: {
const new_block = try self.allocateTextBlock(code.len, required_alignment);
local_sym.st_value = new_block.vaddr;
self.offset_table.items[decl.link.offset_table_index] = new_block.vaddr;

//std.debug.warn("{}: writing got index {}=0x{x}\n", .{
// decl.name,
// decl.link.offset_table_index,
// self.offset_table.items[decl.link.offset_table_index],
//});
try self.writeOffsetTableEntry(decl.link.offset_table_index);

break :fo new_block.file_offset;
} else existing_block.file_offset;
local_sym.st_size = code.len;
local_sym.st_name = try self.updateString(local_sym.st_name, mem.spanZ(decl.name));
local_sym.st_info = (elf.STB_LOCAL << 4) | stt_bits;
local_sym.st_other = 0;
local_sym.st_shndx = self.text_section_index.?;
// TODO this write could be avoided if no fields of the symbol were changed.
try self.writeSymbol(decl.link.local_sym_index);

//std.debug.warn("updating {} at vaddr 0x{x}\n", .{ decl.name, local_sym.st_value });
break :blk file_offset;
} else {
try self.local_symbols.ensureCapacity(self.allocator, self.local_symbols.items.len + 1);
try self.offset_table.ensureCapacity(self.allocator, self.offset_table.items.len + 1);
const decl_name = mem.spanZ(decl.name);
const name_str_index = try self.makeString(decl_name);
const new_block = try self.allocateTextBlock(code.len, required_alignment);
const local_sym_index = self.local_symbols.items.len;
const offset_table_index = self.offset_table.items.len;

//std.debug.warn("add symbol for {} at vaddr 0x{x}, size {}\n", .{ decl.name, new_block.vaddr, code.len });
self.local_symbols.appendAssumeCapacity(.{
.st_name = name_str_index,
.st_info = (elf.STB_LOCAL << 4) | stt_bits,
.st_other = 0,
.st_shndx = self.text_section_index.?,
.st_value = new_block.vaddr,
.st_size = code.len,
});
errdefer self.local_symbols.shrink(self.allocator, self.local_symbols.items.len - 1);
self.offset_table.appendAssumeCapacity(new_block.vaddr);
errdefer self.offset_table.shrink(self.allocator, self.offset_table.items.len - 1);

self.offset_table_count_dirty = true;

try self.writeSymbol(local_sym_index);
try self.writeOffsetTableEntry(offset_table_index);

decl.link = .{
.local_sym_index = @intCast(u32, local_sym_index),
.offset_table_index = @intCast(u32, offset_table_index),
};

//std.debug.warn("writing new {} at vaddr 0x{x}\n", .{ decl.name, new_block.vaddr });
break :blk new_block.file_offset;
}
const stt_bits: u8 = switch (typed_value.ty.zigTypeTag()) {
.Fn => elf.STT_FUNC,
else => elf.STT_OBJECT,
};

assert(decl.link.local_sym_index != 0); // Caller forgot to allocateDeclIndexes()
const local_sym = &self.local_symbols.items[decl.link.local_sym_index];
if (local_sym.st_size != 0) {
const capacity = decl.link.capacity(self.*);
const need_realloc = code.len > capacity or
!mem.isAlignedGeneric(u64, local_sym.st_value, required_alignment);
if (need_realloc) {
const vaddr = try self.growTextBlock(&decl.link, code.len, required_alignment);
//std.debug.warn("growing {} from 0x{x} to 0x{x}\n", .{ decl.name, local_sym.st_value, vaddr });
if (vaddr != local_sym.st_value) {
local_sym.st_value = vaddr;

//std.debug.warn(" (writing new offset table entry)\n", .{});
self.offset_table.items[decl.link.offset_table_index] = vaddr;
try self.writeOffsetTableEntry(decl.link.offset_table_index);
}
} else if (code.len < local_sym.st_size) {
self.shrinkTextBlock(&decl.link, code.len);
}
local_sym.st_size = code.len;
local_sym.st_name = try self.updateString(local_sym.st_name, mem.spanZ(decl.name));
local_sym.st_info = (elf.STB_LOCAL << 4) | stt_bits;
local_sym.st_other = 0;
local_sym.st_shndx = self.text_section_index.?;
// TODO this write could be avoided if no fields of the symbol were changed.
try self.writeSymbol(decl.link.local_sym_index);
} else {
const decl_name = mem.spanZ(decl.name);
const name_str_index = try self.makeString(decl_name);
const vaddr = try self.allocateTextBlock(&decl.link, code.len, required_alignment);
//std.debug.warn("allocated text block for {} at 0x{x}\n", .{ decl_name, vaddr });
errdefer self.freeTextBlock(&decl.link);

local_sym.* = .{
.st_name = name_str_index,
.st_info = (elf.STB_LOCAL << 4) | stt_bits,
.st_other = 0,
.st_shndx = self.text_section_index.?,
.st_value = vaddr,
.st_size = code.len,
};
self.offset_table.items[decl.link.offset_table_index] = vaddr;

try self.writeSymbol(decl.link.local_sym_index);
try self.writeOffsetTableEntry(decl.link.offset_table_index);
}

const section_offset = local_sym.st_value - self.program_headers.items[self.phdr_load_re_index.?].p_vaddr;
const file_offset = self.sections.items[self.text_section_index.?].sh_offset + section_offset;
try self.file.?.pwriteAll(code, file_offset);

// Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated.
@@ -910,7 +1082,10 @@ pub const ElfFile = struct {
decl: *const Module.Decl,
exports: []const *Module.Export,
) !void {
// In addition to ensuring capacity for global_symbols, we also ensure capacity for freeing all of
// them, so that deleting exports is guaranteed to succeed.
try self.global_symbols.ensureCapacity(self.allocator, self.global_symbols.items.len + exports.len);
try self.global_symbol_free_list.ensureCapacity(self.allocator, self.global_symbols.items.len);
const typed_value = decl.typed_value.most_recent.typed_value;
if (decl.link.local_sym_index == 0) return;
const decl_sym = self.local_symbols.items[decl.link.local_sym_index];
@@ -957,22 +1132,30 @@ pub const ElfFile = struct {
};
} else {
const name = try self.makeString(exp.options.name);
const i = self.global_symbols.items.len;
self.global_symbols.appendAssumeCapacity(.{
const i = if (self.global_symbol_free_list.popOrNull()) |i| i else blk: {
_ = self.global_symbols.addOneAssumeCapacity();
break :blk self.global_symbols.items.len - 1;
};
self.global_symbols.items[i] = .{
.st_name = name,
.st_info = (stb_bits << 4) | stt_bits,
.st_other = 0,
.st_shndx = self.text_section_index.?,
.st_value = decl_sym.st_value,
.st_size = decl_sym.st_size,
});
errdefer self.global_symbols.shrink(self.allocator, self.global_symbols.items.len - 1);
};

exp.link.sym_index = @intCast(u32, i);
}
}
}

pub fn deleteExport(self: *ElfFile, exp: Export) void {
const sym_index = exp.sym_index orelse return;
self.global_symbol_free_list.appendAssumeCapacity(sym_index);
self.global_symbols.items[sym_index].st_info = 0;
}

fn writeProgHeader(self: *ElfFile, index: usize) !void {
const foreign_endian = self.options.target.cpu.arch.endian() != std.Target.current.cpu.arch.endian();
const offset = self.program_headers.items[index].p_offset;

@@ -92,13 +92,13 @@ pub const Type = extern union {
return @fieldParentPtr(T, "base", self.ptr_otherwise);
}

pub fn eql(self: Type, other: Type) bool {
//std.debug.warn("test {} == {}\n", .{ self, other });
pub fn eql(a: Type, b: Type) bool {
//std.debug.warn("test {} == {}\n", .{ a, b });
// As a shortcut, if the small tags / addresses match, we're done.
if (self.tag_if_small_enough == other.tag_if_small_enough)
if (a.tag_if_small_enough == b.tag_if_small_enough)
return true;
const zig_tag_a = self.zigTypeTag();
const zig_tag_b = self.zigTypeTag();
const zig_tag_a = a.zigTypeTag();
const zig_tag_b = b.zigTypeTag();
if (zig_tag_a != zig_tag_b)
return false;
switch (zig_tag_a) {
@@ -111,24 +111,40 @@ pub const Type = extern union {
.Undefined => return true,
.Null => return true,
.Pointer => {
const is_slice_a = isSlice(self);
const is_slice_b = isSlice(other);
const is_slice_a = isSlice(a);
const is_slice_b = isSlice(b);
if (is_slice_a != is_slice_b)
return false;
@panic("TODO implement more pointer Type equality comparison");
},
.Int => {
if (self.tag() != other.tag()) {
if (a.tag() != b.tag()) {
// Detect that e.g. u64 != usize, even if the bits match on a particular target.
return false;
}
// The target will not be branched upon, because we handled target-dependent cases above.
const info_a = self.intInfo(@as(Target, undefined));
const info_b = self.intInfo(@as(Target, undefined));
const info_a = a.intInfo(@as(Target, undefined));
const info_b = b.intInfo(@as(Target, undefined));
return info_a.signed == info_b.signed and info_a.bits == info_b.bits;
},
.Array => {
if (a.arrayLen() != b.arrayLen())
return false;
if (!a.elemType().eql(b.elemType()))
return false;
const sentinel_a = a.arraySentinel();
const sentinel_b = b.arraySentinel();
if (sentinel_a) |sa| {
if (sentinel_b) |sb| {
return sa.eql(sb);
} else {
return false;
}
} else {
return sentinel_b == null;
}
},
.Float,
.Array,
.Struct,
.Optional,
.ErrorUnion,

@@ -666,6 +666,11 @@ pub const Value = extern union {
return orderAgainstZero(lhs).compare(op);
}

pub fn eql(a: Value, b: Value) bool {
// TODO non numerical comparisons
return compare(a, .eq, b);
}

pub fn toBool(self: Value) bool {
return switch (self.tag()) {
.bool_true => true,

@@ -442,6 +442,16 @@ pub const Module = struct {

const InstPtrTable = std.AutoHashMap(*Inst, struct { index: usize, fn_body: ?*Module.Body });

/// TODO Look into making a table to speed this up.
pub fn findDecl(self: Module, name: []const u8) ?*Inst {
for (self.decls) |decl| {
if (mem.eql(u8, decl.name, name)) {
return decl;
}
}
return null;
}

/// The allocator is used for temporary storage, but this function always returns
/// with no resources allocated.
pub fn writeToStream(self: Module, allocator: *Allocator, stream: var) !void {

@@ -200,6 +200,73 @@ pub fn addCases(ctx: *TestContext) void {
\\@9 = str("_start")
\\@10 = ref(@9)
\\@11 = export(@10, @start)
,
\\@noreturn = primitive(noreturn)
\\@void = primitive(void)
\\@usize = primitive(usize)
\\@0 = int(0)
\\@1 = int(1)
\\@2 = int(2)
\\@3 = int(3)
\\
\\@syscall_array = str("syscall")
\\@sysoutreg_array = str("={rax}")
\\@rax_array = str("{rax}")
\\@rdi_array = str("{rdi}")
\\@rcx_array = str("rcx")
\\@r11_array = str("r11")
\\@rdx_array = str("{rdx}")
\\@rsi_array = str("{rsi}")
\\@memory_array = str("memory")
\\@len_array = str("len")
\\
\\@msg = str("Hello, world!\n")
\\@msg2 = str("Editing the same msg2 decl but this time with a much longer message which will\ncause the data to need to be relocated in virtual address space.\n")
\\
\\@start_fnty = fntype([], @noreturn, cc=Naked)
\\@start = fn(@start_fnty, {
\\ %SYS_exit_group = int(231)
\\ %exit_code = as(@usize, @0)
\\
\\ %syscall = ref(@syscall_array)
\\ %sysoutreg = ref(@sysoutreg_array)
\\ %rax = ref(@rax_array)
\\ %rdi = ref(@rdi_array)
\\ %rcx = ref(@rcx_array)
\\ %rdx = ref(@rdx_array)
\\ %rsi = ref(@rsi_array)
\\ %r11 = ref(@r11_array)
\\ %memory = ref(@memory_array)
\\
\\ %SYS_write = as(@usize, @1)
\\ %STDOUT_FILENO = as(@usize, @1)
\\
\\ %msg_ptr = ref(@msg2)
\\ %msg_addr = ptrtoint(%msg_ptr)
\\
\\ %len_name = ref(@len_array)
\\ %msg_len_ptr = fieldptr(%msg_ptr, %len_name)
\\ %msg_len = deref(%msg_len_ptr)
\\ %rc_write = asm(%syscall, @usize,
\\ volatile=1,
\\ output=%sysoutreg,
\\ inputs=[%rax, %rdi, %rsi, %rdx],
\\ clobbers=[%rcx, %r11, %memory],
\\ args=[%SYS_write, %STDOUT_FILENO, %msg_addr, %msg_len])
\\
\\ %rc_exit = asm(%syscall, @usize,
\\ volatile=1,
\\ output=%sysoutreg,
\\ inputs=[%rax, %rdi],
\\ clobbers=[%rcx, %r11, %memory],
\\ args=[%SYS_exit_group, %exit_code])
\\
\\ %99 = unreachable()
\\});
\\
\\@9 = str("_start")
\\@10 = ref(@9)
\\@11 = export(@10, @start)
},
&[_][]const u8{
\\Hello, world!
@@ -207,6 +274,10 @@ pub fn addCases(ctx: *TestContext) void {
,
\\HELL WORLD
\\
,
\\Editing the same msg2 decl but this time with a much longer message which will
\\cause the data to need to be relocated in virtual address space.
\\
},
);