stage2: first pass at recursive dependency resolution

master
Andrew Kelley 2020-05-28 12:19:00 -04:00
parent c7ca1fe6f7
commit 3eed7a4dea
4 changed files with 290 additions and 129 deletions

View File

@ -128,6 +128,9 @@ pub const Decl = struct {
/// Completed successfully before; the `typed_value.most_recent` can be accessed, and
/// new semantic analysis is in progress.
repeat_in_progress,
/// Failed before; the `typed_value.most_recent` is not available, and
/// new semantic analysis is in progress.
repeat_in_progress_novalue,
/// Everything is done and updated.
complete,
},
@ -136,18 +139,24 @@ pub const Decl = struct {
/// This is populated regardless of semantic analysis and code generation.
link: link.ElfFile.TextBlock = link.ElfFile.TextBlock.empty,
contents_hash: Hash,
/// The shallow set of other decls whose typed_value could possibly change if this Decl's
/// typed_value is modified.
/// TODO look into using a lightweight map/set data structure rather than a linear array.
dependants: ArrayListUnmanaged(*Decl) = ArrayListUnmanaged(*Decl){},
/// The shallow set of other decls which, when their typed_value changes, may
/// require this Decl's typed_value to be regenerated.
/// TODO look into using a lightweight map/set data structure rather than a linear array.
dependencies: ArrayListUnmanaged(*Decl) = ArrayListUnmanaged(*Decl){},
pub fn destroy(self: *Decl, allocator: *Allocator) void {
allocator.free(mem.spanZ(self.name));
if (self.typedValueManaged()) |tvm| {
tvm.deinit(allocator);
}
self.dependants.deinit(allocator);
self.dependencies.deinit(allocator);
allocator.destroy(self);
}
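The two list fields give each dependency edge two homes: an edge from depender to dependee is recorded in the depender's `dependencies` and mirrored in the dependee's `dependants`, so either direction of the graph can be walked without scanning every decl. A minimal standalone sketch of that shape, using a hypothetical `Node` in place of `Decl` (Zig std of this era, where allocators are passed as `*Allocator`):

const std = @import("std");
const Allocator = std.mem.Allocator;

const Node = struct {
    // Edge A -> B lives in A.dependencies and, mirrored, in B.dependants.
    dependencies: std.ArrayListUnmanaged(*Node) = std.ArrayListUnmanaged(*Node){},
    dependants: std.ArrayListUnmanaged(*Node) = std.ArrayListUnmanaged(*Node){},

    fn destroy(self: *Node, allocator: *Allocator) void {
        // The node owns both edge lists, so both must be deinited here,
        // just as Decl.destroy above deinits dependants and dependencies.
        self.dependants.deinit(allocator);
        self.dependencies.deinit(allocator);
        allocator.destroy(self);
    }
};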
@ -204,6 +213,7 @@ pub const Decl = struct {
.initial_in_progress,
.initial_dependency_failure,
.initial_sema_failure,
.repeat_in_progress_novalue,
=> return null,
.codegen_failure,
.codegen_failure_retryable,
@ -214,6 +224,31 @@ pub const Decl = struct {
=> return &self.typed_value.most_recent,
}
}
fn flagForRegeneration(self: *Decl) void {
if (self.typedValueManaged() == null) {
self.analysis = .repeat_in_progress_novalue;
} else {
self.analysis = .repeat_in_progress;
}
}
fn isFlaggedForRegeneration(self: *Decl) bool {
return switch (self.analysis) {
.repeat_in_progress, .repeat_in_progress_novalue => true,
else => false,
};
}
fn removeDependant(self: *Decl, other: *Decl) void {
for (self.dependants.items) |item, i| {
if (item == other) {
_ = self.dependants.swapRemove(i);
return;
}
}
unreachable;
}
};
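`removeDependant` scans linearly and then uses `swapRemove`, which fills the hole with the last element rather than shifting the tail: O(1) per removal, at the cost of ordering, which these sets never relied on. A small test of that behavior (a hedged sketch against the std API of this era):

const std = @import("std");

test "swapRemove is O(1) but does not preserve order" {
    var list = std.ArrayList(u32).init(std.testing.allocator);
    defer list.deinit();
    try list.appendSlice(&[_]u32{ 10, 20, 30, 40 });
    _ = list.swapRemove(1); // 40 is moved into slot 1
    std.testing.expectEqualSlices(u32, &[_]u32{ 10, 40, 30 }, list.items);
}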
/// Fn struct memory is owned by the Decl's TypedValue.Managed arena allocator.
@ -266,12 +301,12 @@ pub const Scope = struct {
/// Returns the Decl associated with the scope, or null if the scope is a
/// ZIRModule (which has no owning Decl).
pub fn decl(self: *Scope) *Decl {
switch (self.tag) {
.block => return self.cast(Block).?.decl,
.decl => return self.cast(DeclAnalysis).?.decl,
.zir_module => unreachable,
}
pub fn decl(self: *Scope) ?*Decl {
return switch (self.tag) {
.block => self.cast(Block).?.decl,
.decl => self.cast(DeclAnalysis).?.decl,
.zir_module => null,
};
}
/// Asserts the scope has a parent which is a ZIRModule and
@ -517,11 +552,7 @@ pub fn deinit(self: *Module) void {
{
var it = self.export_owners.iterator();
while (it.next()) |kv| {
const export_list = kv.value;
for (export_list) |exp| {
allocator.destroy(exp);
}
allocator.free(export_list);
freeExportList(allocator, kv.value);
}
self.export_owners.deinit();
}
@ -532,6 +563,13 @@ pub fn deinit(self: *Module) void {
self.* = undefined;
}
fn freeExportList(allocator: *Allocator, export_list: []*Export) void {
for (export_list) |exp| {
allocator.destroy(exp);
}
allocator.free(export_list);
}
pub fn target(self: Module) std.Target {
return self.bin_file.options.target;
}
@ -634,9 +672,9 @@ const InnerError = error{ OutOfMemory, AnalysisFail };
pub fn performAllTheWork(self: *Module) error{OutOfMemory}!void {
while (self.work_queue.readItem()) |work_item| switch (work_item) {
.codegen_decl => |decl| switch (decl.analysis) {
.initial_in_progress,
.repeat_in_progress,
=> unreachable,
.initial_in_progress => unreachable,
.repeat_in_progress => unreachable,
.repeat_in_progress_novalue => unreachable,
.initial_sema_failure,
.repeat_sema_failure,
@ -686,6 +724,23 @@ pub fn performAllTheWork(self: *Module) error{OutOfMemory}!void {
};
}
fn declareDeclDependency(self: *Module, depender: *Decl, dependee: *Decl) !void {
try depender.dependencies.ensureCapacity(self.allocator, depender.dependencies.items.len + 1);
try dependee.dependants.ensureCapacity(self.allocator, dependee.dependants.items.len + 1);
for (depender.dependencies.items) |item| {
if (item == dependee) break; // Already in the set.
} else {
depender.dependencies.appendAssumeCapacity(dependee);
}
for (dependee.dependants.items) |item| {
if (item == depender) break; // Already in the set.
} else {
dependee.dependants.appendAssumeCapacity(depender);
}
}
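Note the ordering: both `ensureCapacity` calls happen before either append, so an OutOfMemory failure cannot record the edge on one side and lose it on the other. The membership test is Zig's `for ... else`, where the else branch runs only if the loop finished without a break. A self-contained illustration of the idiom:

const std = @import("std");

test "for-else runs only when no break fired" {
    const set = [_]u32{ 1, 2, 3 };
    var appended = false;
    for (set) |item| {
        if (item == 2) break; // already in the set
    } else {
        appended = true; // the append branch
    }
    std.testing.expect(!appended);
}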
fn getSource(self: *Module, root_scope: *Scope.ZIRModule) ![:0]const u8 {
switch (root_scope.source) {
.unloaded => {
@ -772,37 +827,105 @@ fn analyzeRoot(self: *Module, root_scope: *Scope.ZIRModule) !void {
=> {
const src_module = try self.getSrcModule(root_scope);
// Look for changed decls. First we add all the decls that changed
// into the set.
var regen_decl_set = std.ArrayList(*Decl).init(self.allocator);
defer regen_decl_set.deinit();
try regen_decl_set.ensureCapacity(src_module.decls.len);
var exports_to_resolve = std.ArrayList(*zir.Inst).init(self.allocator);
defer exports_to_resolve.deinit();
for (src_module.decls) |src_decl| {
const name_hash = Decl.hashSimpleName(src_decl.name);
if (self.decl_table.get(name_hash)) |kv| {
const decl = kv.value;
const new_contents_hash = Decl.hashSimpleName(src_decl.contents);
if (!mem.eql(u8, &new_contents_hash, &decl.contents_hash)) {
// TODO recursive dependency management
//std.debug.warn("noticed that '{}' changed\n", .{src_decl.name});
self.decl_table.removeAssertDiscard(name_hash);
const saved_link = decl.link;
decl.destroy(self.allocator);
if (self.export_owners.getValue(decl)) |exports| {
@panic("TODO handle updating a decl that does an export");
}
const new_decl = self.resolveDecl(
&root_scope.base,
src_decl,
saved_link,
) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => continue,
};
if (self.decl_exports.remove(decl)) |entry| {
self.decl_exports.putAssumeCapacityNoClobber(new_decl, entry.value);
}
std.debug.warn("noticed that '{}' changed\n", .{src_decl.name});
regen_decl_set.appendAssumeCapacity(decl);
}
} else if (src_decl.cast(zir.Inst.Export)) |export_inst| {
_ = try self.resolveDecl(&root_scope.base, &export_inst.base, link.ElfFile.TextBlock.empty);
try exports_to_resolve.append(&export_inst.base);
}
}
// Next, recursively chase the dependency graph, to populate the set.
{
var i: usize = 0;
while (i < regen_decl_set.items.len) : (i += 1) {
const decl = regen_decl_set.items[i];
if (decl.isFlaggedForRegeneration()) {
// We already looked at this decl's dependency graph.
continue;
}
decl.flagForRegeneration();
// Remove this decl from the dependants list of each of its dependencies,
// because we are about to destroy the decl pointer.
for (decl.dependencies.items) |dep| {
dep.removeDependant(decl);
}
// Populate the set with decls that need to get regenerated because they
// depend on this one.
// TODO If it is only a function body that is modified, it should break the chain
// and not cause its dependants to be regenerated.
for (decl.dependants.items) |dep| {
if (!dep.isFlaggedForRegeneration()) {
regen_decl_set.appendAssumeCapacity(dep);
}
}
}
}
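The block above is an iterative graph walk: indexing `regen_decl_set.items` while the loop appends to it turns the array into a worklist, and the regeneration flag doubles as the visited mark, so diamonds (or cycles) in the dependency graph are not expanded twice. A condensed sketch of the same pattern over a hypothetical `Node`, assuming, as above, that enough capacity was reserved up front for `appendAssumeCapacity` to be safe:

const std = @import("std");

const Node = struct {
    flagged: bool = false,
    dependants: std.ArrayListUnmanaged(*Node) = std.ArrayListUnmanaged(*Node){},
};

fn markTransitiveDependants(worklist: *std.ArrayList(*Node)) void {
    var i: usize = 0;
    while (i < worklist.items.len) : (i += 1) {
        const node = worklist.items[i];
        if (node.flagged) continue; // this node was already expanded
        node.flagged = true;
        for (node.dependants.items) |dep| {
            if (!dep.flagged) worklist.appendAssumeCapacity(dep);
        }
    }
}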
// Remove them all from the decl_table.
for (regen_decl_set.items) |decl| {
const decl_name = mem.spanZ(decl.name);
const old_name_hash = Decl.hashSimpleName(decl_name);
self.decl_table.removeAssertDiscard(old_name_hash);
if (self.export_owners.remove(decl)) |kv| {
for (kv.value) |exp| {
self.bin_file.deleteExport(exp.link);
}
freeExportList(self.allocator, kv.value);
}
}
// Regenerate the decls in the set.
const zir_module = try self.getSrcModule(root_scope);
while (regen_decl_set.popOrNull()) |decl| {
const decl_name = mem.spanZ(decl.name);
std.debug.warn("regenerating {}\n", .{decl_name});
const saved_link = decl.link;
const decl_exports_entry = if (self.decl_exports.remove(decl)) |kv| kv.value else null;
const src_decl = zir_module.findDecl(decl_name) orelse {
@panic("TODO treat this as a deleted decl");
};
decl.destroy(self.allocator);
const new_decl = self.resolveDecl(
&root_scope.base,
src_decl,
saved_link,
) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => continue,
};
if (decl_exports_entry) |entry| {
const gop = try self.decl_exports.getOrPut(new_decl);
if (gop.found_existing) {
self.allocator.free(entry);
} else {
gop.kv.value = entry;
}
}
}
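The `decl_exports` entry is detached before the old decl is destroyed and re-attached to the regenerated decl via `getOrPut`, freeing the detached list if regeneration already produced a fresh one. A tiny test of the `getOrPut` result type this code relies on (`found_existing` plus a `kv` pointer, per the std API of this era):

const std = @import("std");

test "getOrPut either inserts or reveals an existing entry" {
    var map = std.AutoHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();
    const gop = try map.getOrPut(42);
    if (!gop.found_existing) gop.kv.value = 1; // adopt the detached value
    std.testing.expect(map.getValue(42).? == 1);
}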
for (exports_to_resolve.items) |export_inst| {
_ = try self.resolveDecl(&root_scope.base, export_inst, link.ElfFile.TextBlock.empty);
}
},
}
}
@ -906,11 +1029,13 @@ fn resolveDecl(
}
}
/// Declares a dependency on the decl.
fn resolveCompleteDecl(self: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!*Decl {
const decl = try self.resolveDecl(scope, old_inst, link.ElfFile.TextBlock.empty);
switch (decl.analysis) {
.initial_in_progress => unreachable,
.repeat_in_progress => unreachable,
.repeat_in_progress_novalue => unreachable,
.initial_dependency_failure,
.repeat_dependency_failure,
.initial_sema_failure,
@ -919,8 +1044,12 @@ fn resolveCompleteDecl(self: *Module, scope: *Scope, old_inst: *zir.Inst) InnerE
.codegen_failure_retryable,
=> return error.AnalysisFail,
.complete => return decl,
.complete => {},
}
if (scope.decl()) |scope_decl| {
try self.declareDeclDependency(scope_decl, decl);
}
return decl;
}
fn resolveInst(self: *Module, scope: *Scope, old_inst: *zir.Inst) InnerError!*Inst {
@ -998,7 +1127,7 @@ fn analyzeExport(self: *Module, scope: *Scope, export_inst: *zir.Inst.Export) In
const new_export = try self.allocator.create(Export);
errdefer self.allocator.destroy(new_export);
const owner_decl = scope.decl();
const owner_decl = scope.decl().?;
new_export.* = .{
.options = .{ .name = symbol_name },
@ -1327,7 +1456,7 @@ fn analyzeInstFn(self: *Module, scope: *Scope, fn_inst: *zir.Inst.Fn) InnerError
new_func.* = .{
.fn_type = fn_type,
.analysis = .{ .queued = fn_inst },
.owner_decl = scope.decl(),
.owner_decl = scope.decl().?,
};
const fn_payload = try scope.arena().create(Value.Payload.Function);
fn_payload.* = .{ .func = new_func };

View File

@ -126,6 +126,8 @@ pub const ElfFile = struct {
local_symbols: std.ArrayListUnmanaged(elf.Elf64_Sym) = std.ArrayListUnmanaged(elf.Elf64_Sym){},
global_symbols: std.ArrayListUnmanaged(elf.Elf64_Sym) = std.ArrayListUnmanaged(elf.Elf64_Sym){},
global_symbol_free_list: std.ArrayListUnmanaged(usize) = std.ArrayListUnmanaged(usize){},
/// Same order as in the file. The value is the absolute vaddr value.
/// If the vaddr of the executable program header changes, the entire
/// offset table needs to be rewritten.
@ -153,7 +155,7 @@ pub const ElfFile = struct {
/// overcapacity can be negative. A simple way to have negative overcapacity is to
/// allocate a fresh text block, which will have ideal capacity, and then grow it
/// by 1 byte. It will then have -1 overcapacity.
free_list: std.ArrayListUnmanaged(*TextBlock) = std.ArrayListUnmanaged(*TextBlock){},
text_block_free_list: std.ArrayListUnmanaged(*TextBlock) = std.ArrayListUnmanaged(*TextBlock){},
last_text_block: ?*TextBlock = null,
/// `alloc_num / alloc_den` is the factor of padding when allocating.
@ -229,6 +231,8 @@ pub const ElfFile = struct {
self.shstrtab.deinit(self.allocator);
self.local_symbols.deinit(self.allocator);
self.global_symbols.deinit(self.allocator);
self.global_symbol_free_list.deinit(self.allocator);
self.text_block_free_list.deinit(self.allocator);
self.offset_table.deinit(self.allocator);
if (self.owns_file_handle) {
if (self.file) |f| f.close();
@ -775,12 +779,12 @@ pub const ElfFile = struct {
var already_have_free_list_node = false;
{
var i: usize = 0;
while (i < self.free_list.items.len) {
if (self.free_list.items[i] == text_block) {
_ = self.free_list.swapRemove(i);
while (i < self.text_block_free_list.items.len) {
if (self.text_block_free_list.items[i] == text_block) {
_ = self.text_block_free_list.swapRemove(i);
continue;
}
if (self.free_list.items[i] == text_block.prev) {
if (self.text_block_free_list.items[i] == text_block.prev) {
already_have_free_list_node = true;
}
i += 1;
@ -797,7 +801,7 @@ pub const ElfFile = struct {
if (!already_have_free_list_node and prev.freeListEligible(self.*)) {
// The free list is a heuristic; it doesn't have to be perfect, so we can
// ignore the OOM here.
self.free_list.append(self.allocator, prev) catch {};
self.text_block_free_list.append(self.allocator, prev) catch {};
}
} else {
text_block.prev = null;
@ -840,8 +844,8 @@ pub const ElfFile = struct {
// The list is unordered. We'll just take the first thing that works.
const vaddr = blk: {
var i: usize = 0;
while (i < self.free_list.items.len) {
const big_block = self.free_list.items[i];
while (i < self.text_block_free_list.items.len) {
const big_block = self.text_block_free_list.items[i];
// We now have a pointer to a live text block that has too much capacity.
// Is it enough that we could fit this new text block?
const sym = self.local_symbols.items[big_block.local_sym_index];
@ -856,7 +860,7 @@ pub const ElfFile = struct {
// should be deleted because the block that it points to has grown to take up
// more of the extra capacity.
if (!big_block.freeListEligible(self.*)) {
_ = self.free_list.swapRemove(i);
_ = self.text_block_free_list.swapRemove(i);
} else {
i += 1;
}
@ -932,7 +936,7 @@ pub const ElfFile = struct {
text_block.next = null;
}
if (free_list_removal) |i| {
_ = self.free_list.swapRemove(i);
_ = self.text_block_free_list.swapRemove(i);
}
return vaddr;
}
@ -958,11 +962,18 @@ pub const ElfFile = struct {
self.offset_table_count_dirty = true;
//std.debug.warn("allocating symbol index {}\n", .{local_sym_index});
std.debug.warn("allocating symbol index {} for {}\n", .{local_sym_index, decl.name});
decl.link.local_sym_index = @intCast(u32, local_sym_index);
decl.link.offset_table_index = @intCast(u32, offset_table_index);
}
pub fn freeDecl(self: *ElfFile, decl: *Module.Decl) void {
self.freeTextBlock(&decl.link);
if (decl.link.local_sym_index != 0) {
@panic("TODO free the symbol entry and offset table entry");
}
}
pub fn updateDecl(self: *ElfFile, module: *Module, decl: *Module.Decl) !void {
var code_buffer = std.ArrayList(u8).init(self.allocator);
defer code_buffer.deinit();
@ -993,11 +1004,11 @@ pub const ElfFile = struct {
!mem.isAlignedGeneric(u64, local_sym.st_value, required_alignment);
if (need_realloc) {
const vaddr = try self.growTextBlock(&decl.link, code.len, required_alignment);
//std.debug.warn("growing {} from 0x{x} to 0x{x}\n", .{ decl.name, local_sym.st_value, vaddr });
std.debug.warn("growing {} from 0x{x} to 0x{x}\n", .{ decl.name, local_sym.st_value, vaddr });
if (vaddr != local_sym.st_value) {
local_sym.st_value = vaddr;
//std.debug.warn(" (writing new offset table entry)\n", .{});
std.debug.warn(" (writing new offset table entry)\n", .{});
self.offset_table.items[decl.link.offset_table_index] = vaddr;
try self.writeOffsetTableEntry(decl.link.offset_table_index);
}
@ -1015,7 +1026,7 @@ pub const ElfFile = struct {
const decl_name = mem.spanZ(decl.name);
const name_str_index = try self.makeString(decl_name);
const vaddr = try self.allocateTextBlock(&decl.link, code.len, required_alignment);
//std.debug.warn("allocated text block for {} at 0x{x}\n", .{ decl_name, vaddr });
std.debug.warn("allocated text block for {} at 0x{x}\n", .{ decl_name, vaddr });
errdefer self.freeTextBlock(&decl.link);
local_sym.* = .{
@ -1048,7 +1059,10 @@ pub const ElfFile = struct {
decl: *const Module.Decl,
exports: []const *Module.Export,
) !void {
// In addition to ensuring capacity for global_symbols, we also ensure capacity for freeing all of
// them, so that deleting exports is guaranteed to succeed.
try self.global_symbols.ensureCapacity(self.allocator, self.global_symbols.items.len + exports.len);
try self.global_symbol_free_list.ensureCapacity(self.allocator, self.global_symbols.items.len);
const typed_value = decl.typed_value.most_recent.typed_value;
if (decl.link.local_sym_index == 0) return;
const decl_sym = self.local_symbols.items[decl.link.local_sym_index];
@ -1095,22 +1109,30 @@ pub const ElfFile = struct {
};
} else {
const name = try self.makeString(exp.options.name);
const i = self.global_symbols.items.len;
self.global_symbols.appendAssumeCapacity(.{
const i = if (self.global_symbol_free_list.popOrNull()) |i| i else blk: {
_ = self.global_symbols.addOneAssumeCapacity();
break :blk self.global_symbols.items.len - 1;
};
self.global_symbols.items[i] = .{
.st_name = name,
.st_info = (stb_bits << 4) | stt_bits,
.st_other = 0,
.st_shndx = self.text_section_index.?,
.st_value = decl_sym.st_value,
.st_size = decl_sym.st_size,
});
errdefer self.global_symbols.shrink(self.allocator, self.global_symbols.items.len - 1);
};
exp.link.sym_index = @intCast(u32, i);
}
}
}
pub fn deleteExport(self: *ElfFile, exp: Export) void {
const sym_index = exp.sym_index orelse return;
self.global_symbol_free_list.appendAssumeCapacity(sym_index);
self.global_symbols.items[sym_index].st_info = 0;
}
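`deleteExport` must be infallible because it runs while tearing down decls that are about to be regenerated; that is why `updateDeclExports` reserves free-list capacity ahead of time, and why allocation prefers a recycled index over growing the symbol list. The pattern, reduced to a hypothetical index table:

const std = @import("std");

const IndexTable = struct {
    slots: std.ArrayListUnmanaged(u64) = std.ArrayListUnmanaged(u64){},
    free_list: std.ArrayListUnmanaged(usize) = std.ArrayListUnmanaged(usize){},

    // Assumes the caller has already ensureCapacity'd both lists, as
    // updateDeclExports does above, so neither call here can fail.
    fn allocSlot(self: *IndexTable) usize {
        return self.free_list.popOrNull() orelse blk: {
            _ = self.slots.addOneAssumeCapacity();
            break :blk self.slots.items.len - 1;
        };
    }

    fn freeSlot(self: *IndexTable, i: usize) void {
        self.free_list.appendAssumeCapacity(i);
        self.slots.items[i] = 0; // tombstone, like zeroing st_info above
    }
};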
fn writeProgHeader(self: *ElfFile, index: usize) !void {
const foreign_endian = self.options.target.cpu.arch.endian() != std.Target.current.cpu.arch.endian();
const offset = self.program_headers.items[index].p_offset;

View File

@ -442,6 +442,16 @@ pub const Module = struct {
const InstPtrTable = std.AutoHashMap(*Inst, struct { index: usize, fn_body: ?*Module.Body });
/// TODO Look into making a table to speed this up.
pub fn findDecl(self: Module, name: []const u8) ?*Inst {
for (self.decls) |decl| {
if (mem.eql(u8, decl.name, name)) {
return decl;
}
}
return null;
}
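The TODO above points at the obvious upgrade: build a name-to-decl table once, then findDecl becomes a hash lookup instead of an O(n) scan per query. A hedged sketch with a hypothetical `buildDeclIndex` helper (`Inst` stands in for zir.Inst and its `name` field; map API per the std of this era):

const std = @import("std");
const Allocator = std.mem.Allocator;

const Inst = struct { name: []const u8 };

fn buildDeclIndex(allocator: *Allocator, decls: []const *Inst) !std.StringHashMap(*Inst) {
    var index = std.StringHashMap(*Inst).init(allocator);
    errdefer index.deinit();
    for (decls) |decl| {
        // Top-level decl names are unique within a module.
        try index.putNoClobber(decl.name, decl);
    }
    return index;
}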
/// The allocator is used for temporary storage, but this function always returns
/// with no resources allocated.
pub fn writeToStream(self: Module, allocator: *Allocator, stream: var) !void {

View File

@ -200,73 +200,73 @@ pub fn addCases(ctx: *TestContext) void {
\\@9 = str("_start")
\\@10 = ref(@9)
\\@11 = export(@10, @start)
// ,
// \\@noreturn = primitive(noreturn)
// \\@void = primitive(void)
// \\@usize = primitive(usize)
// \\@0 = int(0)
// \\@1 = int(1)
// \\@2 = int(2)
// \\@3 = int(3)
// \\
// \\@syscall_array = str("syscall")
// \\@sysoutreg_array = str("={rax}")
// \\@rax_array = str("{rax}")
// \\@rdi_array = str("{rdi}")
// \\@rcx_array = str("rcx")
// \\@r11_array = str("r11")
// \\@rdx_array = str("{rdx}")
// \\@rsi_array = str("{rsi}")
// \\@memory_array = str("memory")
// \\@len_array = str("len")
// \\
// \\@msg = str("Hello, world!\n")
// \\@msg2 = str("Editing the same msg2 decl but this time with a much longer message which will\ncause the data to need to be relocated in virtual address space.\n")
// \\
// \\@start_fnty = fntype([], @noreturn, cc=Naked)
// \\@start = fn(@start_fnty, {
// \\ %SYS_exit_group = int(231)
// \\ %exit_code = as(@usize, @0)
// \\
// \\ %syscall = ref(@syscall_array)
// \\ %sysoutreg = ref(@sysoutreg_array)
// \\ %rax = ref(@rax_array)
// \\ %rdi = ref(@rdi_array)
// \\ %rcx = ref(@rcx_array)
// \\ %rdx = ref(@rdx_array)
// \\ %rsi = ref(@rsi_array)
// \\ %r11 = ref(@r11_array)
// \\ %memory = ref(@memory_array)
// \\
// \\ %SYS_write = as(@usize, @1)
// \\ %STDOUT_FILENO = as(@usize, @1)
// \\
// \\ %msg_ptr = ref(@msg2)
// \\ %msg_addr = ptrtoint(%msg_ptr)
// \\
// \\ %len_name = ref(@len_array)
// \\ %msg_len_ptr = fieldptr(%msg_ptr, %len_name)
// \\ %msg_len = deref(%msg_len_ptr)
// \\ %rc_write = asm(%syscall, @usize,
// \\ volatile=1,
// \\ output=%sysoutreg,
// \\ inputs=[%rax, %rdi, %rsi, %rdx],
// \\ clobbers=[%rcx, %r11, %memory],
// \\ args=[%SYS_write, %STDOUT_FILENO, %msg_addr, %msg_len])
// \\
// \\ %rc_exit = asm(%syscall, @usize,
// \\ volatile=1,
// \\ output=%sysoutreg,
// \\ inputs=[%rax, %rdi],
// \\ clobbers=[%rcx, %r11, %memory],
// \\ args=[%SYS_exit_group, %exit_code])
// \\
// \\ %99 = unreachable()
// \\});
// \\
// \\@9 = str("_start")
// \\@10 = ref(@9)
// \\@11 = export(@10, @start)
,
\\@noreturn = primitive(noreturn)
\\@void = primitive(void)
\\@usize = primitive(usize)
\\@0 = int(0)
\\@1 = int(1)
\\@2 = int(2)
\\@3 = int(3)
\\
\\@syscall_array = str("syscall")
\\@sysoutreg_array = str("={rax}")
\\@rax_array = str("{rax}")
\\@rdi_array = str("{rdi}")
\\@rcx_array = str("rcx")
\\@r11_array = str("r11")
\\@rdx_array = str("{rdx}")
\\@rsi_array = str("{rsi}")
\\@memory_array = str("memory")
\\@len_array = str("len")
\\
\\@msg = str("Hello, world!\n")
\\@msg2 = str("Editing the same msg2 decl but this time with a much longer message which will\ncause the data to need to be relocated in virtual address space.\n")
\\
\\@start_fnty = fntype([], @noreturn, cc=Naked)
\\@start = fn(@start_fnty, {
\\ %SYS_exit_group = int(231)
\\ %exit_code = as(@usize, @0)
\\
\\ %syscall = ref(@syscall_array)
\\ %sysoutreg = ref(@sysoutreg_array)
\\ %rax = ref(@rax_array)
\\ %rdi = ref(@rdi_array)
\\ %rcx = ref(@rcx_array)
\\ %rdx = ref(@rdx_array)
\\ %rsi = ref(@rsi_array)
\\ %r11 = ref(@r11_array)
\\ %memory = ref(@memory_array)
\\
\\ %SYS_write = as(@usize, @1)
\\ %STDOUT_FILENO = as(@usize, @1)
\\
\\ %msg_ptr = ref(@msg2)
\\ %msg_addr = ptrtoint(%msg_ptr)
\\
\\ %len_name = ref(@len_array)
\\ %msg_len_ptr = fieldptr(%msg_ptr, %len_name)
\\ %msg_len = deref(%msg_len_ptr)
\\ %rc_write = asm(%syscall, @usize,
\\ volatile=1,
\\ output=%sysoutreg,
\\ inputs=[%rax, %rdi, %rsi, %rdx],
\\ clobbers=[%rcx, %r11, %memory],
\\ args=[%SYS_write, %STDOUT_FILENO, %msg_addr, %msg_len])
\\
\\ %rc_exit = asm(%syscall, @usize,
\\ volatile=1,
\\ output=%sysoutreg,
\\ inputs=[%rax, %rdi],
\\ clobbers=[%rcx, %r11, %memory],
\\ args=[%SYS_exit_group, %exit_code])
\\
\\ %99 = unreachable()
\\});
\\
\\@9 = str("_start")
\\@10 = ref(@9)
\\@11 = export(@10, @start)
},
&[_][]const u8{
\\Hello, world!
@ -274,10 +274,10 @@ pub fn addCases(ctx: *TestContext) void {
,
\\HELL WORLD
\\
// ,
// \\Editing the same msg2 decl but this time with a much longer message which will
// \\cause the data to need to be relocated in virtual address space.
// \\
,
\\Editing the same msg2 decl but this time with a much longer message which will
\\cause the data to need to be relocated in virtual address space.
\\
},
);