Merge pull request #7231 from kubkon/stage2-arm-macos
stage2: Hello, Silicon!
commit 3bc1c719bd
@ -1384,10 +1384,10 @@ pub const CSTYPE_INDEX_REQUIREMENTS: u32 = 0x00000002;
/// Compat with amfi
pub const CSTYPE_INDEX_ENTITLEMENTS: u32 = 0x00000005;

pub const CS_HASHTYPE_SHA1: u32 = 1;
pub const CS_HASHTYPE_SHA256: u32 = 2;
pub const CS_HASHTYPE_SHA256_TRUNCATED: u32 = 3;
pub const CS_HASHTYPE_SHA384: u32 = 4;
pub const CS_HASHTYPE_SHA1: u8 = 1;
pub const CS_HASHTYPE_SHA256: u8 = 2;
pub const CS_HASHTYPE_SHA256_TRUNCATED: u8 = 3;
pub const CS_HASHTYPE_SHA384: u8 = 4;

pub const CS_SHA1_LEN: u32 = 20;
pub const CS_SHA256_LEN: u32 = 32;
@ -1402,6 +1402,10 @@ pub const CS_SIGNER_TYPE_UNKNOWN: u32 = 0;
pub const CS_SIGNER_TYPE_LEGACYVPN: u32 = 5;
pub const CS_SIGNER_TYPE_MAC_APP_STORE: u32 = 6;

pub const CS_ADHOC: u32 = 0x2;

pub const CS_EXECSEG_MAIN_BINARY: u32 = 0x1;

/// This CodeDirectory is tailored specifically at version 0x20400.
pub const CodeDirectory = extern struct {
    /// Magic number (CSMAGIC_CODEDIRECTORY)
@ -1446,6 +1450,18 @@ pub const CodeDirectory = extern struct {
    /// Unused (must be zero)
    spare2: u32,

    ///
    scatterOffset: u32,

    ///
    teamOffset: u32,

    ///
    spare3: u32,

    ///
    codeLimit64: u64,

    /// Offset of executable segment
    execSegBase: u64,

@ -1453,9 +1469,7 @@ pub const CodeDirectory = extern struct {
    execSegLimit: u64,

    /// Executable segment flags
    execSegFlags,

    // end_withExecSeg: [*]u8,
    execSegFlags: u64,
};

/// Structure of an embedded-signature SuperBlob
@ -1478,8 +1492,6 @@ pub const SuperBlob = extern struct {

    /// Number of index BlobIndex entries following this struct
    count: u32,

    // index: []const BlobIndex,
};

pub const GenericBlob = extern struct {
@ -1488,6 +1500,4 @@ pub const GenericBlob = extern struct {

    /// Total length of blob
    length: u32,

    // data: []const u8,
};
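These blobs are serialized big-endian when embedded in the binary. As a quick sanity check of the sizes the linker code later in this PR relies on (12-byte SuperBlob header, 8-byte BlobIndex, 88-byte CodeDirectory at version 0x20400), here is a minimal sketch; the BlobIndex layout (a type/offset pair of u32 fields) is assumed rather than shown in this hunk:

```zig
const std = @import("std");
const macho = std.macho;
const testing = std.testing;

test "code signing blob sizes" {
    // SuperBlob: magic, length, count — three big-endian u32 fields.
    testing.expectEqual(@as(usize, 12), @sizeOf(macho.SuperBlob));
    // BlobIndex: type, offset (assumed layout, two u32 fields).
    testing.expectEqual(@as(usize, 8), @sizeOf(macho.BlobIndex));
    // CodeDirectory as declared above: 9 u32s + 4 u8s + 4 u32s + 4 u64s = 88 bytes.
    testing.expectEqual(@as(usize, 88), @sizeOf(macho.CodeDirectory));
}
```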
143 src/codegen.zig
@ -1683,15 +1683,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const got_addr = got.addr + func.owner_decl.link.macho.offset_table_index * @sizeOf(u64);
switch (arch) {
    .x86_64 => {
        // Here, we store the got address in %rax, and then call %rax
        // movabsq [addr], %rax
        try self.genSetReg(inst.base.src, .rax, .{ .memory = got_addr });
        // callq *%rax
        try self.code.ensureCapacity(self.code.items.len + 2);
        self.code.appendSliceAssumeCapacity(&[2]u8{ 0xff, 0xd0 });
    },
    .aarch64 => {
        try self.genSetReg(inst.base.src, .x30, .{ .memory = got_addr });
        // blr x30
        writeInt(u32, try self.code.addManyAsArray(4), Instruction.blr(.x30).toU32());
    },
    else => unreachable, // unsupported architecture on MachO
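For reference, `blr x30` (branch with link to register) has a fixed 32-bit encoding with the register number in bits 9..5. A small self-contained sketch of that encoding, independent of the `Instruction` helpers added in this PR:

```zig
const std = @import("std");
const testing = std.testing;

// BLR Xn: fixed opcode 0xD63F0000 with the register number in bits [9:5].
fn blrEncoding(reg_num: u5) u32 {
    return 0xD63F0000 | (@as(u32, reg_num) << 5);
}

test "blr x30 encoding" {
    // x30 is the link register; the bytes are emitted little-endian into the code buffer.
    testing.expectEqual(@as(u32, 0xD63F03C0), blrEncoding(30));
}
```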
@ -2586,10 +2584,82 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
},
.register => return self.fail(src, "TODO implement genSetReg for aarch64 {}", .{mcv}),
.memory => |addr| {
    // The value is in memory at a hard-coded address.
    // If the type is a pointer, it means the pointer address is at this memory location.
    try self.genSetReg(src, reg, .{ .immediate = addr });
    mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ldr(reg, .{ .rn = reg }).toU32());
    if (self.bin_file.options.pie) {
        // For MachO, the binary, with the exception of object files, has to be a PIE.
        // Therefore we cannot load an absolute address.
        // Instead, we need to make use of PC-relative addressing.
        // TODO This needs to be optimised in the stack usage (perhaps use a shadow stack
        // like described here:
        // https://community.arm.com/developer/ip-products/processors/b/processors-ip-blog/posts/using-the-stack-in-aarch64-implementing-push-and-pop)
        // TODO As far as branching is concerned, instead of saving the return address
        // in a register, I'm thinking here of imitating x86_64, and having the address
        // passed on the stack.
        if (reg.id() == 0) { // x0 is special-cased
            // str x28, [sp, #-16]
            mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.str(.x28, Register.sp, .{
                .offset = Instruction.Offset.imm_pre_index(-16),
            }).toU32());
            // adr x28, #8
            mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.adr(.x28, 8).toU32());
            if (self.bin_file.cast(link.File.MachO)) |macho_file| {
                try macho_file.pie_fixups.append(self.bin_file.allocator, .{
                    .address = addr,
                    .start = self.code.items.len,
                    .len = 4,
                });
            } else {
                return self.fail(src, "TODO implement genSetReg for PIE on this platform", .{});
            }
            // b [label]
            mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.b(0).toU32());
            // mov r, x0
            mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.orr(reg, .x0, Instruction.RegisterShift.none()).toU32());
            // ldr x28, [sp], #16
            mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ldr(.x28, .{
                .rn = Register.sp,
                .offset = Instruction.Offset.imm_post_index(16),
            }).toU32());
        } else {
            // str x28, [sp, #-16]
            mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.str(.x28, Register.sp, .{
                .offset = Instruction.Offset.imm_pre_index(-16),
            }).toU32());
            // str x0, [sp, #-16]
            mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.str(.x0, Register.sp, .{
                .offset = Instruction.Offset.imm_pre_index(-16),
            }).toU32());
            // adr x28, #8
            mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.adr(.x28, 8).toU32());
            if (self.bin_file.cast(link.File.MachO)) |macho_file| {
                try macho_file.pie_fixups.append(self.bin_file.allocator, .{
                    .address = addr,
                    .start = self.code.items.len,
                    .len = 4,
                });
            } else {
                return self.fail(src, "TODO implement genSetReg for PIE on this platform", .{});
            }
            // b [label]
            mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.b(0).toU32());
            // mov r, x0
            mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.orr(reg, .x0, Instruction.RegisterShift.none()).toU32());
            // ldr x0, [sp], #16
            mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ldr(.x0, .{
                .rn = Register.sp,
                .offset = Instruction.Offset.imm_post_index(16),
            }).toU32());
            // ldr x28, [sp], #16
            mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ldr(.x28, .{
                .rn = Register.sp,
                .offset = Instruction.Offset.imm_post_index(16),
            }).toU32());
        }
    } else {
        // The value is in memory at a hard-coded address.
        // If the type is a pointer, it means the pointer address is at this memory location.
        try self.genSetReg(src, reg, .{ .immediate = addr });
        mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ldr(reg, .{ .rn = reg }).toU32());
    }
},
else => return self.fail(src, "TODO implement genSetReg for aarch64 {}", .{mcv}),
},
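The `b 0` emitted above is only a 4-byte placeholder; the recorded `pie_fixups` entry tells the MachO linker where it lives and which absolute address it should reach, and the linker later rewrites it with a PC-relative branch to the GOT thunk. A minimal sketch of that patching step, with the B-instruction encoding written inline (the PR itself goes through `aarch64.Instruction.b`):

```zig
const std = @import("std");

/// Sketch: overwrite the 4-byte `b 0` placeholder at `fixup_start` with an
/// unconditional branch to `target_addr`. Assumes the displacement is a
/// multiple of 4 and fits into the 28-bit branch range.
fn applyAarch64PieFixup(code: []u8, fixup_start: usize, this_addr: u64, target_addr: u64) void {
    const displacement = @intCast(i28, @intCast(i64, target_addr) - @intCast(i64, this_addr));
    // B <imm26>: opcode 0b000101 in the top six bits, signed word offset below.
    const imm26 = @truncate(u26, @bitCast(u28, displacement) >> 2);
    const inst = (@as(u32, 0b000101) << 26) | imm26;
    std.mem.writeIntSliceLittle(u32, code[fixup_start..][0..4], inst);
}
```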
@ -2766,7 +2836,64 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
self.code.appendSliceAssumeCapacity(&[_]u8{ 0x8B, R });
},
.memory => |x| {
    if (x <= math.maxInt(u32)) {
    if (self.bin_file.options.pie) {
        // For MachO, the binary, with the exception of object files, has to be a PIE.
        // Therefore, we cannot load an absolute address.
        assert(x > math.maxInt(u32)); // 32bit direct addressing is not supported by MachO.
        // The plan here is to use unconditional relative jump to GOT entry, where we store
        // pre-calculated and stored effective address to load into the target register.
        // We leave the actual displacement information empty (0-padded) and fix it up
        // later in the linker.
        if (reg.id() == 0) { // %rax is special-cased
            try self.code.ensureCapacity(self.code.items.len + 5);
            if (self.bin_file.cast(link.File.MachO)) |macho_file| {
                try macho_file.pie_fixups.append(self.bin_file.allocator, .{
                    .address = x,
                    .start = self.code.items.len,
                    .len = 5,
                });
            } else {
                return self.fail(src, "TODO implement genSetReg for PIE on this platform", .{});
            }
            // call [label]
            self.code.appendSliceAssumeCapacity(&[_]u8{
                0xE8,
                0x0,
                0x0,
                0x0,
                0x0,
            });
        } else {
            try self.code.ensureCapacity(self.code.items.len + 10);
            // push %rax
            self.code.appendSliceAssumeCapacity(&[_]u8{0x50});
            if (self.bin_file.cast(link.File.MachO)) |macho_file| {
                try macho_file.pie_fixups.append(self.bin_file.allocator, .{
                    .address = x,
                    .start = self.code.items.len,
                    .len = 5,
                });
            } else {
                return self.fail(src, "TODO implement genSetReg for PIE on this platform", .{});
            }
            // call [label]
            self.code.appendSliceAssumeCapacity(&[_]u8{
                0xE8,
                0x0,
                0x0,
                0x0,
                0x0,
            });
            // mov %r, %rax
            self.code.appendSliceAssumeCapacity(&[_]u8{
                0x48,
                0x89,
                0xC0 | @as(u8, reg.id()),
            });
            // pop %rax
            self.code.appendSliceAssumeCapacity(&[_]u8{0x58});
        }
    } else if (x <= math.maxInt(u32)) {
        // Moving from memory to a register is a variant of `8B /r`.
        // Since we're using 64-bit moves, we require a REX.
        // This variant also requires a SIB, as it would otherwise be RIP-relative.
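On x86_64 the placeholder is the 5-byte `call` (`0xE8` plus four zero bytes) recorded above; the linker later fills in the rel32 so the call lands on the GOT thunk that leaves the effective address in `%rax`. A sketch of that patch, mirroring the arithmetic used later in `MachO.updateDecl` (the function and parameter names here are illustrative):

```zig
const std = @import("std");

/// Sketch: patch the rel32 of the recorded `call` placeholder so it targets
/// `target_addr` (the GOT thunk). rel32 is relative to the end of the call,
/// hence the subtraction of `fixup_len`.
fn applyX8664PieFixup(code: []u8, fixup_start: usize, fixup_len: usize, this_addr: u64, target_addr: u64) void {
    const displacement = @intCast(u32, target_addr - this_addr - fixup_len);
    const placeholder = code[fixup_start + fixup_len - @sizeOf(u32) ..][0..@sizeOf(u32)];
    std.mem.writeIntSliceLittle(u32, placeholder, displacement);
}
```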
@ -19,6 +19,8 @@ pub const Register = enum(u6) {
    w16, w17, w18, w19, w20, w21, w22, w23,
    w24, w25, w26, w27, w28, w29, w30, wzr,

    pub const sp = .xzr;

    pub fn id(self: Register) u5 {
        return @truncate(u5, @enumToInt(self));
    }
@ -195,6 +197,17 @@ test "FloatingPointRegister.toX" {

/// Represents an instruction in the AArch64 instruction set
pub const Instruction = union(enum) {
    OrShiftedRegister: packed struct {
        rd: u5,
        rn: u5,
        imm6: u6,
        rm: u5,
        n: u1,
        shift: u2,
        fixed: u5 = 0b01010,
        opc: u2 = 0b01,
        sf: u1,
    },
    MoveWideImmediate: packed struct {
        rd: u5,
        imm16: u16,
@ -251,6 +264,7 @@ pub const Instruction = union(enum) {

    pub fn toU32(self: Instruction) u32 {
        return switch (self) {
            .OrShiftedRegister => |v| @bitCast(u32, v),
            .MoveWideImmediate => |v| @bitCast(u32, v),
            .PCRelativeAddress => |v| @bitCast(u32, v),
            .LoadStoreRegister => |v| @bitCast(u32, v),
@ -379,8 +393,65 @@ pub const Instruction = union(enum) {
        }
    };

    pub const RegisterShift = struct {
        rn: u5,
        imm6: u6,
        shift: enum(u2) {
            Lsl = 0,
            Lsr = 1,
            Asr = 2,
            Ror = 3,
        },

        pub fn none() RegisterShift {
            return .{
                .rn = 0b11111,
                .imm6 = 0,
                .shift = .Lsl,
            };
        }
    };

    // Helper functions for assembly syntax functions

    fn orShiftedRegister(
        rd: Register,
        rm: Register,
        shift: RegisterShift,
        invert: bool,
    ) Instruction {
        const n: u1 = if (invert) 1 else 0;
        switch (rd.size()) {
            32 => {
                return Instruction{
                    .OrShiftedRegister = .{
                        .rd = rd.id(),
                        .rn = shift.rn,
                        .imm6 = shift.imm6,
                        .rm = rm.id(),
                        .n = n,
                        .shift = @enumToInt(shift.shift),
                        .sf = 0,
                    },
                };
            },
            64 => {
                return Instruction{
                    .OrShiftedRegister = .{
                        .rd = rd.id(),
                        .rn = shift.rn,
                        .imm6 = shift.imm6,
                        .rm = rm.id(),
                        .n = n,
                        .shift = @enumToInt(shift.shift),
                        .sf = 1,
                    },
                };
            },
            else => unreachable, // unexpected register size
        }
    }

    fn moveWideImmediate(
        opc: u2,
        rd: Register,
@ -543,6 +614,16 @@ pub const Instruction = union(enum) {
        };
    }

    // Bitwise (inclusive) OR of a register value

    pub fn orr(rd: Register, rm: Register, shift: RegisterShift) Instruction {
        return orShiftedRegister(rd, rm, shift, false);
    }

    pub fn orn(rd: Register, rm: Register, shift: RegisterShift) Instruction {
        return orShiftedRegister(rd, rm, shift, true);
    }

    // Move wide (immediate)

    pub fn movn(rd: Register, imm16: u16, shift: u6) Instruction {
@ -653,6 +734,14 @@ test "serialize instructions" {
    };

    const testcases = [_]Testcase{
        .{ // orr x0 x1
            .inst = Instruction.orr(.x0, .x1, Instruction.RegisterShift.none()),
            .expected = 0b1_01_01010_00_0_00001_000000_11111_00000,
        },
        .{ // orn x0 x1
            .inst = Instruction.orn(.x0, .x1, Instruction.RegisterShift.none()),
            .expected = 0b1_01_01010_00_1_00001_000000_11111_00000,
        },
        .{ // movz x1 #4
            .inst = Instruction.movz(.x1, 4, 0),
            .expected = 0b1_10_100101_00_0000000000000100_00001,
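`orr rd, xzr, rm` with no shift is the architectural alias for `mov rd, rm`, which is why the codegen above materializes register moves with `Instruction.orr(reg, .x0, RegisterShift.none())`. A standalone sketch of the 64-bit encoding, following the same bit layout as the `OrShiftedRegister` packed struct:

```zig
const std = @import("std");
const testing = std.testing;

// sf=1, opc=ORR, fixed=0b01010, no shift, rn=xzr: the canonical register move.
fn movRegister(rd: u5, rm: u5) u32 {
    const xzr: u32 = 0b11111;
    return (@as(u32, 1) << 31) | (@as(u32, 0b01) << 29) | (@as(u32, 0b01010) << 24) |
        (@as(u32, rm) << 16) | (xzr << 5) | rd;
}

test "orr xd, xzr, xm encodes mov xd, xm" {
    testing.expectEqual(@as(u32, 0xAA0003E5), movRegister(5, 0)); // mov x5, x0
}
```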
24 src/link.zig
@ -238,7 +238,29 @@ pub const File = struct {

    pub fn makeExecutable(base: *File) !void {
        switch (base.tag) {
            .coff, .elf, .macho => if (base.file) |f| {
            .macho => if (base.file) |f| {
                if (base.intermediary_basename != null) {
                    // The file we have open is not the final file that we want to
                    // make executable, so we don't have to close it.
                    return;
                }
                if (comptime std.Target.current.isDarwin() and std.Target.current.cpu.arch == .aarch64) {
                    if (base.options.target.cpu.arch != .aarch64) return; // If we're not targeting aarch64, nothing to do.
                    // XNU starting with Big Sur running on arm64 is caching inodes of running binaries.
                    // Any change to the binary will effectively invalidate the kernel's cache
                    // resulting in a SIGKILL on each subsequent run. Since when doing incremental
                    // linking we're modifying a binary in-place, this will end up with the kernel
                    // killing it on every subsequent run. To circumvent it, we will copy the file
                    // into a new inode, remove the original file, and rename the copy to match
                    // the original file. This is super messy, but there doesn't seem to be any other
                    // way to please the XNU.
                    const emit = base.options.emit orelse return;
                    try emit.directory.handle.copyFile(emit.sub_path, emit.directory.handle, emit.sub_path, .{});
                }
                f.close();
                base.file = null;
            },
            .coff, .elf => if (base.file) |f| {
                if (base.intermediary_basename != null) {
                    // The file we have open is not the final file that we want to
                    // make executable, so we don't have to close it.
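The `copyFile` onto the binary's own path above is what breaks the stale kernel cache: std.fs copies through a temporary file and renames it into place, so the output ends up under a fresh inode. A standalone sketch of the same idea (the path handling is illustrative; the linker uses `base.options.emit`):

```zig
const std = @import("std");

/// Re-materialize `sub_path` under a new inode so XNU drops its cached
/// code-signature state for the old one (Big Sur on arm64 otherwise
/// SIGKILLs the next run of an in-place-modified binary).
fn refreshInode(dir: std.fs.Dir, sub_path: []const u8) !void {
    try dir.copyFile(sub_path, dir, sub_path, .{});
}
```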
@ -7,6 +7,7 @@ const fs = std.fs;
|
||||
const log = std.log.scoped(.link);
|
||||
const macho = std.macho;
|
||||
const codegen = @import("../codegen.zig");
|
||||
const aarch64 = @import("../codegen/aarch64.zig");
|
||||
const math = std.math;
|
||||
const mem = std.mem;
|
||||
|
||||
@ -21,6 +22,7 @@ const Cache = @import("../Cache.zig");
|
||||
const target_util = @import("../target.zig");
|
||||
|
||||
const Trie = @import("MachO/Trie.zig");
|
||||
const CodeSignature = @import("MachO/CodeSignature.zig");
|
||||
|
||||
pub const base_tag: File.Tag = File.Tag.macho;
|
||||
|
||||
@ -33,6 +35,8 @@ const LoadCommand = union(enum) {
|
||||
Dylinker: macho.dylinker_command,
|
||||
Dylib: macho.dylib_command,
|
||||
EntryPoint: macho.entry_point_command,
|
||||
MinVersion: macho.version_min_command,
|
||||
SourceVersion: macho.source_version_command,
|
||||
|
||||
pub fn cmdsize(self: LoadCommand) u32 {
|
||||
return switch (self) {
|
||||
@ -44,6 +48,8 @@ const LoadCommand = union(enum) {
|
||||
.Dylinker => |x| x.cmdsize,
|
||||
.Dylib => |x| x.cmdsize,
|
||||
.EntryPoint => |x| x.cmdsize,
|
||||
.MinVersion => |x| x.cmdsize,
|
||||
.SourceVersion => |x| x.cmdsize,
|
||||
};
|
||||
}
|
||||
|
||||
@ -57,6 +63,8 @@ const LoadCommand = union(enum) {
|
||||
.Dylinker => |cmd| writeGeneric(cmd, file, offset),
|
||||
.Dylib => |cmd| writeGeneric(cmd, file, offset),
|
||||
.EntryPoint => |cmd| writeGeneric(cmd, file, offset),
|
||||
.MinVersion => |cmd| writeGeneric(cmd, file, offset),
|
||||
.SourceVersion => |cmd| writeGeneric(cmd, file, offset),
|
||||
};
|
||||
}
|
||||
|
||||
@ -68,6 +76,10 @@ const LoadCommand = union(enum) {
|
||||
|
||||
base: File,
|
||||
|
||||
/// Page size is dependent on the target cpu architecture.
|
||||
/// For x86_64 that's 4KB, whereas for aarch64, that's 16KB.
|
||||
page_size: u16,
|
||||
|
||||
/// Table of all load commands
|
||||
load_commands: std.ArrayListUnmanaged(LoadCommand) = .{},
|
||||
/// __PAGEZERO segment
|
||||
@ -96,6 +108,12 @@ function_starts_cmd_index: ?u16 = null,
|
||||
/// Specifies offset wrt __TEXT segment start address to the main entry point
|
||||
/// of the binary.
|
||||
main_cmd_index: ?u16 = null,
|
||||
/// Minimum OS version
|
||||
version_min_cmd_index: ?u16 = null,
|
||||
/// Source version
|
||||
source_version_cmd_index: ?u16 = null,
|
||||
/// Code signature
|
||||
code_signature_cmd_index: ?u16 = null,
|
||||
|
||||
/// Table of all sections
|
||||
sections: std.ArrayListUnmanaged(macho.section_64) = .{},
|
||||
@ -108,6 +126,9 @@ got_section_index: ?u16 = null,
|
||||
|
||||
entry_addr: ?u64 = null,
|
||||
|
||||
// TODO move this into each Segment aggregator
|
||||
linkedit_segment_next_offset: ?u32 = null,
|
||||
|
||||
/// Table of all local symbols
|
||||
/// Internally references string table for names (which are optional).
|
||||
local_symbols: std.ArrayListUnmanaged(macho.nlist_64) = .{},
|
||||
@ -154,6 +175,22 @@ libsystem_cmd_dirty: bool = false,
text_block_free_list: std.ArrayListUnmanaged(*TextBlock) = .{},
/// Pointer to the last allocated text block
last_text_block: ?*TextBlock = null,
/// A list of all PIE fixups required for this run of the linker.
/// Warning, this is currently NOT thread-safe. See the TODO below.
/// TODO Move this list inside `updateDecl` where it should be allocated
/// prior to calling `generateSymbol`, and then immediately deallocated
/// rather than sitting in the global scope.
pie_fixups: std.ArrayListUnmanaged(PieFixup) = .{},

pub const PieFixup = struct {
    /// Target address we wanted to address in absolute terms.
    address: u64,
    /// Where in the byte stream we should perform the fixup.
    start: usize,
    /// The length of the byte stream. For x86_64, this will be
    /// variable. For aarch64, it will be fixed at 4 bytes.
    len: usize,
};

/// `alloc_num / alloc_den` is the factor of padding when allocating.
const alloc_num = 4;
||||
@ -292,6 +329,7 @@ pub fn createEmpty(gpa: *Allocator, options: link.Options) !*MachO {
|
||||
.allocator = gpa,
|
||||
.file = null,
|
||||
},
|
||||
.page_size = if (options.target.cpu.arch == .aarch64) 0x4000 else 0x1000,
|
||||
};
|
||||
return self;
|
||||
}
|
||||
@ -312,35 +350,14 @@ pub fn flushModule(self: *MachO, comp: *Compilation) !void {
|
||||
const tracy = trace(@src());
|
||||
defer tracy.end();
|
||||
|
||||
// Unfortunately these have to be buffered and done at the end because MachO does not allow
|
||||
// mixing local, global and undefined symbols within a symbol table.
|
||||
try self.writeAllGlobalSymbols();
|
||||
try self.writeAllUndefSymbols();
|
||||
|
||||
try self.writeStringTable();
|
||||
|
||||
switch (self.base.options.output_mode) {
|
||||
.Exe => {
|
||||
// Write export trie.
|
||||
try self.writeExportTrie();
|
||||
if (self.entry_addr) |addr| {
|
||||
// Update LC_MAIN with entry offset.
|
||||
const text_segment = self.load_commands.items[self.text_segment_cmd_index.?].Segment;
|
||||
const main_cmd = &self.load_commands.items[self.main_cmd_index.?].EntryPoint;
|
||||
main_cmd.entryoff = addr - text_segment.vmaddr;
|
||||
}
|
||||
{
|
||||
// Update dynamic symbol table.
|
||||
const nlocals = @intCast(u32, self.local_symbols.items.len);
|
||||
const nglobals = @intCast(u32, self.global_symbols.items.len);
|
||||
const nundefs = @intCast(u32, self.undef_symbols.items.len);
|
||||
const dysymtab = &self.load_commands.items[self.dysymtab_cmd_index.?].Dysymtab;
|
||||
dysymtab.nlocalsym = nlocals;
|
||||
dysymtab.iextdefsym = nlocals;
|
||||
dysymtab.nextdefsym = nglobals;
|
||||
dysymtab.iundefsym = nlocals + nglobals;
|
||||
dysymtab.nundefsym = nundefs;
|
||||
}
|
||||
if (self.dylinker_cmd_dirty) {
|
||||
// Write path to dyld loader.
|
||||
var off: usize = @sizeOf(macho.mach_header_64);
|
||||
@ -367,20 +384,22 @@ pub fn flushModule(self: *MachO, comp: *Compilation) !void {
|
||||
try self.base.file.?.pwriteAll(mem.spanZ(LIB_SYSTEM_PATH), off);
|
||||
self.libsystem_cmd_dirty = false;
|
||||
}
|
||||
|
||||
try self.writeExportTrie();
|
||||
try self.writeSymbolTable();
|
||||
try self.writeStringTable();
|
||||
|
||||
// Preallocate space for the code signature.
|
||||
// We need to do this at this stage so that we have the load commands with proper values
|
||||
// written out to the file.
|
||||
// The most important here is to have the correct vm and filesize of the __LINKEDIT segment
|
||||
// where the code signature goes into.
|
||||
try self.writeCodeSignaturePadding();
|
||||
},
|
||||
.Obj => {},
|
||||
.Lib => return error.TODOImplementWritingLibFiles,
|
||||
}
|
||||
|
||||
{
|
||||
// Update symbol table.
|
||||
const nlocals = @intCast(u32, self.local_symbols.items.len);
|
||||
const nglobals = @intCast(u32, self.global_symbols.items.len);
|
||||
const nundefs = @intCast(u32, self.undef_symbols.items.len);
|
||||
const symtab = &self.load_commands.items[self.symtab_cmd_index.?].Symtab;
|
||||
symtab.nsyms = nlocals + nglobals + nundefs;
|
||||
}
|
||||
|
||||
if (self.cmd_table_dirty) {
|
||||
try self.writeCmdHeaders();
|
||||
try self.writeMachOHeader();
|
||||
@ -398,6 +417,11 @@ pub fn flushModule(self: *MachO, comp: *Compilation) !void {
|
||||
assert(!self.cmd_table_dirty);
|
||||
assert(!self.dylinker_cmd_dirty);
|
||||
assert(!self.libsystem_cmd_dirty);
|
||||
|
||||
switch (self.base.options.output_mode) {
|
||||
.Exe, .Lib => try self.writeCodeSignature(), // code signing always comes last
|
||||
else => {},
|
||||
}
|
||||
}
|
||||
|
||||
fn linkWithLLD(self: *MachO, comp: *Compilation) !void {
|
||||
@ -823,6 +847,7 @@ fn darwinArchString(arch: std.Target.Cpu.Arch) []const u8 {
|
||||
}
|
||||
|
||||
pub fn deinit(self: *MachO) void {
|
||||
self.pie_fixups.deinit(self.base.allocator);
|
||||
self.text_block_free_list.deinit(self.base.allocator);
|
||||
self.offset_table.deinit(self.base.allocator);
|
||||
self.offset_table_free_list.deinit(self.base.allocator);
|
||||
@ -955,7 +980,6 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
|
||||
log.debug("growing {} from 0x{x} to 0x{x}\n", .{ decl.name, symbol.n_value, vaddr });
|
||||
if (vaddr != symbol.n_value) {
|
||||
symbol.n_value = vaddr;
|
||||
|
||||
log.debug(" (writing new offset table entry)\n", .{});
|
||||
self.offset_table.items[decl.link.macho.offset_table_index] = vaddr;
|
||||
try self.writeOffsetTableEntry(decl.link.macho.offset_table_index);
|
||||
@ -968,8 +992,6 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
|
||||
symbol.n_type = macho.N_SECT;
|
||||
symbol.n_sect = @intCast(u8, self.text_section_index.?) + 1;
|
||||
symbol.n_desc = 0;
|
||||
// TODO this write could be avoided if no fields of the symbol were changed.
|
||||
try self.writeSymbol(decl.link.macho.local_sym_index);
|
||||
} else {
|
||||
const decl_name = mem.spanZ(decl.name);
|
||||
const name_str_index = try self.makeString(decl_name);
|
||||
@ -985,15 +1007,32 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
|
||||
.n_value = addr,
|
||||
};
|
||||
self.offset_table.items[decl.link.macho.offset_table_index] = addr;
|
||||
|
||||
try self.writeSymbol(decl.link.macho.local_sym_index);
|
||||
try self.writeOffsetTableEntry(decl.link.macho.offset_table_index);
|
||||
}
|
||||

    // Perform PIE fixups (if any)
    const got_section = self.sections.items[self.got_section_index.?];
    while (self.pie_fixups.popOrNull()) |fixup| {
        const target_addr = fixup.address;
        const this_addr = symbol.n_value + fixup.start;
        switch (self.base.options.target.cpu.arch) {
            .x86_64 => {
                const displacement = @intCast(u32, target_addr - this_addr - fixup.len);
                var placeholder = code_buffer.items[fixup.start + fixup.len - @sizeOf(u32) ..][0..@sizeOf(u32)];
                mem.writeIntSliceLittle(u32, placeholder, displacement);
            },
            .aarch64 => {
                const displacement = @intCast(u27, target_addr - this_addr);
                var placeholder = code_buffer.items[fixup.start..][0..fixup.len];
                mem.writeIntSliceLittle(u32, placeholder, aarch64.Instruction.b(@intCast(i28, displacement)).toU32());
            },
            else => unreachable, // unsupported target architecture
        }
    }
|
||||
const text_section = self.sections.items[self.text_section_index.?];
|
||||
const section_offset = symbol.n_value - text_section.addr;
|
||||
const file_offset = text_section.offset + section_offset;
|
||||
|
||||
try self.base.file.?.pwriteAll(code, file_offset);
|
||||
|
||||
// Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated.
|
||||
@ -1127,7 +1166,8 @@ pub fn populateMissingMetadata(self: *MachO) !void {
|
||||
}
|
||||
if (self.text_segment_cmd_index == null) {
|
||||
self.text_segment_cmd_index = @intCast(u16, self.load_commands.items.len);
|
||||
const prot = macho.VM_PROT_READ | macho.VM_PROT_EXECUTE;
|
||||
const maxprot = macho.VM_PROT_READ | macho.VM_PROT_WRITE | macho.VM_PROT_EXECUTE;
|
||||
const initprot = macho.VM_PROT_READ | macho.VM_PROT_EXECUTE;
|
||||
try self.load_commands.append(self.base.allocator, .{
|
||||
.Segment = .{
|
||||
.cmd = macho.LC_SEGMENT_64,
|
||||
@ -1137,8 +1177,8 @@ pub fn populateMissingMetadata(self: *MachO) !void {
|
||||
.vmsize = 0,
|
||||
.fileoff = 0,
|
||||
.filesize = 0,
|
||||
.maxprot = prot,
|
||||
.initprot = prot,
|
||||
.maxprot = maxprot,
|
||||
.initprot = initprot,
|
||||
.nsects = 0,
|
||||
.flags = 0,
|
||||
},
|
||||
@ -1148,11 +1188,10 @@ pub fn populateMissingMetadata(self: *MachO) !void {
|
||||
if (self.text_section_index == null) {
|
||||
self.text_section_index = @intCast(u16, self.sections.items.len);
|
||||
const text_segment = &self.load_commands.items[self.text_segment_cmd_index.?].Segment;
|
||||
text_segment.cmdsize += @sizeOf(macho.section_64);
|
||||
text_segment.nsects += 1;
|
||||
|
||||
const file_size = mem.alignForwardGeneric(u64, self.base.options.program_code_size_hint, 0x1000);
|
||||
const off = @intCast(u32, self.findFreeSpace(file_size, 0x1000)); // TODO maybe findFreeSpace should return u32 directly?
|
||||
const program_code_size_hint = self.base.options.program_code_size_hint;
|
||||
const file_size = mem.alignForwardGeneric(u64, program_code_size_hint, self.page_size);
|
||||
const off = @intCast(u32, self.findFreeSpace(file_size, self.page_size)); // TODO maybe findFreeSpace should return u32 directly?
|
||||
const flags = macho.S_REGULAR | macho.S_ATTR_PURE_INSTRUCTIONS | macho.S_ATTR_SOME_INSTRUCTIONS;
|
||||
|
||||
log.debug("found __text section free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
|
||||
@ -1163,7 +1202,7 @@ pub fn populateMissingMetadata(self: *MachO) !void {
|
||||
.addr = text_segment.vmaddr + off,
|
||||
.size = file_size,
|
||||
.offset = off,
|
||||
.@"align" = 12, // 2^12 = 4096
|
||||
.@"align" = if (self.base.options.target.cpu.arch == .aarch64) 2 else 0, // 2^2 for aarch64, 2^0 for x86_64
|
||||
.reloff = 0,
|
||||
.nreloc = 0,
|
||||
.flags = flags,
|
||||
@ -1174,47 +1213,27 @@ pub fn populateMissingMetadata(self: *MachO) !void {
|
||||
|
||||
text_segment.vmsize = file_size + off; // We add off here since __TEXT segment includes everything prior to __text section.
|
||||
text_segment.filesize = file_size + off;
|
||||
self.cmd_table_dirty = true;
|
||||
}
|
||||
if (self.data_segment_cmd_index == null) {
|
||||
self.data_segment_cmd_index = @intCast(u16, self.load_commands.items.len);
|
||||
const text_segment = &self.load_commands.items[self.text_segment_cmd_index.?].Segment;
|
||||
const prot = macho.VM_PROT_READ | macho.VM_PROT_WRITE;
|
||||
try self.load_commands.append(self.base.allocator, .{
|
||||
.Segment = .{
|
||||
.cmd = macho.LC_SEGMENT_64,
|
||||
.cmdsize = @sizeOf(macho.segment_command_64),
|
||||
.segname = makeStaticString("__DATA"),
|
||||
.vmaddr = text_segment.vmaddr + text_segment.vmsize,
|
||||
.vmsize = 0,
|
||||
.fileoff = 0,
|
||||
.filesize = 0,
|
||||
.maxprot = prot,
|
||||
.initprot = prot,
|
||||
.nsects = 0,
|
||||
.flags = 0,
|
||||
},
|
||||
});
|
||||
text_segment.cmdsize += @sizeOf(macho.section_64);
|
||||
text_segment.nsects += 1;
|
||||
self.cmd_table_dirty = true;
|
||||
}
|
||||
if (self.got_section_index == null) {
|
||||
self.got_section_index = @intCast(u16, self.sections.items.len);
|
||||
const data_segment = &self.load_commands.items[self.data_segment_cmd_index.?].Segment;
|
||||
data_segment.cmdsize += @sizeOf(macho.section_64);
|
||||
data_segment.nsects += 1;
|
||||
const text_section = &self.sections.items[self.text_section_index.?];
|
||||
|
||||
const file_size = @sizeOf(u64) * self.base.options.symbol_count_hint;
|
||||
const off = @intCast(u32, self.findFreeSpace(file_size, 0x1000));
|
||||
// TODO looking for free space should be done *within* a segment it belongs to
|
||||
const off = @intCast(u32, text_section.offset + text_section.size);
|
||||
|
||||
log.debug("found __got section free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
|
||||
|
||||
try self.sections.append(self.base.allocator, .{
|
||||
.sectname = makeStaticString("__got"),
|
||||
.segname = makeStaticString("__DATA"),
|
||||
.addr = data_segment.vmaddr,
|
||||
.segname = makeStaticString("__TEXT"),
|
||||
.addr = text_section.addr + text_section.size,
|
||||
.size = file_size,
|
||||
.offset = off,
|
||||
.@"align" = 3, // 2^3 = 8
|
||||
.@"align" = if (self.base.options.target.cpu.arch == .aarch64) 2 else 0,
|
||||
.reloff = 0,
|
||||
.nreloc = 0,
|
||||
.flags = macho.S_REGULAR,
|
||||
@ -1223,31 +1242,36 @@ pub fn populateMissingMetadata(self: *MachO) !void {
|
||||
.reserved3 = 0,
|
||||
});
|
||||
|
||||
const segment_size = mem.alignForwardGeneric(u64, file_size, 0x1000);
|
||||
data_segment.vmsize = segment_size;
|
||||
data_segment.filesize = segment_size;
|
||||
data_segment.fileoff = off;
|
||||
const text_segment = &self.load_commands.items[self.text_segment_cmd_index.?].Segment;
|
||||
const added_size = mem.alignForwardGeneric(u64, file_size, self.page_size);
|
||||
text_segment.vmsize += added_size;
|
||||
text_segment.filesize += added_size;
|
||||
text_segment.cmdsize += @sizeOf(macho.section_64);
|
||||
text_segment.nsects += 1;
|
||||
self.cmd_table_dirty = true;
|
||||
}
|
||||
if (self.linkedit_segment_cmd_index == null) {
|
||||
self.linkedit_segment_cmd_index = @intCast(u16, self.load_commands.items.len);
|
||||
const data_segment = &self.load_commands.items[self.data_segment_cmd_index.?].Segment;
|
||||
const prot = macho.VM_PROT_READ | macho.VM_PROT_WRITE | macho.VM_PROT_EXECUTE;
|
||||
const text_segment = &self.load_commands.items[self.text_segment_cmd_index.?].Segment;
|
||||
const maxprot = macho.VM_PROT_READ | macho.VM_PROT_WRITE | macho.VM_PROT_EXECUTE;
|
||||
const initprot = macho.VM_PROT_READ;
|
||||
const off = text_segment.fileoff + text_segment.filesize;
|
||||
try self.load_commands.append(self.base.allocator, .{
|
||||
.Segment = .{
|
||||
.cmd = macho.LC_SEGMENT_64,
|
||||
.cmdsize = @sizeOf(macho.segment_command_64),
|
||||
.segname = makeStaticString("__LINKEDIT"),
|
||||
.vmaddr = data_segment.vmaddr + data_segment.vmsize,
|
||||
.vmaddr = text_segment.vmaddr + text_segment.vmsize,
|
||||
.vmsize = 0,
|
||||
.fileoff = 0,
|
||||
.fileoff = off,
|
||||
.filesize = 0,
|
||||
.maxprot = prot,
|
||||
.initprot = prot,
|
||||
.maxprot = maxprot,
|
||||
.initprot = initprot,
|
||||
.nsects = 0,
|
||||
.flags = 0,
|
||||
},
|
||||
});
|
||||
self.linkedit_segment_next_offset = @intCast(u32, off);
|
||||
self.cmd_table_dirty = true;
|
||||
}
|
||||
if (self.dyld_info_cmd_index == null) {
|
||||
@ -1329,8 +1353,8 @@ pub fn populateMissingMetadata(self: *MachO) !void {
|
||||
self.libsystem_cmd_index = @intCast(u16, self.load_commands.items.len);
|
||||
const cmdsize = mem.alignForwardGeneric(u64, @sizeOf(macho.dylib_command) + mem.lenZ(LIB_SYSTEM_PATH), @sizeOf(u64));
|
||||
// TODO Find a way to work out runtime version from the OS version triple stored in std.Target.
|
||||
// In the meantime, we're gonna hardcode to the minimum compatibility version of 1.0.0.
|
||||
const min_version = 0x10000;
|
||||
// In the meantime, we're gonna hardcode to the minimum compatibility version of 0.0.0.
|
||||
const min_version = 0x0;
|
||||
const dylib = .{
|
||||
.name = @sizeOf(macho.dylib_command),
|
||||
.timestamp = 2, // not sure why not simply 0; this is reverse engineered from Mach-O files
|
||||
@ -1359,47 +1383,46 @@ pub fn populateMissingMetadata(self: *MachO) !void {
|
||||
});
|
||||
self.cmd_table_dirty = true;
|
||||
}
|
||||
{
|
||||
const linkedit = &self.load_commands.items[self.linkedit_segment_cmd_index.?].Segment;
|
||||
const dyld_info = &self.load_commands.items[self.dyld_info_cmd_index.?].DyldInfo;
|
||||
if (dyld_info.export_off == 0) {
|
||||
const nsyms = self.base.options.symbol_count_hint;
|
||||
const file_size = @sizeOf(u64) * nsyms;
|
||||
const off = @intCast(u32, self.findFreeSpace(file_size, 0x1000));
|
||||
log.debug("found export trie free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
|
||||
dyld_info.export_off = off;
|
||||
dyld_info.export_size = @intCast(u32, file_size);
|
||||
|
||||
const segment_size = mem.alignForwardGeneric(u64, file_size, 0x1000);
|
||||
linkedit.vmsize += segment_size;
|
||||
linkedit.fileoff = off;
|
||||
}
|
||||
if (self.version_min_cmd_index == null) {
|
||||
self.version_min_cmd_index = @intCast(u16, self.load_commands.items.len);
|
||||
const cmd: u32 = switch (self.base.options.target.os.tag) {
|
||||
.macos => macho.LC_VERSION_MIN_MACOSX,
|
||||
.ios => macho.LC_VERSION_MIN_IPHONEOS,
|
||||
.tvos => macho.LC_VERSION_MIN_TVOS,
|
||||
.watchos => macho.LC_VERSION_MIN_WATCHOS,
|
||||
else => unreachable, // wrong OS
|
||||
};
|
||||
const ver = self.base.options.target.os.version_range.semver.min;
|
||||
const version = ver.major << 16 | ver.minor << 8 | ver.patch;
|
||||
try self.load_commands.append(self.base.allocator, .{
|
||||
.MinVersion = .{
|
||||
.cmd = cmd,
|
||||
.cmdsize = @sizeOf(macho.version_min_command),
|
||||
.version = version,
|
||||
.sdk = version,
|
||||
},
|
||||
});
|
||||
}
|
||||
{
|
||||
const linkedit = &self.load_commands.items[self.linkedit_segment_cmd_index.?].Segment;
|
||||
const symtab = &self.load_commands.items[self.symtab_cmd_index.?].Symtab;
|
||||
if (symtab.symoff == 0) {
|
||||
const nsyms = self.base.options.symbol_count_hint;
|
||||
const file_size = @sizeOf(macho.nlist_64) * nsyms;
|
||||
const off = @intCast(u32, self.findFreeSpace(file_size, 0x1000));
|
||||
log.debug("found symbol table free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
|
||||
symtab.symoff = off;
|
||||
symtab.nsyms = @intCast(u32, nsyms);
|
||||
|
||||
const segment_size = mem.alignForwardGeneric(u64, file_size, 0x1000);
|
||||
linkedit.vmsize += segment_size;
|
||||
}
|
||||
if (symtab.stroff == 0) {
|
||||
try self.string_table.append(self.base.allocator, 0);
|
||||
const file_size = @intCast(u32, self.string_table.items.len);
|
||||
const off = @intCast(u32, self.findFreeSpace(file_size, 0x1000));
|
||||
log.debug("found string table free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
|
||||
symtab.stroff = off;
|
||||
symtab.strsize = file_size;
|
||||
|
||||
const segment_size = mem.alignForwardGeneric(u64, file_size, 0x1000);
|
||||
linkedit.vmsize += segment_size;
|
||||
}
|
||||
if (self.source_version_cmd_index == null) {
|
||||
self.source_version_cmd_index = @intCast(u16, self.load_commands.items.len);
|
||||
try self.load_commands.append(self.base.allocator, .{
|
||||
.SourceVersion = .{
|
||||
.cmd = macho.LC_SOURCE_VERSION,
|
||||
.cmdsize = @sizeOf(macho.source_version_command),
|
||||
.version = 0x0,
|
||||
},
|
||||
});
|
||||
}
|
||||
if (self.code_signature_cmd_index == null) {
|
||||
self.code_signature_cmd_index = @intCast(u16, self.load_commands.items.len);
|
||||
try self.load_commands.append(self.base.allocator, .{
|
||||
.LinkeditData = .{
|
||||
.cmd = macho.LC_CODE_SIGNATURE,
|
||||
.cmdsize = @sizeOf(macho.linkedit_data_command),
|
||||
.dataoff = 0,
|
||||
.datasize = 0,
|
||||
},
|
||||
});
|
||||
}
|
||||
if (self.dyld_stub_binder_index == null) {
|
||||
self.dyld_stub_binder_index = @intCast(u16, self.undef_symbols.items.len);
|
||||
@ -1626,47 +1649,121 @@ fn findFreeSpace(self: *MachO, object_size: u64, min_alignment: u16) u64 {
    return start;
}

fn writeSymbol(self: *MachO, index: usize) !void {
    const tracy = trace(@src());
    defer tracy.end();

    const symtab = &self.load_commands.items[self.symtab_cmd_index.?].Symtab;
    const sym = [1]macho.nlist_64{self.local_symbols.items[index]};
    const off = symtab.symoff + @sizeOf(macho.nlist_64) * index;
    log.debug("writing symbol {} at 0x{x}\n", .{ sym[0], off });
    try self.base.file.?.pwriteAll(mem.sliceAsBytes(sym[0..1]), off);
}

fn writeOffsetTableEntry(self: *MachO, index: usize) !void {
    const sect = &self.sections.items[self.got_section_index.?];
    const endian = self.base.options.target.cpu.arch.endian();
    var buf: [@sizeOf(u64)]u8 = undefined;
    mem.writeInt(u64, &buf, self.offset_table.items[index], endian);
    const off = sect.offset + @sizeOf(u64) * index;
    const vmaddr = sect.addr + @sizeOf(u64) * index;

    var code: [8]u8 = undefined;
    switch (self.base.options.target.cpu.arch) {
        .x86_64 => {
            const pos_symbol_off = @intCast(u31, vmaddr - self.offset_table.items[index] + 7);
            const symbol_off = @bitCast(u32, @intCast(i32, pos_symbol_off) * -1);
            // lea %rax, [rip - disp]
            code[0] = 0x48;
            code[1] = 0x8D;
            code[2] = 0x5;
            mem.writeIntLittle(u32, code[3..7], symbol_off);
            // ret
            code[7] = 0xC3;
        },
        .aarch64 => {
            const pos_symbol_off = @intCast(u20, vmaddr - self.offset_table.items[index]);
            const symbol_off = @intCast(i21, pos_symbol_off) * -1;
            // adr x0, #-disp
            mem.writeIntLittle(u32, code[0..4], aarch64.Instruction.adr(.x0, symbol_off).toU32());
            // ret x28
            mem.writeIntLittle(u32, code[4..8], aarch64.Instruction.ret(.x28).toU32());
        },
        else => unreachable, // unsupported target architecture
    }
    log.debug("writing offset table entry 0x{x} at 0x{x}\n", .{ self.offset_table.items[index], off });
    try self.base.file.?.pwriteAll(&buf, off);
    try self.base.file.?.pwriteAll(&code, off);
}
|
||||
fn writeAllGlobalSymbols(self: *MachO) !void {
|
||||
fn writeSymbolTable(self: *MachO) !void {
|
||||
// TODO workout how we can cache these so that we only overwrite symbols that were updated
|
||||
const symtab = &self.load_commands.items[self.symtab_cmd_index.?].Symtab;
|
||||
const off = symtab.symoff + self.local_symbols.items.len * @sizeOf(macho.nlist_64);
|
||||
const file_size = self.global_symbols.items.len * @sizeOf(macho.nlist_64);
|
||||
log.debug("writing global symbols from 0x{x} to 0x{x}\n", .{ off, file_size + off });
|
||||
try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.global_symbols.items), off);
|
||||
|
||||
const locals_off = self.linkedit_segment_next_offset.?;
|
||||
const locals_size = self.local_symbols.items.len * @sizeOf(macho.nlist_64);
|
||||
log.debug("writing local symbols from 0x{x} to 0x{x}\n", .{ locals_off, locals_size + locals_off });
|
||||
try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.local_symbols.items), locals_off);
|
||||
|
||||
const globals_off = locals_off + locals_size;
|
||||
const globals_size = self.global_symbols.items.len * @sizeOf(macho.nlist_64);
|
||||
log.debug("writing global symbols from 0x{x} to 0x{x}\n", .{ globals_off, globals_size + globals_off });
|
||||
try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.global_symbols.items), globals_off);
|
||||
|
||||
const undefs_off = globals_off + globals_size;
|
||||
const undefs_size = self.undef_symbols.items.len * @sizeOf(macho.nlist_64);
|
||||
log.debug("writing undef symbols from 0x{x} to 0x{x}\n", .{ undefs_off, undefs_size + undefs_off });
|
||||
try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.undef_symbols.items), undefs_off);
|
||||
|
||||
// Update symbol table.
|
||||
const nlocals = @intCast(u32, self.local_symbols.items.len);
|
||||
const nglobals = @intCast(u32, self.global_symbols.items.len);
|
||||
const nundefs = @intCast(u32, self.undef_symbols.items.len);
|
||||
symtab.symoff = self.linkedit_segment_next_offset.?;
|
||||
symtab.nsyms = nlocals + nglobals + nundefs;
|
||||
self.linkedit_segment_next_offset = symtab.symoff + symtab.nsyms * @sizeOf(macho.nlist_64);
|
||||
|
||||
// Update dynamic symbol table.
|
||||
const dysymtab = &self.load_commands.items[self.dysymtab_cmd_index.?].Dysymtab;
|
||||
dysymtab.nlocalsym = nlocals;
|
||||
dysymtab.iextdefsym = nlocals;
|
||||
dysymtab.nextdefsym = nglobals;
|
||||
dysymtab.iundefsym = nlocals + nglobals;
|
||||
dysymtab.nundefsym = nundefs;
|
||||
|
||||
// Advance size of __LINKEDIT segment
|
||||
const linkedit = &self.load_commands.items[self.linkedit_segment_cmd_index.?].Segment;
|
||||
linkedit.filesize += symtab.nsyms * @sizeOf(macho.nlist_64);
|
||||
if (linkedit.vmsize < linkedit.filesize) {
|
||||
linkedit.vmsize = mem.alignForwardGeneric(u64, linkedit.filesize, self.page_size);
|
||||
}
|
||||
self.cmd_table_dirty = true;
|
||||
}
|
||||
|
||||
fn writeAllUndefSymbols(self: *MachO) !void {
|
||||
const symtab = &self.load_commands.items[self.symtab_cmd_index.?].Symtab;
|
||||
const nlocals = self.local_symbols.items.len;
|
||||
const nglobals = self.global_symbols.items.len;
|
||||
const off = symtab.symoff + (nlocals + nglobals) * @sizeOf(macho.nlist_64);
|
||||
const file_size = self.undef_symbols.items.len * @sizeOf(macho.nlist_64);
|
||||
log.debug("writing undef symbols from 0x{x} to 0x{x}\n", .{ off, file_size + off });
|
||||
try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.undef_symbols.items), off);
|
||||
fn writeCodeSignaturePadding(self: *MachO) !void {
    const code_sig_cmd = &self.load_commands.items[self.code_signature_cmd_index.?].LinkeditData;
    const fileoff = self.linkedit_segment_next_offset.?;
    const datasize: u32 = 0x1000; // TODO Calculate the expected size of the signature.
    code_sig_cmd.dataoff = fileoff;
    code_sig_cmd.datasize = datasize;

    self.linkedit_segment_next_offset = fileoff + datasize;
    // Advance size of __LINKEDIT segment
    const linkedit = &self.load_commands.items[self.linkedit_segment_cmd_index.?].Segment;
    linkedit.filesize += datasize;
    if (linkedit.vmsize < linkedit.filesize) {
        linkedit.vmsize = mem.alignForwardGeneric(u64, linkedit.filesize, self.page_size);
    }
    log.debug("writing code signature padding from 0x{x} to 0x{x}\n", .{ fileoff, fileoff + datasize });
    // Pad out the space. We need to do this to calculate valid hashes for everything in the file
    // except for code signature data.
    try self.base.file.?.pwriteAll(&[_]u8{0}, fileoff + datasize - 1);
}

fn writeCodeSignature(self: *MachO) !void {
    const code_sig_cmd = &self.load_commands.items[self.code_signature_cmd_index.?].LinkeditData;
    var code_sig = CodeSignature.init(self.base.allocator);
    defer code_sig.deinit();

    try code_sig.calcAdhocSignature(self);

    var buffer = try self.base.allocator.alloc(u8, code_sig.size());
    defer self.base.allocator.free(buffer);

    code_sig.write(buffer);

    log.debug("writing code signature from 0x{x} to 0x{x}\n", .{ code_sig_cmd.dataoff, code_sig_cmd.dataoff + buffer.len });

    try self.base.file.?.pwriteAll(buffer, code_sig_cmd.dataoff);
}
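`writeCodeSignaturePadding` reserves the signature region simply by writing a single zero byte at the last reserved offset: a pwrite past the current end of file extends it, so the page hashes computed later in `calcAdhocSignature` see the file at its final size with the signature area zero-filled. The trick in isolation:

```zig
const std = @import("std");

// Writing one zero byte at offset (dataoff + datasize - 1) grows the file to
// exactly dataoff + datasize bytes without touching the earlier contents.
fn reserveSignatureSpace(file: std.fs.File, dataoff: u64, datasize: u64) !void {
    try file.pwriteAll(&[_]u8{0}, dataoff + datasize - 1);
}
```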
||||
|
||||
fn writeExportTrie(self: *MachO) !void {
|
||||
if (self.global_symbols.items.len == 0) return; // No exports, nothing to do.
|
||||
if (self.global_symbols.items.len == 0) return;
|
||||
|
||||
var trie: Trie = .{};
|
||||
defer trie.deinit(self.base.allocator);
|
||||
@ -1689,28 +1786,51 @@ fn writeExportTrie(self: *MachO) !void {
|
||||
try trie.writeULEB128Mem(self.base.allocator, &buffer);
|
||||
|
||||
const dyld_info = &self.load_commands.items[self.dyld_info_cmd_index.?].DyldInfo;
|
||||
const export_size = @intCast(u32, mem.alignForward(buffer.items.len, @sizeOf(u64)));
|
||||
dyld_info.export_off = self.linkedit_segment_next_offset.?;
|
||||
dyld_info.export_size = export_size;
|
||||
|
||||
log.debug("writing export trie from 0x{x} to 0x{x}\n", .{ dyld_info.export_off, dyld_info.export_off + export_size });
|
||||
|
||||
if (export_size > buffer.items.len) {
|
||||
// Pad out to align(8).
|
||||
try self.base.file.?.pwriteAll(&[_]u8{0}, dyld_info.export_off + export_size);
|
||||
}
|
||||
try self.base.file.?.pwriteAll(buffer.items, dyld_info.export_off);
|
||||
|
||||
self.linkedit_segment_next_offset = dyld_info.export_off + dyld_info.export_size;
|
||||
// Advance size of __LINKEDIT segment
|
||||
const linkedit = &self.load_commands.items[self.linkedit_segment_cmd_index.?].Segment;
|
||||
linkedit.filesize += dyld_info.export_size;
|
||||
if (linkedit.vmsize < linkedit.filesize) {
|
||||
linkedit.vmsize = mem.alignForwardGeneric(u64, linkedit.filesize, self.page_size);
|
||||
}
|
||||
self.cmd_table_dirty = true;
|
||||
}
|
||||
|
||||
fn writeStringTable(self: *MachO) !void {
|
||||
const symtab = &self.load_commands.items[self.symtab_cmd_index.?].Symtab;
|
||||
const allocated_size = self.allocatedSize(symtab.stroff);
|
||||
const needed_size = self.string_table.items.len;
|
||||
|
||||
if (needed_size > allocated_size) {
|
||||
symtab.strsize = 0;
|
||||
symtab.stroff = @intCast(u32, self.findFreeSpace(needed_size, 1));
|
||||
}
|
||||
symtab.strsize = @intCast(u32, needed_size);
|
||||
symtab.stroff = self.linkedit_segment_next_offset.?;
|
||||
symtab.strsize = @intCast(u32, mem.alignForward(needed_size, @sizeOf(u64)));
|
||||
|
||||
log.debug("writing string table from 0x{x} to 0x{x}\n", .{ symtab.stroff, symtab.stroff + symtab.strsize });
|
||||
|
||||
if (symtab.strsize > needed_size) {
|
||||
// Pad out to align(8);
|
||||
try self.base.file.?.pwriteAll(&[_]u8{0}, symtab.stroff + symtab.strsize);
|
||||
}
|
||||
try self.base.file.?.pwriteAll(self.string_table.items, symtab.stroff);
|
||||
|
||||
// TODO rework how we preallocate space for the entire __LINKEDIT segment instead of
|
||||
// doing dynamic updates like this.
|
||||
self.linkedit_segment_next_offset = symtab.stroff + symtab.strsize;
|
||||
// Advance size of __LINKEDIT segment
|
||||
const linkedit = &self.load_commands.items[self.linkedit_segment_cmd_index.?].Segment;
|
||||
linkedit.filesize = symtab.stroff + symtab.strsize - linkedit.fileoff;
|
||||
linkedit.filesize += symtab.strsize;
|
||||
if (linkedit.vmsize < linkedit.filesize) {
|
||||
linkedit.vmsize = mem.alignForwardGeneric(u64, linkedit.filesize, self.page_size);
|
||||
}
|
||||
self.cmd_table_dirty = true;
|
||||
}
|
||||
|
||||
fn writeCmdHeaders(self: *MachO) !void {
|
||||
@ -1726,7 +1846,6 @@ fn writeCmdHeaders(self: *MachO) !void {
|
||||
last_cmd_offset += cmd.cmdsize();
|
||||
}
|
||||
{
|
||||
// write __text section header
|
||||
const off = if (self.text_segment_cmd_index) |text_segment_index| blk: {
|
||||
var i: usize = 0;
|
||||
var cmdsize: usize = @sizeOf(macho.mach_header_64) + @sizeOf(macho.segment_command_64);
|
||||
@ -1739,27 +1858,11 @@ fn writeCmdHeaders(self: *MachO) !void {
|
||||
// only one, noname segment to append this section header to.
|
||||
return error.TODOImplementWritingObjFiles;
|
||||
};
|
||||
const idx = self.text_section_index.?;
|
||||
log.debug("writing text section header at 0x{x}\n", .{off});
|
||||
try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.sections.items[idx .. idx + 1]), off);
|
||||
}
|
||||
{
|
||||
// write __got section header
|
||||
const off = if (self.data_segment_cmd_index) |data_segment_index| blk: {
|
||||
var i: usize = 0;
|
||||
var cmdsize: usize = @sizeOf(macho.mach_header_64) + @sizeOf(macho.segment_command_64);
|
||||
while (i < data_segment_index) : (i += 1) {
|
||||
cmdsize += self.load_commands.items[i].cmdsize();
|
||||
}
|
||||
break :blk cmdsize;
|
||||
} else {
|
||||
// If we've landed in here, we are building a MachO object file, so we have
|
||||
// only one, noname segment to append this section header to.
|
||||
return error.TODOImplementWritingObjFiles;
|
||||
};
|
||||
const idx = self.got_section_index.?;
|
||||
log.debug("writing got section header at 0x{x}\n", .{off});
|
||||
try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.sections.items[idx .. idx + 1]), off);
|
||||
// write sections belonging to __TEXT segment
|
||||
// TODO section indices should belong to each Segment, and we should iterate dynamically.
|
||||
const id = self.text_section_index.?;
|
||||
log.debug("writing __TEXT section headers at 0x{x}\n", .{off});
|
||||
try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.sections.items[id .. id + 2]), off);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1807,8 +1910,14 @@ fn writeMachOHeader(self: *MachO) !void {

    hdr.sizeofcmds = sizeofcmds;

    // TODO should these be set to something else?
    hdr.flags = 0;
    switch (self.base.options.output_mode) {
        .Exe => {
            hdr.flags = macho.MH_NOUNDEFS | macho.MH_DYLDLINK | macho.MH_PIE;
        },
        else => {
            hdr.flags = 0;
        },
    }
    hdr.reserved = 0;

    log.debug("writing Mach-O header {}\n", .{hdr});
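With the usual loader.h values for these flags (MH_NOUNDEFS = 0x1, MH_DYLDLINK = 0x4, MH_PIE = 0x200000 — stated here as an assumption about std.macho, not taken from this diff), an executable's header flags come out as 0x200005:

```zig
const std = @import("std");
const macho = std.macho;
const testing = std.testing;

test "header flags for a PIE executable" {
    const flags: u32 = macho.MH_NOUNDEFS | macho.MH_DYLDLINK | macho.MH_PIE;
    testing.expectEqual(@as(u32, 0x200005), flags);
}
```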
182 src/link/MachO/CodeSignature.zig (new file)
@ -0,0 +1,182 @@
const CodeSignature = @This();

const std = @import("std");
const assert = std.debug.assert;
const log = std.log.scoped(.link);
const macho = std.macho;
const mem = std.mem;
const testing = std.testing;
const Allocator = mem.Allocator;
const Sha256 = std.crypto.hash.sha2.Sha256;

const MachO = @import("../MachO.zig");

const hash_size: u8 = 32;
const page_size: u16 = 0x1000;

const CodeDirectory = struct {
    inner: macho.CodeDirectory,
    data: std.ArrayListUnmanaged(u8) = .{},

    fn size(self: CodeDirectory) u32 {
        return self.inner.length;
    }

    fn write(self: CodeDirectory, buffer: []u8) void {
        assert(buffer.len >= self.inner.length);

        mem.writeIntBig(u32, buffer[0..4], self.inner.magic);
        mem.writeIntBig(u32, buffer[4..8], self.inner.length);
        mem.writeIntBig(u32, buffer[8..12], self.inner.version);
        mem.writeIntBig(u32, buffer[12..16], self.inner.flags);
        mem.writeIntBig(u32, buffer[16..20], self.inner.hashOffset);
        mem.writeIntBig(u32, buffer[20..24], self.inner.identOffset);
        mem.writeIntBig(u32, buffer[24..28], self.inner.nSpecialSlots);
        mem.writeIntBig(u32, buffer[28..32], self.inner.nCodeSlots);
        mem.writeIntBig(u32, buffer[32..36], self.inner.codeLimit);
        buffer[36] = self.inner.hashSize;
        buffer[37] = self.inner.hashType;
        buffer[38] = self.inner.platform;
        buffer[39] = self.inner.pageSize;
        mem.writeIntBig(u32, buffer[40..44], self.inner.spare2);
        mem.writeIntBig(u32, buffer[44..48], self.inner.scatterOffset);
        mem.writeIntBig(u32, buffer[48..52], self.inner.teamOffset);
        mem.writeIntBig(u32, buffer[52..56], self.inner.spare3);
        mem.writeIntBig(u64, buffer[56..64], self.inner.codeLimit64);
        mem.writeIntBig(u64, buffer[64..72], self.inner.execSegBase);
        mem.writeIntBig(u64, buffer[72..80], self.inner.execSegLimit);
        mem.writeIntBig(u64, buffer[80..88], self.inner.execSegFlags);

        mem.copy(u8, buffer[88..], self.data.items);
    }
};

alloc: *Allocator,
inner: macho.SuperBlob = .{
    .magic = macho.CSMAGIC_EMBEDDED_SIGNATURE,
    .length = @sizeOf(macho.SuperBlob),
    .count = 0,
},
cdir: ?CodeDirectory = null,

pub fn init(alloc: *Allocator) CodeSignature {
    return .{
        .alloc = alloc,
    };
}

pub fn calcAdhocSignature(self: *CodeSignature, bin_file: *const MachO) !void {
    const text_segment = bin_file.load_commands.items[bin_file.text_segment_cmd_index.?].Segment;
    const code_sig_cmd = bin_file.load_commands.items[bin_file.code_signature_cmd_index.?].LinkeditData;

    const execSegBase: u64 = text_segment.fileoff;
    const execSegLimit: u64 = text_segment.filesize;
    const execSegFlags: u64 = if (bin_file.base.options.output_mode == .Exe) macho.CS_EXECSEG_MAIN_BINARY else 0;
    const file_size = code_sig_cmd.dataoff;
    var cdir = CodeDirectory{
        .inner = .{
            .magic = macho.CSMAGIC_CODEDIRECTORY,
            .length = @sizeOf(macho.CodeDirectory),
            .version = macho.CS_SUPPORTSEXECSEG,
            .flags = macho.CS_ADHOC,
            .hashOffset = 0,
            .identOffset = 0,
            .nSpecialSlots = 0,
            .nCodeSlots = 0,
            .codeLimit = @intCast(u32, file_size),
            .hashSize = hash_size,
            .hashType = macho.CS_HASHTYPE_SHA256,
            .platform = 0,
            .pageSize = @truncate(u8, std.math.log2(page_size)),
            .spare2 = 0,
            .scatterOffset = 0,
            .teamOffset = 0,
            .spare3 = 0,
            .codeLimit64 = 0,
            .execSegBase = execSegBase,
            .execSegLimit = execSegLimit,
            .execSegFlags = execSegFlags,
        },
    };

    const total_pages = mem.alignForward(file_size, page_size) / page_size;

    var hash: [hash_size]u8 = undefined;
    var buffer = try bin_file.base.allocator.alloc(u8, page_size);
    defer bin_file.base.allocator.free(buffer);
    const macho_file = bin_file.base.file.?;

    const id = bin_file.base.options.emit.?.sub_path;
    try cdir.data.ensureCapacity(self.alloc, total_pages * hash_size + id.len + 1);

    // 1. Save the identifier and update offsets
    cdir.inner.identOffset = cdir.inner.length;
    cdir.data.appendSliceAssumeCapacity(id);
    cdir.data.appendAssumeCapacity(0);

    // 2. Calculate hash for each page (in file) and write it to the buffer
    // TODO figure out how we can cache several hashes since we won't update
    // every page during incremental linking
    cdir.inner.hashOffset = cdir.inner.identOffset + @intCast(u32, id.len) + 1;
    var i: usize = 0;
    while (i < total_pages) : (i += 1) {
        const fstart = i * page_size;
        const fsize = if (fstart + page_size > file_size) file_size - fstart else page_size;
        const len = try macho_file.preadAll(buffer, fstart);
        assert(fsize <= len);

        Sha256.hash(buffer[0..fsize], &hash, .{});

        cdir.data.appendSliceAssumeCapacity(hash[0..]);
        cdir.inner.nCodeSlots += 1;
    }

    // 3. Update CodeDirectory length
    cdir.inner.length += @intCast(u32, cdir.data.items.len);

    self.inner.length += @sizeOf(macho.BlobIndex) + cdir.size();
    self.inner.count = 1;
    self.cdir = cdir;
}

pub fn size(self: CodeSignature) u32 {
    return self.inner.length;
}

pub fn write(self: CodeSignature, buffer: []u8) void {
    assert(buffer.len >= self.inner.length);
    self.writeHeader(buffer);
    const offset: u32 = @sizeOf(macho.SuperBlob) + @sizeOf(macho.BlobIndex);
    writeBlobIndex(macho.CSSLOT_CODEDIRECTORY, offset, buffer[@sizeOf(macho.SuperBlob)..]);
    self.cdir.?.write(buffer[offset..]);
}

pub fn deinit(self: *CodeSignature) void {
    if (self.cdir) |*cdir| {
        cdir.data.deinit(self.alloc);
    }
}

fn writeHeader(self: CodeSignature, buffer: []u8) void {
    assert(buffer.len >= @sizeOf(macho.SuperBlob));
    mem.writeIntBig(u32, buffer[0..4], self.inner.magic);
    mem.writeIntBig(u32, buffer[4..8], self.inner.length);
    mem.writeIntBig(u32, buffer[8..12], self.inner.count);
}

fn writeBlobIndex(tt: u32, offset: u32, buffer: []u8) void {
    assert(buffer.len >= @sizeOf(macho.BlobIndex));
    mem.writeIntBig(u32, buffer[0..4], tt);
    mem.writeIntBig(u32, buffer[4..8], offset);
}

test "CodeSignature header" {
    var code_sig = CodeSignature.init(testing.allocator);
    defer code_sig.deinit();

    var buffer: [@sizeOf(macho.SuperBlob)]u8 = undefined;
    code_sig.writeHeader(buffer[0..]);

    const expected = &[_]u8{ 0xfa, 0xde, 0x0c, 0xc0, 0x0, 0x0, 0x0, 0xc, 0x0, 0x0, 0x0, 0x0 };
    testing.expect(mem.eql(u8, expected[0..], buffer[0..]));
}
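The size of the resulting ad-hoc signature follows directly from the layout above: a 12-byte SuperBlob header, one 8-byte BlobIndex, the 88-byte CodeDirectory header, the NUL-terminated identifier, and one 32-byte SHA-256 hash per 4 KiB page of the file being signed. A small sketch of that arithmetic (constants restated here for illustration):

```zig
const std = @import("std");
const testing = std.testing;

fn adhocSignatureSize(file_size: u64, id_len: u64) u64 {
    const page_size: u64 = 0x1000;
    const hash_size: u64 = 32;
    const npages = (file_size + page_size - 1) / page_size;
    // SuperBlob + BlobIndex + CodeDirectory header + "<id>\x00" + page hashes
    return 12 + 8 + 88 + (id_len + 1) + npages * hash_size;
}

test "ad-hoc signature size for a small binary" {
    // A 0x8000-byte file (8 pages) signed with the identifier "hello".
    testing.expectEqual(@as(u64, 12 + 8 + 88 + 6 + 8 * 32), adhocSignatureSize(0x8000, 5));
}
```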
@ -1727,6 +1727,9 @@ fn buildOutputType(
        error.SemanticAnalyzeFail => process.exit(1),
        else => |e| return e,
    };
    if (output_mode == .Exe) {
        try comp.makeBinFileExecutable();
    }

    if (build_options.is_stage1 and comp.stage1_lock != null and watch) {
        warn("--watch is not recommended with the stage1 backend; it leaks memory and is not capable of incremental compilation", .{});
@ -12,9 +12,7 @@ const linux_aarch64 = std.zig.CrossTarget{
};

pub fn addCases(ctx: *TestContext) !void {
    // TODO enable when we add codesigning to the self-hosted linker
    // related to #6971
    if (false) {
    {
        var case = ctx.exe("hello world with updates", macos_aarch64);

        // Regular old hello world