rename std.heap.direct_allocator to std.heap.page_allocator
std.heap.direct_allocator is still available for now but it is marked deprecated.
commit cb38bd0a14 (parent 35d65cceb8)
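
The change is mechanical for downstream code: swap the name, and (as most call sites in this diff do) wrap the page allocator in an ArenaAllocator when many allocations are freed together. Below is a minimal sketch of the migrated pattern, assuming the Zig version this commit targets (allocators passed as *Allocator, the arena's allocator field taken by address); the argsAlloc call is only an illustrative use of the allocator:

const std = @import("std");

pub fn main() !void {
    // page_allocator (formerly direct_allocator) makes a syscall per
    // allocation, so most call sites in this commit wrap it in an arena.
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    const allocator = &arena.allocator;

    // Everything allocated through the arena is released by arena.deinit().
    const args = try std.process.argsAlloc(allocator);
    _ = args;
}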
@@ -16,7 +16,7 @@ const tmp_dir_name = "docgen_tmp";
 const test_out_path = tmp_dir_name ++ fs.path.sep_str ++ "test" ++ exe_ext;

 pub fn main() !void {
-    var arena = std.heap.ArenaAllocator.init(std.heap.direct_allocator);
+    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
     defer arena.deinit();

     const allocator = &arena.allocator;
@@ -6428,7 +6428,7 @@ fn amainWrap() void {
 }

 fn amain() !void {
-    const allocator = std.heap.direct_allocator;
+    const allocator = std.heap.page_allocator;
     var download_frame = async fetchUrl(allocator, "https://example.com/");
     var awaited_download_frame = false;
     errdefer if (!awaited_download_frame) {
@@ -6498,7 +6498,7 @@ fn amainWrap() void {
 }

 fn amain() !void {
-    const allocator = std.heap.direct_allocator;
+    const allocator = std.heap.page_allocator;
     var download_frame = async fetchUrl(allocator, "https://example.com/");
     var awaited_download_frame = false;
     errdefer if (!awaited_download_frame) {
@@ -7315,7 +7315,7 @@ test "field access by string" {
 const std = @import("std");

 test "heap allocated frame" {
-    const frame = try std.heap.direct_allocator.create(@Frame(func));
+    const frame = try std.heap.page_allocator.create(@Frame(func));
     frame.* = async func();
 }

@@ -9341,7 +9341,7 @@ fn concat(allocator: *Allocator, a: []const u8, b: []const u8) ![]u8 {
 const std = @import("std");

 pub fn main() !void {
-    var arena = std.heap.ArenaAllocator.init(std.heap.direct_allocator);
+    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
     defer arena.deinit();

     const allocator = &arena.allocator;
@@ -152,8 +152,8 @@ const puts_per_thread = 500;
 const put_thread_count = 3;

 test "std.atomic.Queue" {
-    var plenty_of_memory = try std.heap.direct_allocator.alloc(u8, 300 * 1024);
-    defer std.heap.direct_allocator.free(plenty_of_memory);
+    var plenty_of_memory = try std.heap.page_allocator.alloc(u8, 300 * 1024);
+    defer std.heap.page_allocator.free(plenty_of_memory);

     var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
     var a = &fixed_buffer_allocator.allocator;
@@ -86,8 +86,8 @@ const puts_per_thread = 500;
 const put_thread_count = 3;

 test "std.atomic.stack" {
-    var plenty_of_memory = try std.heap.direct_allocator.alloc(u8, 300 * 1024);
-    defer std.heap.direct_allocator.free(plenty_of_memory);
+    var plenty_of_memory = try std.heap.page_allocator.alloc(u8, 300 * 1024);
+    defer std.heap.page_allocator.free(plenty_of_memory);

     var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
     var a = &fixed_buffer_allocator.allocator;
@@ -83,7 +83,7 @@ pub const BufMap = struct {
 };

 test "BufMap" {
-    var bufmap = BufMap.init(std.heap.direct_allocator);
+    var bufmap = BufMap.init(std.heap.page_allocator);
     defer bufmap.deinit();

     try bufmap.set("x", "1");
@@ -65,7 +65,7 @@ pub const BufSet = struct {
 };

 test "BufSet" {
-    var bufset = BufSet.init(std.heap.direct_allocator);
+    var bufset = BufSet.init(std.heap.page_allocator);
     defer bufset.deinit();

     try bufset.put("x");
@@ -969,7 +969,7 @@ pub const Builder = struct {
 };

 test "builder.findProgram compiles" {
-    const builder = try Builder.create(std.heap.direct_allocator, "zig", "zig-cache", "zig-cache");
+    const builder = try Builder.create(std.heap.page_allocator, "zig", "zig-cache", "zig-cache");
     _ = builder.findProgram([_][]const u8{}, [_][]const u8{}) catch null;
 }

@@ -2352,7 +2352,7 @@ var debug_info_arena_allocator: std.heap.ArenaAllocator = undefined;
 fn getDebugInfoAllocator() *mem.Allocator {
     if (debug_info_allocator) |a| return a;

-    debug_info_arena_allocator = std.heap.ArenaAllocator.init(std.heap.direct_allocator);
+    debug_info_arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
     debug_info_allocator = &debug_info_arena_allocator.allocator;
     return &debug_info_arena_allocator.allocator;
 }
@@ -1306,7 +1306,7 @@ test "write a file, watch it, write it again" {
     // TODO provide a way to run tests in evented I/O mode
     if (!std.io.is_async) return error.SkipZigTest;

-    const allocator = std.heap.direct_allocator;
+    const allocator = std.heap.page_allocator;

     // TODO move this into event loop too
     try os.makePath(allocator, test_tmp_dir);
@@ -115,7 +115,7 @@ test "std.event.Group" {
     // TODO provide a way to run tests in evented I/O mode
     if (!std.io.is_async) return error.SkipZigTest;

-    const handle = async testGroup(std.heap.direct_allocator);
+    const handle = async testGroup(std.heap.page_allocator);
 }

 async fn testGroup(allocator: *Allocator) void {
@@ -129,7 +129,7 @@ pub const Loop = struct {
     /// max(thread_count - 1, 0)
     pub fn initThreadPool(self: *Loop, thread_count: usize) !void {
         // TODO: https://github.com/ziglang/zig/issues/3539
-        const allocator = std.heap.direct_allocator;
+        const allocator = std.heap.page_allocator;
         self.* = Loop{
             .pending_event_count = 1,
             .allocator = allocator,
@@ -220,7 +220,7 @@ test "std.event.RwLock" {
     var lock = RwLock.init();
     defer lock.deinit();

-    const handle = testLock(std.heap.direct_allocator, &lock);
+    const handle = testLock(std.heap.page_allocator, &lock);

     const expected_result = [1]i32{shared_it_count * @intCast(i32, shared_test_data.len)} ** shared_test_data.len;
     testing.expectEqualSlices(i32, expected_result, shared_test_data);
@@ -235,8 +235,8 @@ test "hash pointer" {
 test "hash slice shallow" {
     // Allocate one array dynamically so that we're assured it is not merged
     // with the other by the optimization passes.
-    const array1 = try std.heap.direct_allocator.create([6]u32);
-    defer std.heap.direct_allocator.destroy(array1);
+    const array1 = try std.heap.page_allocator.create([6]u32);
+    defer std.heap.page_allocator.destroy(array1);
     array1.* = [_]u32{ 1, 2, 3, 4, 5, 6 };
     const array2 = [_]u32{ 1, 2, 3, 4, 5, 6 };
     const a = array1[0..];
@@ -251,8 +251,8 @@ test "hash slice shallow" {
 test "hash slice deep" {
     // Allocate one array dynamically so that we're assured it is not merged
     // with the other by the optimization passes.
-    const array1 = try std.heap.direct_allocator.create([6]u32);
-    defer std.heap.direct_allocator.destroy(array1);
+    const array1 = try std.heap.page_allocator.create([6]u32);
+    defer std.heap.page_allocator.destroy(array1);
     array1.* = [_]u32{ 1, 2, 3, 4, 5, 6 };
     const array2 = [_]u32{ 1, 2, 3, 4, 5, 6 };
     const a = array1[0..];
@@ -279,7 +279,7 @@ test "hash struct deep" {
         }
     };

-    const allocator = std.heap.direct_allocator;
+    const allocator = std.heap.page_allocator;
     const foo = try Foo.init(allocator, 123, 1.0, true);
     const bar = try Foo.init(allocator, 123, 1.0, true);
     const baz = try Foo.init(allocator, 123, 1.0, false);
@@ -414,7 +414,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
 }

 test "basic hash map usage" {
-    var map = AutoHashMap(i32, i32).init(std.heap.direct_allocator);
+    var map = AutoHashMap(i32, i32).init(std.heap.page_allocator);
     defer map.deinit();

     testing.expect((try map.put(1, 11)) == null);
@@ -458,7 +458,7 @@ test "basic hash map usage" {
 }

 test "iterator hash map" {
-    var reset_map = AutoHashMap(i32, i32).init(std.heap.direct_allocator);
+    var reset_map = AutoHashMap(i32, i32).init(std.heap.page_allocator);
     defer reset_map.deinit();

     try reset_map.putNoClobber(1, 11);
@@ -504,7 +504,7 @@ test "iterator hash map" {
 }

 test "ensure capacity" {
-    var map = AutoHashMap(i32, i32).init(std.heap.direct_allocator);
+    var map = AutoHashMap(i32, i32).init(std.heap.page_allocator);
     defer map.deinit();

     try map.ensureCapacity(20);
@@ -33,13 +33,16 @@ fn cShrink(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new

 /// This allocator makes a syscall directly for every allocation and free.
 /// Thread-safe and lock-free.
-pub const direct_allocator = &direct_allocator_state;
-var direct_allocator_state = Allocator{
-    .reallocFn = DirectAllocator.realloc,
-    .shrinkFn = DirectAllocator.shrink,
+pub const page_allocator = &page_allocator_state;
+var page_allocator_state = Allocator{
+    .reallocFn = PageAllocator.realloc,
+    .shrinkFn = PageAllocator.shrink,
 };

-const DirectAllocator = struct {
+/// Deprecated. Use `page_allocator`.
+pub const direct_allocator = page_allocator;
+
+const PageAllocator = struct {
     fn alloc(allocator: *Allocator, n: usize, alignment: u29) error{OutOfMemory}![]u8 {
         if (n == 0) return &[0]u8{};

@@ -484,7 +487,7 @@ pub const FixedBufferAllocator = struct {
     }
 };

-// FIXME: Exposed LLVM intrinsics is a bug
+// TODO Exposed LLVM intrinsics is a bug
 // See: https://github.com/ziglang/zig/issues/2291
 extern fn @"llvm.wasm.memory.size.i32"(u32) u32;
 extern fn @"llvm.wasm.memory.grow.i32"(u32, u32) i32;
@@ -725,8 +728,8 @@ test "c_allocator" {
     }
 }

-test "DirectAllocator" {
-    const allocator = direct_allocator;
+test "PageAllocator" {
+    const allocator = page_allocator;
     try testAllocator(allocator);
     try testAllocatorAligned(allocator, 16);
     try testAllocatorLargeAlignment(allocator);
@@ -735,7 +738,7 @@ test "DirectAllocator" {
     if (builtin.os == .windows) {
         // Trying really large alignment. As mentionned in the implementation,
         // VirtualAlloc returns 64K aligned addresses. We want to make sure
-        // DirectAllocator works beyond that, as it's not tested by
+        // PageAllocator works beyond that, as it's not tested by
         // `testAllocatorLargeAlignment`.
         const slice = try allocator.alignedAlloc(u8, 1 << 20, 128);
         slice[0] = 0x12;
@@ -758,7 +761,7 @@ test "HeapAllocator" {
 }

 test "ArenaAllocator" {
-    var arena_allocator = ArenaAllocator.init(direct_allocator);
+    var arena_allocator = ArenaAllocator.init(page_allocator);
     defer arena_allocator.deinit();

     try testAllocator(&arena_allocator.allocator);
@@ -125,8 +125,8 @@ const TestContext = struct {
 };

 test "std.Mutex" {
-    var plenty_of_memory = try std.heap.direct_allocator.alloc(u8, 300 * 1024);
-    defer std.heap.direct_allocator.free(plenty_of_memory);
+    var plenty_of_memory = try std.heap.page_allocator.alloc(u8, 300 * 1024);
+    defer std.heap.page_allocator.free(plenty_of_memory);

     var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
     var a = &fixed_buffer_allocator.allocator;
@@ -234,6 +234,6 @@ test "pipe" {
 }

 test "argsAlloc" {
-    var args = try std.process.argsAlloc(std.heap.direct_allocator);
-    std.heap.direct_allocator.free(args);
+    var args = try std.process.argsAlloc(std.heap.page_allocator);
+    std.process.argsFree(std.heap.page_allocator, args);
 }
@@ -622,7 +622,7 @@ test "PackedIntArray at end of available memory" {
         p: PackedArray,
     };

-    const allocator = std.heap.direct_allocator;
+    const allocator = std.heap.page_allocator;

     var pad = try allocator.create(Padded);
     defer allocator.destroy(pad);
@@ -636,7 +636,7 @@ test "PackedIntSlice at end of available memory" {
     }
     const PackedSlice = PackedIntSlice(u11);

-    const allocator = std.heap.direct_allocator;
+    const allocator = std.heap.page_allocator;

     var page = try allocator.alloc(u8, std.mem.page_size);
     defer allocator.free(page);
@@ -339,7 +339,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
 }

 test "std.SegmentedList" {
-    var a = std.heap.direct_allocator;
+    var a = std.heap.page_allocator;

     try testSegmentedList(0, a);
     try testSegmentedList(1, a);
@@ -17,7 +17,7 @@ pub fn main() !void {
     // one shot program. We don't need to waste time freeing memory and finding places to squish
     // bytes into. So we free everything all at once at the very end.

-    var arena = std.heap.ArenaAllocator.init(std.heap.direct_allocator);
+    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
     defer arena.deinit();

     const allocator = &arena.allocator;
@@ -4,7 +4,7 @@ const std = @import("std");
 pub fn main() !void {
     const stdout = &std.io.getStdOut().outStream().stream;

-    const args = try std.process.argsAlloc(std.heap.direct_allocator);
+    const args = try std.process.argsAlloc(std.heap.page_allocator);

     @fence(.SeqCst);
     var timer = try std.time.Timer.start();
@@ -837,7 +837,7 @@ test "error prereq - continuation expecting end-of-line" {

 // - tokenize input, emit textual representation, and compare to expect
 fn depTokenizer(input: []const u8, expect: []const u8) !void {
-    var arena_allocator = std.heap.ArenaAllocator.init(std.heap.direct_allocator);
+    var arena_allocator = std.heap.ArenaAllocator.init(std.heap.page_allocator);
     const arena = &arena_allocator.allocator;
     defer arena_allocator.deinit();

@@ -8,7 +8,7 @@ const ChildProcess = std.ChildProcess;
 var a: *std.mem.Allocator = undefined;

 pub fn main() !void {
-    var arena = std.heap.ArenaAllocator.init(std.heap.direct_allocator);
+    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
     defer arena.deinit();

     var arg_it = process.args();
@@ -410,8 +410,8 @@ test "heap allocated async function frame" {
         var x: i32 = 42;

         fn doTheTest() !void {
-            const frame = try std.heap.direct_allocator.create(@Frame(someFunc));
-            defer std.heap.direct_allocator.destroy(frame);
+            const frame = try std.heap.page_allocator.create(@Frame(someFunc));
+            defer std.heap.page_allocator.destroy(frame);

             expect(x == 42);
             frame.* = async someFunc();
@@ -671,7 +671,7 @@ fn testAsyncAwaitTypicalUsage(
         }

         fn amain() !void {
-            const allocator = std.heap.direct_allocator; // TODO once we have the debug allocator, use that, so that this can detect leaks
+            const allocator = std.heap.page_allocator; // TODO once we have the debug allocator, use that, so that this can detect leaks
             var download_frame = async fetchUrl(allocator, "https://example.com/");
             var download_awaited = false;
             errdefer if (!download_awaited) {
@@ -935,12 +935,12 @@ fn recursiveAsyncFunctionTest(comptime suspending_implementation: bool) type {
                _ = async amain(&result);
                return result;
            } else {
-                return fib(std.heap.direct_allocator, 10) catch unreachable;
+                return fib(std.heap.page_allocator, 10) catch unreachable;
            }
        }

        fn amain(result: *u32) void {
-            var x = async fib(std.heap.direct_allocator, 10);
+            var x = async fib(std.heap.page_allocator, 10);
            result.* = (await x) catch unreachable;
        }
    };
|
||||
async fn add(a: i32, b: i32) i32 {
|
||||
return a + b;
|
||||
}
|
||||
fn foo() i32 { return 1; }
|
||||
fn foo() i32 {
|
||||
return 1;
|
||||
}
|
||||
};
|
||||
_ = async S.atest();
|
||||
}
|
||||
@ -1147,7 +1149,9 @@ test "async fn call used in expression after a fn call" {
|
||||
async fn add(a: i32, b: i32) i32 {
|
||||
return a + b;
|
||||
}
|
||||
fn foo() i32 { return 1; }
|
||||
fn foo() i32 {
|
||||
return 1;
|
||||
}
|
||||
};
|
||||
_ = async S.atest();
|
||||
}
|
||||
@ -1201,14 +1205,13 @@ test "correctly spill when returning the error union result of another async fn"
|
||||
resume S.global_frame;
|
||||
}
|
||||
|
||||
|
||||
test "spill target expr in a for loop" {
|
||||
const S = struct {
|
||||
var global_frame: anyframe = undefined;
|
||||
|
||||
fn doTheTest() void {
|
||||
var foo = Foo{
|
||||
.slice = [_]i32{1, 2},
|
||||
.slice = [_]i32{ 1, 2 },
|
||||
};
|
||||
expect(atest(&foo) == 3);
|
||||
}
|
||||
@@ -1239,7 +1242,7 @@ test "spill target expr in a for loop, with a var decl in the loop body" {

         fn doTheTest() void {
             var foo = Foo{
-                .slice = [_]i32{1, 2},
+                .slice = [_]i32{ 1, 2 },
             };
             expect(atest(&foo) == 3);
         }
@@ -182,7 +182,7 @@ pub fn main() !void {
     const stdin_file = io.getStdIn();
     const stdout_file = io.getStdOut();

-    var arena = std.heap.ArenaAllocator.init(std.heap.direct_allocator);
+    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
     defer arena.deinit();

     global_allocator = &arena.allocator;
@@ -6,7 +6,7 @@ const fieldIndex = std.meta.fieldIndex;
 const TypeId = builtin.TypeId;

 pub fn main() anyerror!void {
-    var arena = std.heap.ArenaAllocator.init(std.heap.direct_allocator);
+    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
     defer arena.deinit();

     const allocator = &arena.allocator;
@@ -56,113 +56,113 @@ const MultiAbi = union(enum) {
 const glibc_targets = [_]LibCTarget{
     LibCTarget{
         .name = "aarch64_be-linux-gnu",
-        .arch = MultiArch {.specific = Arch.aarch64_be},
-        .abi = MultiAbi {.specific = Abi.gnu},
+        .arch = MultiArch{ .specific = Arch.aarch64_be },
+        .abi = MultiAbi{ .specific = Abi.gnu },
     },
     LibCTarget{
         .name = "aarch64-linux-gnu",
-        .arch = MultiArch {.specific = Arch.aarch64},
-        .abi = MultiAbi {.specific = Abi.gnu},
+        .arch = MultiArch{ .specific = Arch.aarch64 },
+        .abi = MultiAbi{ .specific = Abi.gnu },
     },
     LibCTarget{
         .name = "armeb-linux-gnueabi",
-        .arch = MultiArch {.specific = Arch.armeb},
-        .abi = MultiAbi {.specific = Abi.gnueabi},
+        .arch = MultiArch{ .specific = Arch.armeb },
+        .abi = MultiAbi{ .specific = Abi.gnueabi },
     },
     LibCTarget{
         .name = "armeb-linux-gnueabihf",
-        .arch = MultiArch {.specific = Arch.armeb},
-        .abi = MultiAbi {.specific = Abi.gnueabihf},
+        .arch = MultiArch{ .specific = Arch.armeb },
+        .abi = MultiAbi{ .specific = Abi.gnueabihf },
     },
     LibCTarget{
         .name = "arm-linux-gnueabi",
-        .arch = MultiArch {.specific = Arch.arm},
-        .abi = MultiAbi {.specific = Abi.gnueabi},
+        .arch = MultiArch{ .specific = Arch.arm },
+        .abi = MultiAbi{ .specific = Abi.gnueabi },
     },
     LibCTarget{
         .name = "arm-linux-gnueabihf",
-        .arch = MultiArch {.specific = Arch.arm},
-        .abi = MultiAbi {.specific = Abi.gnueabihf},
+        .arch = MultiArch{ .specific = Arch.arm },
+        .abi = MultiAbi{ .specific = Abi.gnueabihf },
     },
     LibCTarget{
         .name = "i686-linux-gnu",
-        .arch = MultiArch {.specific = Arch.i386},
-        .abi = MultiAbi {.specific = Abi.gnu},
+        .arch = MultiArch{ .specific = Arch.i386 },
+        .abi = MultiAbi{ .specific = Abi.gnu },
     },
     LibCTarget{
         .name = "mips64el-linux-gnu-n32",
-        .arch = MultiArch {.specific = Arch.mips64el},
-        .abi = MultiAbi {.specific = Abi.gnuabin32},
+        .arch = MultiArch{ .specific = Arch.mips64el },
+        .abi = MultiAbi{ .specific = Abi.gnuabin32 },
     },
     LibCTarget{
         .name = "mips64el-linux-gnu-n64",
-        .arch = MultiArch {.specific = Arch.mips64el},
-        .abi = MultiAbi {.specific = Abi.gnuabi64},
+        .arch = MultiArch{ .specific = Arch.mips64el },
+        .abi = MultiAbi{ .specific = Abi.gnuabi64 },
     },
     LibCTarget{
         .name = "mips64-linux-gnu-n32",
-        .arch = MultiArch {.specific = Arch.mips64},
-        .abi = MultiAbi {.specific = Abi.gnuabin32},
+        .arch = MultiArch{ .specific = Arch.mips64 },
+        .abi = MultiAbi{ .specific = Abi.gnuabin32 },
     },
     LibCTarget{
         .name = "mips64-linux-gnu-n64",
-        .arch = MultiArch {.specific = Arch.mips64},
-        .abi = MultiAbi {.specific = Abi.gnuabi64},
+        .arch = MultiArch{ .specific = Arch.mips64 },
+        .abi = MultiAbi{ .specific = Abi.gnuabi64 },
     },
     LibCTarget{
         .name = "mipsel-linux-gnu",
-        .arch = MultiArch {.specific = Arch.mipsel},
-        .abi = MultiAbi {.specific = Abi.gnu},
+        .arch = MultiArch{ .specific = Arch.mipsel },
+        .abi = MultiAbi{ .specific = Abi.gnu },
     },
     LibCTarget{
         .name = "mips-linux-gnu",
-        .arch = MultiArch {.specific = Arch.mips},
-        .abi = MultiAbi {.specific = Abi.gnu},
+        .arch = MultiArch{ .specific = Arch.mips },
+        .abi = MultiAbi{ .specific = Abi.gnu },
     },
     LibCTarget{
         .name = "powerpc64le-linux-gnu",
-        .arch = MultiArch {.specific = Arch.powerpc64le},
-        .abi = MultiAbi {.specific = Abi.gnu},
+        .arch = MultiArch{ .specific = Arch.powerpc64le },
+        .abi = MultiAbi{ .specific = Abi.gnu },
     },
     LibCTarget{
         .name = "powerpc64-linux-gnu",
-        .arch = MultiArch {.specific = Arch.powerpc64},
-        .abi = MultiAbi {.specific = Abi.gnu},
+        .arch = MultiArch{ .specific = Arch.powerpc64 },
+        .abi = MultiAbi{ .specific = Abi.gnu },
     },
     LibCTarget{
         .name = "powerpc-linux-gnu",
-        .arch = MultiArch {.specific = Arch.powerpc},
-        .abi = MultiAbi {.specific = Abi.gnu},
+        .arch = MultiArch{ .specific = Arch.powerpc },
+        .abi = MultiAbi{ .specific = Abi.gnu },
     },
     LibCTarget{
         .name = "riscv64-linux-gnu-rv64imac-lp64",
-        .arch = MultiArch {.specific = Arch.riscv64},
-        .abi = MultiAbi {.specific = Abi.gnu},
+        .arch = MultiArch{ .specific = Arch.riscv64 },
+        .abi = MultiAbi{ .specific = Abi.gnu },
     },
     LibCTarget{
         .name = "s390x-linux-gnu",
-        .arch = MultiArch {.specific = Arch.s390x},
-        .abi = MultiAbi {.specific = Abi.gnu},
+        .arch = MultiArch{ .specific = Arch.s390x },
+        .abi = MultiAbi{ .specific = Abi.gnu },
     },
     LibCTarget{
         .name = "sparc64-linux-gnu",
-        .arch = MultiArch {.specific = Arch.sparc},
-        .abi = MultiAbi {.specific = Abi.gnu},
+        .arch = MultiArch{ .specific = Arch.sparc },
+        .abi = MultiAbi{ .specific = Abi.gnu },
     },
     LibCTarget{
         .name = "sparcv9-linux-gnu",
-        .arch = MultiArch {.specific = Arch.sparcv9},
-        .abi = MultiAbi {.specific = Abi.gnu},
+        .arch = MultiArch{ .specific = Arch.sparcv9 },
+        .abi = MultiAbi{ .specific = Abi.gnu },
     },
     LibCTarget{
         .name = "x86_64-linux-gnu",
-        .arch = MultiArch {.specific = Arch.x86_64},
-        .abi = MultiAbi {.specific = Abi.gnu},
+        .arch = MultiArch{ .specific = Arch.x86_64 },
+        .abi = MultiAbi{ .specific = Abi.gnu },
     },
     LibCTarget{
         .name = "x86_64-linux-gnu-x32",
-        .arch = MultiArch {.specific = Arch.x86_64},
-        .abi = MultiAbi {.specific = Abi.gnux32},
+        .arch = MultiArch{ .specific = Arch.x86_64 },
+        .abi = MultiAbi{ .specific = Abi.gnux32 },
     },
 };

@@ -258,7 +258,7 @@ const LibCVendor = enum {
 };

 pub fn main() !void {
-    var arena = std.heap.ArenaAllocator.init(std.heap.direct_allocator);
+    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
     const allocator = &arena.allocator;
     const args = try std.process.argsAlloc(allocator);
     var search_paths = std.ArrayList([]const u8).init(allocator);
@@ -131,7 +131,7 @@ const Function = struct {
 };

 pub fn main() !void {
-    var arena = std.heap.ArenaAllocator.init(std.heap.direct_allocator);
+    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
     const allocator = &arena.allocator;
     const args = try std.process.argsAlloc(allocator);
     const in_glibc_dir = args[1]; // path to the unzipped tarball of glibc, e.g. ~/downloads/glibc-2.25