Merge remote-tracking branch 'origin/master' into llvm10

master
Andrew Kelley 2020-02-03 17:30:38 -05:00
commit d0b12d7726
No known key found for this signature in database
GPG Key ID: 7C5F548F728501A9
81 changed files with 12148 additions and 5933 deletions

View File

@ -1,10 +1,4 @@
image: freebsd/latest
packages:
- cmake
- py27-s3cmd
- wget
- curl
- jq
secrets:
- 6c60aaee-92e7-4e7d-812c-114817689b4d
- dd0bd962-7664-4d3e-b0f3-41c9ee96b8b8

View File

@ -3,6 +3,9 @@
set -x
set -e
sudo pkg update -f
yes | sudo pkg install cmake py27-s3cmd wget curl jq
ZIGDIR="$(pwd)"
CACHE_BASENAME="llvm+clang-10.0.0-freebsd-x86_64-release"
PREFIX="$HOME/$CACHE_BASENAME"

View File

@ -672,7 +672,8 @@ const TermState = enum {
test "term color" {
const input_bytes = "A\x1b[32;1mgreen\x1b[0mB";
const result = try termColor(std.debug.global_allocator, input_bytes);
const result = try termColor(std.testing.allocator, input_bytes);
defer std.testing.allocator.free(result);
testing.expectEqualSlices(u8, "A<span class=\"t32\">green</span>B", result);
}

View File

@ -5359,7 +5359,7 @@ const std = @import("std");
const assert = std.debug.assert;
test "turn HashMap into a set with void" {
var map = std.HashMap(i32, void, hash_i32, eql_i32).init(std.debug.global_allocator);
var map = std.HashMap(i32, void, hash_i32, eql_i32).init(std.testing.allocator);
defer map.deinit();
_ = try map.put(1, {});
@ -9247,9 +9247,8 @@ fn concat(allocator: *Allocator, a: []const u8, b: []const u8) ![]u8 {
In the above example, 100 bytes of stack memory are used to initialize a
{#syntax#}FixedBufferAllocator{#endsyntax#}, which is then passed to a function.
As a convenience there is a global {#syntax#}FixedBufferAllocator{#endsyntax#}
available for quick tests at {#syntax#}std.debug.global_allocator{#endsyntax#},
however it is deprecated and should be avoided in favor of directly using a
{#syntax#}FixedBufferAllocator{#endsyntax#} as in the example above.
available for quick tests at {#syntax#}std.testing.allocator{#endsyntax#},
which will also perform basic leak detection.
</p>
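A minimal test sketch (not part of this commit) of the pattern described above, using std.testing.allocator with a deferred free so its leak detection has nothing to report:

const std = @import("std");

test "testing allocator sketch" {
    // Allocate through the testing allocator; omitting the free below would
    // be reported as a leak when the test block ends.
    const buf = try std.testing.allocator.alloc(u8, 100);
    defer std.testing.allocator.free(buf);
    std.testing.expect(buf.len == 100);
}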
<p>
Currently Zig has no general purpose allocator, but there is
@ -9307,7 +9306,7 @@ pub fn main() !void {
</li>
<li>
Are you writing a test, and you want to make sure {#syntax#}error.OutOfMemory{#endsyntax#}
is handled correctly? In this case, use {#syntax#}std.debug.FailingAllocator{#endsyntax#}.
is handled correctly? In this case, use {#syntax#}std.testing.FailingAllocator{#endsyntax#}.
</li>
<li>
Finally, if none of the above apply, you need a general purpose allocator. Zig does not

View File

@ -0,0 +1,34 @@
;
; Definition file of PSAPI.DLL
; Automatic generated by gendef
; written by Kai Tietz 2008
;
LIBRARY "PSAPI.DLL"
EXPORTS
EmptyWorkingSet
EnumDeviceDrivers
EnumPageFilesA
EnumPageFilesW
EnumProcessModules
EnumProcessModulesEx
EnumProcesses
GetDeviceDriverBaseNameA
GetDeviceDriverBaseNameW
GetDeviceDriverFileNameA
GetDeviceDriverFileNameW
GetMappedFileNameA
GetMappedFileNameW
GetModuleBaseNameA
GetModuleBaseNameW
GetModuleFileNameExA
GetModuleFileNameExW
GetModuleInformation
GetPerformanceInfo
GetProcessImageFileNameA
GetProcessImageFileNameW
GetProcessMemoryInfo
GetWsChanges
GetWsChangesEx
InitializeProcessForWsWatch
QueryWorkingSet
QueryWorkingSetEx

View File

@ -0,0 +1,34 @@
;
; Definition file of PSAPI.DLL
; Automatic generated by gendef
; written by Kai Tietz 2008
;
LIBRARY "PSAPI.DLL"
EXPORTS
EmptyWorkingSet@4
EnumDeviceDrivers@12
EnumPageFilesA@8
EnumPageFilesW@8
EnumProcessModules@16
EnumProcessModulesEx@20
EnumProcesses@12
GetDeviceDriverBaseNameA@12
GetDeviceDriverBaseNameW@12
GetDeviceDriverFileNameA@12
GetDeviceDriverFileNameW@12
GetMappedFileNameA@16
GetMappedFileNameW@16
GetModuleBaseNameA@16
GetModuleBaseNameW@16
GetModuleFileNameExA@16
GetModuleFileNameExW@16
GetModuleInformation@16
GetPerformanceInfo@8
GetProcessImageFileNameA@12
GetProcessImageFileNameW@12
GetProcessMemoryInfo@12
GetWsChanges@12
GetWsChangesEx@12
InitializeProcessForWsWatch@4
QueryWorkingSet@12
QueryWorkingSetEx@12

View File

@ -320,7 +320,7 @@ test "std.ArrayList.basic" {
}
test "std.ArrayList.orderedRemove" {
var list = ArrayList(i32).init(debug.global_allocator);
var list = ArrayList(i32).init(testing.allocator);
defer list.deinit();
try list.append(1);
@ -347,7 +347,7 @@ test "std.ArrayList.orderedRemove" {
}
test "std.ArrayList.swapRemove" {
var list = ArrayList(i32).init(debug.global_allocator);
var list = ArrayList(i32).init(testing.allocator);
defer list.deinit();
try list.append(1);
@ -374,7 +374,7 @@ test "std.ArrayList.swapRemove" {
}
test "std.ArrayList.swapRemoveOrError" {
var list = ArrayList(i32).init(debug.global_allocator);
var list = ArrayList(i32).init(testing.allocator);
defer list.deinit();
// Test just after initialization
@ -402,7 +402,7 @@ test "std.ArrayList.swapRemoveOrError" {
}
test "std.ArrayList.insert" {
var list = ArrayList(i32).init(debug.global_allocator);
var list = ArrayList(i32).init(testing.allocator);
defer list.deinit();
try list.append(1);
@ -416,7 +416,7 @@ test "std.ArrayList.insert" {
}
test "std.ArrayList.insertSlice" {
var list = ArrayList(i32).init(debug.global_allocator);
var list = ArrayList(i32).init(testing.allocator);
defer list.deinit();
try list.append(1);
@ -443,7 +443,8 @@ const Item = struct {
};
test "std.ArrayList: ArrayList(T) of struct T" {
var root = Item{ .integer = 1, .sub_items = ArrayList(Item).init(debug.global_allocator) };
try root.sub_items.append(Item{ .integer = 42, .sub_items = ArrayList(Item).init(debug.global_allocator) });
var root = Item{ .integer = 1, .sub_items = ArrayList(Item).init(testing.allocator) };
defer root.sub_items.deinit();
try root.sub_items.append(Item{ .integer = 42, .sub_items = ArrayList(Item).init(testing.allocator) });
testing.expect(root.sub_items.items[0].integer == 42);
}

View File

@ -43,9 +43,10 @@ pub const BufMap = struct {
pub fn set(self: *BufMap, key: []const u8, value: []const u8) !void {
const value_copy = try self.copy(value);
errdefer self.free(value_copy);
// Avoid copying key if it already exists
const get_or_put = try self.hash_map.getOrPut(key);
if (!get_or_put.found_existing) {
if (get_or_put.found_existing) {
self.free(get_or_put.kv.value);
} else {
get_or_put.kv.key = self.copy(key) catch |err| {
_ = self.hash_map.remove(key);
return err;
@ -83,7 +84,7 @@ pub const BufMap = struct {
};
test "BufMap" {
var bufmap = BufMap.init(std.heap.page_allocator);
var bufmap = BufMap.init(std.testing.allocator);
defer bufmap.deinit();
try bufmap.set("x", "1");

View File

@ -65,7 +65,7 @@ pub const BufSet = struct {
};
test "BufSet" {
var bufset = BufSet.init(std.heap.page_allocator);
var bufset = BufSet.init(std.testing.allocator);
defer bufset.deinit();
try bufset.put("x");

View File

@ -150,7 +150,9 @@ pub const Buffer = struct {
};
test "simple Buffer" {
var buf = try Buffer.init(debug.global_allocator, "");
var buf = try Buffer.init(testing.allocator, "");
defer buf.deinit();
testing.expect(buf.len() == 0);
try buf.append("hello");
try buf.append(" ");
@ -159,6 +161,7 @@ test "simple Buffer" {
testing.expect(mem.eql(u8, mem.toSliceConst(u8, buf.toSliceConst().ptr), buf.toSliceConst()));
var buf2 = try Buffer.initFromBuffer(buf);
defer buf2.deinit();
testing.expect(buf.eql(buf2.toSliceConst()));
testing.expect(buf.startsWith("hell"));
@ -169,14 +172,16 @@ test "simple Buffer" {
}
test "Buffer.initSize" {
var buf = try Buffer.initSize(debug.global_allocator, 3);
var buf = try Buffer.initSize(testing.allocator, 3);
defer buf.deinit();
testing.expect(buf.len() == 3);
try buf.append("hello");
testing.expect(mem.eql(u8, buf.toSliceConst()[3..], "hello"));
}
test "Buffer.initCapacity" {
var buf = try Buffer.initCapacity(debug.global_allocator, 10);
var buf = try Buffer.initCapacity(testing.allocator, 10);
defer buf.deinit();
testing.expect(buf.len() == 0);
testing.expect(buf.capacity() >= 10);
const old_cap = buf.capacity();

View File

@ -21,6 +21,7 @@ pub const TranslateCStep = @import("build/translate_c.zig").TranslateCStep;
pub const WriteFileStep = @import("build/write_file.zig").WriteFileStep;
pub const RunStep = @import("build/run.zig").RunStep;
pub const CheckFileStep = @import("build/check_file.zig").CheckFileStep;
pub const InstallRawStep = @import("build/emit_raw.zig").InstallRawStep;
pub const Builder = struct {
install_tls: TopLevelStep,
@ -824,6 +825,10 @@ pub const Builder = struct {
self.getInstallStep().dependOn(&self.addInstallFileWithDir(src_path, .Lib, dest_rel_path).step);
}
pub fn installRaw(self: *Builder, artifact: *LibExeObjStep, dest_filename: []const u8) void {
self.getInstallStep().dependOn(&self.addInstallRaw(artifact, dest_filename).step);
}
///`dest_rel_path` is relative to install prefix path
pub fn addInstallFile(self: *Builder, src_path: []const u8, dest_rel_path: []const u8) *InstallFileStep {
return self.addInstallFileWithDir(src_path, .Prefix, dest_rel_path);
@ -839,6 +844,10 @@ pub const Builder = struct {
return self.addInstallFileWithDir(src_path, .Lib, dest_rel_path);
}
pub fn addInstallRaw(self: *Builder, artifact: *LibExeObjStep, dest_filename: []const u8) *InstallRawStep {
return InstallRawStep.create(self, artifact, dest_filename);
}
pub fn addInstallFileWithDir(
self: *Builder,
src_path: []const u8,
@ -1059,7 +1068,10 @@ pub const Builder = struct {
};
test "builder.findProgram compiles" {
// TODO: uncomment and fix the leak
// const builder = try Builder.create(std.testing.allocator, "zig", "zig-cache", "zig-cache");
const builder = try Builder.create(std.heap.page_allocator, "zig", "zig-cache", "zig-cache");
defer builder.destroy();
_ = builder.findProgram(&[_][]const u8{}, &[_][]const u8{}) catch null;
}
@ -1072,9 +1084,10 @@ pub const CrossTarget = std.Target.Cross;
/// Deprecated. Use `std.Target`.
pub const Target = std.Target;
const Pkg = struct {
pub const Pkg = struct {
name: []const u8,
path: []const u8,
dependencies: ?[]Pkg = null,
};
const CSourceFile = struct {
@ -1403,6 +1416,10 @@ pub const LibExeObjStep = struct {
self.builder.installArtifact(self);
}
pub fn installRaw(self: *LibExeObjStep, dest_filename: [] const u8) void {
self.builder.installRaw(self, dest_filename);
}
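A hypothetical build.zig fragment (artifact name and paths invented) showing how the installRaw helper above would be wired up; per InstallRawStep further down, the target must produce an ELF object:

const std = @import("std");

pub fn build(b: *std.build.Builder) void {
    const exe = b.addExecutable("firmware", "src/main.zig");
    exe.setBuildMode(b.standardReleaseOptions());
    // Install the ELF artifact and additionally emit a flat binary image
    // into the install prefix's bin directory.
    exe.install();
    exe.installRaw("firmware.bin");
}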
/// Creates a `RunStep` with an executable built with `addExecutable`.
/// Add command line arguments with `addArg`.
pub fn run(exe: *LibExeObjStep) *RunStep {
@ -1732,17 +1749,21 @@ pub const LibExeObjStep = struct {
}
pub fn addLibPath(self: *LibExeObjStep, path: []const u8) void {
self.lib_paths.append(path) catch unreachable;
self.lib_paths.append(self.builder.dupe(path)) catch unreachable;
}
pub fn addFrameworkDir(self: *LibExeObjStep, dir_path: []const u8) void {
self.framework_dirs.append(dir_path) catch unreachable;
self.framework_dirs.append(self.builder.dupe(dir_path)) catch unreachable;
}
pub fn addPackage(self: *LibExeObjStep, package: Pkg) void {
self.packages.append(package) catch unreachable;
}
pub fn addPackagePath(self: *LibExeObjStep, name: []const u8, pkg_index_path: []const u8) void {
self.packages.append(Pkg{
.name = name,
.path = pkg_index_path,
.name = self.builder.dupe(name),
.path = self.builder.dupe(pkg_index_path),
}) catch unreachable;
}
@ -2088,10 +2109,20 @@ pub const LibExeObjStep = struct {
},
}
for (self.packages.toSliceConst()) |pkg| {
zig_args.append("--pkg-begin") catch unreachable;
zig_args.append(pkg.name) catch unreachable;
zig_args.append(builder.pathFromRoot(pkg.path)) catch unreachable;
zig_args.append("--pkg-end") catch unreachable;
try zig_args.append("--pkg-begin");
try zig_args.append(pkg.name);
try zig_args.append(builder.pathFromRoot(pkg.path));
if (pkg.dependencies) |dependencies| {
for (dependencies) |sub_pkg| {
try zig_args.append("--pkg-begin");
try zig_args.append(sub_pkg.name);
try zig_args.append(builder.pathFromRoot(sub_pkg.path));
try zig_args.append("--pkg-end");
}
}
try zig_args.append("--pkg-end");
}
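A hypothetical build.zig fragment (package names and paths invented) exercising the new dependencies field on Pkg; the loop above then emits a nested --pkg-begin/--pkg-end pair for each sub-package:

const std = @import("std");
const Pkg = std.build.Pkg;

// Sub-packages are passed as a mutable slice because `dependencies`
// is declared as `?[]Pkg` above.
var client_deps = [_]Pkg{
    Pkg{ .name = "json", .path = "lib/json.zig" },
};

pub fn build(b: *std.build.Builder) void {
    const exe = b.addExecutable("app", "src/main.zig");
    exe.addPackage(Pkg{
        .name = "client",
        .path = "lib/client.zig",
        .dependencies = &client_deps,
    });
    exe.install();
}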
for (self.include_dirs.toSliceConst()) |include_dir| {

lib/std/build/emit_raw.zig (new file, 255 lines)
View File

@ -0,0 +1,255 @@
const std = @import("std");
const Allocator = std.mem.Allocator;
const ArenaAllocator = std.heap.ArenaAllocator;
const ArrayList = std.ArrayList;
const Builder = std.build.Builder;
const File = std.fs.File;
const InstallDir = std.build.InstallDir;
const LibExeObjStep = std.build.LibExeObjStep;
const Step = std.build.Step;
const elf = std.elf;
const fs = std.fs;
const io = std.io;
const sort = std.sort;
const warn = std.debug.warn;
const BinOutStream = io.OutStream(anyerror);
const BinSeekStream = io.SeekableStream(anyerror, anyerror);
const ElfSeekStream = io.SeekableStream(anyerror, anyerror);
const ElfInStream = io.InStream(anyerror);
const BinaryElfSection = struct {
elfOffset: u64,
binaryOffset: u64,
fileSize: usize,
segment: ?*BinaryElfSegment,
};
const BinaryElfSegment = struct {
physicalAddress: u64,
virtualAddress: u64,
elfOffset: u64,
binaryOffset: u64,
fileSize: usize,
firstSection: ?*BinaryElfSection,
};
const BinaryElfOutput = struct {
segments: ArrayList(*BinaryElfSegment),
sections: ArrayList(*BinaryElfSection),
const Self = @This();
pub fn init(allocator: *Allocator) Self {
return Self{
.segments = ArrayList(*BinaryElfSegment).init(allocator),
.sections = ArrayList(*BinaryElfSection).init(allocator),
};
}
pub fn deinit(self: *Self) void {
self.sections.deinit();
self.segments.deinit();
}
pub fn parseElf(self: *Self, elfFile: elf.Elf) !void {
const allocator = self.segments.allocator;
for (elfFile.section_headers) |section, i| {
if (sectionValidForOutput(section)) {
const newSection = try allocator.create(BinaryElfSection);
newSection.binaryOffset = 0;
newSection.elfOffset = section.sh_offset;
newSection.fileSize = @intCast(usize, section.sh_size);
newSection.segment = null;
try self.sections.append(newSection);
}
}
for (elfFile.program_headers) |programHeader, i| {
if (programHeader.p_type == elf.PT_LOAD) {
const newSegment = try allocator.create(BinaryElfSegment);
newSegment.physicalAddress = if (programHeader.p_paddr != 0) programHeader.p_paddr else programHeader.p_vaddr;
newSegment.virtualAddress = programHeader.p_vaddr;
newSegment.fileSize = @intCast(usize, programHeader.p_filesz);
newSegment.elfOffset = programHeader.p_offset;
newSegment.binaryOffset = 0;
newSegment.firstSection = null;
for (self.sections.toSlice()) |section| {
if (sectionWithinSegment(section, programHeader)) {
if (section.segment) |sectionSegment| {
if (sectionSegment.elfOffset > newSegment.elfOffset) {
section.segment = newSegment;
}
} else {
section.segment = newSegment;
}
if (newSegment.firstSection == null) {
newSegment.firstSection = section;
}
}
}
try self.segments.append(newSegment);
}
}
sort.sort(*BinaryElfSegment, self.segments.toSlice(), segmentSortCompare);
if (self.segments.len > 0) {
const firstSegment = self.segments.at(0);
if (firstSegment.firstSection) |firstSection| {
const diff = firstSection.elfOffset - firstSegment.elfOffset;
firstSegment.elfOffset += diff;
firstSegment.fileSize += diff;
firstSegment.physicalAddress += diff;
const basePhysicalAddress = firstSegment.physicalAddress;
for (self.segments.toSlice()) |segment| {
segment.binaryOffset = segment.physicalAddress - basePhysicalAddress;
}
}
}
for (self.sections.toSlice()) |section| {
if (section.segment) |segment| {
section.binaryOffset = segment.binaryOffset + (section.elfOffset - segment.elfOffset);
}
}
sort.sort(*BinaryElfSection, self.sections.toSlice(), sectionSortCompare);
}
fn sectionWithinSegment(section: *BinaryElfSection, segment: elf.ProgramHeader) bool {
return segment.p_offset <= section.elfOffset and (segment.p_offset + segment.p_filesz) >= (section.elfOffset + section.fileSize);
}
fn sectionValidForOutput(section: elf.SectionHeader) bool {
return section.sh_size > 0 and section.sh_type != elf.SHT_NOBITS and ((section.sh_flags & elf.SHF_ALLOC) == elf.SHF_ALLOC);
}
fn segmentSortCompare(left: *BinaryElfSegment, right: *BinaryElfSegment) bool {
if (left.physicalAddress < right.physicalAddress) {
return true;
}
if (left.physicalAddress > right.physicalAddress) {
return false;
}
return false;
}
fn sectionSortCompare(left: *BinaryElfSection, right: *BinaryElfSection) bool {
return left.binaryOffset < right.binaryOffset;
}
};
const WriteContext = struct {
inStream: *ElfInStream,
inSeekStream: *ElfSeekStream,
outStream: *BinOutStream,
outSeekStream: *BinSeekStream,
};
fn writeBinaryElfSection(allocator: *Allocator, context: WriteContext, section: *BinaryElfSection) !void {
var readBuffer = try allocator.alloc(u8, section.fileSize);
defer allocator.free(readBuffer);
try context.inSeekStream.seekTo(section.elfOffset);
_ = try context.inStream.read(readBuffer);
try context.outSeekStream.seekTo(section.binaryOffset);
try context.outStream.write(readBuffer);
}
fn emit_raw(allocator: *Allocator, elf_path: []const u8, raw_path: []const u8) !void {
var arenaAlloc = ArenaAllocator.init(allocator);
errdefer arenaAlloc.deinit();
var arena_allocator = &arenaAlloc.allocator;
const currentDir = fs.cwd();
var file = try currentDir.openFile(elf_path, File.OpenFlags{});
defer file.close();
var fileInStream = file.inStream();
var fileSeekStream = file.seekableStream();
var elfFile = try elf.Elf.openStream(allocator, @ptrCast(*ElfSeekStream, &fileSeekStream.stream), @ptrCast(*ElfInStream, &fileInStream.stream));
defer elfFile.close();
var outFile = try currentDir.createFile(raw_path, File.CreateFlags{});
defer outFile.close();
var outFileOutStream = outFile.outStream();
var outFileSeekStream = outFile.seekableStream();
const writeContext = WriteContext{
.inStream = @ptrCast(*ElfInStream, &fileInStream.stream),
.inSeekStream = @ptrCast(*ElfSeekStream, &fileSeekStream.stream),
.outStream = @ptrCast(*BinOutStream, &outFileOutStream.stream),
.outSeekStream = @ptrCast(*BinSeekStream, &outFileSeekStream.stream),
};
var binaryElfOutput = BinaryElfOutput.init(arena_allocator);
defer binaryElfOutput.deinit();
try binaryElfOutput.parseElf(elfFile);
for (binaryElfOutput.sections.toSlice()) |section| {
try writeBinaryElfSection(allocator, writeContext, section);
}
}
pub const InstallRawStep = struct {
step: Step,
builder: *Builder,
artifact: *LibExeObjStep,
dest_dir: InstallDir,
dest_filename: [] const u8,
const Self = @This();
pub fn create(builder: *Builder, artifact: *LibExeObjStep, dest_filename: [] const u8) *Self {
const self = builder.allocator.create(Self) catch unreachable;
self.* = Self{
.step = Step.init(builder.fmt("install raw binary {}", .{artifact.step.name}), builder.allocator, make),
.builder = builder,
.artifact = artifact,
.dest_dir = switch (artifact.kind) {
.Obj => unreachable,
.Test => unreachable,
.Exe => .Bin,
.Lib => unreachable,
},
.dest_filename = dest_filename,
};
self.step.dependOn(&artifact.step);
builder.pushInstalledFile(self.dest_dir, dest_filename);
return self;
}
fn make(step: *Step) !void {
const self = @fieldParentPtr(Self, "step", step);
const builder = self.builder;
if (self.artifact.target.getObjectFormat() != .elf) {
warn("InstallRawStep only works with ELF format.\n", .{});
return error.InvalidObjectFormat;
}
const full_src_path = self.artifact.getOutputPath();
const full_dest_path = builder.getInstallPath(self.dest_dir, self.dest_filename);
fs.makePath(builder.allocator, builder.getInstallPath(self.dest_dir, "")) catch unreachable;
try emit_raw(builder.allocator, full_src_path, full_dest_path);
}
};

View File

@ -1428,4 +1428,3 @@ const Parser = struct {
});
}
};

View File

@ -631,7 +631,7 @@ pub const Tokenizer = struct {
},
.BackSlashCr => switch (c) {
'\n' => {
state = .Start;
state = .Start;
},
else => {
result.id = .Invalid;

View File

@ -21,6 +21,8 @@ pub const Blake2s256 = blake2.Blake2s256;
pub const Blake2b384 = blake2.Blake2b384;
pub const Blake2b512 = blake2.Blake2b512;
pub const Blake3 = @import("crypto/blake3.zig").Blake3;
const hmac = @import("crypto/hmac.zig");
pub const HmacMd5 = hmac.HmacMd5;
pub const HmacSha1 = hmac.HmacSha1;
@ -44,6 +46,7 @@ pub const randomBytes = std.os.getrandom;
test "crypto" {
_ = @import("crypto/aes.zig");
_ = @import("crypto/blake2.zig");
_ = @import("crypto/blake3.zig");
_ = @import("crypto/chacha20.zig");
_ = @import("crypto/gimli.zig");
_ = @import("crypto/hmac.zig");

View File

@ -25,6 +25,7 @@ const hashes = [_]Crypto{
Crypto{ .ty = crypto.Sha3_512, .name = "sha3-512" },
Crypto{ .ty = crypto.Blake2s256, .name = "blake2s" },
Crypto{ .ty = crypto.Blake2b512, .name = "blake2b" },
Crypto{ .ty = crypto.Blake3, .name = "blake3" },
};
pub fn benchmarkHash(comptime Hash: var, comptime bytes: comptime_int) !u64 {

lib/std/crypto/blake3.zig (new file, 596 lines)
View File

@ -0,0 +1,596 @@
// Translated from BLAKE3 reference implementation.
// Source: https://github.com/BLAKE3-team/BLAKE3
const std = @import("../std.zig");
const fmt = std.fmt;
const math = std.math;
const mem = std.mem;
const testing = std.testing;
const ChunkIterator = struct {
slice: []u8,
chunk_len: usize,
fn init(slice: []u8, chunk_len: usize) ChunkIterator {
return ChunkIterator{
.slice = slice,
.chunk_len = chunk_len,
};
}
fn next(self: *ChunkIterator) ?[]u8 {
const next_chunk = self.slice[0..math.min(self.chunk_len, self.slice.len)];
self.slice = self.slice[next_chunk.len..];
return if (next_chunk.len > 0) next_chunk else null;
}
};
const OUT_LEN: usize = 32;
const KEY_LEN: usize = 32;
const BLOCK_LEN: usize = 64;
const CHUNK_LEN: usize = 1024;
const IV = [8]u32{
0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
};
const MSG_SCHEDULE = [7][16]u8{
[_]u8{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 },
[_]u8{ 2, 6, 3, 10, 7, 0, 4, 13, 1, 11, 12, 5, 9, 14, 15, 8 },
[_]u8{ 3, 4, 10, 12, 13, 2, 7, 14, 6, 5, 9, 0, 11, 15, 8, 1 },
[_]u8{ 10, 7, 12, 9, 14, 3, 13, 15, 4, 0, 11, 2, 5, 8, 1, 6 },
[_]u8{ 12, 13, 9, 11, 15, 10, 14, 8, 7, 2, 5, 3, 0, 1, 6, 4 },
[_]u8{ 9, 14, 11, 5, 8, 12, 15, 1, 13, 3, 0, 10, 2, 6, 4, 7 },
[_]u8{ 11, 15, 5, 0, 1, 9, 8, 6, 14, 10, 2, 12, 3, 4, 7, 13 },
};
// These are the internal flags that we use to domain separate root/non-root,
// chunk/parent, and chunk beginning/middle/end. These get set at the high end
// of the block flags word in the compression function, so their values start
// high and go down.
const CHUNK_START: u8 = 1 << 0;
const CHUNK_END: u8 = 1 << 1;
const PARENT: u8 = 1 << 2;
const ROOT: u8 = 1 << 3;
const KEYED_HASH: u8 = 1 << 4;
const DERIVE_KEY_CONTEXT: u8 = 1 << 5;
const DERIVE_KEY_MATERIAL: u8 = 1 << 6;
// The mixing function, G, which mixes either a column or a diagonal.
fn g(state: *[16]u32, a: usize, b: usize, c: usize, d: usize, mx: u32, my: u32) void {
_ = @addWithOverflow(u32, state[a], state[b], &state[a]);
_ = @addWithOverflow(u32, state[a], mx, &state[a]);
state[d] = math.rotr(u32, state[d] ^ state[a], 16);
_ = @addWithOverflow(u32, state[c], state[d], &state[c]);
state[b] = math.rotr(u32, state[b] ^ state[c], 12);
_ = @addWithOverflow(u32, state[a], state[b], &state[a]);
_ = @addWithOverflow(u32, state[a], my, &state[a]);
state[d] = math.rotr(u32, state[d] ^ state[a], 8);
_ = @addWithOverflow(u32, state[c], state[d], &state[c]);
state[b] = math.rotr(u32, state[b] ^ state[c], 7);
}
fn round(state: *[16]u32, msg: [16]u32, schedule: [16]u8) void {
// Mix the columns.
g(state, 0, 4, 8, 12, msg[schedule[0]], msg[schedule[1]]);
g(state, 1, 5, 9, 13, msg[schedule[2]], msg[schedule[3]]);
g(state, 2, 6, 10, 14, msg[schedule[4]], msg[schedule[5]]);
g(state, 3, 7, 11, 15, msg[schedule[6]], msg[schedule[7]]);
// Mix the diagonals.
g(state, 0, 5, 10, 15, msg[schedule[8]], msg[schedule[9]]);
g(state, 1, 6, 11, 12, msg[schedule[10]], msg[schedule[11]]);
g(state, 2, 7, 8, 13, msg[schedule[12]], msg[schedule[13]]);
g(state, 3, 4, 9, 14, msg[schedule[14]], msg[schedule[15]]);
}
fn compress(
chaining_value: [8]u32,
block_words: [16]u32,
block_len: u32,
counter: u64,
flags: u8,
) [16]u32 {
var state = [16]u32{
chaining_value[0],
chaining_value[1],
chaining_value[2],
chaining_value[3],
chaining_value[4],
chaining_value[5],
chaining_value[6],
chaining_value[7],
IV[0],
IV[1],
IV[2],
IV[3],
@truncate(u32, counter),
@truncate(u32, counter >> 32),
block_len,
flags,
};
for (MSG_SCHEDULE) |schedule| {
round(&state, block_words, schedule);
}
for (chaining_value) |_, i| {
state[i] ^= state[i + 8];
state[i + 8] ^= chaining_value[i];
}
return state;
}
fn first_8_words(words: [16]u32) [8]u32 {
return @ptrCast(*const [8]u32, &words).*;
}
fn words_from_little_endian_bytes(words: []u32, bytes: []const u8) void {
var byte_slice = bytes;
for (words) |*word| {
word.* = mem.readIntSliceLittle(u32, byte_slice);
byte_slice = byte_slice[4..];
}
}
// Each chunk or parent node can produce either an 8-word chaining value or, by
// setting the ROOT flag, any number of final output bytes. The Output struct
// captures the state just prior to choosing between those two possibilities.
const Output = struct {
input_chaining_value: [8]u32,
block_words: [16]u32,
block_len: u32,
counter: u64,
flags: u8,
fn chaining_value(self: *const Output) [8]u32 {
return first_8_words(compress(
self.input_chaining_value,
self.block_words,
self.block_len,
self.counter,
self.flags,
));
}
fn root_output_bytes(self: *const Output, output: []u8) void {
var out_block_it = ChunkIterator.init(output, 2 * OUT_LEN);
var output_block_counter: usize = 0;
while (out_block_it.next()) |out_block| {
var words = compress(
self.input_chaining_value,
self.block_words,
self.block_len,
output_block_counter,
self.flags | ROOT,
);
var out_word_it = ChunkIterator.init(out_block, 4);
var word_counter: usize = 0;
while (out_word_it.next()) |out_word| {
var word_bytes: [4]u8 = undefined;
mem.writeIntLittle(u32, &word_bytes, words[word_counter]);
mem.copy(u8, out_word, word_bytes[0..out_word.len]);
word_counter += 1;
}
output_block_counter += 1;
}
}
};
const ChunkState = struct {
chaining_value: [8]u32,
chunk_counter: u64,
block: [BLOCK_LEN]u8 = [_]u8{0} ** BLOCK_LEN,
block_len: u8 = 0,
blocks_compressed: u8 = 0,
flags: u8,
fn init(key: [8]u32, chunk_counter: u64, flags: u8) ChunkState {
return ChunkState{
.chaining_value = key,
.chunk_counter = chunk_counter,
.flags = flags,
};
}
fn len(self: *const ChunkState) usize {
return BLOCK_LEN * @as(usize, self.blocks_compressed) + @as(usize, self.block_len);
}
fn fill_block_buf(self: *ChunkState, input: []const u8) []const u8 {
const want = BLOCK_LEN - self.block_len;
const take = math.min(want, input.len);
mem.copy(u8, self.block[self.block_len..][0..take], input[0..take]);
self.block_len += @truncate(u8, take);
return input[take..];
}
fn start_flag(self: *const ChunkState) u8 {
return if (self.blocks_compressed == 0) CHUNK_START else 0;
}
fn update(self: *ChunkState, input_slice: []const u8) void {
var input = input_slice;
while (input.len > 0) {
// If the block buffer is full, compress it and clear it. More
// input is coming, so this compression is not CHUNK_END.
if (self.block_len == BLOCK_LEN) {
var block_words: [16]u32 = undefined;
words_from_little_endian_bytes(block_words[0..], self.block[0..]);
self.chaining_value = first_8_words(compress(
self.chaining_value,
block_words,
BLOCK_LEN,
self.chunk_counter,
self.flags | self.start_flag(),
));
self.blocks_compressed += 1;
self.block = [_]u8{0} ** BLOCK_LEN;
self.block_len = 0;
}
// Copy input bytes into the block buffer.
input = self.fill_block_buf(input);
}
}
fn output(self: *const ChunkState) Output {
var block_words: [16]u32 = undefined;
words_from_little_endian_bytes(block_words[0..], self.block[0..]);
return Output{
.input_chaining_value = self.chaining_value,
.block_words = block_words,
.block_len = self.block_len,
.counter = self.chunk_counter,
.flags = self.flags | self.start_flag() | CHUNK_END,
};
}
};
fn parent_output(
left_child_cv: [8]u32,
right_child_cv: [8]u32,
key: [8]u32,
flags: u8,
) Output {
var block_words: [16]u32 = undefined;
mem.copy(u32, block_words[0..8], left_child_cv[0..]);
mem.copy(u32, block_words[8..], right_child_cv[0..]);
return Output{
.input_chaining_value = key,
.block_words = block_words,
.block_len = BLOCK_LEN, // Always BLOCK_LEN (64) for parent nodes.
.counter = 0, // Always 0 for parent nodes.
.flags = PARENT | flags,
};
}
fn parent_cv(
left_child_cv: [8]u32,
right_child_cv: [8]u32,
key: [8]u32,
flags: u8,
) [8]u32 {
return parent_output(left_child_cv, right_child_cv, key, flags).chaining_value();
}
/// An incremental hasher that can accept any number of writes.
pub const Blake3 = struct {
chunk_state: ChunkState,
key: [8]u32,
cv_stack: [54][8]u32 = undefined, // Space for 54 subtree chaining values:
cv_stack_len: u8 = 0, // 2^54 * CHUNK_LEN = 2^64
flags: u8,
pub const digest_length = OUT_LEN;
pub const block_length = BLOCK_LEN;
fn init_internal(key: [8]u32, flags: u8) Blake3 {
return Blake3{
.chunk_state = ChunkState.init(key, 0, flags),
.key = key,
.flags = flags,
};
}
/// Construct a new `Blake3` for the regular hash function.
pub fn init() Blake3 {
return Blake3.init_internal(IV, 0);
}
/// Construct a new `Blake3` for the keyed hash function.
pub fn init_keyed(key: [KEY_LEN]u8) Blake3 {
var key_words: [8]u32 = undefined;
words_from_little_endian_bytes(key_words[0..], key[0..]);
return Blake3.init_internal(key_words, KEYED_HASH);
}
/// Construct a new `Blake3` for the key derivation function. The context
/// string should be hardcoded, globally unique, and application-specific.
pub fn init_derive_key(context: []const u8) Blake3 {
var context_hasher = Blake3.init_internal(IV, DERIVE_KEY_CONTEXT);
context_hasher.update(context);
var context_key: [KEY_LEN]u8 = undefined;
context_hasher.final(context_key[0..]);
var context_key_words: [8]u32 = undefined;
words_from_little_endian_bytes(context_key_words[0..], context_key[0..]);
return Blake3.init_internal(context_key_words, DERIVE_KEY_MATERIAL);
}
pub fn hash(in: []const u8, out: []u8) void {
var hasher = Blake3.init();
hasher.update(in);
hasher.final(out);
}
/// Reset the `Blake3` to its initial state.
pub fn reset(self: *Blake3) void {
self.chunk_state = ChunkState.init(self.key, 0, self.flags);
self.cv_stack_len = 0;
}
fn push_cv(self: *Blake3, cv: [8]u32) void {
self.cv_stack[self.cv_stack_len] = cv;
self.cv_stack_len += 1;
}
fn pop_cv(self: *Blake3) [8]u32 {
self.cv_stack_len -= 1;
return self.cv_stack[self.cv_stack_len];
}
// Section 5.1.2 of the BLAKE3 spec explains this algorithm in more detail.
fn add_chunk_chaining_value(self: *Blake3, first_cv: [8]u32, total_chunks: u64) void {
// This chunk might complete some subtrees. For each completed subtree,
// its left child will be the current top entry in the CV stack, and
// its right child will be the current value of `new_cv`. Pop each left
// child off the stack, merge it with `new_cv`, and overwrite `new_cv`
// with the result. After all these merges, push the final value of
// `new_cv` onto the stack. The number of completed subtrees is given
// by the number of trailing 0-bits in the new total number of chunks.
var new_cv = first_cv;
var chunk_counter = total_chunks;
while (chunk_counter & 1 == 0) {
new_cv = parent_cv(self.pop_cv(), new_cv, self.key, self.flags);
chunk_counter >>= 1;
}
self.push_cv(new_cv);
}
/// Add input to the hash state. This can be called any number of times.
pub fn update(self: *Blake3, input_slice: []const u8) void {
var input = input_slice;
while (input.len > 0) {
// If the current chunk is complete, finalize it and reset the
// chunk state. More input is coming, so this chunk is not ROOT.
if (self.chunk_state.len() == CHUNK_LEN) {
const chunk_cv = self.chunk_state.output().chaining_value();
const total_chunks = self.chunk_state.chunk_counter + 1;
self.add_chunk_chaining_value(chunk_cv, total_chunks);
self.chunk_state = ChunkState.init(self.key, total_chunks, self.flags);
}
// Compress input bytes into the current chunk state.
const want = CHUNK_LEN - self.chunk_state.len();
const take = math.min(want, input.len);
self.chunk_state.update(input[0..take]);
input = input[take..];
}
}
/// Finalize the hash and write any number of output bytes.
pub fn final(self: *const Blake3, out_slice: []u8) void {
// Starting with the Output from the current chunk, compute all the
// parent chaining values along the right edge of the tree, until we
// have the root Output.
var output = self.chunk_state.output();
var parent_nodes_remaining: usize = self.cv_stack_len;
while (parent_nodes_remaining > 0) {
parent_nodes_remaining -= 1;
output = parent_output(
self.cv_stack[parent_nodes_remaining],
output.chaining_value(),
self.key,
self.flags,
);
}
output.root_output_bytes(out_slice);
}
};
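An illustrative sketch (not part of this commit) of the public Blake3 API defined above, used both incrementally and through the one-shot hash helper:

test "Blake3 usage sketch" {
    // Incremental: feed input in several writes, then read the digest.
    var hasher = Blake3.init();
    hasher.update("hello ");
    hasher.update("world");
    var out: [Blake3.digest_length]u8 = undefined;
    hasher.final(out[0..]);

    // One-shot convenience wrapper over the same three calls.
    var out2: [Blake3.digest_length]u8 = undefined;
    Blake3.hash("hello world", out2[0..]);
    testing.expectEqual(out, out2);
}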
// Use named type declarations to work around a crash with anonymous structs (issue #4373).
const ReferenceTest = struct {
key: *const [KEY_LEN]u8,
context_string: []const u8,
cases: []const ReferenceTestCase,
};
const ReferenceTestCase = struct {
input_len: usize,
hash: *const [262]u8,
keyed_hash: *const [262]u8,
derive_key: *const [262]u8,
};
// Each test is an input length and three outputs, one for each of the `hash`, `keyed_hash`, and
// `derive_key` modes. The input in each case is filled with a 251-byte-long repeating pattern:
// 0, 1, 2, ..., 249, 250, 0, 1, ... The key used with `keyed_hash` is the 32-byte ASCII string
// given in the `key` field below. For `derive_key`, the test input is used as the input key, and
// the context string is 'BLAKE3 2019-12-27 16:29:52 test vectors context'. (As good practice for
// following the security requirements of `derive_key`, test runners should make that context
// string a hardcoded constant, and we do not provide it in machine-readable form.) Outputs are
// encoded as hexadecimal. Each case is an extended output, and implementations should also check
// that the first 32 bytes match their default-length output.
//
// Source: https://github.com/BLAKE3-team/BLAKE3/blob/92d421dea1a89e2f079f4dbd93b0dab41234b279/test_vectors/test_vectors.json
const reference_test = ReferenceTest{
.key = "whats the Elvish word for friend",
.context_string = "BLAKE3 2019-12-27 16:29:52 test vectors context",
.cases = &[_]ReferenceTestCase{
.{
.input_len = 0,
.hash = "af1349b9f5f9a1a6a0404dea36dcc9499bcb25c9adc112b7cc9a93cae41f3262e00f03e7b69af26b7faaf09fcd333050338ddfe085b8cc869ca98b206c08243a26f5487789e8f660afe6c99ef9e0c52b92e7393024a80459cf91f476f9ffdbda7001c22e159b402631f277ca96f2defdf1078282314e763699a31c5363165421cce14d",
.keyed_hash = "92b2b75604ed3c761f9d6f62392c8a9227ad0ea3f09573e783f1498a4ed60d26b18171a2f22a4b94822c701f107153dba24918c4bae4d2945c20ece13387627d3b73cbf97b797d5e59948c7ef788f54372df45e45e4293c7dc18c1d41144a9758be58960856be1eabbe22c2653190de560ca3b2ac4aa692a9210694254c371e851bc8f",
.derive_key = "2cc39783c223154fea8dfb7c1b1660f2ac2dcbd1c1de8277b0b0dd39b7e50d7d905630c8be290dfcf3e6842f13bddd573c098c3f17361f1f206b8cad9d088aa4a3f746752c6b0ce6a83b0da81d59649257cdf8eb3e9f7d4998e41021fac119deefb896224ac99f860011f73609e6e0e4540f93b273e56547dfd3aa1a035ba6689d89a0",
},
.{
.input_len = 1,
.hash = "2d3adedff11b61f14c886e35afa036736dcd87a74d27b5c1510225d0f592e213c3a6cb8bf623e20cdb535f8d1a5ffb86342d9c0b64aca3bce1d31f60adfa137b358ad4d79f97b47c3d5e79f179df87a3b9776ef8325f8329886ba42f07fb138bb502f4081cbcec3195c5871e6c23e2cc97d3c69a613eba131e5f1351f3f1da786545e5",
.keyed_hash = "6d7878dfff2f485635d39013278ae14f1454b8c0a3a2d34bc1ab38228a80c95b6568c0490609413006fbd428eb3fd14e7756d90f73a4725fad147f7bf70fd61c4e0cf7074885e92b0e3f125978b4154986d4fb202a3f331a3fb6cf349a3a70e49990f98fe4289761c8602c4e6ab1138d31d3b62218078b2f3ba9a88e1d08d0dd4cea11",
.derive_key = "b3e2e340a117a499c6cf2398a19ee0d29cca2bb7404c73063382693bf66cb06c5827b91bf889b6b97c5477f535361caefca0b5d8c4746441c57617111933158950670f9aa8a05d791daae10ac683cbef8faf897c84e6114a59d2173c3f417023a35d6983f2c7dfa57e7fc559ad751dbfb9ffab39c2ef8c4aafebc9ae973a64f0c76551",
},
.{
.input_len = 1023,
.hash = "10108970eeda3eb932baac1428c7a2163b0e924c9a9e25b35bba72b28f70bd11a182d27a591b05592b15607500e1e8dd56bc6c7fc063715b7a1d737df5bad3339c56778957d870eb9717b57ea3d9fb68d1b55127bba6a906a4a24bbd5acb2d123a37b28f9e9a81bbaae360d58f85e5fc9d75f7c370a0cc09b6522d9c8d822f2f28f485",
.keyed_hash = "c951ecdf03288d0fcc96ee3413563d8a6d3589547f2c2fb36d9786470f1b9d6e890316d2e6d8b8c25b0a5b2180f94fb1a158ef508c3cde45e2966bd796a696d3e13efd86259d756387d9becf5c8bf1ce2192b87025152907b6d8cc33d17826d8b7b9bc97e38c3c85108ef09f013e01c229c20a83d9e8efac5b37470da28575fd755a10",
.derive_key = "74a16c1c3d44368a86e1ca6df64be6a2f64cce8f09220787450722d85725dea59c413264404661e9e4d955409dfe4ad3aa487871bcd454ed12abfe2c2b1eb7757588cf6cb18d2eccad49e018c0d0fec323bec82bf1644c6325717d13ea712e6840d3e6e730d35553f59eff5377a9c350bcc1556694b924b858f329c44ee64b884ef00d",
},
.{
.input_len = 1024,
.hash = "42214739f095a406f3fc83deb889744ac00df831c10daa55189b5d121c855af71cf8107265ecdaf8505b95d8fcec83a98a6a96ea5109d2c179c47a387ffbb404756f6eeae7883b446b70ebb144527c2075ab8ab204c0086bb22b7c93d465efc57f8d917f0b385c6df265e77003b85102967486ed57db5c5ca170ba441427ed9afa684e",
.keyed_hash = "75c46f6f3d9eb4f55ecaaee480db732e6c2105546f1e675003687c31719c7ba4a78bc838c72852d4f49c864acb7adafe2478e824afe51c8919d06168414c265f298a8094b1ad813a9b8614acabac321f24ce61c5a5346eb519520d38ecc43e89b5000236df0597243e4d2493fd626730e2ba17ac4d8824d09d1a4a8f57b8227778e2de",
.derive_key = "7356cd7720d5b66b6d0697eb3177d9f8d73a4a5c5e968896eb6a6896843027066c23b601d3ddfb391e90d5c8eccdef4ae2a264bce9e612ba15e2bc9d654af1481b2e75dbabe615974f1070bba84d56853265a34330b4766f8e75edd1f4a1650476c10802f22b64bd3919d246ba20a17558bc51c199efdec67e80a227251808d8ce5bad",
},
.{
.input_len = 1025,
.hash = "d00278ae47eb27b34faecf67b4fe263f82d5412916c1ffd97c8cb7fb814b8444f4c4a22b4b399155358a994e52bf255de60035742ec71bd08ac275a1b51cc6bfe332b0ef84b409108cda080e6269ed4b3e2c3f7d722aa4cdc98d16deb554e5627be8f955c98e1d5f9565a9194cad0c4285f93700062d9595adb992ae68ff12800ab67a",
.keyed_hash = "357dc55de0c7e382c900fd6e320acc04146be01db6a8ce7210b7189bd664ea69362396b77fdc0d2634a552970843722066c3c15902ae5097e00ff53f1e116f1cd5352720113a837ab2452cafbde4d54085d9cf5d21ca613071551b25d52e69d6c81123872b6f19cd3bc1333edf0c52b94de23ba772cf82636cff4542540a7738d5b930",
.derive_key = "effaa245f065fbf82ac186839a249707c3bddf6d3fdda22d1b95a3c970379bcb5d31013a167509e9066273ab6e2123bc835b408b067d88f96addb550d96b6852dad38e320b9d940f86db74d398c770f462118b35d2724efa13da97194491d96dd37c3c09cbef665953f2ee85ec83d88b88d11547a6f911c8217cca46defa2751e7f3ad",
},
.{
.input_len = 2048,
.hash = "e776b6028c7cd22a4d0ba182a8bf62205d2ef576467e838ed6f2529b85fba24a9a60bf80001410ec9eea6698cd537939fad4749edd484cb541aced55cd9bf54764d063f23f6f1e32e12958ba5cfeb1bf618ad094266d4fc3c968c2088f677454c288c67ba0dba337b9d91c7e1ba586dc9a5bc2d5e90c14f53a8863ac75655461cea8f9",
.keyed_hash = "879cf1fa2ea0e79126cb1063617a05b6ad9d0b696d0d757cf053439f60a99dd10173b961cd574288194b23ece278c330fbb8585485e74967f31352a8183aa782b2b22f26cdcadb61eed1a5bc144b8198fbb0c13abbf8e3192c145d0a5c21633b0ef86054f42809df823389ee40811a5910dcbd1018af31c3b43aa55201ed4edaac74fe",
.derive_key = "7b2945cb4fef70885cc5d78a87bf6f6207dd901ff239201351ffac04e1088a23e2c11a1ebffcea4d80447867b61badb1383d842d4e79645d48dd82ccba290769caa7af8eaa1bd78a2a5e6e94fbdab78d9c7b74e894879f6a515257ccf6f95056f4e25390f24f6b35ffbb74b766202569b1d797f2d4bd9d17524c720107f985f4ddc583",
},
.{
.input_len = 2049,
.hash = "5f4d72f40d7a5f82b15ca2b2e44b1de3c2ef86c426c95c1af0b687952256303096de31d71d74103403822a2e0bc1eb193e7aecc9643a76b7bbc0c9f9c52e8783aae98764ca468962b5c2ec92f0c74eb5448d519713e09413719431c802f948dd5d90425a4ecdadece9eb178d80f26efccae630734dff63340285adec2aed3b51073ad3",
.keyed_hash = "9f29700902f7c86e514ddc4df1e3049f258b2472b6dd5267f61bf13983b78dd5f9a88abfefdfa1e00b418971f2b39c64ca621e8eb37fceac57fd0c8fc8e117d43b81447be22d5d8186f8f5919ba6bcc6846bd7d50726c06d245672c2ad4f61702c646499ee1173daa061ffe15bf45a631e2946d616a4c345822f1151284712f76b2b0e",
.derive_key = "2ea477c5515cc3dd606512ee72bb3e0e758cfae7232826f35fb98ca1bcbdf27316d8e9e79081a80b046b60f6a263616f33ca464bd78d79fa18200d06c7fc9bffd808cc4755277a7d5e09da0f29ed150f6537ea9bed946227ff184cc66a72a5f8c1e4bd8b04e81cf40fe6dc4427ad5678311a61f4ffc39d195589bdbc670f63ae70f4b6",
},
.{
.input_len = 3072,
.hash = "b98cb0ff3623be03326b373de6b9095218513e64f1ee2edd2525c7ad1e5cffd29a3f6b0b978d6608335c09dc94ccf682f9951cdfc501bfe47b9c9189a6fc7b404d120258506341a6d802857322fbd20d3e5dae05b95c88793fa83db1cb08e7d8008d1599b6209d78336e24839724c191b2a52a80448306e0daa84a3fdb566661a37e11",
.keyed_hash = "044a0e7b172a312dc02a4c9a818c036ffa2776368d7f528268d2e6b5df19177022f302d0529e4174cc507c463671217975e81dab02b8fdeb0d7ccc7568dd22574c783a76be215441b32e91b9a904be8ea81f7a0afd14bad8ee7c8efc305ace5d3dd61b996febe8da4f56ca0919359a7533216e2999fc87ff7d8f176fbecb3d6f34278b",
.derive_key = "050df97f8c2ead654d9bb3ab8c9178edcd902a32f8495949feadcc1e0480c46b3604131bbd6e3ba573b6dd682fa0a63e5b165d39fc43a625d00207607a2bfeb65ff1d29292152e26b298868e3b87be95d6458f6f2ce6118437b632415abe6ad522874bcd79e4030a5e7bad2efa90a7a7c67e93f0a18fb28369d0a9329ab5c24134ccb0",
},
.{
.input_len = 3073,
.hash = "7124b49501012f81cc7f11ca069ec9226cecb8a2c850cfe644e327d22d3e1cd39a27ae3b79d68d89da9bf25bc27139ae65a324918a5f9b7828181e52cf373c84f35b639b7fccbb985b6f2fa56aea0c18f531203497b8bbd3a07ceb5926f1cab74d14bd66486d9a91eba99059a98bd1cd25876b2af5a76c3e9eed554ed72ea952b603bf",
.keyed_hash = "68dede9bef00ba89e43f31a6825f4cf433389fedae75c04ee9f0cf16a427c95a96d6da3fe985054d3478865be9a092250839a697bbda74e279e8a9e69f0025e4cfddd6cfb434b1cd9543aaf97c635d1b451a4386041e4bb100f5e45407cbbc24fa53ea2de3536ccb329e4eb9466ec37093a42cf62b82903c696a93a50b702c80f3c3c5",
.derive_key = "72613c9ec9ff7e40f8f5c173784c532ad852e827dba2bf85b2ab4b76f7079081576288e552647a9d86481c2cae75c2dd4e7c5195fb9ada1ef50e9c5098c249d743929191441301c69e1f48505a4305ec1778450ee48b8e69dc23a25960fe33070ea549119599760a8a2d28aeca06b8c5e9ba58bc19e11fe57b6ee98aa44b2a8e6b14a5",
},
.{
.input_len = 4096,
.hash = "015094013f57a5277b59d8475c0501042c0b642e531b0a1c8f58d2163229e9690289e9409ddb1b99768eafe1623da896faf7e1114bebeadc1be30829b6f8af707d85c298f4f0ff4d9438aef948335612ae921e76d411c3a9111df62d27eaf871959ae0062b5492a0feb98ef3ed4af277f5395172dbe5c311918ea0074ce0036454f620",
.keyed_hash = "befc660aea2f1718884cd8deb9902811d332f4fc4a38cf7c7300d597a081bfc0bbb64a36edb564e01e4b4aaf3b060092a6b838bea44afebd2deb8298fa562b7b597c757b9df4c911c3ca462e2ac89e9a787357aaf74c3b56d5c07bc93ce899568a3eb17d9250c20f6c5f6c1e792ec9a2dcb715398d5a6ec6d5c54f586a00403a1af1de",
.derive_key = "1e0d7f3db8c414c97c6307cbda6cd27ac3b030949da8e23be1a1a924ad2f25b9d78038f7b198596c6cc4a9ccf93223c08722d684f240ff6569075ed81591fd93f9fff1110b3a75bc67e426012e5588959cc5a4c192173a03c00731cf84544f65a2fb9378989f72e9694a6a394a8a30997c2e67f95a504e631cd2c5f55246024761b245",
},
.{
.input_len = 4097,
.hash = "9b4052b38f1c5fc8b1f9ff7ac7b27cd242487b3d890d15c96a1c25b8aa0fb99505f91b0b5600a11251652eacfa9497b31cd3c409ce2e45cfe6c0a016967316c426bd26f619eab5d70af9a418b845c608840390f361630bd497b1ab44019316357c61dbe091ce72fc16dc340ac3d6e009e050b3adac4b5b2c92e722cffdc46501531956",
.keyed_hash = "00df940cd36bb9fa7cbbc3556744e0dbc8191401afe70520ba292ee3ca80abbc606db4976cfdd266ae0abf667d9481831ff12e0caa268e7d3e57260c0824115a54ce595ccc897786d9dcbf495599cfd90157186a46ec800a6763f1c59e36197e9939e900809f7077c102f888caaf864b253bc41eea812656d46742e4ea42769f89b83f",
.derive_key = "aca51029626b55fda7117b42a7c211f8c6e9ba4fe5b7a8ca922f34299500ead8a897f66a400fed9198fd61dd2d58d382458e64e100128075fc54b860934e8de2e84170734b06e1d212a117100820dbc48292d148afa50567b8b84b1ec336ae10d40c8c975a624996e12de31abbe135d9d159375739c333798a80c64ae895e51e22f3ad",
},
.{
.input_len = 5120,
.hash = "9cadc15fed8b5d854562b26a9536d9707cadeda9b143978f319ab34230535833acc61c8fdc114a2010ce8038c853e121e1544985133fccdd0a2d507e8e615e611e9a0ba4f47915f49e53d721816a9198e8b30f12d20ec3689989175f1bf7a300eee0d9321fad8da232ece6efb8e9fd81b42ad161f6b9550a069e66b11b40487a5f5059",
.keyed_hash = "2c493e48e9b9bf31e0553a22b23503c0a3388f035cece68eb438d22fa1943e209b4dc9209cd80ce7c1f7c9a744658e7e288465717ae6e56d5463d4f80cdb2ef56495f6a4f5487f69749af0c34c2cdfa857f3056bf8d807336a14d7b89bf62bef2fb54f9af6a546f818dc1e98b9e07f8a5834da50fa28fb5874af91bf06020d1bf0120e",
.derive_key = "7a7acac8a02adcf3038d74cdd1d34527de8a0fcc0ee3399d1262397ce5817f6055d0cefd84d9d57fe792d65a278fd20384ac6c30fdb340092f1a74a92ace99c482b28f0fc0ef3b923e56ade20c6dba47e49227166251337d80a037e987ad3a7f728b5ab6dfafd6e2ab1bd583a95d9c895ba9c2422c24ea0f62961f0dca45cad47bfa0d",
},
.{
.input_len = 5121,
.hash = "628bd2cb2004694adaab7bbd778a25df25c47b9d4155a55f8fbd79f2fe154cff96adaab0613a6146cdaabe498c3a94e529d3fc1da2bd08edf54ed64d40dcd6777647eac51d8277d70219a9694334a68bc8f0f23e20b0ff70ada6f844542dfa32cd4204ca1846ef76d811cdb296f65e260227f477aa7aa008bac878f72257484f2b6c95",
.keyed_hash = "6ccf1c34753e7a044db80798ecd0782a8f76f33563accaddbfbb2e0ea4b2d0240d07e63f13667a8d1490e5e04f13eb617aea16a8c8a5aaed1ef6fbde1b0515e3c81050b361af6ead126032998290b563e3caddeaebfab592e155f2e161fb7cba939092133f23f9e65245e58ec23457b78a2e8a125588aad6e07d7f11a85b88d375b72d",
.derive_key = "b07f01e518e702f7ccb44a267e9e112d403a7b3f4883a47ffbed4b48339b3c341a0add0ac032ab5aaea1e4e5b004707ec5681ae0fcbe3796974c0b1cf31a194740c14519273eedaabec832e8a784b6e7cfc2c5952677e6c3f2c3914454082d7eb1ce1766ac7d75a4d3001fc89544dd46b5147382240d689bbbaefc359fb6ae30263165",
},
.{
.input_len = 6144,
.hash = "3e2e5b74e048f3add6d21faab3f83aa44d3b2278afb83b80b3c35164ebeca2054d742022da6fdda444ebc384b04a54c3ac5839b49da7d39f6d8a9db03deab32aade156c1c0311e9b3435cde0ddba0dce7b26a376cad121294b689193508dd63151603c6ddb866ad16c2ee41585d1633a2cea093bea714f4c5d6b903522045b20395c83",
.keyed_hash = "3d6b6d21281d0ade5b2b016ae4034c5dec10ca7e475f90f76eac7138e9bc8f1dc35754060091dc5caf3efabe0603c60f45e415bb3407db67e6beb3d11cf8e4f7907561f05dace0c15807f4b5f389c841eb114d81a82c02a00b57206b1d11fa6e803486b048a5ce87105a686dee041207e095323dfe172df73deb8c9532066d88f9da7e",
.derive_key = "2a95beae63ddce523762355cf4b9c1d8f131465780a391286a5d01abb5683a1597099e3c6488aab6c48f3c15dbe1942d21dbcdc12115d19a8b8465fb54e9053323a9178e4275647f1a9927f6439e52b7031a0b465c861a3fc531527f7758b2b888cf2f20582e9e2c593709c0a44f9c6e0f8b963994882ea4168827823eef1f64169fef",
},
.{
.input_len = 6145,
.hash = "f1323a8631446cc50536a9f705ee5cb619424d46887f3c376c695b70e0f0507f18a2cfdd73c6e39dd75ce7c1c6e3ef238fd54465f053b25d21044ccb2093beb015015532b108313b5829c3621ce324b8e14229091b7c93f32db2e4e63126a377d2a63a3597997d4f1cba59309cb4af240ba70cebff9a23d5e3ff0cdae2cfd54e070022",
.keyed_hash = "9ac301e9e39e45e3250a7e3b3df701aa0fb6889fbd80eeecf28dbc6300fbc539f3c184ca2f59780e27a576c1d1fb9772e99fd17881d02ac7dfd39675aca918453283ed8c3169085ef4a466b91c1649cc341dfdee60e32231fc34c9c4e0b9a2ba87ca8f372589c744c15fd6f985eec15e98136f25beeb4b13c4e43dc84abcc79cd4646c",
.derive_key = "379bcc61d0051dd489f686c13de00d5b14c505245103dc040d9e4dd1facab8e5114493d029bdbd295aaa744a59e31f35c7f52dba9c3642f773dd0b4262a9980a2aef811697e1305d37ba9d8b6d850ef07fe41108993180cf779aeece363704c76483458603bbeeb693cffbbe5588d1f3535dcad888893e53d977424bb707201569a8d2",
},
.{
.input_len = 7168,
.hash = "61da957ec2499a95d6b8023e2b0e604ec7f6b50e80a9678b89d2628e99ada77a5707c321c83361793b9af62a40f43b523df1c8633cecb4cd14d00bdc79c78fca5165b863893f6d38b02ff7236c5a9a8ad2dba87d24c547cab046c29fc5bc1ed142e1de4763613bb162a5a538e6ef05ed05199d751f9eb58d332791b8d73fb74e4fce95",
.keyed_hash = "b42835e40e9d4a7f42ad8cc04f85a963a76e18198377ed84adddeaecacc6f3fca2f01d5277d69bb681c70fa8d36094f73ec06e452c80d2ff2257ed82e7ba348400989a65ee8daa7094ae0933e3d2210ac6395c4af24f91c2b590ef87d7788d7066ea3eaebca4c08a4f14b9a27644f99084c3543711b64a070b94f2c9d1d8a90d035d52",
.derive_key = "11c37a112765370c94a51415d0d651190c288566e295d505defdad895dae223730d5a5175a38841693020669c7638f40b9bc1f9f39cf98bda7a5b54ae24218a800a2116b34665aa95d846d97ea988bfcb53dd9c055d588fa21ba78996776ea6c40bc428b53c62b5f3ccf200f647a5aae8067f0ea1976391fcc72af1945100e2a6dcb88",
},
.{
.input_len = 7169,
.hash = "a003fc7a51754a9b3c7fae0367ab3d782dccf28855a03d435f8cfe74605e781798a8b20534be1ca9eb2ae2df3fae2ea60e48c6fb0b850b1385b5de0fe460dbe9d9f9b0d8db4435da75c601156df9d047f4ede008732eb17adc05d96180f8a73548522840779e6062d643b79478a6e8dbce68927f36ebf676ffa7d72d5f68f050b119c8",
.keyed_hash = "ed9b1a922c046fdb3d423ae34e143b05ca1bf28b710432857bf738bcedbfa5113c9e28d72fcbfc020814ce3f5d4fc867f01c8f5b6caf305b3ea8a8ba2da3ab69fabcb438f19ff11f5378ad4484d75c478de425fb8e6ee809b54eec9bdb184315dc856617c09f5340451bf42fd3270a7b0b6566169f242e533777604c118a6358250f54",
.derive_key = "554b0a5efea9ef183f2f9b931b7497995d9eb26f5c5c6dad2b97d62fc5ac31d99b20652c016d88ba2a611bbd761668d5eda3e568e940faae24b0d9991c3bd25a65f770b89fdcadabcb3d1a9c1cb63e69721cacf1ae69fefdcef1e3ef41bc5312ccc17222199e47a26552c6adc460cf47a72319cb5039369d0060eaea59d6c65130f1dd",
},
.{
.input_len = 8192,
.hash = "aae792484c8efe4f19e2ca7d371d8c467ffb10748d8a5a1ae579948f718a2a635fe51a27db045a567c1ad51be5aa34c01c6651c4d9b5b5ac5d0fd58cf18dd61a47778566b797a8c67df7b1d60b97b19288d2d877bb2df417ace009dcb0241ca1257d62712b6a4043b4ff33f690d849da91ea3bf711ed583cb7b7a7da2839ba71309bbf",
.keyed_hash = "dc9637c8845a770b4cbf76b8daec0eebf7dc2eac11498517f08d44c8fc00d58a4834464159dcbc12a0ba0c6d6eb41bac0ed6585cabfe0aca36a375e6c5480c22afdc40785c170f5a6b8a1107dbee282318d00d915ac9ed1143ad40765ec120042ee121cd2baa36250c618adaf9e27260fda2f94dea8fb6f08c04f8f10c78292aa46102",
.derive_key = "ad01d7ae4ad059b0d33baa3c01319dcf8088094d0359e5fd45d6aeaa8b2d0c3d4c9e58958553513b67f84f8eac653aeeb02ae1d5672dcecf91cd9985a0e67f4501910ecba25555395427ccc7241d70dc21c190e2aadee875e5aae6bf1912837e53411dabf7a56cbf8e4fb780432b0d7fe6cec45024a0788cf5874616407757e9e6bef7",
},
.{
.input_len = 8193,
.hash = "bab6c09cb8ce8cf459261398d2e7aef35700bf488116ceb94a36d0f5f1b7bc3bb2282aa69be089359ea1154b9a9286c4a56af4de975a9aa4a5c497654914d279bea60bb6d2cf7225a2fa0ff5ef56bbe4b149f3ed15860f78b4e2ad04e158e375c1e0c0b551cd7dfc82f1b155c11b6b3ed51ec9edb30d133653bb5709d1dbd55f4e1ff6",
.keyed_hash = "954a2a75420c8d6547e3ba5b98d963e6fa6491addc8c023189cc519821b4a1f5f03228648fd983aef045c2fa8290934b0866b615f585149587dda2299039965328835a2b18f1d63b7e300fc76ff260b571839fe44876a4eae66cbac8c67694411ed7e09df51068a22c6e67d6d3dd2cca8ff12e3275384006c80f4db68023f24eebba57",
.derive_key = "af1e0346e389b17c23200270a64aa4e1ead98c61695d917de7d5b00491c9b0f12f20a01d6d622edf3de026a4db4e4526225debb93c1237934d71c7340bb5916158cbdafe9ac3225476b6ab57a12357db3abbad7a26c6e66290e44034fb08a20a8d0ec264f309994d2810c49cfba6989d7abb095897459f5425adb48aba07c5fb3c83c0",
},
.{
.input_len = 16384,
.hash = "f875d6646de28985646f34ee13be9a576fd515f76b5b0a26bb324735041ddde49d764c270176e53e97bdffa58d549073f2c660be0e81293767ed4e4929f9ad34bbb39a529334c57c4a381ffd2a6d4bfdbf1482651b172aa883cc13408fa67758a3e47503f93f87720a3177325f7823251b85275f64636a8f1d599c2e49722f42e93893",
.keyed_hash = "9e9fc4eb7cf081ea7c47d1807790ed211bfec56aa25bb7037784c13c4b707b0df9e601b101e4cf63a404dfe50f2e1865bb12edc8fca166579ce0c70dba5a5c0fc960ad6f3772183416a00bd29d4c6e651ea7620bb100c9449858bf14e1ddc9ecd35725581ca5b9160de04060045993d972571c3e8f71e9d0496bfa744656861b169d65",
.derive_key = "160e18b5878cd0df1c3af85eb25a0db5344d43a6fbd7a8ef4ed98d0714c3f7e160dc0b1f09caa35f2f417b9ef309dfe5ebd67f4c9507995a531374d099cf8ae317542e885ec6f589378864d3ea98716b3bbb65ef4ab5e0ab5bb298a501f19a41ec19af84a5e6b428ecd813b1a47ed91c9657c3fba11c406bc316768b58f6802c9e9b57",
},
.{
.input_len = 31744,
.hash = "62b6960e1a44bcc1eb1a611a8d6235b6b4b78f32e7abc4fb4c6cdcce94895c47860cc51f2b0c28a7b77304bd55fe73af663c02d3f52ea053ba43431ca5bab7bfea2f5e9d7121770d88f70ae9649ea713087d1914f7f312147e247f87eb2d4ffef0ac978bf7b6579d57d533355aa20b8b77b13fd09748728a5cc327a8ec470f4013226f",
.keyed_hash = "efa53b389ab67c593dba624d898d0f7353ab99e4ac9d42302ee64cbf9939a4193a7258db2d9cd32a7a3ecfce46144114b15c2fcb68a618a976bd74515d47be08b628be420b5e830fade7c080e351a076fbc38641ad80c736c8a18fe3c66ce12f95c61c2462a9770d60d0f77115bbcd3782b593016a4e728d4c06cee4505cb0c08a42ec",
.derive_key = "39772aef80e0ebe60596361e45b061e8f417429d529171b6764468c22928e28e9759adeb797a3fbf771b1bcea30150a020e317982bf0d6e7d14dd9f064bc11025c25f31e81bd78a921db0174f03dd481d30e93fd8e90f8b2fee209f849f2d2a52f31719a490fb0ba7aea1e09814ee912eba111a9fde9d5c274185f7bae8ba85d300a2b",
},
.{
.input_len = 102400,
.hash = "bc3e3d41a1146b069abffad3c0d44860cf664390afce4d9661f7902e7943e085e01c59dab908c04c3342b816941a26d69c2605ebee5ec5291cc55e15b76146e6745f0601156c3596cb75065a9c57f35585a52e1ac70f69131c23d611ce11ee4ab1ec2c009012d236648e77be9295dd0426f29b764d65de58eb7d01dd42248204f45f8e",
.keyed_hash = "1c35d1a5811083fd7119f5d5d1ba027b4d01c0c6c49fb6ff2cf75393ea5db4a7f9dbdd3e1d81dcbca3ba241bb18760f207710b751846faaeb9dff8262710999a59b2aa1aca298a032d94eacfadf1aa192418eb54808db23b56e34213266aa08499a16b354f018fc4967d05f8b9d2ad87a7278337be9693fc638a3bfdbe314574ee6fc4",
.derive_key = "4652cff7a3f385a6103b5c260fc1593e13c778dbe608efb092fe7ee69df6e9c6d83a3e041bc3a48df2879f4a0a3ed40e7c961c73eff740f3117a0504c2dff4786d44fb17f1549eb0ba585e40ec29bf7732f0b7e286ff8acddc4cb1e23b87ff5d824a986458dcc6a04ac83969b80637562953df51ed1a7e90a7926924d2763778be8560",
},
},
};
fn test_blake3(hasher: *Blake3, input_len: usize, expected_hex: [262]u8) void {
// Setup input pattern
var input_pattern: [251]u8 = undefined;
for (input_pattern) |*e, i| e.* = @truncate(u8, i);
// Write repeating input pattern to hasher
var input_counter = input_len;
while (input_counter > 0) {
const update_len = math.min(input_counter, input_pattern.len);
hasher.update(input_pattern[0..update_len]);
input_counter -= update_len;
}
// Read final hash value
var actual_bytes: [expected_hex.len / 2]u8 = undefined;
hasher.final(actual_bytes[0..]);
hasher.reset();
// Compare to expected value
var expected_bytes: [expected_hex.len / 2]u8 = undefined;
fmt.hexToBytes(expected_bytes[0..], expected_hex[0..]) catch unreachable;
testing.expectEqual(actual_bytes, expected_bytes);
}
test "BLAKE3 reference test cases" {
var hash = &Blake3.init();
var keyed_hash = &Blake3.init_keyed(reference_test.key.*);
var derive_key = &Blake3.init_derive_key(reference_test.context_string);
for (reference_test.cases) |t| {
test_blake3(hash, t.input_len, t.hash.*);
test_blake3(keyed_hash, t.input_len, t.keyed_hash.*);
test_blake3(derive_key, t.input_len, t.derive_key.*);
}
}

View File

@ -19,8 +19,8 @@ const windows = std.os.windows;
pub const leb = @import("debug/leb128.zig");
pub const FailingAllocator = @import("debug/failing_allocator.zig").FailingAllocator;
pub const failing_allocator = &FailingAllocator.init(global_allocator, 0).allocator;
pub const global_allocator = @compileError("Please switch to std.testing.allocator.");
pub const failing_allocator = @compileError("Please switch to std.testing.failing_allocator.");
pub const runtime_safety = switch (builtin.mode) {
.Debug, .ReleaseSafe => true,
@ -131,6 +131,7 @@ pub fn dumpStackTraceFromBase(bp: usize, ip: usize) void {
const tty_config = detectTTYConfig();
printSourceAtAddress(debug_info, stderr, ip, tty_config) catch return;
const first_return_address = @intToPtr(*const usize, bp + @sizeOf(usize)).*;
if (first_return_address == 0) return; // The whole call stack may be optimized out
printSourceAtAddress(debug_info, stderr, first_return_address - 1, tty_config) catch return;
var it = StackIterator{
.first_addr = null,
@ -325,6 +326,7 @@ pub const StackIterator = struct {
}
const return_address = @intToPtr(*const usize, self.fp - fp_adjust_factor + @sizeOf(usize)).*;
if (return_address == 0) return null;
return return_address;
}
};
@ -470,7 +472,7 @@ fn printSourceAtAddressWindows(
line_index += @sizeOf(pdb.LineNumberEntry);
const vaddr_start = frag_vaddr_start + line_num_entry.Offset;
if (relative_address <= vaddr_start) {
if (relative_address < vaddr_start) {
break;
}
}
@ -944,8 +946,8 @@ fn readSparseBitVector(stream: var, allocator: *mem.Allocator) ![]usize {
fn findDwarfSectionFromElf(elf_file: *elf.Elf, name: []const u8) !?DwarfInfo.Section {
const elf_header = (try elf_file.findSection(name)) orelse return null;
return DwarfInfo.Section{
.offset = elf_header.offset,
.size = elf_header.size,
.offset = elf_header.sh_offset,
.size = elf_header.sh_size,
};
}
@ -985,12 +987,12 @@ pub fn openElfDebugInfo(
var di = DwarfInfo{
.endian = efile.endian,
.debug_info = (data[@intCast(usize, debug_info.offset)..@intCast(usize, debug_info.offset + debug_info.size)]),
.debug_abbrev = (data[@intCast(usize, debug_abbrev.offset)..@intCast(usize, debug_abbrev.offset + debug_abbrev.size)]),
.debug_str = (data[@intCast(usize, debug_str.offset)..@intCast(usize, debug_str.offset + debug_str.size)]),
.debug_line = (data[@intCast(usize, debug_line.offset)..@intCast(usize, debug_line.offset + debug_line.size)]),
.debug_info = (data[@intCast(usize, debug_info.sh_offset)..@intCast(usize, debug_info.sh_offset + debug_info.sh_size)]),
.debug_abbrev = (data[@intCast(usize, debug_abbrev.sh_offset)..@intCast(usize, debug_abbrev.sh_offset + debug_abbrev.sh_size)]),
.debug_str = (data[@intCast(usize, debug_str.sh_offset)..@intCast(usize, debug_str.sh_offset + debug_str.sh_size)]),
.debug_line = (data[@intCast(usize, debug_line.sh_offset)..@intCast(usize, debug_line.sh_offset + debug_line.sh_size)]),
.debug_ranges = if (opt_debug_ranges) |debug_ranges|
data[@intCast(usize, debug_ranges.offset)..@intCast(usize, debug_ranges.offset + debug_ranges.size)]
data[@intCast(usize, debug_ranges.sh_offset)..@intCast(usize, debug_ranges.sh_offset + debug_ranges.sh_size)]
else
null,
};
@ -1390,8 +1392,12 @@ pub const DwarfInfo = struct {
// All the addresses in the list are relative to the value
// specified by DW_AT_low_pc or to some other value encoded
// in the list itself
var base_address = try compile_unit.die.getAttrAddr(DW.AT_low_pc);
// in the list itself.
// If no starting value is specified use zero.
var base_address = compile_unit.die.getAttrAddr(DW.AT_low_pc) catch |err| switch (err) {
error.MissingDebugInfo => 0,
else => return err,
};
try s.seekable_stream.seekTo(ranges_offset);
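
The hunk above swaps a hard `try` for the usual Zig idiom of substituting a default on one specific error while still propagating every other one. A minimal, self-contained sketch of that idiom (hypothetical function and error names, not part of this change):

    const std = @import("std");

    const Error = error{ MissingDebugInfo, InvalidDebugInfo };

    // Hypothetical lookup standing in for compile_unit.die.getAttrAddr(DW.AT_low_pc).
    fn lookupLowPc(found: bool) Error!u64 {
        if (!found) return error.MissingDebugInfo;
        return 0x400000;
    }

    test "fall back to zero only on MissingDebugInfo" {
        const base_address = lookupLowPc(false) catch |err| switch (err) {
            error.MissingDebugInfo => @as(u64, 0), // no DW_AT_low_pc: start from zero
            else => return err, // anything else still propagates
        };
        std.testing.expect(base_address == 0);
    }
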
@ -1410,8 +1416,6 @@ pub const DwarfInfo = struct {
return compile_unit;
}
}
return error.InvalidDebugInfo;
} else |err| {
if (err != error.MissingDebugInfo) return err;
continue;
@ -2192,11 +2196,6 @@ fn readInitialLength(comptime E: type, in_stream: *io.InStream(E), is_64: *bool)
}
}
/// This should only be used in temporary test programs.
pub const global_allocator = &global_fixed_allocator.allocator;
var global_fixed_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(global_allocator_mem[0..]);
var global_allocator_mem: [100 * 1024]u8 = undefined;
/// TODO multithreaded awareness
var debug_info_allocator: ?*mem.Allocator = null;
var debug_info_arena_allocator: std.heap.ArenaAllocator = undefined;
@ -2307,16 +2306,39 @@ fn handleSegfaultLinux(sig: i32, info: *const os.siginfo_t, ctx_ptr: *const c_vo
}
fn handleSegfaultWindows(info: *windows.EXCEPTION_POINTERS) callconv(.Stdcall) c_long {
const exception_address = @ptrToInt(info.ExceptionRecord.ExceptionAddress);
switch (info.ExceptionRecord.ExceptionCode) {
windows.EXCEPTION_DATATYPE_MISALIGNMENT => panicExtra(null, exception_address, "Unaligned Memory Access", .{}),
windows.EXCEPTION_ACCESS_VIOLATION => panicExtra(null, exception_address, "Segmentation fault at address 0x{x}", .{info.ExceptionRecord.ExceptionInformation[1]}),
windows.EXCEPTION_ILLEGAL_INSTRUCTION => panicExtra(null, exception_address, "Illegal Instruction", .{}),
windows.EXCEPTION_STACK_OVERFLOW => panicExtra(null, exception_address, "Stack Overflow", .{}),
windows.EXCEPTION_DATATYPE_MISALIGNMENT => handleSegfaultWindowsExtra(info, 0, "Unaligned Memory Access"),
windows.EXCEPTION_ACCESS_VIOLATION => handleSegfaultWindowsExtra(info, 1, null),
windows.EXCEPTION_ILLEGAL_INSTRUCTION => handleSegfaultWindowsExtra(info, 2, null),
windows.EXCEPTION_STACK_OVERFLOW => handleSegfaultWindowsExtra(info, 0, "Stack Overflow"),
else => return windows.EXCEPTION_CONTINUE_SEARCH,
}
}
// zig won't let me use an anon enum here https://github.com/ziglang/zig/issues/3707
fn handleSegfaultWindowsExtra(info: *windows.EXCEPTION_POINTERS, comptime msg: u8, comptime format: ?[]const u8) noreturn {
const exception_address = @ptrToInt(info.ExceptionRecord.ExceptionAddress);
if (@hasDecl(windows, "CONTEXT")) {
const regs = info.ContextRecord.getRegs();
switch (msg) {
0 => std.debug.warn("{}\n", .{format.?}),
1 => std.debug.warn("Segmentation fault at address 0x{x}\n", .{info.ExceptionRecord.ExceptionInformation[1]}),
2 => std.debug.warn("Illegal instruction at address 0x{x}\n", .{regs.ip}),
else => unreachable,
}
dumpStackTraceFromBase(regs.bp, regs.ip);
os.abort();
} else {
switch (msg) {
0 => panicExtra(null, exception_address, format.?, .{}),
1 => panicExtra(null, exception_address, "Segmentation fault at address 0x{x}", .{info.ExceptionRecord.ExceptionInformation[1]}),
2 => panicExtra(null, exception_address, "Illegal Instruction", .{}),
else => unreachable,
}
}
}
pub fn dumpStackPointerAddr(prefix: []const u8) void {
const sp = asm (""
: [argc] "={rsp}" (-> usize)

View File

@ -330,19 +330,8 @@ pub const ET = extern enum(u16) {
pub const HIPROC = 0xffff;
};
/// TODO delete this in favor of Elf64_Shdr
pub const SectionHeader = struct {
name: u32,
sh_type: u32,
flags: u64,
addr: u64,
offset: u64,
size: u64,
link: u32,
info: u32,
addr_align: u64,
ent_size: u64,
};
pub const SectionHeader = Elf64_Shdr;
pub const ProgramHeader = Elf64_Phdr;
pub const Elf = struct {
seekable_stream: *io.SeekableStream(anyerror, anyerror),
@ -357,6 +346,7 @@ pub const Elf = struct {
string_section_index: usize,
string_section: *SectionHeader,
section_headers: []SectionHeader,
program_headers: []ProgramHeader,
allocator: *mem.Allocator,
/// Call close when done.
@ -421,14 +411,24 @@ pub const Elf = struct {
try seekable_stream.seekBy(4);
const header_size = try in.readInt(u16, elf.endian);
if ((elf.is_64 and header_size != 64) or (!elf.is_64 and header_size != 52)) {
if ((elf.is_64 and header_size != @sizeOf(Elf64_Ehdr)) or (!elf.is_64 and header_size != @sizeOf(Elf32_Ehdr))) {
return error.InvalidFormat;
}
const ph_entry_size = try in.readInt(u16, elf.endian);
const ph_entry_count = try in.readInt(u16, elf.endian);
if ((elf.is_64 and ph_entry_size != @sizeOf(Elf64_Phdr)) or (!elf.is_64 and ph_entry_size != @sizeOf(Elf32_Phdr))) {
return error.InvalidFormat;
}
const sh_entry_size = try in.readInt(u16, elf.endian);
const sh_entry_count = try in.readInt(u16, elf.endian);
if ((elf.is_64 and sh_entry_size != @sizeOf(Elf64_Shdr)) or (!elf.is_64 and sh_entry_size != @sizeOf(Elf32_Shdr))) {
return error.InvalidFormat;
}
elf.string_section_index = @as(usize, try in.readInt(u16, elf.endian));
if (elf.string_section_index >= sh_entry_count) return error.InvalidFormat;
@ -443,47 +443,72 @@ pub const Elf = struct {
return error.InvalidFormat;
}
try seekable_stream.seekTo(elf.program_header_offset);
elf.program_headers = try elf.allocator.alloc(ProgramHeader, ph_entry_count);
errdefer elf.allocator.free(elf.program_headers);
if (elf.is_64) {
for (elf.program_headers) |*elf_program| {
elf_program.p_type = try in.readInt(Elf64_Word, elf.endian);
elf_program.p_flags = try in.readInt(Elf64_Word, elf.endian);
elf_program.p_offset = try in.readInt(Elf64_Off, elf.endian);
elf_program.p_vaddr = try in.readInt(Elf64_Addr, elf.endian);
elf_program.p_paddr = try in.readInt(Elf64_Addr, elf.endian);
elf_program.p_filesz = try in.readInt(Elf64_Xword, elf.endian);
elf_program.p_memsz = try in.readInt(Elf64_Xword, elf.endian);
elf_program.p_align = try in.readInt(Elf64_Xword, elf.endian);
}
} else {
for (elf.program_headers) |*elf_program| {
elf_program.p_type = @as(Elf64_Word, try in.readInt(Elf32_Word, elf.endian));
elf_program.p_offset = @as(Elf64_Off, try in.readInt(Elf32_Off, elf.endian));
elf_program.p_vaddr = @as(Elf64_Addr, try in.readInt(Elf32_Addr, elf.endian));
elf_program.p_paddr = @as(Elf64_Addr, try in.readInt(Elf32_Addr, elf.endian));
elf_program.p_filesz = @as(Elf64_Word, try in.readInt(Elf32_Word, elf.endian));
elf_program.p_memsz = @as(Elf64_Word, try in.readInt(Elf32_Word, elf.endian));
elf_program.p_flags = @as(Elf64_Word, try in.readInt(Elf32_Word, elf.endian));
elf_program.p_align = @as(Elf64_Word, try in.readInt(Elf32_Word, elf.endian));
}
}
try seekable_stream.seekTo(elf.section_header_offset);
elf.section_headers = try elf.allocator.alloc(SectionHeader, sh_entry_count);
errdefer elf.allocator.free(elf.section_headers);
if (elf.is_64) {
if (sh_entry_size != 64) return error.InvalidFormat;
for (elf.section_headers) |*elf_section| {
elf_section.name = try in.readInt(u32, elf.endian);
elf_section.sh_name = try in.readInt(u32, elf.endian);
elf_section.sh_type = try in.readInt(u32, elf.endian);
elf_section.flags = try in.readInt(u64, elf.endian);
elf_section.addr = try in.readInt(u64, elf.endian);
elf_section.offset = try in.readInt(u64, elf.endian);
elf_section.size = try in.readInt(u64, elf.endian);
elf_section.link = try in.readInt(u32, elf.endian);
elf_section.info = try in.readInt(u32, elf.endian);
elf_section.addr_align = try in.readInt(u64, elf.endian);
elf_section.ent_size = try in.readInt(u64, elf.endian);
elf_section.sh_flags = try in.readInt(u64, elf.endian);
elf_section.sh_addr = try in.readInt(u64, elf.endian);
elf_section.sh_offset = try in.readInt(u64, elf.endian);
elf_section.sh_size = try in.readInt(u64, elf.endian);
elf_section.sh_link = try in.readInt(u32, elf.endian);
elf_section.sh_info = try in.readInt(u32, elf.endian);
elf_section.sh_addralign = try in.readInt(u64, elf.endian);
elf_section.sh_entsize = try in.readInt(u64, elf.endian);
}
} else {
if (sh_entry_size != 40) return error.InvalidFormat;
for (elf.section_headers) |*elf_section| {
// TODO (multiple occurrences) allow implicit cast from %u32 -> %u64 ?
elf_section.name = try in.readInt(u32, elf.endian);
elf_section.sh_name = try in.readInt(u32, elf.endian);
elf_section.sh_type = try in.readInt(u32, elf.endian);
elf_section.flags = @as(u64, try in.readInt(u32, elf.endian));
elf_section.addr = @as(u64, try in.readInt(u32, elf.endian));
elf_section.offset = @as(u64, try in.readInt(u32, elf.endian));
elf_section.size = @as(u64, try in.readInt(u32, elf.endian));
elf_section.link = try in.readInt(u32, elf.endian);
elf_section.info = try in.readInt(u32, elf.endian);
elf_section.addr_align = @as(u64, try in.readInt(u32, elf.endian));
elf_section.ent_size = @as(u64, try in.readInt(u32, elf.endian));
elf_section.sh_flags = @as(u64, try in.readInt(u32, elf.endian));
elf_section.sh_addr = @as(u64, try in.readInt(u32, elf.endian));
elf_section.sh_offset = @as(u64, try in.readInt(u32, elf.endian));
elf_section.sh_size = @as(u64, try in.readInt(u32, elf.endian));
elf_section.sh_link = try in.readInt(u32, elf.endian);
elf_section.sh_info = try in.readInt(u32, elf.endian);
elf_section.sh_addralign = @as(u64, try in.readInt(u32, elf.endian));
elf_section.sh_entsize = @as(u64, try in.readInt(u32, elf.endian));
}
}
for (elf.section_headers) |*elf_section| {
if (elf_section.sh_type != SHT_NOBITS) {
const file_end_offset = try math.add(u64, elf_section.offset, elf_section.size);
const file_end_offset = try math.add(u64, elf_section.sh_offset, elf_section.sh_size);
if (stream_end < file_end_offset) return error.InvalidFormat;
}
}
@ -499,13 +524,14 @@ pub const Elf = struct {
pub fn close(elf: *Elf) void {
elf.allocator.free(elf.section_headers);
elf.allocator.free(elf.program_headers);
}
pub fn findSection(elf: *Elf, name: []const u8) !?*SectionHeader {
section_loop: for (elf.section_headers) |*elf_section| {
if (elf_section.sh_type == SHT_NULL) continue;
const name_offset = elf.string_section.offset + elf_section.name;
const name_offset = elf.string_section.sh_offset + elf_section.sh_name;
try elf.seekable_stream.seekTo(name_offset);
for (name) |expected_c| {
@ -523,7 +549,7 @@ pub const Elf = struct {
}
pub fn seekToSection(elf: *Elf, elf_section: *SectionHeader) !void {
try elf.seekable_stream.seekTo(elf_section.offset);
try elf.seekable_stream.seekTo(elf_section.sh_offset);
}
};
@ -578,6 +604,26 @@ pub const Elf64_Ehdr = extern struct {
e_shnum: Elf64_Half,
e_shstrndx: Elf64_Half,
};
pub const Elf32_Phdr = extern struct {
p_type: Elf32_Word,
p_offset: Elf32_Off,
p_vaddr: Elf32_Addr,
p_paddr: Elf32_Addr,
p_filesz: Elf32_Word,
p_memsz: Elf32_Word,
p_flags: Elf32_Word,
p_align: Elf32_Word,
};
pub const Elf64_Phdr = extern struct {
p_type: Elf64_Word,
p_flags: Elf64_Word,
p_offset: Elf64_Off,
p_vaddr: Elf64_Addr,
p_paddr: Elf64_Addr,
p_filesz: Elf64_Xword,
p_memsz: Elf64_Xword,
p_align: Elf64_Xword,
};
pub const Elf32_Shdr = extern struct {
sh_name: Elf32_Word,
sh_type: Elf32_Word,
@ -655,26 +701,6 @@ pub const Elf64_Rela = extern struct {
r_info: Elf64_Xword,
r_addend: Elf64_Sxword,
};
pub const Elf32_Phdr = extern struct {
p_type: Elf32_Word,
p_offset: Elf32_Off,
p_vaddr: Elf32_Addr,
p_paddr: Elf32_Addr,
p_filesz: Elf32_Word,
p_memsz: Elf32_Word,
p_flags: Elf32_Word,
p_align: Elf32_Word,
};
pub const Elf64_Phdr = extern struct {
p_type: Elf64_Word,
p_flags: Elf64_Word,
p_offset: Elf64_Off,
p_vaddr: Elf64_Addr,
p_paddr: Elf64_Addr,
p_filesz: Elf64_Xword,
p_memsz: Elf64_Xword,
p_align: Elf64_Xword,
};
pub const Elf32_Dyn = extern struct {
d_tag: Elf32_Sword,
d_un: extern union {
@ -833,6 +859,17 @@ pub const Elf_MIPS_ABIFlags_v0 = extern struct {
flags2: Elf32_Word,
};
comptime {
debug.assert(@sizeOf(Elf32_Ehdr) == 52);
debug.assert(@sizeOf(Elf64_Ehdr) == 64);
debug.assert(@sizeOf(Elf32_Phdr) == 32);
debug.assert(@sizeOf(Elf64_Phdr) == 56);
debug.assert(@sizeOf(Elf32_Shdr) == 40);
debug.assert(@sizeOf(Elf64_Shdr) == 64);
}
pub const Auxv = switch (@sizeOf(usize)) {
4 => Elf32_auxv_t,
8 => Elf64_auxv_t,

View File

@ -160,12 +160,12 @@ pub fn pwriteWindows(fd: fd_t, data: []const u8, offset: u64) os.WindowsWriteErr
var bytes_transferred: windows.DWORD = undefined;
if (windows.kernel32.GetOverlappedResult(fd, &resume_node.base.overlapped, &bytes_transferred, windows.FALSE) == 0) {
switch (windows.kernel32.GetLastError()) {
windows.ERROR.IO_PENDING => unreachable,
windows.ERROR.INVALID_USER_BUFFER => return error.SystemResources,
windows.ERROR.NOT_ENOUGH_MEMORY => return error.SystemResources,
windows.ERROR.OPERATION_ABORTED => return error.OperationAborted,
windows.ERROR.NOT_ENOUGH_QUOTA => return error.SystemResources,
windows.ERROR.BROKEN_PIPE => return error.BrokenPipe,
.IO_PENDING => unreachable,
.INVALID_USER_BUFFER => return error.SystemResources,
.NOT_ENOUGH_MEMORY => return error.SystemResources,
.OPERATION_ABORTED => return error.OperationAborted,
.NOT_ENOUGH_QUOTA => return error.SystemResources,
.BROKEN_PIPE => return error.BrokenPipe,
else => |err| return windows.unexpectedError(err),
}
}
@ -320,10 +320,10 @@ pub fn preadWindows(fd: fd_t, data: []u8, offset: u64) !usize {
var bytes_transferred: windows.DWORD = undefined;
if (windows.kernel32.GetOverlappedResult(fd, &resume_node.base.overlapped, &bytes_transferred, windows.FALSE) == 0) {
switch (windows.kernel32.GetLastError()) {
windows.ERROR.IO_PENDING => unreachable,
windows.ERROR.OPERATION_ABORTED => return error.OperationAborted,
windows.ERROR.BROKEN_PIPE => return error.BrokenPipe,
windows.ERROR.HANDLE_EOF => return @as(usize, bytes_transferred),
.IO_PENDING => unreachable,
.OPERATION_ABORTED => return error.OperationAborted,
.BROKEN_PIPE => return error.BrokenPipe,
.HANDLE_EOF => return @as(usize, bytes_transferred),
else => |err| return windows.unexpectedError(err),
}
}

View File

@ -347,7 +347,7 @@ pub fn LinearFifo(
}
test "LinearFifo(u8, .Dynamic)" {
var fifo = LinearFifo(u8, .Dynamic).init(debug.global_allocator);
var fifo = LinearFifo(u8, .Dynamic).init(testing.allocator);
defer fifo.deinit();
try fifo.write("HELLO");
@ -422,7 +422,7 @@ test "LinearFifo" {
var fifo = switch (bt) {
.Static => FifoType.init(),
.Slice => FifoType.init(buf[0..]),
.Dynamic => FifoType.init(debug.global_allocator),
.Dynamic => FifoType.init(testing.allocator),
};
defer fifo.deinit();

View File

@ -21,17 +21,6 @@ pub const FormatOptions = struct {
fill: u8 = ' ',
};
fn nextArg(comptime used_pos_args: *u32, comptime maybe_pos_arg: ?comptime_int, comptime next_arg: *comptime_int) comptime_int {
if (maybe_pos_arg) |pos_arg| {
used_pos_args.* |= 1 << pos_arg;
return pos_arg;
} else {
const arg = next_arg.*;
next_arg.* += 1;
return arg;
}
}
fn peekIsAlign(comptime fmt: []const u8) bool {
// Should only be called during a state transition to the format segment.
comptime assert(fmt[0] == ':');
@ -113,12 +102,36 @@ pub fn format(
comptime var start_index = 0;
comptime var state = State.Start;
comptime var next_arg = 0;
comptime var maybe_pos_arg: ?comptime_int = null;
comptime var used_pos_args: ArgSetType = 0;
comptime var specifier_start = 0;
comptime var specifier_end = 0;
comptime var options = FormatOptions{};
comptime var arg_state: struct {
next_arg: usize = 0,
used_args: ArgSetType = 0,
args_len: usize = args.len,
fn hasUnusedArgs(comptime self: *@This()) bool {
return (@popCount(ArgSetType, self.used_args) != self.args_len);
}
fn nextArg(comptime self: *@This(), comptime pos_arg: ?comptime_int) comptime_int {
const next_idx = pos_arg orelse blk: {
const arg = self.next_arg;
self.next_arg += 1;
break :blk arg;
};
if (next_idx >= self.args_len) {
@compileError("Too few arguments");
}
// Mark this argument as used
self.used_args |= 1 << next_idx;
return next_idx;
}
} = .{};
inline for (fmt) |c, i| {
switch (state) {
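
The new comptime `arg_state` struct centralizes the argument bookkeeping that was previously split between `nextArg` and the trailing popcount check: every placeholder marks the argument it consumed, and `hasUnusedArgs` turns leftovers into a compile error. A small sketch of how this surfaces to callers, assuming `bufPrint` and the positional `{N}` syntax behave as elsewhere in this file:

    const std = @import("std");

    test "sequential and positional placeholders" {
        var buf: [32]u8 = undefined;

        // {} consumes the next sequential argument; {N} picks one explicitly.
        const s = try std.fmt.bufPrint(buf[0..], "{1}-{0}", .{ 10, 20 });
        std.testing.expect(std.mem.eql(u8, s, "20-10"));

        // These would be rejected at compile time by the checks above:
        //   std.fmt.bufPrint(buf[0..], "{}", .{ 10, 20 });   // "Unused arguments"
        //   std.fmt.bufPrint(buf[0..], "{} {}", .{10});      // "Too few arguments"
    }
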
@ -166,11 +179,7 @@ pub fn format(
}
},
'}' => {
const arg_to_print = comptime nextArg(&used_pos_args, maybe_pos_arg, &next_arg);
if (arg_to_print >= args.len) {
@compileError("Too few arguments");
}
const arg_to_print = comptime arg_state.nextArg(maybe_pos_arg);
try formatType(
args[arg_to_print],
@ -203,7 +212,7 @@ pub fn format(
state = if (comptime peekIsAlign(fmt[i..])) State.FormatFillAndAlign else State.FormatWidth;
},
'}' => {
const arg_to_print = comptime nextArg(&used_pos_args, maybe_pos_arg, &next_arg);
const arg_to_print = comptime arg_state.nextArg(maybe_pos_arg);
try formatType(
args[arg_to_print],
@ -250,7 +259,7 @@ pub fn format(
state = .FormatPrecision;
},
'}' => {
const arg_to_print = comptime nextArg(&used_pos_args, maybe_pos_arg, &next_arg);
const arg_to_print = comptime arg_state.nextArg(maybe_pos_arg);
try formatType(
args[arg_to_print],
@ -278,7 +287,7 @@ pub fn format(
options.precision.? += c - '0';
},
'}' => {
const arg_to_print = comptime nextArg(&used_pos_args, maybe_pos_arg, &next_arg);
const arg_to_print = comptime arg_state.nextArg(maybe_pos_arg);
try formatType(
args[arg_to_print],
@ -299,13 +308,7 @@ pub fn format(
}
}
comptime {
// All arguments must have been printed but we allow mixing positional and fixed to achieve this.
var i: usize = 0;
inline while (i < next_arg) : (i += 1) {
used_pos_args |= 1 << i;
}
if (@popCount(ArgSetType, used_pos_args) != args.len) {
if (comptime arg_state.hasUnusedArgs()) {
@compileError("Unused arguments");
}
if (state != State.Start) {
@ -362,14 +365,21 @@ pub fn formatType(
try output(context, "error.");
return output(context, @errorName(value));
},
.Enum => {
.Enum => |enumInfo| {
if (comptime std.meta.trait.hasFn("format")(T)) {
return value.format(fmt, options, context, Errors, output);
}
try output(context, @typeName(T));
try output(context, ".");
return formatType(@tagName(value), "", options, context, Errors, output, max_depth);
if (enumInfo.is_exhaustive) {
try output(context, ".");
return formatType(@tagName(value), "", options, context, Errors, output, max_depth);
} else {
// TODO: when @tagName works on exhaustive enums print known enum strings
try output(context, "(");
try formatType(@enumToInt(value), "", options, context, Errors, output, max_depth);
try output(context, ")");
}
},
.Union => {
if (comptime std.meta.trait.hasFn("format")(T)) {
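
With `is_exhaustive` now consulted, exhaustive enums keep printing as `TypeName.Tag`, while non-exhaustive ones fall back to `TypeName(integer)` because `@tagName` cannot yet be used on them. A sketch of both paths with hypothetical enum declarations (not from this change set):

    const std = @import("std");

    const Exhaustive = enum { One, Two };
    const NonExhaustive = enum(u8) { One = 1, _ };

    test "enum formatting paths" {
        var buf: [64]u8 = undefined;

        const a = try std.fmt.bufPrint(buf[0..], "{}", .{Exhaustive.Two});
        std.testing.expect(std.mem.eql(u8, a, "Exhaustive.Two"));

        const b = try std.fmt.bufPrint(buf[0..], "{}", .{@intToEnum(NonExhaustive, 5)});
        std.testing.expect(std.mem.eql(u8, b, "NonExhaustive(5)"));
    }
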
@ -1598,7 +1608,8 @@ test "hexToBytes" {
test "formatIntValue with comptime_int" {
const value: comptime_int = 123456789123456789;
var buf = try std.Buffer.init(std.debug.global_allocator, "");
var buf = try std.Buffer.init(std.testing.allocator, "");
defer buf.deinit();
try formatIntValue(value, "", FormatOptions{}, &buf, @TypeOf(std.Buffer.append).ReturnType.ErrorSet, std.Buffer.append);
std.testing.expect(mem.eql(u8, buf.toSlice(), "123456789123456789"));
}
@ -1652,19 +1663,23 @@ test "formatType max_depth" {
inst.a = &inst;
inst.tu.ptr = &inst.tu;
var buf0 = try std.Buffer.init(std.debug.global_allocator, "");
var buf0 = try std.Buffer.init(std.testing.allocator, "");
defer buf0.deinit();
try formatType(inst, "", FormatOptions{}, &buf0, @TypeOf(std.Buffer.append).ReturnType.ErrorSet, std.Buffer.append, 0);
std.testing.expect(mem.eql(u8, buf0.toSlice(), "S{ ... }"));
var buf1 = try std.Buffer.init(std.debug.global_allocator, "");
var buf1 = try std.Buffer.init(std.testing.allocator, "");
defer buf1.deinit();
try formatType(inst, "", FormatOptions{}, &buf1, @TypeOf(std.Buffer.append).ReturnType.ErrorSet, std.Buffer.append, 1);
std.testing.expect(mem.eql(u8, buf1.toSlice(), "S{ .a = S{ ... }, .tu = TU{ ... }, .e = E.Two, .vec = (10.200,2.220) }"));
var buf2 = try std.Buffer.init(std.debug.global_allocator, "");
var buf2 = try std.Buffer.init(std.testing.allocator, "");
defer buf2.deinit();
try formatType(inst, "", FormatOptions{}, &buf2, @TypeOf(std.Buffer.append).ReturnType.ErrorSet, std.Buffer.append, 2);
std.testing.expect(mem.eql(u8, buf2.toSlice(), "S{ .a = S{ .a = S{ ... }, .tu = TU{ ... }, .e = E.Two, .vec = (10.200,2.220) }, .tu = TU{ .ptr = TU{ ... } }, .e = E.Two, .vec = (10.200,2.220) }"));
var buf3 = try std.Buffer.init(std.debug.global_allocator, "");
var buf3 = try std.Buffer.init(std.testing.allocator, "");
defer buf3.deinit();
try formatType(inst, "", FormatOptions{}, &buf3, @TypeOf(std.Buffer.append).ReturnType.ErrorSet, std.Buffer.append, 3);
std.testing.expect(mem.eql(u8, buf3.toSlice(), "S{ .a = S{ .a = S{ .a = S{ ... }, .tu = TU{ ... }, .e = E.Two, .vec = (10.200,2.220) }, .tu = TU{ .ptr = TU{ ... } }, .e = E.Two, .vec = (10.200,2.220) }, .tu = TU{ .ptr = TU{ .ptr = TU{ ... } } }, .e = E.Two, .vec = (10.200,2.220) }"));
}

View File

@ -598,8 +598,8 @@ pub const Dir = struct {
self.index = 0;
self.end_index = io.Information;
switch (rc) {
w.STATUS.SUCCESS => {},
w.STATUS.ACCESS_DENIED => return error.AccessDenied,
.SUCCESS => {},
.ACCESS_DENIED => return error.AccessDenied,
else => return w.unexpectedStatus(rc),
}
}
@ -837,16 +837,16 @@ pub const Dir = struct {
0,
);
switch (rc) {
w.STATUS.SUCCESS => return result,
w.STATUS.OBJECT_NAME_INVALID => unreachable,
w.STATUS.OBJECT_NAME_NOT_FOUND => return error.FileNotFound,
w.STATUS.OBJECT_PATH_NOT_FOUND => return error.FileNotFound,
w.STATUS.INVALID_PARAMETER => unreachable,
w.STATUS.SHARING_VIOLATION => return error.SharingViolation,
w.STATUS.ACCESS_DENIED => return error.AccessDenied,
w.STATUS.PIPE_BUSY => return error.PipeBusy,
w.STATUS.OBJECT_PATH_SYNTAX_BAD => unreachable,
w.STATUS.OBJECT_NAME_COLLISION => return error.PathAlreadyExists,
.SUCCESS => return result,
.OBJECT_NAME_INVALID => unreachable,
.OBJECT_NAME_NOT_FOUND => return error.FileNotFound,
.OBJECT_PATH_NOT_FOUND => return error.FileNotFound,
.INVALID_PARAMETER => unreachable,
.SHARING_VIOLATION => return error.SharingViolation,
.ACCESS_DENIED => return error.AccessDenied,
.PIPE_BUSY => return error.PipeBusy,
.OBJECT_PATH_SYNTAX_BAD => unreachable,
.OBJECT_NAME_COLLISION => return error.PathAlreadyExists,
else => return w.unexpectedStatus(rc),
}
}
@ -990,11 +990,11 @@ pub const Dir = struct {
0,
);
switch (rc) {
w.STATUS.SUCCESS => return result,
w.STATUS.OBJECT_NAME_INVALID => unreachable,
w.STATUS.OBJECT_NAME_NOT_FOUND => return error.FileNotFound,
w.STATUS.OBJECT_PATH_NOT_FOUND => return error.FileNotFound,
w.STATUS.INVALID_PARAMETER => unreachable,
.SUCCESS => return result,
.OBJECT_NAME_INVALID => unreachable,
.OBJECT_NAME_NOT_FOUND => return error.FileNotFound,
.OBJECT_PATH_NOT_FOUND => return error.FileNotFound,
.INVALID_PARAMETER => unreachable,
else => return w.unexpectedStatus(rc),
}
}

View File

@ -225,10 +225,10 @@ pub const File = struct {
var info: windows.FILE_ALL_INFORMATION = undefined;
const rc = windows.ntdll.NtQueryInformationFile(self.handle, &io_status_block, &info, @sizeOf(windows.FILE_ALL_INFORMATION), .FileAllInformation);
switch (rc) {
windows.STATUS.SUCCESS => {},
windows.STATUS.BUFFER_OVERFLOW => {},
windows.STATUS.INVALID_PARAMETER => unreachable,
windows.STATUS.ACCESS_DENIED => return error.AccessDenied,
.SUCCESS => {},
.BUFFER_OVERFLOW => {},
.INVALID_PARAMETER => unreachable,
.ACCESS_DENIED => return error.AccessDenied,
else => return windows.unexpectedStatus(rc),
}
return Stat{

View File

@ -665,15 +665,16 @@ pub fn resolvePosix(allocator: *Allocator, paths: []const []const u8) ![]u8 {
}
test "resolve" {
const cwd = try process.getCwdAlloc(debug.global_allocator);
const cwd = try process.getCwdAlloc(testing.allocator);
defer testing.allocator.free(cwd);
if (builtin.os == .windows) {
if (windowsParsePath(cwd).kind == WindowsPath.Kind.Drive) {
cwd[0] = asciiUpper(cwd[0]);
}
testing.expect(mem.eql(u8, testResolveWindows(&[_][]const u8{"."}), cwd));
try testResolveWindows(&[_][]const u8{"."}, cwd);
} else {
testing.expect(mem.eql(u8, testResolvePosix(&[_][]const u8{ "a/b/c/", "../../.." }), cwd));
testing.expect(mem.eql(u8, testResolvePosix(&[_][]const u8{"."}), cwd));
try testResolvePosix(&[_][]const u8{ "a/b/c/", "../../.." }, cwd);
try testResolvePosix(&[_][]const u8{"."}, cwd);
}
}
@ -683,66 +684,71 @@ test "resolveWindows" {
return error.SkipZigTest;
}
if (builtin.os == .windows) {
const cwd = try process.getCwdAlloc(debug.global_allocator);
const cwd = try process.getCwdAlloc(testing.allocator);
defer testing.allocator.free(cwd);
const parsed_cwd = windowsParsePath(cwd);
{
const result = testResolveWindows(&[_][]const u8{ "/usr/local", "lib\\zig\\std\\array_list.zig" });
const expected = try join(debug.global_allocator, &[_][]const u8{
const expected = try join(testing.allocator, &[_][]const u8{
parsed_cwd.disk_designator,
"usr\\local\\lib\\zig\\std\\array_list.zig",
});
defer testing.allocator.free(expected);
if (parsed_cwd.kind == WindowsPath.Kind.Drive) {
expected[0] = asciiUpper(parsed_cwd.disk_designator[0]);
}
testing.expect(mem.eql(u8, result, expected));
try testResolveWindows(&[_][]const u8{ "/usr/local", "lib\\zig\\std\\array_list.zig" }, expected);
}
{
const result = testResolveWindows(&[_][]const u8{ "usr/local", "lib\\zig" });
const expected = try join(debug.global_allocator, &[_][]const u8{
const expected = try join(testing.allocator, &[_][]const u8{
cwd,
"usr\\local\\lib\\zig",
});
defer testing.allocator.free(expected);
if (parsed_cwd.kind == WindowsPath.Kind.Drive) {
expected[0] = asciiUpper(parsed_cwd.disk_designator[0]);
}
testing.expect(mem.eql(u8, result, expected));
try testResolveWindows(&[_][]const u8{ "usr/local", "lib\\zig" }, expected);
}
}
testing.expect(mem.eql(u8, testResolveWindows(&[_][]const u8{ "c:\\a\\b\\c", "/hi", "ok" }), "C:\\hi\\ok"));
testing.expect(mem.eql(u8, testResolveWindows(&[_][]const u8{ "c:/blah\\blah", "d:/games", "c:../a" }), "C:\\blah\\a"));
testing.expect(mem.eql(u8, testResolveWindows(&[_][]const u8{ "c:/blah\\blah", "d:/games", "C:../a" }), "C:\\blah\\a"));
testing.expect(mem.eql(u8, testResolveWindows(&[_][]const u8{ "c:/ignore", "d:\\a/b\\c/d", "\\e.exe" }), "D:\\e.exe"));
testing.expect(mem.eql(u8, testResolveWindows(&[_][]const u8{ "c:/ignore", "c:/some/file" }), "C:\\some\\file"));
testing.expect(mem.eql(u8, testResolveWindows(&[_][]const u8{ "d:/ignore", "d:some/dir//" }), "D:\\ignore\\some\\dir"));
testing.expect(mem.eql(u8, testResolveWindows(&[_][]const u8{ "//server/share", "..", "relative\\" }), "\\\\server\\share\\relative"));
testing.expect(mem.eql(u8, testResolveWindows(&[_][]const u8{ "c:/", "//" }), "C:\\"));
testing.expect(mem.eql(u8, testResolveWindows(&[_][]const u8{ "c:/", "//dir" }), "C:\\dir"));
testing.expect(mem.eql(u8, testResolveWindows(&[_][]const u8{ "c:/", "//server/share" }), "\\\\server\\share\\"));
testing.expect(mem.eql(u8, testResolveWindows(&[_][]const u8{ "c:/", "//server//share" }), "\\\\server\\share\\"));
testing.expect(mem.eql(u8, testResolveWindows(&[_][]const u8{ "c:/", "///some//dir" }), "C:\\some\\dir"));
testing.expect(mem.eql(u8, testResolveWindows(&[_][]const u8{ "C:\\foo\\tmp.3\\", "..\\tmp.3\\cycles\\root.js" }), "C:\\foo\\tmp.3\\cycles\\root.js"));
try testResolveWindows(&[_][]const u8{ "c:\\a\\b\\c", "/hi", "ok" }, "C:\\hi\\ok");
try testResolveWindows(&[_][]const u8{ "c:/blah\\blah", "d:/games", "c:../a" }, "C:\\blah\\a");
try testResolveWindows(&[_][]const u8{ "c:/blah\\blah", "d:/games", "C:../a" }, "C:\\blah\\a");
try testResolveWindows(&[_][]const u8{ "c:/ignore", "d:\\a/b\\c/d", "\\e.exe" }, "D:\\e.exe");
try testResolveWindows(&[_][]const u8{ "c:/ignore", "c:/some/file" }, "C:\\some\\file");
try testResolveWindows(&[_][]const u8{ "d:/ignore", "d:some/dir//" }, "D:\\ignore\\some\\dir");
try testResolveWindows(&[_][]const u8{ "//server/share", "..", "relative\\" }, "\\\\server\\share\\relative");
try testResolveWindows(&[_][]const u8{ "c:/", "//" }, "C:\\");
try testResolveWindows(&[_][]const u8{ "c:/", "//dir" }, "C:\\dir");
try testResolveWindows(&[_][]const u8{ "c:/", "//server/share" }, "\\\\server\\share\\");
try testResolveWindows(&[_][]const u8{ "c:/", "//server//share" }, "\\\\server\\share\\");
try testResolveWindows(&[_][]const u8{ "c:/", "///some//dir" }, "C:\\some\\dir");
try testResolveWindows(&[_][]const u8{ "C:\\foo\\tmp.3\\", "..\\tmp.3\\cycles\\root.js" }, "C:\\foo\\tmp.3\\cycles\\root.js");
}
test "resolvePosix" {
testing.expect(mem.eql(u8, testResolvePosix(&[_][]const u8{ "/a/b", "c" }), "/a/b/c"));
testing.expect(mem.eql(u8, testResolvePosix(&[_][]const u8{ "/a/b", "c", "//d", "e///" }), "/d/e"));
testing.expect(mem.eql(u8, testResolvePosix(&[_][]const u8{ "/a/b/c", "..", "../" }), "/a"));
testing.expect(mem.eql(u8, testResolvePosix(&[_][]const u8{ "/", "..", ".." }), "/"));
testing.expect(mem.eql(u8, testResolvePosix(&[_][]const u8{"/a/b/c/"}), "/a/b/c"));
try testResolvePosix(&[_][]const u8{ "/a/b", "c" }, "/a/b/c");
try testResolvePosix(&[_][]const u8{ "/a/b", "c", "//d", "e///" }, "/d/e");
try testResolvePosix(&[_][]const u8{ "/a/b/c", "..", "../" }, "/a");
try testResolvePosix(&[_][]const u8{ "/", "..", ".." }, "/");
try testResolvePosix(&[_][]const u8{"/a/b/c/"}, "/a/b/c");
testing.expect(mem.eql(u8, testResolvePosix(&[_][]const u8{ "/var/lib", "../", "file/" }), "/var/file"));
testing.expect(mem.eql(u8, testResolvePosix(&[_][]const u8{ "/var/lib", "/../", "file/" }), "/file"));
testing.expect(mem.eql(u8, testResolvePosix(&[_][]const u8{ "/some/dir", ".", "/absolute/" }), "/absolute"));
testing.expect(mem.eql(u8, testResolvePosix(&[_][]const u8{ "/foo/tmp.3/", "../tmp.3/cycles/root.js" }), "/foo/tmp.3/cycles/root.js"));
try testResolvePosix(&[_][]const u8{ "/var/lib", "../", "file/" }, "/var/file");
try testResolvePosix(&[_][]const u8{ "/var/lib", "/../", "file/" }, "/file");
try testResolvePosix(&[_][]const u8{ "/some/dir", ".", "/absolute/" }, "/absolute");
try testResolvePosix(&[_][]const u8{ "/foo/tmp.3/", "../tmp.3/cycles/root.js" }, "/foo/tmp.3/cycles/root.js");
}
fn testResolveWindows(paths: []const []const u8) []u8 {
return resolveWindows(debug.global_allocator, paths) catch unreachable;
fn testResolveWindows(paths: []const []const u8, expected: []const u8) !void {
const actual = try resolveWindows(testing.allocator, paths);
defer testing.allocator.free(actual);
return testing.expect(mem.eql(u8, actual, expected));
}
fn testResolvePosix(paths: []const []const u8) []u8 {
return resolvePosix(debug.global_allocator, paths) catch unreachable;
fn testResolvePosix(paths: []const []const u8, expected: []const u8) !void {
const actual = try resolvePosix(testing.allocator, paths);
defer testing.allocator.free(actual);
return testing.expect(mem.eql(u8, actual, expected));
}
/// If the path is a file in the current directory (no directory component)
@ -1126,51 +1132,53 @@ test "relative" {
// TODO https://github.com/ziglang/zig/issues/3288
return error.SkipZigTest;
}
testRelativeWindows("c:/blah\\blah", "d:/games", "D:\\games");
testRelativeWindows("c:/aaaa/bbbb", "c:/aaaa", "..");
testRelativeWindows("c:/aaaa/bbbb", "c:/cccc", "..\\..\\cccc");
testRelativeWindows("c:/aaaa/bbbb", "c:/aaaa/bbbb", "");
testRelativeWindows("c:/aaaa/bbbb", "c:/aaaa/cccc", "..\\cccc");
testRelativeWindows("c:/aaaa/", "c:/aaaa/cccc", "cccc");
testRelativeWindows("c:/", "c:\\aaaa\\bbbb", "aaaa\\bbbb");
testRelativeWindows("c:/aaaa/bbbb", "d:\\", "D:\\");
testRelativeWindows("c:/AaAa/bbbb", "c:/aaaa/bbbb", "");
testRelativeWindows("c:/aaaaa/", "c:/aaaa/cccc", "..\\aaaa\\cccc");
testRelativeWindows("C:\\foo\\bar\\baz\\quux", "C:\\", "..\\..\\..\\..");
testRelativeWindows("C:\\foo\\test", "C:\\foo\\test\\bar\\package.json", "bar\\package.json");
testRelativeWindows("C:\\foo\\bar\\baz-quux", "C:\\foo\\bar\\baz", "..\\baz");
testRelativeWindows("C:\\foo\\bar\\baz", "C:\\foo\\bar\\baz-quux", "..\\baz-quux");
testRelativeWindows("\\\\foo\\bar", "\\\\foo\\bar\\baz", "baz");
testRelativeWindows("\\\\foo\\bar\\baz", "\\\\foo\\bar", "..");
testRelativeWindows("\\\\foo\\bar\\baz-quux", "\\\\foo\\bar\\baz", "..\\baz");
testRelativeWindows("\\\\foo\\bar\\baz", "\\\\foo\\bar\\baz-quux", "..\\baz-quux");
testRelativeWindows("C:\\baz-quux", "C:\\baz", "..\\baz");
testRelativeWindows("C:\\baz", "C:\\baz-quux", "..\\baz-quux");
testRelativeWindows("\\\\foo\\baz-quux", "\\\\foo\\baz", "..\\baz");
testRelativeWindows("\\\\foo\\baz", "\\\\foo\\baz-quux", "..\\baz-quux");
testRelativeWindows("C:\\baz", "\\\\foo\\bar\\baz", "\\\\foo\\bar\\baz");
testRelativeWindows("\\\\foo\\bar\\baz", "C:\\baz", "C:\\baz");
try testRelativeWindows("c:/blah\\blah", "d:/games", "D:\\games");
try testRelativeWindows("c:/aaaa/bbbb", "c:/aaaa", "..");
try testRelativeWindows("c:/aaaa/bbbb", "c:/cccc", "..\\..\\cccc");
try testRelativeWindows("c:/aaaa/bbbb", "c:/aaaa/bbbb", "");
try testRelativeWindows("c:/aaaa/bbbb", "c:/aaaa/cccc", "..\\cccc");
try testRelativeWindows("c:/aaaa/", "c:/aaaa/cccc", "cccc");
try testRelativeWindows("c:/", "c:\\aaaa\\bbbb", "aaaa\\bbbb");
try testRelativeWindows("c:/aaaa/bbbb", "d:\\", "D:\\");
try testRelativeWindows("c:/AaAa/bbbb", "c:/aaaa/bbbb", "");
try testRelativeWindows("c:/aaaaa/", "c:/aaaa/cccc", "..\\aaaa\\cccc");
try testRelativeWindows("C:\\foo\\bar\\baz\\quux", "C:\\", "..\\..\\..\\..");
try testRelativeWindows("C:\\foo\\test", "C:\\foo\\test\\bar\\package.json", "bar\\package.json");
try testRelativeWindows("C:\\foo\\bar\\baz-quux", "C:\\foo\\bar\\baz", "..\\baz");
try testRelativeWindows("C:\\foo\\bar\\baz", "C:\\foo\\bar\\baz-quux", "..\\baz-quux");
try testRelativeWindows("\\\\foo\\bar", "\\\\foo\\bar\\baz", "baz");
try testRelativeWindows("\\\\foo\\bar\\baz", "\\\\foo\\bar", "..");
try testRelativeWindows("\\\\foo\\bar\\baz-quux", "\\\\foo\\bar\\baz", "..\\baz");
try testRelativeWindows("\\\\foo\\bar\\baz", "\\\\foo\\bar\\baz-quux", "..\\baz-quux");
try testRelativeWindows("C:\\baz-quux", "C:\\baz", "..\\baz");
try testRelativeWindows("C:\\baz", "C:\\baz-quux", "..\\baz-quux");
try testRelativeWindows("\\\\foo\\baz-quux", "\\\\foo\\baz", "..\\baz");
try testRelativeWindows("\\\\foo\\baz", "\\\\foo\\baz-quux", "..\\baz-quux");
try testRelativeWindows("C:\\baz", "\\\\foo\\bar\\baz", "\\\\foo\\bar\\baz");
try testRelativeWindows("\\\\foo\\bar\\baz", "C:\\baz", "C:\\baz");
testRelativePosix("/var/lib", "/var", "..");
testRelativePosix("/var/lib", "/bin", "../../bin");
testRelativePosix("/var/lib", "/var/lib", "");
testRelativePosix("/var/lib", "/var/apache", "../apache");
testRelativePosix("/var/", "/var/lib", "lib");
testRelativePosix("/", "/var/lib", "var/lib");
testRelativePosix("/foo/test", "/foo/test/bar/package.json", "bar/package.json");
testRelativePosix("/Users/a/web/b/test/mails", "/Users/a/web/b", "../..");
testRelativePosix("/foo/bar/baz-quux", "/foo/bar/baz", "../baz");
testRelativePosix("/foo/bar/baz", "/foo/bar/baz-quux", "../baz-quux");
testRelativePosix("/baz-quux", "/baz", "../baz");
testRelativePosix("/baz", "/baz-quux", "../baz-quux");
try testRelativePosix("/var/lib", "/var", "..");
try testRelativePosix("/var/lib", "/bin", "../../bin");
try testRelativePosix("/var/lib", "/var/lib", "");
try testRelativePosix("/var/lib", "/var/apache", "../apache");
try testRelativePosix("/var/", "/var/lib", "lib");
try testRelativePosix("/", "/var/lib", "var/lib");
try testRelativePosix("/foo/test", "/foo/test/bar/package.json", "bar/package.json");
try testRelativePosix("/Users/a/web/b/test/mails", "/Users/a/web/b", "../..");
try testRelativePosix("/foo/bar/baz-quux", "/foo/bar/baz", "../baz");
try testRelativePosix("/foo/bar/baz", "/foo/bar/baz-quux", "../baz-quux");
try testRelativePosix("/baz-quux", "/baz", "../baz");
try testRelativePosix("/baz", "/baz-quux", "../baz-quux");
}
fn testRelativePosix(from: []const u8, to: []const u8, expected_output: []const u8) void {
const result = relativePosix(debug.global_allocator, from, to) catch unreachable;
fn testRelativePosix(from: []const u8, to: []const u8, expected_output: []const u8) !void {
const result = try relativePosix(testing.allocator, from, to);
defer testing.allocator.free(result);
testing.expectEqualSlices(u8, expected_output, result);
}
fn testRelativeWindows(from: []const u8, to: []const u8, expected_output: []const u8) void {
const result = relativeWindows(debug.global_allocator, from, to) catch unreachable;
fn testRelativeWindows(from: []const u8, to: []const u8, expected_output: []const u8) !void {
const result = try relativeWindows(testing.allocator, from, to);
defer testing.allocator.free(result);
testing.expectEqualSlices(u8, expected_output, result);
}

View File

@ -234,8 +234,8 @@ test "hash pointer" {
test "hash slice shallow" {
// Allocate one array dynamically so that we're assured it is not merged
// with the other by the optimization passes.
const array1 = try std.heap.page_allocator.create([6]u32);
defer std.heap.page_allocator.destroy(array1);
const array1 = try std.testing.allocator.create([6]u32);
defer std.testing.allocator.destroy(array1);
array1.* = [_]u32{ 1, 2, 3, 4, 5, 6 };
const array2 = [_]u32{ 1, 2, 3, 4, 5, 6 };
const a = array1[0..];
@ -250,8 +250,8 @@ test "hash slice shallow" {
test "hash slice deep" {
// Allocate one array dynamically so that we're assured it is not merged
// with the other by the optimization passes.
const array1 = try std.heap.page_allocator.create([6]u32);
defer std.heap.page_allocator.destroy(array1);
const array1 = try std.testing.allocator.create([6]u32);
defer std.testing.allocator.destroy(array1);
array1.* = [_]u32{ 1, 2, 3, 4, 5, 6 };
const array2 = [_]u32{ 1, 2, 3, 4, 5, 6 };
const a = array1[0..];
@ -278,7 +278,7 @@ test "hash struct deep" {
}
};
const allocator = std.heap.page_allocator;
const allocator = std.testing.allocator;
const foo = try Foo.init(allocator, 123, 1.0, true);
const bar = try Foo.init(allocator, 123, 1.0, true);
const baz = try Foo.init(allocator, 123, 1.0, false);

View File

@ -250,13 +250,13 @@ pub fn main() !void {
if (H.has_iterative_api) {
prng.seed(seed);
const result = try benchmarkHash(H, count);
try stdout.print(" iterative: {:4} MiB/s [{x:0<16}]\n", .{result.throughput / (1 * MiB), result.hash});
try stdout.print(" iterative: {:4} MiB/s [{x:0<16}]\n", .{ result.throughput / (1 * MiB), result.hash });
}
if (!test_iterative_only) {
prng.seed(seed);
const result_small = try benchmarkHashSmallKeys(H, key_size, count);
try stdout.print(" small keys: {:4} MiB/s [{x:0<16}]\n", .{result_small.throughput / (1 * MiB), result_small.hash});
try stdout.print(" small keys: {:4} MiB/s [{x:0<16}]\n", .{ result_small.throughput / (1 * MiB), result_small.hash });
}
}
}

View File

@ -419,7 +419,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
}
test "basic hash map usage" {
var map = AutoHashMap(i32, i32).init(std.heap.page_allocator);
var map = AutoHashMap(i32, i32).init(std.testing.allocator);
defer map.deinit();
testing.expect((try map.put(1, 11)) == null);
@ -463,7 +463,7 @@ test "basic hash map usage" {
}
test "iterator hash map" {
var reset_map = AutoHashMap(i32, i32).init(std.heap.page_allocator);
var reset_map = AutoHashMap(i32, i32).init(std.testing.allocator);
defer reset_map.deinit();
try reset_map.putNoClobber(1, 11);
@ -509,7 +509,7 @@ test "iterator hash map" {
}
test "ensure capacity" {
var map = AutoHashMap(i32, i32).init(std.heap.page_allocator);
var map = AutoHashMap(i32, i32).init(std.testing.allocator);
defer map.deinit();
try map.ensureCapacity(20);

View File

@ -711,6 +711,10 @@ pub const ThreadSafeFixedBufferAllocator = blk: {
fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
return old_mem[0..new_size];
}
pub fn reset(self: *ThreadSafeFixedBufferAllocator) void {
self.end_index = 0;
}
};
}
};
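
A usage sketch for the newly added `reset` (a minimal example, assuming single-threaded use for clarity): rewinding `end_index` makes the whole buffer available again and implicitly invalidates anything allocated before the reset.

    const std = @import("std");

    test "reuse a ThreadSafeFixedBufferAllocator" {
        var buf: [1024]u8 = undefined;
        var tsfba = std.heap.ThreadSafeFixedBufferAllocator.init(buf[0..]);
        const allocator = &tsfba.allocator;

        const first = try allocator.alloc(u8, 512);
        std.testing.expect(first.len == 512);

        tsfba.reset(); // end_index back to 0; `first` must not be used after this

        const second = try allocator.alloc(u8, 1024); // whole buffer available again
        std.testing.expect(second.len == 1024);
    }
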

View File

@ -57,7 +57,7 @@ test "LoggingAllocator" {
var slice_stream = std.io.SliceOutStream.init(buf[0..]);
const stream = &slice_stream.stream;
const allocator = &LoggingAllocator.init(std.heap.page_allocator, @ptrCast(*AnyErrorOutStream, stream)).allocator;
const allocator = &LoggingAllocator.init(std.testing.allocator, @ptrCast(*AnyErrorOutStream, stream)).allocator;
const ptr = try allocator.alloc(u8, 10);
allocator.free(ptr);

View File

@ -891,7 +891,7 @@ pub fn readLineSlice(slice: []u8) ![]u8 {
pub fn readLineSliceFrom(stream: var, slice: []u8) ![]u8 {
// We cannot use Buffer.fromOwnedSlice, as it wants to append a null byte
// after taking ownership, which would always require an allocation.
var buf = std.Buffer{ .list = std.ArrayList(u8).fromOwnedSlice(debug.failing_allocator, slice) };
var buf = std.Buffer{ .list = std.ArrayList(u8).fromOwnedSlice(testing.failing_allocator, slice) };
try buf.resize(0);
return try readLineFrom(stream, &buf);
}

View File

@ -143,7 +143,7 @@ pub fn SinglyLinkedList(comptime T: type) type {
}
test "basic SinglyLinkedList test" {
const allocator = debug.global_allocator;
const allocator = testing.allocator;
var list = SinglyLinkedList(u32).init();
var one = try list.createNode(1, allocator);
@ -404,7 +404,7 @@ pub fn TailQueue(comptime T: type) type {
}
test "basic TailQueue test" {
const allocator = debug.global_allocator;
const allocator = testing.allocator;
var list = TailQueue(u32).init();
var one = try list.createNode(1, allocator);
@ -456,7 +456,7 @@ test "basic TailQueue test" {
}
test "TailQueue concatenation" {
const allocator = debug.global_allocator;
const allocator = testing.allocator;
var list1 = TailQueue(u32).init();
var list2 = TailQueue(u32).init();

View File

@ -126,7 +126,7 @@ else if (builtin.os == .windows)
// then unset the WAKE bit so that another unlocker can wake up a thread.
} else if (@cmpxchgWeak(u32, &self.waiters, waiters, (waiters + WAIT) | 1, .Monotonic, .Monotonic) == null) {
const rc = windows.ntdll.NtWaitForKeyedEvent(handle, key, windows.FALSE, null);
assert(rc == 0);
assert(rc == .SUCCESS);
_ = @atomicRmw(u32, &self.waiters, .Sub, WAKE, .Monotonic);
}
}
@ -154,7 +154,7 @@ else if (builtin.os == .windows)
// try to decrease the waiter count & set the WAKE bit meaning a thread is waking up
if (@cmpxchgWeak(u32, &self.mutex.waiters, waiters, waiters - WAIT + WAKE, .Release, .Monotonic) == null) {
const rc = windows.ntdll.NtReleaseKeyedEvent(handle, key, windows.FALSE, null);
assert(rc == 0);
assert(rc == .SUCCESS);
return;
}
}
@ -306,12 +306,6 @@ const TestContext = struct {
};
test "std.Mutex" {
var plenty_of_memory = try std.heap.page_allocator.alloc(u8, 300 * 1024);
defer std.heap.page_allocator.free(plenty_of_memory);
var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
var a = &fixed_buffer_allocator.allocator;
var mutex = Mutex.init();
defer mutex.deinit();

View File

@ -1176,15 +1176,15 @@ pub fn unlinkatW(dirfd: fd_t, sub_path_w: [*:0]const u16, flags: u32) UnlinkatEr
null,
0,
);
if (rc == w.STATUS.SUCCESS) {
if (rc == .SUCCESS) {
rc = w.ntdll.NtClose(tmp_handle);
}
switch (rc) {
w.STATUS.SUCCESS => return,
w.STATUS.OBJECT_NAME_INVALID => unreachable,
w.STATUS.OBJECT_NAME_NOT_FOUND => return error.FileNotFound,
w.STATUS.INVALID_PARAMETER => unreachable,
w.STATUS.FILE_IS_A_DIRECTORY => return error.IsDir,
.SUCCESS => return,
.OBJECT_NAME_INVALID => unreachable,
.OBJECT_NAME_NOT_FOUND => return error.FileNotFound,
.INVALID_PARAMETER => unreachable,
.FILE_IS_A_DIRECTORY => return error.IsDir,
else => return w.unexpectedStatus(rc),
}
}
@ -2345,9 +2345,9 @@ pub fn accessW(path: [*:0]const u16, mode: u32) windows.GetFileAttributesError!v
return;
}
switch (windows.kernel32.GetLastError()) {
windows.ERROR.FILE_NOT_FOUND => return error.FileNotFound,
windows.ERROR.PATH_NOT_FOUND => return error.FileNotFound,
windows.ERROR.ACCESS_DENIED => return error.PermissionDenied,
.FILE_NOT_FOUND => return error.FileNotFound,
.PATH_NOT_FOUND => return error.FileNotFound,
.ACCESS_DENIED => return error.PermissionDenied,
else => |err| return windows.unexpectedError(err),
}
}
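
The shape repeated throughout these os/windows hunks is the same: the error and status codes are now non-exhaustive enums, so known values are matched with enum literals and everything else is forwarded to `unexpectedError`/`unexpectedStatus`. A sketch of a hypothetical wrapper written in that style (the helper itself is not part of this change):

    const std = @import("std");
    const windows = std.os.windows;

    fn deleteOrIgnoreMissing(path_w: [*:0]const u16) !void {
        if (windows.kernel32.DeleteFileW(path_w) == 0) {
            switch (windows.kernel32.GetLastError()) {
                // Enum literals resolve against the non-exhaustive error-code enum.
                .FILE_NOT_FOUND, .PATH_NOT_FOUND => return, // treat as already gone
                else => |err| return windows.unexpectedError(err),
            }
        }
    }
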

View File

@ -604,6 +604,11 @@ pub const CLONE_NEWPID = 0x20000000;
pub const CLONE_NEWNET = 0x40000000;
pub const CLONE_IO = 0x80000000;
// Flags for the clone3() syscall.
/// Clear any signal handler and reset to SIG_DFL.
pub const CLONE_CLEAR_SIGHAND = 0x100000000;
pub const EFD_SEMAPHORE = 1;
pub const EFD_CLOEXEC = O_CLOEXEC;
pub const EFD_NONBLOCK = O_NONBLOCK;
@ -1120,17 +1125,22 @@ pub const io_uring_params = extern struct {
// io_uring_params.features flags
pub const IORING_FEAT_SINGLE_MMAP = 1 << 0;
pub const IORING_FEAT_NODROP = 1 << 1;
pub const IORING_FEAT_SUBMIT_STABLE = 1 << 2;
// io_uring_params.flags
/// io_context is polled
pub const IORING_SETUP_IOPOLL = (1 << 0);
pub const IORING_SETUP_IOPOLL = 1 << 0;
/// SQ poll thread
pub const IORING_SETUP_SQPOLL = (1 << 1);
pub const IORING_SETUP_SQPOLL = 1 << 1;
/// sq_thread_cpu is valid
pub const IORING_SETUP_SQ_AFF = (1 << 2);
pub const IORING_SETUP_SQ_AFF = 1 << 2;
/// app defines CQ size
pub const IORING_SETUP_CQSIZE = 1 << 3;
pub const io_sqring_offsets = extern struct {
/// offset of ring head
@ -1178,52 +1188,73 @@ pub const io_uring_sqe = extern struct {
flags: u8,
ioprio: u16,
fd: i32,
off: u64,
pub const union1 = extern union {
off: u64,
addr2: u64,
};
union1: union1,
addr: u64,
len: u32,
pub const union1 = extern union {
pub const union2 = extern union {
rw_flags: kernel_rwf,
fsync_flags: u32,
poll_events: u16,
sync_range_flags: u32,
msg_flags: u32,
timeout_flags: u32,
accept_flags: u32,
cancel_flags: u32,
};
union1: union1,
union2: union2,
user_data: u64,
pub const union2 = extern union {
pub const union3 = extern union {
buf_index: u16,
__pad2: [3]u64,
};
union2: union2,
union3: union3,
};
// io_uring_sqe.flags
/// use fixed fileset
pub const IOSQE_FIXED_FILE = (1 << 0);
pub const IOSQE_FIXED_FILE = 1 << 0;
/// issue after inflight IO
pub const IOSQE_IO_DRAIN = (1 << 1);
pub const IOSQE_IO_DRAIN = 1 << 1;
/// links next sqe
pub const IOSQE_IO_LINK = (1 << 2);
pub const IOSQE_IO_LINK = 1 << 2;
pub const IORING_OP_NOP = 0;
pub const IORING_OP_READV = 1;
pub const IORING_OP_WRITEV = 2;
pub const IORING_OP_FSYNC = 3;
pub const IORING_OP_READ_FIXED = 4;
pub const IORING_OP_WRITE_FIXED = 5;
pub const IORING_OP_POLL_ADD = 6;
pub const IORING_OP_POLL_REMOVE = 7;
pub const IORING_OP_SYNC_FILE_RANGE = 8;
pub const IORING_OP_SENDMSG = 9;
pub const IORING_OP_RECVMSG = 10;
pub const IORING_OP_TIMEOUT = 11;
/// like LINK, but stronger
pub const IOSQE_IO_HARDLINK = 1 << 3;
pub const IORING_OP = extern enum {
NOP,
READV,
WRITEV,
FSYNC,
READ_FIXED,
WRITE_FIXED,
POLL_ADD,
POLL_REMOVE,
SYNC_FILE_RANGE,
SENDMSG,
RECVMSG,
TIMEOUT,
TIMEOUT_REMOVE,
ACCEPT,
ASYNC_CANCEL,
LINK_TIMEOUT,
CONNECT,
_,
};
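
Because the opcode constants collapse into one non-exhaustive enum, call sites can use enum literals and still round-trip values a newer kernel might send. A sketch under the assumption that `std.os.linux` re-exports these definitions (Linux-only, hypothetical test):

    const std = @import("std");
    const linux = std.os.linux;

    test "IORING_OP round-trips through its integer value" {
        const op: linux.IORING_OP = .READV;
        std.testing.expect(@enumToInt(op) == 1); // matches the old IORING_OP_READV = 1

        // An opcode this build does not know about still fits thanks to the trailing `_`.
        const future = @intToEnum(linux.IORING_OP, 42);
        std.testing.expect(@enumToInt(future) == 42);
    }
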
// io_uring_sqe.fsync_flags
pub const IORING_FSYNC_DATASYNC = (1 << 0);
pub const IORING_FSYNC_DATASYNC = 1 << 0;
// io_uring_sqe.timeout_flags
pub const IORING_TIMEOUT_ABS = 1 << 0;
// IO completion data structure (Completion Queue Entry)
pub const io_uring_cqe = extern struct {
@ -1240,8 +1271,8 @@ pub const IORING_OFF_CQ_RING = 0x8000000;
pub const IORING_OFF_SQES = 0x10000000;
// io_uring_enter flags
pub const IORING_ENTER_GETEVENTS = (1 << 0);
pub const IORING_ENTER_SQ_WAKEUP = (1 << 1);
pub const IORING_ENTER_GETEVENTS = 1 << 0;
pub const IORING_ENTER_SQ_WAKEUP = 1 << 1;
// io_uring_register opcodes and arguments
pub const IORING_REGISTER_BUFFERS = 0;
@ -1250,6 +1281,13 @@ pub const IORING_REGISTER_FILES = 2;
pub const IORING_UNREGISTER_FILES = 3;
pub const IORING_REGISTER_EVENTFD = 4;
pub const IORING_UNREGISTER_EVENTFD = 5;
pub const IORING_REGISTER_FILES_UPDATE = 6;
pub const io_uring_files_update = struct {
offset: u32,
resv: u32,
fds: u64,
};
pub const utsname = extern struct {
sysname: [65]u8,

View File

@ -9,7 +9,7 @@ const elf = std.elf;
const File = std.fs.File;
const Thread = std.Thread;
const a = std.debug.global_allocator;
const a = std.testing.allocator;
const builtin = @import("builtin");
const AtomicRmwOp = builtin.AtomicRmwOp;
@ -235,8 +235,8 @@ test "pipe" {
}
test "argsAlloc" {
var args = try std.process.argsAlloc(std.heap.page_allocator);
std.process.argsFree(std.heap.page_allocator, args);
var args = try std.process.argsAlloc(std.testing.allocator);
std.process.argsFree(std.testing.allocator, args);
}
test "memfd_create" {

View File

@ -40,7 +40,8 @@ pub const Guid = extern struct {
self.time_mid,
self.time_high_and_version,
self.clock_seq_high_and_reserved,
self.clock_seq_low, self.node,
self.clock_seq_low,
self.node,
});
} else {
@compileError("Unknown format character: '" ++ f ++ "'");

View File

@ -15,6 +15,7 @@ pub const advapi32 = @import("windows/advapi32.zig");
pub const kernel32 = @import("windows/kernel32.zig");
pub const ntdll = @import("windows/ntdll.zig");
pub const ole32 = @import("windows/ole32.zig");
pub const psapi = @import("windows/psapi.zig");
pub const shell32 = @import("windows/shell32.zig");
pub const ws2_32 = @import("windows/ws2_32.zig");
@ -72,14 +73,14 @@ pub fn CreateFileW(
if (result == INVALID_HANDLE_VALUE) {
switch (kernel32.GetLastError()) {
ERROR.SHARING_VIOLATION => return error.SharingViolation,
ERROR.ALREADY_EXISTS => return error.PathAlreadyExists,
ERROR.FILE_EXISTS => return error.PathAlreadyExists,
ERROR.FILE_NOT_FOUND => return error.FileNotFound,
ERROR.PATH_NOT_FOUND => return error.FileNotFound,
ERROR.ACCESS_DENIED => return error.AccessDenied,
ERROR.PIPE_BUSY => return error.PipeBusy,
ERROR.FILENAME_EXCED_RANGE => return error.NameTooLong,
.SHARING_VIOLATION => return error.SharingViolation,
.ALREADY_EXISTS => return error.PathAlreadyExists,
.FILE_EXISTS => return error.PathAlreadyExists,
.FILE_NOT_FOUND => return error.FileNotFound,
.PATH_NOT_FOUND => return error.FileNotFound,
.ACCESS_DENIED => return error.AccessDenied,
.PIPE_BUSY => return error.PipeBusy,
.FILENAME_EXCED_RANGE => return error.NameTooLong,
else => |err| return unexpectedError(err),
}
}
@ -132,7 +133,7 @@ pub fn DeviceIoControl(
overlapped,
) == 0) {
switch (kernel32.GetLastError()) {
ERROR.IO_PENDING => if (overlapped == null) unreachable,
.IO_PENDING => if (overlapped == null) unreachable,
else => |err| return unexpectedError(err),
}
}
@ -143,7 +144,7 @@ pub fn GetOverlappedResult(h: HANDLE, overlapped: *OVERLAPPED, wait: bool) !DWOR
var bytes: DWORD = undefined;
if (kernel32.GetOverlappedResult(h, overlapped, &bytes, @boolToInt(wait)) == 0) {
switch (kernel32.GetLastError()) {
ERROR.IO_INCOMPLETE => if (!wait) return error.WouldBlock else unreachable,
.IO_INCOMPLETE => if (!wait) return error.WouldBlock else unreachable,
else => |err| return unexpectedError(err),
}
}
@ -246,8 +247,8 @@ pub fn FindFirstFile(dir_path: []const u8, find_file_data: *WIN32_FIND_DATAW) Fi
if (handle == INVALID_HANDLE_VALUE) {
switch (kernel32.GetLastError()) {
ERROR.FILE_NOT_FOUND => return error.FileNotFound,
ERROR.PATH_NOT_FOUND => return error.FileNotFound,
.FILE_NOT_FOUND => return error.FileNotFound,
.PATH_NOT_FOUND => return error.FileNotFound,
else => |err| return unexpectedError(err),
}
}
@ -261,7 +262,7 @@ pub const FindNextFileError = error{Unexpected};
pub fn FindNextFile(handle: HANDLE, find_file_data: *WIN32_FIND_DATAW) FindNextFileError!bool {
if (kernel32.FindNextFileW(handle, find_file_data) == 0) {
switch (kernel32.GetLastError()) {
ERROR.NO_MORE_FILES => return false,
.NO_MORE_FILES => return false,
else => |err| return unexpectedError(err),
}
}
@ -278,7 +279,7 @@ pub fn CreateIoCompletionPort(
) CreateIoCompletionPortError!HANDLE {
const handle = kernel32.CreateIoCompletionPort(file_handle, existing_completion_port, completion_key, concurrent_thread_count) orelse {
switch (kernel32.GetLastError()) {
ERROR.INVALID_PARAMETER => unreachable,
.INVALID_PARAMETER => unreachable,
else => |err| return unexpectedError(err),
}
};
@ -322,9 +323,9 @@ pub fn GetQueuedCompletionStatus(
dwMilliseconds,
) == FALSE) {
switch (kernel32.GetLastError()) {
ERROR.ABANDONED_WAIT_0 => return GetQueuedCompletionStatusResult.Aborted,
ERROR.OPERATION_ABORTED => return GetQueuedCompletionStatusResult.Cancelled,
ERROR.HANDLE_EOF => return GetQueuedCompletionStatusResult.EOF,
.ABANDONED_WAIT_0 => return GetQueuedCompletionStatusResult.Aborted,
.OPERATION_ABORTED => return GetQueuedCompletionStatusResult.Cancelled,
.HANDLE_EOF => return GetQueuedCompletionStatusResult.EOF,
else => |err| {
if (std.debug.runtime_safety) {
std.debug.panic("unexpected error: {}\n", .{err});
@ -352,8 +353,8 @@ pub fn ReadFile(in_hFile: HANDLE, buffer: []u8) ReadFileError!usize {
var amt_read: DWORD = undefined;
if (kernel32.ReadFile(in_hFile, buffer.ptr + index, want_read_count, &amt_read, null) == 0) {
switch (kernel32.GetLastError()) {
ERROR.OPERATION_ABORTED => continue,
ERROR.BROKEN_PIPE => return index,
.OPERATION_ABORTED => continue,
.BROKEN_PIPE => return index,
else => |err| return unexpectedError(err),
}
}
@ -377,12 +378,12 @@ pub fn WriteFile(handle: HANDLE, bytes: []const u8) WriteFileError!void {
// TODO replace this @intCast with a loop that writes all the bytes
if (kernel32.WriteFile(handle, bytes.ptr, @intCast(u32, bytes.len), &bytes_written, null) == 0) {
switch (kernel32.GetLastError()) {
ERROR.INVALID_USER_BUFFER => return error.SystemResources,
ERROR.NOT_ENOUGH_MEMORY => return error.SystemResources,
ERROR.OPERATION_ABORTED => return error.OperationAborted,
ERROR.NOT_ENOUGH_QUOTA => return error.SystemResources,
ERROR.IO_PENDING => unreachable, // this function is for blocking files only
ERROR.BROKEN_PIPE => return error.BrokenPipe,
.INVALID_USER_BUFFER => return error.SystemResources,
.NOT_ENOUGH_MEMORY => return error.SystemResources,
.OPERATION_ABORTED => return error.OperationAborted,
.NOT_ENOUGH_QUOTA => return error.SystemResources,
.IO_PENDING => unreachable, // this function is for blocking files only
.BROKEN_PIPE => return error.BrokenPipe,
else => |err| return unexpectedError(err),
}
}
@ -456,12 +457,12 @@ pub fn DeleteFile(filename: []const u8) DeleteFileError!void {
pub fn DeleteFileW(filename: [*:0]const u16) DeleteFileError!void {
if (kernel32.DeleteFileW(filename) == 0) {
switch (kernel32.GetLastError()) {
ERROR.FILE_NOT_FOUND => return error.FileNotFound,
ERROR.PATH_NOT_FOUND => return error.FileNotFound,
ERROR.ACCESS_DENIED => return error.AccessDenied,
ERROR.FILENAME_EXCED_RANGE => return error.NameTooLong,
ERROR.INVALID_PARAMETER => return error.NameTooLong,
ERROR.SHARING_VIOLATION => return error.FileBusy,
.FILE_NOT_FOUND => return error.FileNotFound,
.PATH_NOT_FOUND => return error.FileNotFound,
.ACCESS_DENIED => return error.AccessDenied,
.FILENAME_EXCED_RANGE => return error.NameTooLong,
.INVALID_PARAMETER => return error.NameTooLong,
.SHARING_VIOLATION => return error.FileBusy,
else => |err| return unexpectedError(err),
}
}
@ -497,8 +498,8 @@ pub fn CreateDirectory(pathname: []const u8, attrs: ?*SECURITY_ATTRIBUTES) Creat
pub fn CreateDirectoryW(pathname: [*:0]const u16, attrs: ?*SECURITY_ATTRIBUTES) CreateDirectoryError!void {
if (kernel32.CreateDirectoryW(pathname, attrs) == 0) {
switch (kernel32.GetLastError()) {
ERROR.ALREADY_EXISTS => return error.PathAlreadyExists,
ERROR.PATH_NOT_FOUND => return error.FileNotFound,
.ALREADY_EXISTS => return error.PathAlreadyExists,
.PATH_NOT_FOUND => return error.FileNotFound,
else => |err| return unexpectedError(err),
}
}
@ -518,8 +519,8 @@ pub fn RemoveDirectory(dir_path: []const u8) RemoveDirectoryError!void {
pub fn RemoveDirectoryW(dir_path_w: [*:0]const u16) RemoveDirectoryError!void {
if (kernel32.RemoveDirectoryW(dir_path_w) == 0) {
switch (kernel32.GetLastError()) {
ERROR.PATH_NOT_FOUND => return error.FileNotFound,
ERROR.DIR_NOT_EMPTY => return error.DirNotEmpty,
.PATH_NOT_FOUND => return error.FileNotFound,
.DIR_NOT_EMPTY => return error.DirNotEmpty,
else => |err| return unexpectedError(err),
}
}
@ -550,8 +551,8 @@ pub fn SetFilePointerEx_BEGIN(handle: HANDLE, offset: u64) SetFilePointerError!v
const ipos = @bitCast(LARGE_INTEGER, offset);
if (kernel32.SetFilePointerEx(handle, ipos, null, FILE_BEGIN) == 0) {
switch (kernel32.GetLastError()) {
ERROR.INVALID_PARAMETER => unreachable,
ERROR.INVALID_HANDLE => unreachable,
.INVALID_PARAMETER => unreachable,
.INVALID_HANDLE => unreachable,
else => |err| return unexpectedError(err),
}
}
@ -561,8 +562,8 @@ pub fn SetFilePointerEx_BEGIN(handle: HANDLE, offset: u64) SetFilePointerError!v
pub fn SetFilePointerEx_CURRENT(handle: HANDLE, offset: i64) SetFilePointerError!void {
if (kernel32.SetFilePointerEx(handle, offset, null, FILE_CURRENT) == 0) {
switch (kernel32.GetLastError()) {
ERROR.INVALID_PARAMETER => unreachable,
ERROR.INVALID_HANDLE => unreachable,
.INVALID_PARAMETER => unreachable,
.INVALID_HANDLE => unreachable,
else => |err| return unexpectedError(err),
}
}
@ -572,8 +573,8 @@ pub fn SetFilePointerEx_CURRENT(handle: HANDLE, offset: i64) SetFilePointerError
pub fn SetFilePointerEx_END(handle: HANDLE, offset: i64) SetFilePointerError!void {
if (kernel32.SetFilePointerEx(handle, offset, null, FILE_END) == 0) {
switch (kernel32.GetLastError()) {
ERROR.INVALID_PARAMETER => unreachable,
ERROR.INVALID_HANDLE => unreachable,
.INVALID_PARAMETER => unreachable,
.INVALID_HANDLE => unreachable,
else => |err| return unexpectedError(err),
}
}
@ -584,8 +585,8 @@ pub fn SetFilePointerEx_CURRENT_get(handle: HANDLE) SetFilePointerError!u64 {
var result: LARGE_INTEGER = undefined;
if (kernel32.SetFilePointerEx(handle, 0, &result, FILE_CURRENT) == 0) {
switch (kernel32.GetLastError()) {
ERROR.INVALID_PARAMETER => unreachable,
ERROR.INVALID_HANDLE => unreachable,
.INVALID_PARAMETER => unreachable,
.INVALID_HANDLE => unreachable,
else => |err| return unexpectedError(err),
}
}
@ -610,11 +611,11 @@ pub fn GetFinalPathNameByHandleW(
const rc = kernel32.GetFinalPathNameByHandleW(hFile, buf_ptr, buf_len, flags);
if (rc == 0) {
switch (kernel32.GetLastError()) {
ERROR.FILE_NOT_FOUND => return error.FileNotFound,
ERROR.PATH_NOT_FOUND => return error.FileNotFound,
ERROR.NOT_ENOUGH_MEMORY => return error.SystemResources,
ERROR.FILENAME_EXCED_RANGE => return error.NameTooLong,
ERROR.INVALID_PARAMETER => unreachable,
.FILE_NOT_FOUND => return error.FileNotFound,
.PATH_NOT_FOUND => return error.FileNotFound,
.NOT_ENOUGH_MEMORY => return error.SystemResources,
.FILENAME_EXCED_RANGE => return error.NameTooLong,
.INVALID_PARAMETER => unreachable,
else => |err| return unexpectedError(err),
}
}
@ -648,9 +649,9 @@ pub fn GetFileAttributesW(lpFileName: [*:0]const u16) GetFileAttributesError!DWO
const rc = kernel32.GetFileAttributesW(lpFileName);
if (rc == INVALID_FILE_ATTRIBUTES) {
switch (kernel32.GetLastError()) {
ERROR.FILE_NOT_FOUND => return error.FileNotFound,
ERROR.PATH_NOT_FOUND => return error.FileNotFound,
ERROR.ACCESS_DENIED => return error.PermissionDenied,
.FILE_NOT_FOUND => return error.FileNotFound,
.PATH_NOT_FOUND => return error.FileNotFound,
.ACCESS_DENIED => return error.PermissionDenied,
else => |err| return unexpectedError(err),
}
}
@ -661,7 +662,7 @@ pub fn WSAStartup(majorVersion: u8, minorVersion: u8) !ws2_32.WSADATA {
var wsadata: ws2_32.WSADATA = undefined;
return switch (ws2_32.WSAStartup((@as(WORD, minorVersion) << 8) | majorVersion, &wsadata)) {
0 => wsadata,
else => |err| unexpectedWSAError(err),
else => |err| unexpectedWSAError(@intToEnum(WinsockError, err)),
};
}
@ -686,10 +687,10 @@ pub fn WSASocketW(
const rc = ws2_32.WSASocketW(af, socket_type, protocol, protocolInfo, g, dwFlags);
if (rc == ws2_32.INVALID_SOCKET) {
switch (ws2_32.WSAGetLastError()) {
ws2_32.WSAEAFNOSUPPORT => return error.AddressFamilyNotSupported,
ws2_32.WSAEMFILE => return error.ProcessFdQuotaExceeded,
ws2_32.WSAENOBUFS => return error.SystemResources,
ws2_32.WSAEPROTONOSUPPORT => return error.ProtocolNotSupported,
.WSAEAFNOSUPPORT => return error.AddressFamilyNotSupported,
.WSAEMFILE => return error.ProcessFdQuotaExceeded,
.WSAENOBUFS => return error.SystemResources,
.WSAEPROTONOSUPPORT => return error.ProtocolNotSupported,
else => |err| return unexpectedWSAError(err),
}
}
@ -800,7 +801,7 @@ pub fn GetEnvironmentVariableW(lpName: LPWSTR, lpBuffer: [*]u16, nSize: DWORD) G
const rc = kernel32.GetEnvironmentVariableW(lpName, lpBuffer, nSize);
if (rc == 0) {
switch (kernel32.GetLastError()) {
ERROR.ENVVAR_NOT_FOUND => return error.EnvironmentVariableNotFound,
.ENVVAR_NOT_FOUND => return error.EnvironmentVariableNotFound,
else => |err| return unexpectedError(err),
}
}
@ -839,11 +840,11 @@ pub fn CreateProcessW(
lpProcessInformation,
) == 0) {
switch (kernel32.GetLastError()) {
ERROR.FILE_NOT_FOUND => return error.FileNotFound,
ERROR.PATH_NOT_FOUND => return error.FileNotFound,
ERROR.ACCESS_DENIED => return error.AccessDenied,
ERROR.INVALID_PARAMETER => unreachable,
ERROR.INVALID_NAME => return error.InvalidName,
.FILE_NOT_FOUND => return error.FileNotFound,
.PATH_NOT_FOUND => return error.FileNotFound,
.ACCESS_DENIED => return error.AccessDenied,
.INVALID_PARAMETER => unreachable,
.INVALID_NAME => return error.InvalidName,
else => |err| return unexpectedError(err),
}
}
@ -857,9 +858,9 @@ pub const LoadLibraryError = error{
pub fn LoadLibraryW(lpLibFileName: [*:0]const u16) LoadLibraryError!HMODULE {
return kernel32.LoadLibraryW(lpLibFileName) orelse {
switch (kernel32.GetLastError()) {
ERROR.FILE_NOT_FOUND => return error.FileNotFound,
ERROR.PATH_NOT_FOUND => return error.FileNotFound,
ERROR.MOD_NOT_FOUND => return error.FileNotFound,
.FILE_NOT_FOUND => return error.FileNotFound,
.PATH_NOT_FOUND => return error.FileNotFound,
.MOD_NOT_FOUND => return error.FileNotFound,
else => |err| return unexpectedError(err),
}
};
@ -1036,28 +1037,28 @@ inline fn MAKELANGID(p: c_ushort, s: c_ushort) LANGID {
/// Call this when you made a windows DLL call or something that does SetLastError
/// and you get an unexpected error.
pub fn unexpectedError(err: DWORD) std.os.UnexpectedError {
pub fn unexpectedError(err: Win32Error) std.os.UnexpectedError {
if (std.os.unexpected_error_tracing) {
// 614 is the length of the longest windows error description
var buf_u16: [614]u16 = undefined;
var buf_u8: [614]u8 = undefined;
var len = kernel32.FormatMessageW(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, null, err, MAKELANGID(LANG.NEUTRAL, SUBLANG.DEFAULT), buf_u16[0..].ptr, buf_u16.len / @sizeOf(TCHAR), null);
_ = std.unicode.utf16leToUtf8(&buf_u8, buf_u16[0..len]) catch unreachable;
std.debug.warn("error.Unexpected: GetLastError({}): {}\n", .{ err, buf_u8[0..len] });
std.debug.warn("error.Unexpected: GetLastError({}): {}\n", .{ @enumToInt(err), buf_u8[0..len] });
std.debug.dumpCurrentStackTrace(null);
}
return error.Unexpected;
}
pub fn unexpectedWSAError(err: c_int) std.os.UnexpectedError {
return unexpectedError(@intCast(DWORD, err));
pub fn unexpectedWSAError(err: WinsockError) std.os.UnexpectedError {
return unexpectedError(@intToEnum(Win32Error, @enumToInt(err)));
}
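// Illustrative usage sketch, not part of this patch: a hypothetical wrapper
// showing how GetLastError's new Win32Error return type is consumed. The
// wrapper name and the error mapping below are made up for the example;
// only GetLastError, GetCurrentDirectoryW, and unexpectedError come from
// the surrounding code.
pub fn currentDirExample(buf: []u16) !DWORD {
    const rc = kernel32.GetCurrentDirectoryW(@intCast(DWORD, buf.len), buf.ptr);
    if (rc == 0) {
        switch (kernel32.GetLastError()) {
            .NOT_ENOUGH_MEMORY => return error.SystemResources,
            else => |err| return unexpectedError(err),
        }
    }
    return rc;
}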
/// Call this when you made a windows NtDll call
/// and you get an unexpected status.
pub fn unexpectedStatus(status: NTSTATUS) std.os.UnexpectedError {
if (std.os.unexpected_error_tracing) {
std.debug.warn("error.Unexpected NTSTATUS=0x{x}\n", .{status});
std.debug.warn("error.Unexpected NTSTATUS=0x{x}\n", .{@enumToInt(status)});
std.debug.dumpCurrentStackTrace(null);
}
return error.Unexpected;

View File

@ -5,8 +5,8 @@ const std = @import("../../std.zig");
const assert = std.debug.assert;
const maxInt = std.math.maxInt;
pub const ERROR = @import("error.zig");
pub const STATUS = @import("status.zig");
pub usingnamespace @import("win32error.zig");
pub usingnamespace @import("ntstatus.zig");
pub const LANG = @import("lang.zig");
pub const SUBLANG = @import("sublang.zig");
@ -23,7 +23,6 @@ pub const BOOL = c_int;
pub const BOOLEAN = BYTE;
pub const BYTE = u8;
pub const CHAR = u8;
pub const DWORD = u32;
pub const FLOAT = f32;
pub const HANDLE = *c_void;
pub const HCRYPTPROV = ULONG_PTR;
@ -52,6 +51,8 @@ pub const DWORD_PTR = ULONG_PTR;
pub const UNICODE = false;
pub const WCHAR = u16;
pub const WORD = u16;
pub const DWORD = u32;
pub const DWORD64 = u64;
pub const LARGE_INTEGER = i64;
pub const USHORT = u16;
pub const SHORT = i16;
@ -61,7 +62,6 @@ pub const ULONGLONG = u64;
pub const LONGLONG = i64;
pub const HLOCAL = HANDLE;
pub const LANGID = c_ushort;
pub const NTSTATUS = ULONG;
pub const va_list = *@OpaqueType();
@ -887,9 +887,236 @@ pub const EXCEPTION_RECORD = extern struct {
ExceptionInformation: [15]usize,
};
pub usingnamespace switch (builtin.arch) {
.i386 => struct {
pub const FLOATING_SAVE_AREA = extern struct {
ControlWord: DWORD,
StatusWord: DWORD,
TagWord: DWORD,
ErrorOffset: DWORD,
ErrorSelector: DWORD,
DataOffset: DWORD,
DataSelector: DWORD,
RegisterArea: [80]BYTE,
Cr0NpxState: DWORD,
};
pub const CONTEXT = extern struct {
ContextFlags: DWORD,
Dr0: DWORD,
Dr1: DWORD,
Dr2: DWORD,
Dr3: DWORD,
Dr6: DWORD,
Dr7: DWORD,
FloatSave: FLOATING_SAVE_AREA,
SegGs: DWORD,
SegFs: DWORD,
SegEs: DWORD,
SegDs: DWORD,
Edi: DWORD,
Esi: DWORD,
Ebx: DWORD,
Edx: DWORD,
Ecx: DWORD,
Eax: DWORD,
Ebp: DWORD,
Eip: DWORD,
SegCs: DWORD,
EFlags: DWORD,
Esp: DWORD,
SegSs: DWORD,
ExtendedRegisters: [512]BYTE,
pub fn getRegs(ctx: *const CONTEXT) struct { bp: usize, ip: usize } {
return .{ .bp = ctx.Ebp, .ip = ctx.Eip };
}
};
pub const PCONTEXT = *CONTEXT;
},
.x86_64 => struct {
pub const M128A = extern struct {
Low: ULONGLONG,
High: LONGLONG,
};
pub const XMM_SAVE_AREA32 = extern struct {
ControlWord: WORD,
StatusWord: WORD,
TagWord: BYTE,
Reserved1: BYTE,
ErrorOpcode: WORD,
ErrorOffset: DWORD,
ErrorSelector: WORD,
Reserved2: WORD,
DataOffset: DWORD,
DataSelector: WORD,
Reserved3: WORD,
MxCsr: DWORD,
MxCsr_Mask: DWORD,
FloatRegisters: [8]M128A,
XmmRegisters: [16]M128A,
Reserved4: [96]BYTE,
};
pub const CONTEXT = extern struct {
P1Home: DWORD64,
P2Home: DWORD64,
P3Home: DWORD64,
P4Home: DWORD64,
P5Home: DWORD64,
P6Home: DWORD64,
ContextFlags: DWORD,
MxCsr: DWORD,
SegCs: WORD,
SegDs: WORD,
SegEs: WORD,
SegFs: WORD,
SegGs: WORD,
SegSs: WORD,
EFlags: DWORD,
Dr0: DWORD64,
Dr1: DWORD64,
Dr2: DWORD64,
Dr3: DWORD64,
Dr6: DWORD64,
Dr7: DWORD64,
Rax: DWORD64,
Rcx: DWORD64,
Rdx: DWORD64,
Rbx: DWORD64,
Rsp: DWORD64,
Rbp: DWORD64,
Rsi: DWORD64,
Rdi: DWORD64,
R8: DWORD64,
R9: DWORD64,
R10: DWORD64,
R11: DWORD64,
R12: DWORD64,
R13: DWORD64,
R14: DWORD64,
R15: DWORD64,
Rip: DWORD64,
DUMMYUNIONNAME: extern union {
FltSave: XMM_SAVE_AREA32,
FloatSave: XMM_SAVE_AREA32,
DUMMYSTRUCTNAME: extern struct {
Header: [2]M128A,
Legacy: [8]M128A,
Xmm0: M128A,
Xmm1: M128A,
Xmm2: M128A,
Xmm3: M128A,
Xmm4: M128A,
Xmm5: M128A,
Xmm6: M128A,
Xmm7: M128A,
Xmm8: M128A,
Xmm9: M128A,
Xmm10: M128A,
Xmm11: M128A,
Xmm12: M128A,
Xmm13: M128A,
Xmm14: M128A,
Xmm15: M128A,
},
},
VectorRegister: [26]M128A,
VectorControl: DWORD64,
DebugControl: DWORD64,
LastBranchToRip: DWORD64,
LastBranchFromRip: DWORD64,
LastExceptionToRip: DWORD64,
LastExceptionFromRip: DWORD64,
pub fn getRegs(ctx: *const CONTEXT) struct { bp: usize, ip: usize } {
return .{ .bp = ctx.Rbp, .ip = ctx.Rip };
}
};
pub const PCONTEXT = *CONTEXT;
},
.aarch64 => struct {
pub const NEON128 = extern union {
DUMMYSTRUCTNAME: extern struct {
Low: ULONGLONG,
High: LONGLONG,
},
D: [2]f64,
S: [4]f32,
H: [8]WORD,
B: [16]BYTE,
};
pub const CONTEXT = extern struct {
ContextFlags: ULONG,
Cpsr: ULONG,
DUMMYUNIONNAME: extern union {
DUMMYSTRUCTNAME: extern struct {
X0: DWORD64,
X1: DWORD64,
X2: DWORD64,
X3: DWORD64,
X4: DWORD64,
X5: DWORD64,
X6: DWORD64,
X7: DWORD64,
X8: DWORD64,
X9: DWORD64,
X10: DWORD64,
X11: DWORD64,
X12: DWORD64,
X13: DWORD64,
X14: DWORD64,
X15: DWORD64,
X16: DWORD64,
X17: DWORD64,
X18: DWORD64,
X19: DWORD64,
X20: DWORD64,
X21: DWORD64,
X22: DWORD64,
X23: DWORD64,
X24: DWORD64,
X25: DWORD64,
X26: DWORD64,
X27: DWORD64,
X28: DWORD64,
Fp: DWORD64,
Lr: DWORD64,
},
X: [31]DWORD64,
},
Sp: DWORD64,
Pc: DWORD64,
V: [32]NEON128,
Fpcr: DWORD,
Fpsr: DWORD,
Bcr: [8]DWORD,
Bvr: [8]DWORD64,
Wcr: [2]DWORD,
Wvr: [2]DWORD64,
pub fn getRegs(ctx: *const CONTEXT) struct { bp: usize, ip: usize } {
return .{
.bp = ctx.DUMMYUNIONNAME.DUMMYSTRUCTNAME.Fp,
.ip = ctx.Pc,
};
}
};
pub const PCONTEXT = *CONTEXT;
},
else => struct {
pub const PCONTEXT = *c_void;
},
};
pub const EXCEPTION_POINTERS = extern struct {
ExceptionRecord: *EXCEPTION_RECORD,
ContextRecord: *c_void,
ContextRecord: PCONTEXT,
};
pub const VECTORED_EXCEPTION_HANDLER = fn (ExceptionInfo: *EXCEPTION_POINTERS) callconv(.Stdcall) c_long;
@ -1012,3 +1239,85 @@ pub const CURDIR = extern struct {
};
pub const DUPLICATE_SAME_ACCESS = 2;
pub const MODULEINFO = extern struct {
lpBaseOfDll: LPVOID,
SizeOfImage: DWORD,
EntryPoint: LPVOID,
};
pub const LPMODULEINFO = *MODULEINFO;
pub const PSAPI_WS_WATCH_INFORMATION = extern struct {
FaultingPc: LPVOID,
FaultingVa: LPVOID,
};
pub const PPSAPI_WS_WATCH_INFORMATION = *PSAPI_WS_WATCH_INFORMATION;
pub const PROCESS_MEMORY_COUNTERS = extern struct {
cb: DWORD,
PageFaultCount: DWORD,
PeakWorkingSetSize: SIZE_T,
WorkingSetSize: SIZE_T,
QuotaPeakPagedPoolUsage: SIZE_T,
QuotaPagedPoolUsage: SIZE_T,
QuotaPeakNonPagedPoolUsage: SIZE_T,
QuotaNonPagedPoolUsage: SIZE_T,
PagefileUsage: SIZE_T,
PeakPagefileUsage: SIZE_T,
};
pub const PPROCESS_MEMORY_COUNTERS = *PROCESS_MEMORY_COUNTERS;
pub const PROCESS_MEMORY_COUNTERS_EX = extern struct {
cb: DWORD,
PageFaultCount: DWORD,
PeakWorkingSetSize: SIZE_T,
WorkingSetSize: SIZE_T,
QuotaPeakPagedPoolUsage: SIZE_T,
QuotaPagedPoolUsage: SIZE_T,
QuotaPeakNonPagedPoolUsage: SIZE_T,
QuotaNonPagedPoolUsage: SIZE_T,
PagefileUsage: SIZE_T,
PeakPagefileUsage: SIZE_T,
PrivateUsage: SIZE_T,
};
pub const PPROCESS_MEMORY_COUNTERS_EX = *PROCESS_MEMORY_COUNTERS_EX;
pub const PERFORMANCE_INFORMATION = extern struct {
cb: DWORD,
CommitTotal: SIZE_T,
CommitLimit: SIZE_T,
CommitPeak: SIZE_T,
PhysicalTotal: SIZE_T,
PhysicalAvailable: SIZE_T,
SystemCache: SIZE_T,
KernelTotal: SIZE_T,
KernelPaged: SIZE_T,
KernelNonpaged: SIZE_T,
PageSize: SIZE_T,
HandleCount: DWORD,
ProcessCount: DWORD,
ThreadCount: DWORD,
};
pub const PPERFORMANCE_INFORMATION = *PERFORMANCE_INFORMATION;
pub const PERFORMACE_INFORMATION = PERFORMANCE_INFORMATION;
pub const PPERFORMACE_INFORMATION = *PERFORMANCE_INFORMATION;
pub const ENUM_PAGE_FILE_INFORMATION = extern struct {
cb: DWORD,
Reserved: DWORD,
TotalSize: SIZE_T,
TotalInUse: SIZE_T,
PeakUsage: SIZE_T,
};
pub const PENUM_PAGE_FILE_INFORMATION = *ENUM_PAGE_FILE_INFORMATION;
pub const PENUM_PAGE_FILE_CALLBACKW = ?fn (?LPVOID, PENUM_PAGE_FILE_INFORMATION, LPCWSTR) callconv(.C) BOOL;
pub const PENUM_PAGE_FILE_CALLBACKA = ?fn (?LPVOID, PENUM_PAGE_FILE_INFORMATION, LPCSTR) callconv(.C) BOOL;
pub const PSAPI_WS_WATCH_INFORMATION_EX = extern struct {
BasicInfo: PSAPI_WS_WATCH_INFORMATION,
FaultingThreadId: ULONG_PTR,
Flags: ULONG_PTR,
};
pub const PPSAPI_WS_WATCH_INFORMATION_EX = *PSAPI_WS_WATCH_INFORMATION_EX;

File diff suppressed because it is too large

View File

@ -73,7 +73,7 @@ pub extern "kernel32" fn FindFirstFileW(lpFileName: [*:0]const u16, lpFindFileDa
pub extern "kernel32" fn FindClose(hFindFile: HANDLE) callconv(.Stdcall) BOOL;
pub extern "kernel32" fn FindNextFileW(hFindFile: HANDLE, lpFindFileData: *WIN32_FIND_DATAW) callconv(.Stdcall) BOOL;
pub extern "kernel32" fn FormatMessageW(dwFlags: DWORD, lpSource: ?LPVOID, dwMessageId: DWORD, dwLanguageId: DWORD, lpBuffer: [*]u16, nSize: DWORD, Arguments: ?*va_list) callconv(.Stdcall) DWORD;
pub extern "kernel32" fn FormatMessageW(dwFlags: DWORD, lpSource: ?LPVOID, dwMessageId: Win32Error, dwLanguageId: DWORD, lpBuffer: [*]u16, nSize: DWORD, Arguments: ?*va_list) callconv(.Stdcall) DWORD;
pub extern "kernel32" fn FreeEnvironmentStringsW(penv: [*:0]u16) callconv(.Stdcall) BOOL;
@ -88,6 +88,8 @@ pub extern "kernel32" fn GetCurrentDirectoryW(nBufferLength: DWORD, lpBuffer: ?[
pub extern "kernel32" fn GetCurrentThread() callconv(.Stdcall) HANDLE;
pub extern "kernel32" fn GetCurrentThreadId() callconv(.Stdcall) DWORD;
pub extern "kernel32" fn GetCurrentProcess() callconv(.Stdcall) HANDLE;
pub extern "kernel32" fn GetEnvironmentStringsW() callconv(.Stdcall) ?[*:0]u16;
pub extern "kernel32" fn GetEnvironmentVariableW(lpName: LPWSTR, lpBuffer: [*]u16, nSize: DWORD) callconv(.Stdcall) DWORD;
@ -102,7 +104,7 @@ pub extern "kernel32" fn GetModuleFileNameW(hModule: ?HMODULE, lpFilename: [*]u1
pub extern "kernel32" fn GetModuleHandleW(lpModuleName: ?[*]const WCHAR) callconv(.Stdcall) HMODULE;
pub extern "kernel32" fn GetLastError() callconv(.Stdcall) DWORD;
pub extern "kernel32" fn GetLastError() callconv(.Stdcall) Win32Error;
pub extern "kernel32" fn GetFileInformationByHandle(
hFile: HANDLE,
@ -246,3 +248,31 @@ pub extern "kernel32" fn LeaveCriticalSection(lpCriticalSection: *CRITICAL_SECTI
pub extern "kernel32" fn DeleteCriticalSection(lpCriticalSection: *CRITICAL_SECTION) callconv(.Stdcall) void;
pub extern "kernel32" fn InitOnceExecuteOnce(InitOnce: *INIT_ONCE, InitFn: INIT_ONCE_FN, Parameter: ?*c_void, Context: ?*c_void) callconv(.Stdcall) BOOL;
pub extern "kernel32" fn K32EmptyWorkingSet(hProcess: HANDLE) callconv(.Stdcall) BOOL;
pub extern "kernel32" fn K32EnumDeviceDrivers(lpImageBase: [*]LPVOID, cb: DWORD, lpcbNeeded: LPDWORD) callconv(.Stdcall) BOOL;
pub extern "kernel32" fn K32EnumPageFilesA(pCallBackRoutine: PENUM_PAGE_FILE_CALLBACKA, pContext: LPVOID) callconv(.Stdcall) BOOL;
pub extern "kernel32" fn K32EnumPageFilesW(pCallBackRoutine: PENUM_PAGE_FILE_CALLBACKW, pContext: LPVOID) callconv(.Stdcall) BOOL;
pub extern "kernel32" fn K32EnumProcessModules(hProcess: HANDLE, lphModule: [*]HMODULE, cb: DWORD, lpcbNeeded: LPDWORD) callconv(.Stdcall) BOOL;
pub extern "kernel32" fn K32EnumProcessModulesEx(hProcess: HANDLE, lphModule: [*]HMODULE, cb: DWORD, lpcbNeeded: LPDWORD, dwFilterFlag: DWORD) callconv(.Stdcall) BOOL;
pub extern "kernel32" fn K32EnumProcesses(lpidProcess: [*]DWORD, cb: DWORD, cbNeeded: LPDWORD) callconv(.Stdcall) BOOL;
pub extern "kernel32" fn K32GetDeviceDriverBaseNameA(ImageBase: LPVOID, lpBaseName: LPSTR, nSize: DWORD) callconv(.Stdcall) DWORD;
pub extern "kernel32" fn K32GetDeviceDriverBaseNameW(ImageBase: LPVOID, lpBaseName: LPWSTR, nSize: DWORD) callconv(.Stdcall) DWORD;
pub extern "kernel32" fn K32GetDeviceDriverFileNameA(ImageBase: LPVOID, lpFilename: LPSTR, nSize: DWORD) callconv(.Stdcall) DWORD;
pub extern "kernel32" fn K32GetDeviceDriverFileNameW(ImageBase: LPVOID, lpFilename: LPWSTR, nSize: DWORD) callconv(.Stdcall) DWORD;
pub extern "kernel32" fn K32GetMappedFileNameA(hProcess: HANDLE, lpv: ?LPVOID, lpFilename: LPSTR, nSize: DWORD) callconv(.Stdcall) DWORD;
pub extern "kernel32" fn K32GetMappedFileNameW(hProcess: HANDLE, lpv: ?LPVOID, lpFilename: LPWSTR, nSize: DWORD) callconv(.Stdcall) DWORD;
pub extern "kernel32" fn K32GetModuleBaseNameA(hProcess: HANDLE, hModule: ?HMODULE, lpBaseName: LPSTR, nSize: DWORD) callconv(.Stdcall) DWORD;
pub extern "kernel32" fn K32GetModuleBaseNameW(hProcess: HANDLE, hModule: ?HMODULE, lpBaseName: LPWSTR, nSize: DWORD) callconv(.Stdcall) DWORD;
pub extern "kernel32" fn K32GetModuleFileNameExA(hProcess: HANDLE, hModule: ?HMODULE, lpFilename: LPSTR, nSize: DWORD) callconv(.Stdcall) DWORD;
pub extern "kernel32" fn K32GetModuleFileNameExW(hProcess: HANDLE, hModule: ?HMODULE, lpFilename: LPWSTR, nSize: DWORD) callconv(.Stdcall) DWORD;
pub extern "kernel32" fn K32GetModuleInformation(hProcess: HANDLE, hModule: HMODULE, lpmodinfo: LPMODULEINFO, cb: DWORD) callconv(.Stdcall) BOOL;
pub extern "kernel32" fn K32GetPerformanceInfo(pPerformanceInformation: PPERFORMACE_INFORMATION, cb: DWORD) callconv(.Stdcall) BOOL;
pub extern "kernel32" fn K32GetProcessImageFileNameA(hProcess: HANDLE, lpImageFileName: LPSTR, nSize: DWORD) callconv(.Stdcall) DWORD;
pub extern "kernel32" fn K32GetProcessImageFileNameW(hProcess: HANDLE, lpImageFileName: LPWSTR, nSize: DWORD) callconv(.Stdcall) DWORD;
pub extern "kernel32" fn K32GetProcessMemoryInfo(Process: HANDLE, ppsmemCounters: PPROCESS_MEMORY_COUNTERS, cb: DWORD) callconv(.Stdcall) BOOL;
pub extern "kernel32" fn K32GetWsChanges(hProcess: HANDLE, lpWatchInfo: PPSAPI_WS_WATCH_INFORMATION, cb: DWORD) callconv(.Stdcall) BOOL;
pub extern "kernel32" fn K32GetWsChangesEx(hProcess: HANDLE, lpWatchInfoEx: PPSAPI_WS_WATCH_INFORMATION_EX, cb: DWORD) callconv(.Stdcall) BOOL;
pub extern "kernel32" fn K32InitializeProcessForWsWatch(hProcess: HANDLE) callconv(.Stdcall) BOOL;
pub extern "kernel32" fn K32QueryWorkingSet(hProcess: HANDLE, pv: PVOID, cb: DWORD) callconv(.Stdcall) BOOL;
pub extern "kernel32" fn K32QueryWorkingSetEx(hProcess: HANDLE, pv: PVOID, cb: DWORD) callconv(.Stdcall) BOOL;

File diff suppressed because it is too large

View File

@ -0,0 +1,29 @@
usingnamespace @import("bits.zig");
pub extern "psapi" fn EmptyWorkingSet(hProcess: HANDLE) callconv(.Stdcall) BOOL;
pub extern "psapi" fn EnumDeviceDrivers(lpImageBase: [*]LPVOID, cb: DWORD, lpcbNeeded: LPDWORD) callconv(.Stdcall) BOOL;
pub extern "psapi" fn EnumPageFilesA(pCallBackRoutine: PENUM_PAGE_FILE_CALLBACKA, pContext: LPVOID) callconv(.Stdcall) BOOL;
pub extern "psapi" fn EnumPageFilesW(pCallBackRoutine: PENUM_PAGE_FILE_CALLBACKW, pContext: LPVOID) callconv(.Stdcall) BOOL;
pub extern "psapi" fn EnumProcessModules(hProcess: HANDLE, lphModule: [*]HMODULE, cb: DWORD, lpcbNeeded: LPDWORD) callconv(.Stdcall) BOOL;
pub extern "psapi" fn EnumProcessModulesEx(hProcess: HANDLE, lphModule: [*]HMODULE, cb: DWORD, lpcbNeeded: LPDWORD, dwFilterFlag: DWORD) callconv(.Stdcall) BOOL;
pub extern "psapi" fn EnumProcesses(lpidProcess: [*]DWORD, cb: DWORD, cbNeeded: LPDWORD) callconv(.Stdcall) BOOL;
pub extern "psapi" fn GetDeviceDriverBaseNameA(ImageBase: LPVOID, lpBaseName: LPSTR, nSize: DWORD) callconv(.Stdcall) DWORD;
pub extern "psapi" fn GetDeviceDriverBaseNameW(ImageBase: LPVOID, lpBaseName: LPWSTR, nSize: DWORD) callconv(.Stdcall) DWORD;
pub extern "psapi" fn GetDeviceDriverFileNameA(ImageBase: LPVOID, lpFilename: LPSTR, nSize: DWORD) callconv(.Stdcall) DWORD;
pub extern "psapi" fn GetDeviceDriverFileNameW(ImageBase: LPVOID, lpFilename: LPWSTR, nSize: DWORD) callconv(.Stdcall) DWORD;
pub extern "psapi" fn GetMappedFileNameA(hProcess: HANDLE, lpv: ?LPVOID, lpFilename: LPSTR, nSize: DWORD) callconv(.Stdcall) DWORD;
pub extern "psapi" fn GetMappedFileNameW(hProcess: HANDLE, lpv: ?LPVOID, lpFilename: LPWSTR, nSize: DWORD) callconv(.Stdcall) DWORD;
pub extern "psapi" fn GetModuleBaseNameA(hProcess: HANDLE, hModule: ?HMODULE, lpBaseName: LPSTR, nSize: DWORD) callconv(.Stdcall) DWORD;
pub extern "psapi" fn GetModuleBaseNameW(hProcess: HANDLE, hModule: ?HMODULE, lpBaseName: LPWSTR, nSize: DWORD) callconv(.Stdcall) DWORD;
pub extern "psapi" fn GetModuleFileNameExA(hProcess: HANDLE, hModule: ?HMODULE, lpFilename: LPSTR, nSize: DWORD) callconv(.Stdcall) DWORD;
pub extern "psapi" fn GetModuleFileNameExW(hProcess: HANDLE, hModule: ?HMODULE, lpFilename: LPWSTR, nSize: DWORD) callconv(.Stdcall) DWORD;
pub extern "psapi" fn GetModuleInformation(hProcess: HANDLE, hModule: HMODULE, lpmodinfo: LPMODULEINFO, cb: DWORD) callconv(.Stdcall) BOOL;
pub extern "psapi" fn GetPerformanceInfo(pPerformanceInformation: PPERFORMACE_INFORMATION, cb: DWORD) callconv(.Stdcall) BOOL;
pub extern "psapi" fn GetProcessImageFileNameA(hProcess: HANDLE, lpImageFileName: LPSTR, nSize: DWORD) callconv(.Stdcall) DWORD;
pub extern "psapi" fn GetProcessImageFileNameW(hProcess: HANDLE, lpImageFileName: LPWSTR, nSize: DWORD) callconv(.Stdcall) DWORD;
pub extern "psapi" fn GetProcessMemoryInfo(Process: HANDLE, ppsmemCounters: PPROCESS_MEMORY_COUNTERS, cb: DWORD) callconv(.Stdcall) BOOL;
pub extern "psapi" fn GetWsChanges(hProcess: HANDLE, lpWatchInfo: PPSAPI_WS_WATCH_INFORMATION, cb: DWORD) callconv(.Stdcall) BOOL;
pub extern "psapi" fn GetWsChangesEx(hProcess: HANDLE, lpWatchInfoEx: PPSAPI_WS_WATCH_INFORMATION_EX, cb: DWORD) callconv(.Stdcall) BOOL;
pub extern "psapi" fn InitializeProcessForWsWatch(hProcess: HANDLE) callconv(.Stdcall) BOOL;
pub extern "psapi" fn QueryWorkingSet(hProcess: HANDLE, pv: PVOID, cb: DWORD) callconv(.Stdcall) BOOL;
pub extern "psapi" fn QueryWorkingSetEx(hProcess: HANDLE, pv: PVOID, cb: DWORD) callconv(.Stdcall) BOOL;

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -205,101 +205,454 @@ pub const WSAMSG = extern struct {
dwFlags: DWORD,
};
pub const WSA_INVALID_HANDLE = 6;
pub const WSA_NOT_ENOUGH_MEMORY = 8;
pub const WSA_INVALID_PARAMETER = 87;
pub const WSA_OPERATION_ABORTED = 995;
pub const WSA_IO_INCOMPLETE = 996;
pub const WSA_IO_PENDING = 997;
pub const WSAEINTR = 10004;
pub const WSAEBADF = 10009;
pub const WSAEACCES = 10013;
pub const WSAEFAULT = 10014;
pub const WSAEINVAL = 10022;
pub const WSAEMFILE = 10024;
pub const WSAEWOULDBLOCK = 10035;
pub const WSAEINPROGRESS = 10036;
pub const WSAEALREADY = 10037;
pub const WSAENOTSOCK = 10038;
pub const WSAEDESTADDRREQ = 10039;
pub const WSAEMSGSIZE = 10040;
pub const WSAEPROTOTYPE = 10041;
pub const WSAENOPROTOOPT = 10042;
pub const WSAEPROTONOSUPPORT = 10043;
pub const WSAESOCKTNOSUPPORT = 10044;
pub const WSAEOPNOTSUPP = 10045;
pub const WSAEPFNOSUPPORT = 10046;
pub const WSAEAFNOSUPPORT = 10047;
pub const WSAEADDRINUSE = 10048;
pub const WSAEADDRNOTAVAIL = 10049;
pub const WSAENETDOWN = 10050;
pub const WSAENETUNREACH = 10051;
pub const WSAENETRESET = 10052;
pub const WSAECONNABORTED = 10053;
pub const WSAECONNRESET = 10054;
pub const WSAENOBUFS = 10055;
pub const WSAEISCONN = 10056;
pub const WSAENOTCONN = 10057;
pub const WSAESHUTDOWN = 10058;
pub const WSAETOOMANYREFS = 10059;
pub const WSAETIMEDOUT = 10060;
pub const WSAECONNREFUSED = 10061;
pub const WSAELOOP = 10062;
pub const WSAENAMETOOLONG = 10063;
pub const WSAEHOSTDOWN = 10064;
pub const WSAEHOSTUNREACH = 10065;
pub const WSAENOTEMPTY = 10066;
pub const WSAEPROCLIM = 10067;
pub const WSAEUSERS = 10068;
pub const WSAEDQUOT = 10069;
pub const WSAESTALE = 10070;
pub const WSAEREMOTE = 10071;
pub const WSASYSNOTREADY = 10091;
pub const WSAVERNOTSUPPORTED = 10092;
pub const WSANOTINITIALISED = 10093;
pub const WSAEDISCON = 10101;
pub const WSAENOMORE = 10102;
pub const WSAECANCELLED = 10103;
pub const WSAEINVALIDPROCTABLE = 10104;
pub const WSAEINVALIDPROVIDER = 10105;
pub const WSAEPROVIDERFAILEDINIT = 10106;
pub const WSASYSCALLFAILURE = 10107;
pub const WSASERVICE_NOT_FOUND = 10108;
pub const WSATYPE_NOT_FOUND = 10109;
pub const WSA_E_NO_MORE = 10110;
pub const WSA_E_CANCELLED = 10111;
pub const WSAEREFUSED = 10112;
pub const WSAHOST_NOT_FOUND = 11001;
pub const WSATRY_AGAIN = 11002;
pub const WSANO_RECOVERY = 11003;
pub const WSANO_DATA = 11004;
pub const WSA_QOS_RECEIVERS = 11005;
pub const WSA_QOS_SENDERS = 11006;
pub const WSA_QOS_NO_SENDERS = 11007;
pub const WSA_QOS_NO_RECEIVERS = 11008;
pub const WSA_QOS_REQUEST_CONFIRMED = 11009;
pub const WSA_QOS_ADMISSION_FAILURE = 11010;
pub const WSA_QOS_POLICY_FAILURE = 11011;
pub const WSA_QOS_BAD_STYLE = 11012;
pub const WSA_QOS_BAD_OBJECT = 11013;
pub const WSA_QOS_TRAFFIC_CTRL_ERROR = 11014;
pub const WSA_QOS_GENERIC_ERROR = 11015;
pub const WSA_QOS_ESERVICETYPE = 11016;
pub const WSA_QOS_EFLOWSPEC = 11017;
pub const WSA_QOS_EPROVSPECBUF = 11018;
pub const WSA_QOS_EFILTERSTYLE = 11019;
pub const WSA_QOS_EFILTERTYPE = 11020;
pub const WSA_QOS_EFILTERCOUNT = 11021;
pub const WSA_QOS_EOBJLENGTH = 11022;
pub const WSA_QOS_EFLOWCOUNT = 11023;
pub const WSA_QOS_EUNKOWNPSOBJ = 11024;
pub const WSA_QOS_EPOLICYOBJ = 11025;
pub const WSA_QOS_EFLOWDESC = 11026;
pub const WSA_QOS_EPSFLOWSPEC = 11027;
pub const WSA_QOS_EPSFILTERSPEC = 11028;
pub const WSA_QOS_ESDMODEOBJ = 11029;
pub const WSA_QOS_ESHAPERATEOBJ = 11030;
pub const WSA_QOS_RESERVED_PETYPE = 11031;
// https://docs.microsoft.com/en-au/windows/win32/winsock/windows-sockets-error-codes-2
pub const WinsockError = extern enum(u16) {
/// Specified event object handle is invalid.
/// An application attempts to use an event object, but the specified handle is not valid.
WSA_INVALID_HANDLE = 6,
/// Insufficient memory available.
/// An application used a Windows Sockets function that directly maps to a Windows function.
/// The Windows function is indicating a lack of required memory resources.
WSA_NOT_ENOUGH_MEMORY = 8,
/// One or more parameters are invalid.
/// An application used a Windows Sockets function which directly maps to a Windows function.
/// The Windows function is indicating a problem with one or more parameters.
WSA_INVALID_PARAMETER = 87,
/// Overlapped operation aborted.
/// An overlapped operation was canceled due to the closure of the socket, or the execution of the SIO_FLUSH command in WSAIoctl.
WSA_OPERATION_ABORTED = 995,
/// Overlapped I/O event object not in signaled state.
/// The application has tried to determine the status of an overlapped operation which is not yet completed.
/// Applications that use WSAGetOverlappedResult (with the fWait flag set to FALSE) in a polling mode to determine when an overlapped operation has completed, get this error code until the operation is complete.
WSA_IO_INCOMPLETE = 996,
/// The application has initiated an overlapped operation that cannot be completed immediately.
/// A completion indication will be given later when the operation has been completed.
WSA_IO_PENDING = 997,
/// Interrupted function call.
/// A blocking operation was interrupted by a call to WSACancelBlockingCall.
WSAEINTR = 10004,
/// File handle is not valid.
/// The file handle supplied is not valid.
WSAEBADF = 10009,
/// Permission denied.
/// An attempt was made to access a socket in a way forbidden by its access permissions.
/// An example is using a broadcast address for sendto without broadcast permission being set using setsockopt(SO_BROADCAST).
/// Another possible reason for the WSAEACCES error is that when the bind function is called (on Windows NT 4.0 with SP4 and later), another application, service, or kernel mode driver is bound to the same address with exclusive access.
/// Such exclusive access is a new feature of Windows NT 4.0 with SP4 and later, and is implemented by using the SO_EXCLUSIVEADDRUSE option.
WSAEACCES = 10013,
/// Bad address.
/// The system detected an invalid pointer address in attempting to use a pointer argument of a call.
/// This error occurs if an application passes an invalid pointer value, or if the length of the buffer is too small.
/// For instance, if the length of an argument, which is a sockaddr structure, is smaller than the sizeof(sockaddr).
WSAEFAULT = 10014,
/// Invalid argument.
/// Some invalid argument was supplied (for example, specifying an invalid level to the setsockopt function).
/// In some instances, it also refers to the current state of the socket, for instance, calling accept on a socket that is not listening.
WSAEINVAL = 10022,
/// Too many open files.
/// Too many open sockets. Each implementation may have a maximum number of socket handles available, either globally, per process, or per thread.
WSAEMFILE = 10024,
/// Resource temporarily unavailable.
/// This error is returned from operations on nonblocking sockets that cannot be completed immediately, for example recv when no data is queued to be read from the socket.
/// It is a nonfatal error, and the operation should be retried later.
/// It is normal for WSAEWOULDBLOCK to be reported as the result from calling connect on a nonblocking SOCK_STREAM socket, since some time must elapse for the connection to be established.
WSAEWOULDBLOCK = 10035,
/// Operation now in progress.
/// A blocking operation is currently executing.
/// Windows Sockets only allows a single blocking operation (per task or thread) to be outstanding, and if any other function call is made (whether or not it references that or any other socket) the function fails with the WSAEINPROGRESS error.
WSAEINPROGRESS = 10036,
/// Operation already in progress.
/// An operation was attempted on a nonblocking socket with an operation already in progress, that is, calling connect a second time on a nonblocking socket that is already connecting, or canceling an asynchronous request (WSAAsyncGetXbyY) that has already been canceled or completed.
WSAEALREADY = 10037,
/// Socket operation on nonsocket.
/// An operation was attempted on something that is not a socket.
/// Either the socket handle parameter did not reference a valid socket, or for select, a member of an fd_set was not valid.
WSAENOTSOCK = 10038,
/// Destination address required.
/// A required address was omitted from an operation on a socket.
/// For example, this error is returned if sendto is called with the remote address of ADDR_ANY.
WSAEDESTADDRREQ = 10039,
/// Message too long.
/// A message sent on a datagram socket was larger than the internal message buffer or some other network limit, or the buffer used to receive a datagram was smaller than the datagram itself.
WSAEMSGSIZE = 10040,
/// Protocol wrong type for socket.
/// A protocol was specified in the socket function call that does not support the semantics of the socket type requested.
/// For example, the ARPA Internet UDP protocol cannot be specified with a socket type of SOCK_STREAM.
WSAEPROTOTYPE = 10041,
/// Bad protocol option.
/// An unknown, invalid or unsupported option or level was specified in a getsockopt or setsockopt call.
WSAENOPROTOOPT = 10042,
/// Protocol not supported.
/// The requested protocol has not been configured into the system, or no implementation for it exists.
/// For example, a socket call requests a SOCK_DGRAM socket, but specifies a stream protocol.
WSAEPROTONOSUPPORT = 10043,
/// Socket type not supported.
/// The support for the specified socket type does not exist in this address family.
/// For example, the optional type SOCK_RAW might be selected in a socket call, and the implementation does not support SOCK_RAW sockets at all.
WSAESOCKTNOSUPPORT = 10044,
/// Operation not supported.
/// The attempted operation is not supported for the type of object referenced.
/// Usually this occurs when a socket descriptor to a socket that cannot support this operation is trying to accept a connection on a datagram socket.
WSAEOPNOTSUPP = 10045,
/// Protocol family not supported.
/// The protocol family has not been configured into the system or no implementation for it exists.
/// This message has a slightly different meaning from WSAEAFNOSUPPORT.
/// However, it is interchangeable in most cases, and all Windows Sockets functions that return one of these messages also specify WSAEAFNOSUPPORT.
WSAEPFNOSUPPORT = 10046,
/// Address family not supported by protocol family.
/// An address incompatible with the requested protocol was used.
/// All sockets are created with an associated address family (that is, AF_INET for Internet Protocols) and a generic protocol type (that is, SOCK_STREAM).
/// This error is returned if an incorrect protocol is explicitly requested in the socket call, or if an address of the wrong family is used for a socket, for example, in sendto.
WSAEAFNOSUPPORT = 10047,
/// Address already in use.
/// Typically, only one usage of each socket address (protocol/IP address/port) is permitted.
/// This error occurs if an application attempts to bind a socket to an IP address/port that has already been used for an existing socket, or a socket that was not closed properly, or one that is still in the process of closing.
/// For server applications that need to bind multiple sockets to the same port number, consider using setsockopt (SO_REUSEADDR).
/// Client applications usually need not call bind at all; connect chooses an unused port automatically.
/// When bind is called with a wildcard address (involving ADDR_ANY), a WSAEADDRINUSE error could be delayed until the specific address is committed.
/// This could happen with a call to another function later, including connect, listen, WSAConnect, or WSAJoinLeaf.
WSAEADDRINUSE = 10048,
/// Cannot assign requested address.
/// The requested address is not valid in its context.
/// This normally results from an attempt to bind to an address that is not valid for the local computer.
/// This can also result from connect, sendto, WSAConnect, WSAJoinLeaf, or WSASendTo when the remote address or port is not valid for a remote computer (for example, address or port 0).
WSAEADDRNOTAVAIL = 10049,
/// Network is down.
/// A socket operation encountered a dead network.
/// This could indicate a serious failure of the network system (that is, the protocol stack that the Windows Sockets DLL runs over), the network interface, or the local network itself.
WSAENETDOWN = 10050,
/// Network is unreachable.
/// A socket operation was attempted to an unreachable network.
/// This usually means the local software knows no route to reach the remote host.
WSAENETUNREACH = 10051,
/// Network dropped connection on reset.
/// The connection has been broken due to keep-alive activity detecting a failure while the operation was in progress.
/// It can also be returned by setsockopt if an attempt is made to set SO_KEEPALIVE on a connection that has already failed.
WSAENETRESET = 10052,
/// Software caused connection abort.
/// An established connection was aborted by the software in your host computer, possibly due to a data transmission time-out or protocol error.
WSAECONNABORTED = 10053,
/// Connection reset by peer.
/// An existing connection was forcibly closed by the remote host.
/// This normally results if the peer application on the remote host is suddenly stopped, the host is rebooted, the host or remote network interface is disabled, or the remote host uses a hard close (see setsockopt for more information on the SO_LINGER option on the remote socket).
/// This error may also result if a connection was broken due to keep-alive activity detecting a failure while one or more operations are in progress.
/// Operations that were in progress fail with WSAENETRESET. Subsequent operations fail with WSAECONNRESET.
WSAECONNRESET = 10054,
/// No buffer space available.
/// An operation on a socket could not be performed because the system lacked sufficient buffer space or because a queue was full.
WSAENOBUFS = 10055,
/// Socket is already connected.
/// A connect request was made on an already-connected socket.
/// Some implementations also return this error if sendto is called on a connected SOCK_DGRAM socket (for SOCK_STREAM sockets, the to parameter in sendto is ignored) although other implementations treat this as a legal occurrence.
WSAEISCONN = 10056,
/// Socket is not connected.
/// A request to send or receive data was disallowed because the socket is not connected and (when sending on a datagram socket using sendto) no address was supplied.
/// Any other type of operation might also return this error, for example, setsockopt setting SO_KEEPALIVE if the connection has been reset.
WSAENOTCONN = 10057,
/// Cannot send after socket shutdown.
/// A request to send or receive data was disallowed because the socket had already been shut down in that direction with a previous shutdown call.
/// By calling shutdown a partial close of a socket is requested, which is a signal that sending or receiving, or both have been discontinued.
WSAESHUTDOWN = 10058,
/// Too many references.
/// Too many references to some kernel object.
WSAETOOMANYREFS = 10059,
/// Connection timed out.
/// A connection attempt failed because the connected party did not properly respond after a period of time, or the established connection failed because the connected host has failed to respond.
WSAETIMEDOUT = 10060,
/// Connection refused.
/// No connection could be made because the target computer actively refused it.
/// This usually results from trying to connect to a service that is inactive on the foreign host, that is, one with no server application running.
WSAECONNREFUSED = 10061,
/// Cannot translate name.
/// Cannot translate a name.
WSAELOOP = 10062,
/// Name too long.
/// A name component or a name was too long.
WSAENAMETOOLONG = 10063,
/// Host is down.
/// A socket operation failed because the destination host is down. A socket operation encountered a dead host.
/// Networking activity on the local host has not been initiated.
/// These conditions are more likely to be indicated by the error WSAETIMEDOUT.
WSAEHOSTDOWN = 10064,
/// No route to host.
/// A socket operation was attempted to an unreachable host. See WSAENETUNREACH.
WSAEHOSTUNREACH = 10065,
/// Directory not empty.
/// Cannot remove a directory that is not empty.
WSAENOTEMPTY = 10066,
/// Too many processes.
/// A Windows Sockets implementation may have a limit on the number of applications that can use it simultaneously.
/// WSAStartup may fail with this error if the limit has been reached.
WSAEPROCLIM = 10067,
/// User quota exceeded.
/// Ran out of user quota.
WSAEUSERS = 10068,
/// Disk quota exceeded.
/// Ran out of disk quota.
WSAEDQUOT = 10069,
/// Stale file handle reference.
/// The file handle reference is no longer available.
WSAESTALE = 10070,
/// Item is remote.
/// The item is not available locally.
WSAEREMOTE = 10071,
/// Network subsystem is unavailable.
/// This error is returned by WSAStartup if the Windows Sockets implementation cannot function at this time because the underlying system it uses to provide network services is currently unavailable.
/// Users should check:
/// - That the appropriate Windows Sockets DLL file is in the current path.
/// - That they are not trying to use more than one Windows Sockets implementation simultaneously.
/// - If there is more than one Winsock DLL on your system, be sure the first one in the path is appropriate for the network subsystem currently loaded.
/// - The Windows Sockets implementation documentation to be sure all necessary components are currently installed and configured correctly.
WSASYSNOTREADY = 10091,
/// Winsock.dll version out of range.
/// The current Windows Sockets implementation does not support the Windows Sockets specification version requested by the application.
/// Check that no old Windows Sockets DLL files are being accessed.
WSAVERNOTSUPPORTED = 10092,
/// Successful WSAStartup not yet performed.
/// Either the application has not called WSAStartup or WSAStartup failed.
/// The application may be accessing a socket that the current active task does not own (that is, trying to share a socket between tasks), or WSACleanup has been called too many times.
WSANOTINITIALISED = 10093,
/// Graceful shutdown in progress.
/// Returned by WSARecv and WSARecvFrom to indicate that the remote party has initiated a graceful shutdown sequence.
WSAEDISCON = 10101,
/// No more results.
/// No more results can be returned by the WSALookupServiceNext function.
WSAENOMORE = 10102,
/// Call has been canceled.
/// A call to the WSALookupServiceEnd function was made while this call was still processing. The call has been canceled.
WSAECANCELLED = 10103,
/// Procedure call table is invalid.
/// The service provider procedure call table is invalid.
/// A service provider returned a bogus procedure table to Ws2_32.dll.
/// This is usually caused by one or more of the function pointers being NULL.
WSAEINVALIDPROCTABLE = 10104,
/// Service provider is invalid.
/// The requested service provider is invalid.
/// This error is returned by the WSCGetProviderInfo and WSCGetProviderInfo32 functions if the protocol entry specified could not be found.
/// This error is also returned if the service provider returned a version number other than 2.0.
WSAEINVALIDPROVIDER = 10105,
/// Service provider failed to initialize.
/// The requested service provider could not be loaded or initialized.
/// This error is returned if either a service provider's DLL could not be loaded (LoadLibrary failed) or the provider's WSPStartup or NSPStartup function failed.
WSAEPROVIDERFAILEDINIT = 10106,
/// System call failure.
/// A system call that should never fail has failed.
/// This is a generic error code, returned under various conditions.
/// Returned when a system call that should never fail does fail.
/// For example, if a call to WaitForMultipleEvents fails or one of the registry functions fails trying to manipulate the protocol/namespace catalogs.
/// Returned when a provider does not return SUCCESS and does not provide an extended error code.
/// Can indicate a service provider implementation error.
WSASYSCALLFAILURE = 10107,
/// Service not found.
/// No such service is known. The service cannot be found in the specified name space.
WSASERVICE_NOT_FOUND = 10108,
/// Class type not found.
/// The specified class was not found.
WSATYPE_NOT_FOUND = 10109,
/// No more results.
/// No more results can be returned by the WSALookupServiceNext function.
WSA_E_NO_MORE = 10110,
/// Call was canceled.
/// A call to the WSALookupServiceEnd function was made while this call was still processing. The call has been canceled.
WSA_E_CANCELLED = 10111,
/// Database query was refused.
/// A database query failed because it was actively refused.
WSAEREFUSED = 10112,
/// Host not found.
/// No such host is known. The name is not an official host name or alias, or it cannot be found in the database(s) being queried.
/// This error may also be returned for protocol and service queries, and means that the specified name could not be found in the relevant database.
WSAHOST_NOT_FOUND = 11001,
/// Nonauthoritative host not found.
/// This is usually a temporary error during host name resolution and means that the local server did not receive a response from an authoritative server. A retry at some time later may be successful.
WSATRY_AGAIN = 11002,
/// This is a nonrecoverable error.
/// This indicates that some sort of nonrecoverable error occurred during a database lookup.
/// This may be because the database files (for example, BSD-compatible HOSTS, SERVICES, or PROTOCOLS files) could not be found, or a DNS request was returned by the server with a severe error.
WSANO_RECOVERY = 11003,
/// Valid name, no data record of requested type.
/// The requested name is valid and was found in the database, but it does not have the correct associated data being resolved for.
/// The usual example for this is a host name-to-address translation attempt (using gethostbyname or WSAAsyncGetHostByName) which uses the DNS (Domain Name Server).
/// An MX record is returned but no A record, indicating the host itself exists, but is not directly reachable.
WSANO_DATA = 11004,
/// QoS receivers.
/// At least one QoS reserve has arrived.
WSA_QOS_RECEIVERS = 11005,
/// QoS senders.
/// At least one QoS send path has arrived.
WSA_QOS_SENDERS = 11006,
/// No QoS senders.
/// There are no QoS senders.
WSA_QOS_NO_SENDERS = 11007,
/// QoS no receivers.
/// There are no QoS receivers.
WSA_QOS_NO_RECEIVERS = 11008,
/// QoS request confirmed.
/// The QoS reserve request has been confirmed.
WSA_QOS_REQUEST_CONFIRMED = 11009,
/// QoS admission error.
/// A QoS error occurred due to lack of resources.
WSA_QOS_ADMISSION_FAILURE = 11010,
/// QoS policy failure.
/// The QoS request was rejected because the policy system couldn't allocate the requested resource within the existing policy.
WSA_QOS_POLICY_FAILURE = 11011,
/// QoS bad style.
/// An unknown or conflicting QoS style was encountered.
WSA_QOS_BAD_STYLE = 11012,
/// QoS bad object.
/// A problem was encountered with some part of the filterspec or the provider-specific buffer in general.
WSA_QOS_BAD_OBJECT = 11013,
/// QoS traffic control error.
/// An error with the underlying traffic control (TC) API as the generic QoS request was converted for local enforcement by the TC API.
/// This could be due to an out of memory error or to an internal QoS provider error.
WSA_QOS_TRAFFIC_CTRL_ERROR = 11014,
/// QoS generic error.
/// A general QoS error.
WSA_QOS_GENERIC_ERROR = 11015,
/// QoS service type error.
/// An invalid or unrecognized service type was found in the QoS flowspec.
WSA_QOS_ESERVICETYPE = 11016,
/// QoS flowspec error.
/// An invalid or inconsistent flowspec was found in the QOS structure.
WSA_QOS_EFLOWSPEC = 11017,
/// Invalid QoS provider buffer.
/// An invalid QoS provider-specific buffer.
WSA_QOS_EPROVSPECBUF = 11018,
/// Invalid QoS filter style.
/// An invalid QoS filter style was used.
WSA_QOS_EFILTERSTYLE = 11019,
/// Invalid QoS filter type.
/// An invalid QoS filter type was used.
WSA_QOS_EFILTERTYPE = 11020,
/// Incorrect QoS filter count.
/// An incorrect number of QoS FILTERSPECs were specified in the FLOWDESCRIPTOR.
WSA_QOS_EFILTERCOUNT = 11021,
/// Invalid QoS object length.
/// An object with an invalid ObjectLength field was specified in the QoS provider-specific buffer.
WSA_QOS_EOBJLENGTH = 11022,
/// Incorrect QoS flow count.
/// An incorrect number of flow descriptors was specified in the QoS structure.
WSA_QOS_EFLOWCOUNT = 11023,
/// Unrecognized QoS object.
/// An unrecognized object was found in the QoS provider-specific buffer.
WSA_QOS_EUNKOWNPSOBJ = 11024,
/// Invalid QoS policy object.
/// An invalid policy object was found in the QoS provider-specific buffer.
WSA_QOS_EPOLICYOBJ = 11025,
/// Invalid QoS flow descriptor.
/// An invalid QoS flow descriptor was found in the flow descriptor list.
WSA_QOS_EFLOWDESC = 11026,
/// Invalid QoS provider-specific flowspec.
/// An invalid or inconsistent flowspec was found in the QoS provider-specific buffer.
WSA_QOS_EPSFLOWSPEC = 11027,
/// Invalid QoS provider-specific filterspec.
/// An invalid FILTERSPEC was found in the QoS provider-specific buffer.
WSA_QOS_EPSFILTERSPEC = 11028,
/// Invalid QoS shape discard mode object.
/// An invalid shape discard mode object was found in the QoS provider-specific buffer.
WSA_QOS_ESDMODEOBJ = 11029,
/// Invalid QoS shaping rate object.
/// An invalid shaping rate object was found in the QoS provider-specific buffer.
WSA_QOS_ESHAPERATEOBJ = 11030,
/// Reserved policy QoS element type.
/// A reserved policy element was found in the QoS provider-specific buffer.
WSA_QOS_RESERVED_PETYPE = 11031,
_,
};
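// Illustrative sketch, not part of this patch: switching on the WinsockError
// enum returned by the updated WSAGetLastError. The cleanup wrapper name and
// its error mapping are hypothetical; only WSACleanup, WSAGetLastError, and
// WinsockError come from the surrounding code.
pub fn cleanupExample() !void {
    if (WSACleanup() != 0) {
        switch (WSAGetLastError()) {
            .WSANOTINITIALISED => unreachable, // WSAStartup was never called
            .WSAENETDOWN => return error.NetworkSubsystemFailed,
            else => return error.Unexpected,
        }
    }
}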
/// no parameters
const IOC_VOID = 0x80000000;
@ -320,7 +673,7 @@ pub extern "ws2_32" fn WSAStartup(
lpWSAData: *WSADATA,
) callconv(.Stdcall) c_int;
pub extern "ws2_32" fn WSACleanup() callconv(.Stdcall) c_int;
pub extern "ws2_32" fn WSAGetLastError() callconv(.Stdcall) c_int;
pub extern "ws2_32" fn WSAGetLastError() callconv(.Stdcall) WinsockError;
pub extern "ws2_32" fn WSASocketA(
af: c_int,
type: c_int,

View File

@ -604,7 +604,7 @@ test "PackedIntArray at end of available memory" {
p: PackedArray,
};
const allocator = std.heap.page_allocator;
const allocator = std.testing.allocator;
var pad = try allocator.create(Padded);
defer allocator.destroy(pad);
@ -618,7 +618,7 @@ test "PackedIntSlice at end of available memory" {
}
const PackedSlice = PackedIntSlice(u11);
const allocator = std.heap.page_allocator;
const allocator = std.testing.allocator;
var page = try allocator.alloc(u8, std.mem.page_size);
defer allocator.free(page);

View File

@ -1,10 +1,10 @@
const std = @import("std.zig");
const Allocator = std.mem.Allocator;
const debug = std.debug;
const assert = debug.assert;
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
const expectError = std.testing.expectError;
const assert = std.debug.assert;
const testing = std.testing;
const expect = testing.expect;
const expectEqual = testing.expectEqual;
const expectError = testing.expectError;
/// Priority queue for storing generic data. Initialize with `init`.
pub fn PriorityQueue(comptime T: type) type {
@ -239,7 +239,7 @@ fn greaterThan(a: u32, b: u32) bool {
const PQ = PriorityQueue(u32);
test "std.PriorityQueue: add and remove min heap" {
var queue = PQ.init(debug.global_allocator, lessThan);
var queue = PQ.init(testing.allocator, lessThan);
defer queue.deinit();
try queue.add(54);
@ -257,7 +257,7 @@ test "std.PriorityQueue: add and remove min heap" {
}
test "std.PriorityQueue: add and remove same min heap" {
var queue = PQ.init(debug.global_allocator, lessThan);
var queue = PQ.init(testing.allocator, lessThan);
defer queue.deinit();
try queue.add(1);
@ -275,14 +275,14 @@ test "std.PriorityQueue: add and remove same min heap" {
}
test "std.PriorityQueue: removeOrNull on empty" {
var queue = PQ.init(debug.global_allocator, lessThan);
var queue = PQ.init(testing.allocator, lessThan);
defer queue.deinit();
expect(queue.removeOrNull() == null);
}
test "std.PriorityQueue: edge case 3 elements" {
var queue = PQ.init(debug.global_allocator, lessThan);
var queue = PQ.init(testing.allocator, lessThan);
defer queue.deinit();
try queue.add(9);
@ -294,7 +294,7 @@ test "std.PriorityQueue: edge case 3 elements" {
}
test "std.PriorityQueue: peek" {
var queue = PQ.init(debug.global_allocator, lessThan);
var queue = PQ.init(testing.allocator, lessThan);
defer queue.deinit();
expect(queue.peek() == null);
@ -306,7 +306,7 @@ test "std.PriorityQueue: peek" {
}
test "std.PriorityQueue: sift up with odd indices" {
var queue = PQ.init(debug.global_allocator, lessThan);
var queue = PQ.init(testing.allocator, lessThan);
defer queue.deinit();
const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 };
for (items) |e| {
@ -320,7 +320,7 @@ test "std.PriorityQueue: sift up with odd indices" {
}
test "std.PriorityQueue: addSlice" {
var queue = PQ.init(debug.global_allocator, lessThan);
var queue = PQ.init(testing.allocator, lessThan);
defer queue.deinit();
const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 };
try queue.addSlice(items[0..]);
@ -333,8 +333,8 @@ test "std.PriorityQueue: addSlice" {
test "std.PriorityQueue: fromOwnedSlice" {
const items = [_]u32{ 15, 7, 21, 14, 13, 22, 12, 6, 7, 25, 5, 24, 11, 16, 15, 24, 2, 1 };
const heap_items = try std.mem.dupe(debug.global_allocator, u32, items[0..]);
var queue = PQ.fromOwnedSlice(debug.global_allocator, lessThan, heap_items[0..]);
const heap_items = try std.mem.dupe(testing.allocator, u32, items[0..]);
var queue = PQ.fromOwnedSlice(testing.allocator, lessThan, heap_items[0..]);
defer queue.deinit();
const sorted_items = [_]u32{ 1, 2, 5, 6, 7, 7, 11, 12, 13, 14, 15, 15, 16, 21, 22, 24, 24, 25 };
@ -344,7 +344,7 @@ test "std.PriorityQueue: fromOwnedSlice" {
}
test "std.PriorityQueue: add and remove max heap" {
var queue = PQ.init(debug.global_allocator, greaterThan);
var queue = PQ.init(testing.allocator, greaterThan);
defer queue.deinit();
try queue.add(54);
@ -362,7 +362,7 @@ test "std.PriorityQueue: add and remove max heap" {
}
test "std.PriorityQueue: add and remove same max heap" {
var queue = PQ.init(debug.global_allocator, greaterThan);
var queue = PQ.init(testing.allocator, greaterThan);
defer queue.deinit();
try queue.add(1);
@ -380,8 +380,8 @@ test "std.PriorityQueue: add and remove same max heap" {
}
test "std.PriorityQueue: iterator" {
var queue = PQ.init(debug.global_allocator, lessThan);
var map = std.AutoHashMap(u32, void).init(debug.global_allocator);
var queue = PQ.init(testing.allocator, lessThan);
var map = std.AutoHashMap(u32, void).init(testing.allocator);
defer {
queue.deinit();
map.deinit();
@ -402,7 +402,7 @@ test "std.PriorityQueue: iterator" {
}
test "std.PriorityQueue: remove at index" {
var queue = PQ.init(debug.global_allocator, lessThan);
var queue = PQ.init(testing.allocator, lessThan);
defer queue.deinit();
try queue.add(3);

View File

@ -79,7 +79,7 @@ pub fn getEnvMap(allocator: *Allocator) !BufMap {
// https://github.com/WebAssembly/WASI/issues/27
var environ = try allocator.alloc(?[*:0]u8, environ_count + 1);
defer allocator.free(environ);
var environ_buf = try std.heap.page_allocator.alloc(u8, environ_buf_size);
var environ_buf = try allocator.alloc(u8, environ_buf_size);
defer allocator.free(environ_buf);
const environ_get_ret = os.wasi.environ_get(environ.ptr, environ_buf.ptr);
@ -114,7 +114,7 @@ pub fn getEnvMap(allocator: *Allocator) !BufMap {
}
test "os.getEnvMap" {
var env = try getEnvMap(std.debug.global_allocator);
var env = try getEnvMap(std.testing.allocator);
defer env.deinit();
}
@ -165,7 +165,7 @@ pub fn getEnvVarOwned(allocator: *mem.Allocator, key: []const u8) GetEnvVarOwned
}
test "os.getEnvVarOwned" {
var ga = std.debug.global_allocator;
var ga = std.testing.allocator;
testing.expectError(error.EnvironmentVariableNotFound, getEnvVarOwned(ga, "BADENV"));
}
@ -492,10 +492,11 @@ test "windows arg parsing" {
fn testWindowsCmdLine(input_cmd_line: [*]const u8, expected_args: []const []const u8) void {
var it = ArgIteratorWindows.initWithCmdLine(input_cmd_line);
for (expected_args) |expected_arg| {
const arg = it.next(std.debug.global_allocator).? catch unreachable;
const arg = it.next(std.testing.allocator).? catch unreachable;
defer std.testing.allocator.free(arg);
testing.expectEqualSlices(u8, expected_arg, arg);
}
testing.expect(it.next(std.debug.global_allocator) == null);
testing.expect(it.next(std.testing.allocator) == null);
}
pub const UserInfo = struct {

View File

@ -1,4 +1,4 @@
const std = @import("std.zig");
const std = @import("std");
const assert = std.debug.assert;
const testing = std.testing;
const Order = std.math.Order;
@ -11,6 +11,7 @@ const Red = Color.Red;
const Black = Color.Black;
const ReplaceError = error{NotEqual};
const SortError = error{NotUnique}; // The new comparison function results in duplicates.
/// Insert this into your struct that you want to add to a red-black tree.
/// Do not use a pointer. Turn the *rb.Node results of the functions in rb
@ -122,7 +123,8 @@ pub const Node = struct {
return node;
}
fn getLast(node: *Node) *Node {
fn getLast(nodeconst: *Node) *Node {
var node = nodeconst;
while (node.right) |right| {
node = right;
}
@ -132,7 +134,21 @@ pub const Node = struct {
pub const Tree = struct {
root: ?*Node,
compareFn: fn (*Node, *Node) Order,
compareFn: fn (*Node, *Node, *Tree) Order,
/// Re-sorts a tree with a new compare function
pub fn sort(tree: *Tree, newCompareFn: fn (*Node, *Node, *Tree) Order) SortError!void {
var newTree = Tree.init(newCompareFn);
var node: *Node = undefined;
while (true) {
node = tree.first() orelse break;
tree.remove(node);
if (newTree.insert(node) != null) {
return error.NotUnique; // EEXISTS
}
}
tree.* = newTree;
}
/// If you have a need for a version that caches this, please file a bug.
pub fn first(tree: *Tree) ?*Node {
@ -244,6 +260,7 @@ pub const Tree = struct {
return doLookup(key, tree, &parent, &is_left);
}
/// If node is not part of tree, behavior is undefined.
pub fn remove(tree: *Tree, nodeconst: *Node) void {
var node = nodeconst;
// as this has the same value as node, it is unsafe to access node after newnode
@ -389,7 +406,7 @@ pub const Tree = struct {
var new = newconst;
// I assume this can get optimized out if the caller already knows.
if (tree.compareFn(old, new) != .eq) return ReplaceError.NotEqual;
if (tree.compareFn(old, new, tree) != .eq) return ReplaceError.NotEqual;
if (old.getParent()) |parent| {
parent.setChild(new, parent.left == old);
@ -404,9 +421,11 @@ pub const Tree = struct {
new.* = old.*;
}
pub fn init(tree: *Tree, f: fn (*Node, *Node) Order) void {
tree.root = null;
tree.compareFn = f;
pub fn init(f: fn (*Node, *Node, *Tree) Order) Tree {
return Tree{
.root = null,
.compareFn = f,
};
}
};
@ -469,7 +488,7 @@ fn doLookup(key: *Node, tree: *Tree, pparent: *?*Node, is_left: *bool) ?*Node {
is_left.* = false;
while (maybe_node) |node| {
const res = tree.compareFn(node, key);
const res = tree.compareFn(node, key, tree);
if (res == .eq) {
return node;
}
@ -498,7 +517,7 @@ fn testGetNumber(node: *Node) *testNumber {
return @fieldParentPtr(testNumber, "node", node);
}
fn testCompare(l: *Node, r: *Node) Order {
fn testCompare(l: *Node, r: *Node, contextIgnored: *Tree) Order {
var left = testGetNumber(l);
var right = testGetNumber(r);
@ -512,13 +531,17 @@ fn testCompare(l: *Node, r: *Node) Order {
unreachable;
}
fn testCompareReverse(l: *Node, r: *Node, contextIgnored: *Tree) Order {
return testCompare(r, l, contextIgnored);
}
test "rb" {
if (@import("builtin").arch == .aarch64) {
// TODO https://github.com/ziglang/zig/issues/3288
return error.SkipZigTest;
}
var tree: Tree = undefined;
var tree = Tree.init(testCompare);
var ns: [10]testNumber = undefined;
ns[0].value = 42;
ns[1].value = 41;
@ -534,7 +557,6 @@ test "rb" {
var dup: testNumber = undefined;
dup.value = 32345;
tree.init(testCompare);
_ = tree.insert(&ns[1].node);
_ = tree.insert(&ns[2].node);
_ = tree.insert(&ns[3].node);
@ -557,8 +579,7 @@ test "rb" {
}
test "inserting and looking up" {
var tree: Tree = undefined;
tree.init(testCompare);
var tree = Tree.init(testCompare);
var number: testNumber = undefined;
number.value = 1000;
_ = tree.insert(&number.node);
@ -582,8 +603,7 @@ test "multiple inserts, followed by calling first and last" {
// TODO https://github.com/ziglang/zig/issues/3288
return error.SkipZigTest;
}
var tree: Tree = undefined;
tree.init(testCompare);
var tree = Tree.init(testCompare);
var zeroth: testNumber = undefined;
zeroth.value = 0;
var first: testNumber = undefined;
@ -601,4 +621,8 @@ test "multiple inserts, followed by calling first and last" {
var lookupNode: testNumber = undefined;
lookupNode.value = 3;
assert(tree.lookup(&lookupNode.node) == &third.node);
tree.sort(testCompareReverse) catch unreachable;
assert(testGetNumber(tree.first().?).value == 3);
assert(testGetNumber(tree.last().?).value == 0);
assert(tree.lookup(&lookupNode.node) == &third.node);
}

View File

@ -283,7 +283,7 @@ const AtomicEvent = struct {
var waiting = wake_count;
while (waiting != 0) : (waiting -= 1) {
const rc = windows.ntdll.NtReleaseKeyedEvent(handle, key, windows.FALSE, null);
assert(rc == 0);
assert(rc == .SUCCESS);
}
}
@ -302,7 +302,7 @@ const AtomicEvent = struct {
// NtWaitForKeyedEvent doesn't have spurious wake-ups
var rc = windows.ntdll.NtWaitForKeyedEvent(handle, key, windows.FALSE, timeout_ptr);
switch (rc) {
windows.WAIT_TIMEOUT => {
.TIMEOUT => {
// update the wait count to signal that we're not waiting anymore.
// if the .set() thread already observed that we are, perform a
// matching NtWaitForKeyedEvent so that the .set() thread doesn't
@ -311,7 +311,7 @@ const AtomicEvent = struct {
while (true) {
if (waiting == WAKE) {
rc = windows.ntdll.NtWaitForKeyedEvent(handle, key, windows.FALSE, null);
assert(rc == windows.WAIT_OBJECT_0);
assert(rc == .WAIT_0);
break;
} else {
waiting = @cmpxchgWeak(u32, waiters, waiting, waiting - WAIT, .Acquire, .Monotonic) orelse break;
@ -320,7 +320,7 @@ const AtomicEvent = struct {
}
return error.TimedOut;
},
windows.WAIT_OBJECT_0 => {},
.WAIT_0 => {},
else => unreachable,
}
}
@ -336,7 +336,7 @@ const AtomicEvent = struct {
EMPTY => handle = @cmpxchgWeak(usize, &event_handle, EMPTY, LOADING, .Acquire, .Monotonic) orelse {
const handle_ptr = @ptrCast(*windows.HANDLE, &handle);
const access_mask = windows.GENERIC_READ | windows.GENERIC_WRITE;
if (windows.ntdll.NtCreateKeyedEvent(handle_ptr, access_mask, null, 0) != 0)
if (windows.ntdll.NtCreateKeyedEvent(handle_ptr, access_mask, null, 0) != .SUCCESS)
handle = 0;
@atomicStore(usize, &event_handle, handle, .Monotonic);
return @intToPtr(?windows.HANDLE, handle);

View File

@ -339,7 +339,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
}
test "std.SegmentedList" {
var a = std.heap.page_allocator;
var a = std.testing.allocator;
try testSegmentedList(0, a);
try testSegmentedList(1, a);

View File

@ -5,6 +5,66 @@ const mem = std.mem;
const math = std.math;
const builtin = @import("builtin");
pub fn binarySearch(comptime T: type, key: T, items: []const T, comptime compareFn: fn (lhs: T, rhs: T) math.Order) ?usize {
if (items.len < 1)
return null;
var left: usize = 0;
var right: usize = items.len - 1;
while (left <= right) {
// Avoid overflowing in the midpoint calculation
const mid = left + (right - left) / 2;
// Compare the key with the midpoint element
switch (compareFn(key, items[mid])) {
.eq => return mid,
.gt => left = mid + 1,
.lt => right = mid - 1,
}
}
return null;
}
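A quick arithmetic illustration of why the midpoint is computed as left + (right - left) / 2 rather than (left + right) / 2 (the values below are hypothetical and assume std is in scope, as it is in this file):
const max = std.math.maxInt(usize);
const left: usize = max - 10;
const right: usize = max - 2;
// (left + right) would wrap around before the division,
// while left + (right - left) / 2 == max - 6 stays in range.
const mid = left + (right - left) / 2;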
test "std.sort.binarySearch" {
const S = struct {
fn order_u32(lhs: u32, rhs: u32) math.Order {
return math.order(lhs, rhs);
}
fn order_i32(lhs: i32, rhs: i32) math.Order {
return math.order(lhs, rhs);
}
};
testing.expectEqual(
@as(?usize, null),
binarySearch(u32, 1, &[_]u32{}, S.order_u32),
);
testing.expectEqual(
@as(?usize, 0),
binarySearch(u32, 1, &[_]u32{1}, S.order_u32),
);
testing.expectEqual(
@as(?usize, null),
binarySearch(u32, 1, &[_]u32{0}, S.order_u32),
);
testing.expectEqual(
@as(?usize, 4),
binarySearch(u32, 5, &[_]u32{ 1, 2, 3, 4, 5 }, S.order_u32),
);
testing.expectEqual(
@as(?usize, 0),
binarySearch(u32, 2, &[_]u32{ 2, 4, 8, 16, 32, 64 }, S.order_u32),
);
testing.expectEqual(
@as(?usize, 1),
binarySearch(i32, -4, &[_]i32{ -7, -4, 0, 9, 10 }, S.order_i32),
);
testing.expectEqual(
@as(?usize, 3),
binarySearch(i32, 98, &[_]i32{ -100, -25, 2, 98, 99, 100 }, S.order_i32),
);
}
/// Stable in-place sort. O(n) best case, O(pow(n, 2)) worst case. O(1) memory (no allocator required).
pub fn insertionSort(comptime T: type, items: []T, lessThan: fn (lhs: T, rhs: T) bool) void {
var i: usize = 1;

View File

@ -13,6 +13,8 @@ pub fn main() anyerror!void {
};
for (test_fn_list) |test_fn, i| {
std.testing.base_allocator_instance.reset();
var test_node = root_node.start(test_fn.name, null);
test_node.activate();
progress.refresh();
@ -22,6 +24,10 @@ pub fn main() anyerror!void {
if (test_fn.func()) |_| {
ok_count += 1;
test_node.end();
std.testing.allocator_instance.validate() catch |err| switch (err) {
error.Leak => std.debug.panic("", .{}),
else => std.debug.panic("error.{}", .{@errorName(err)}),
};
if (progress.terminal == null) std.debug.warn("OK\n", .{});
} else |err| switch (err) {
error.SkipZigTest => {

View File

@ -976,6 +976,23 @@ pub const Target = union(enum) {
}
}
pub fn getObjectFormat(self: Target) ObjectFormat {
switch (self) {
.Native => return @import("builtin").object_format,
.Cross => blk: {
if (self.isWindows() or self.isUefi()) {
return .coff;
} else if (self.isDarwin()) {
return .macho;
}
if (self.isWasm()) {
return .wasm;
}
return .elf;
},
}
}
pub fn isMinGW(self: Target) bool {
return self.isWindows() and self.isGnu();
}

View File

@ -2,6 +2,18 @@ const builtin = @import("builtin");
const TypeId = builtin.TypeId;
const std = @import("std.zig");
pub const LeakCountAllocator = @import("testing/leak_count_allocator.zig").LeakCountAllocator;
pub const FailingAllocator = @import("testing/failing_allocator.zig").FailingAllocator;
/// This should only be used in temporary test programs.
pub const allocator = &allocator_instance.allocator;
pub var allocator_instance = LeakCountAllocator.init(&base_allocator_instance.allocator);
pub const failing_allocator = &FailingAllocator.init(&base_allocator_instance.allocator, 0).allocator;
pub var base_allocator_instance = std.heap.ThreadSafeFixedBufferAllocator.init(allocator_mem[0..]);
var allocator_mem: [512 * 1024]u8 = undefined;
/// This function is intended to be used only in tests. It prints diagnostics to stderr
/// and then aborts when actual_error_union is not expected_error.
pub fn expectError(expected_error: anyerror, actual_error_union: var) void {

View File

@ -0,0 +1,50 @@
const std = @import("../std.zig");
/// This allocator is used in front of another allocator and counts the number of allocs and frees.
/// The test runner asserts every alloc has a corresponding free at the end of each test.
///
/// The detection algorithm is incredibly primitive and only accounts for the number of calls.
/// This should be replaced by the general purpose debug allocator.
pub const LeakCountAllocator = struct {
count: usize,
allocator: std.mem.Allocator,
internal_allocator: *std.mem.Allocator,
pub fn init(allocator: *std.mem.Allocator) LeakCountAllocator {
return .{
.count = 0,
.allocator = .{
.reallocFn = realloc,
.shrinkFn = shrink,
},
.internal_allocator = allocator,
};
}
fn realloc(allocator: *std.mem.Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
const self = @fieldParentPtr(LeakCountAllocator, "allocator", allocator);
var data = try self.internal_allocator.reallocFn(self.internal_allocator, old_mem, old_align, new_size, new_align);
if (old_mem.len == 0) {
self.count += 1;
}
return data;
}
fn shrink(allocator: *std.mem.Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
const self = @fieldParentPtr(LeakCountAllocator, "allocator", allocator);
if (new_size == 0) {
if (self.count == 0) {
std.debug.panic("error - too many calls to free, most likely double free", .{});
}
self.count -= 1;
}
return self.internal_allocator.shrinkFn(self.internal_allocator, old_mem, old_align, new_size, new_align);
}
pub fn validate(self: LeakCountAllocator) !void {
if (self.count > 0) {
std.debug.warn("error - detected leaked allocations without matching free: {}\n", .{self.count});
return error.Leak;
}
}
};
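A minimal sketch of how this surfaces in a test (the allocation below is hypothetical; the point is that every allocation made through std.testing.allocator must be freed before the test returns, or the test runner's validate() call reports error.Leak):
test "no leaks" {
    const buf = try std.testing.allocator.alloc(u8, 16);
    // Dropping this free would leave count > 0 and fail the test with error.Leak.
    defer std.testing.allocator.free(buf);
}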

View File

@ -501,14 +501,16 @@ test "utf16leToUtf8" {
{
mem.writeIntSliceLittle(u16, utf16le_as_bytes[0..], 'A');
mem.writeIntSliceLittle(u16, utf16le_as_bytes[2..], 'a');
const utf8 = try utf16leToUtf8Alloc(std.debug.global_allocator, &utf16le);
const utf8 = try utf16leToUtf8Alloc(std.testing.allocator, &utf16le);
defer std.testing.allocator.free(utf8);
testing.expect(mem.eql(u8, utf8, "Aa"));
}
{
mem.writeIntSliceLittle(u16, utf16le_as_bytes[0..], 0x80);
mem.writeIntSliceLittle(u16, utf16le_as_bytes[2..], 0xffff);
const utf8 = try utf16leToUtf8Alloc(std.debug.global_allocator, &utf16le);
const utf8 = try utf16leToUtf8Alloc(std.testing.allocator, &utf16le);
defer std.testing.allocator.free(utf8);
testing.expect(mem.eql(u8, utf8, "\xc2\x80" ++ "\xef\xbf\xbf"));
}
@ -516,7 +518,8 @@ test "utf16leToUtf8" {
// the values just outside the surrogate half range
mem.writeIntSliceLittle(u16, utf16le_as_bytes[0..], 0xd7ff);
mem.writeIntSliceLittle(u16, utf16le_as_bytes[2..], 0xe000);
const utf8 = try utf16leToUtf8Alloc(std.debug.global_allocator, &utf16le);
const utf8 = try utf16leToUtf8Alloc(std.testing.allocator, &utf16le);
defer std.testing.allocator.free(utf8);
testing.expect(mem.eql(u8, utf8, "\xed\x9f\xbf" ++ "\xee\x80\x80"));
}
@ -524,7 +527,8 @@ test "utf16leToUtf8" {
// smallest surrogate pair
mem.writeIntSliceLittle(u16, utf16le_as_bytes[0..], 0xd800);
mem.writeIntSliceLittle(u16, utf16le_as_bytes[2..], 0xdc00);
const utf8 = try utf16leToUtf8Alloc(std.debug.global_allocator, &utf16le);
const utf8 = try utf16leToUtf8Alloc(std.testing.allocator, &utf16le);
defer std.testing.allocator.free(utf8);
testing.expect(mem.eql(u8, utf8, "\xf0\x90\x80\x80"));
}
@ -532,14 +536,16 @@ test "utf16leToUtf8" {
// largest surrogate pair
mem.writeIntSliceLittle(u16, utf16le_as_bytes[0..], 0xdbff);
mem.writeIntSliceLittle(u16, utf16le_as_bytes[2..], 0xdfff);
const utf8 = try utf16leToUtf8Alloc(std.debug.global_allocator, &utf16le);
const utf8 = try utf16leToUtf8Alloc(std.testing.allocator, &utf16le);
defer std.testing.allocator.free(utf8);
testing.expect(mem.eql(u8, utf8, "\xf4\x8f\xbf\xbf"));
}
{
mem.writeIntSliceLittle(u16, utf16le_as_bytes[0..], 0xdbff);
mem.writeIntSliceLittle(u16, utf16le_as_bytes[2..], 0xdc00);
const utf8 = try utf16leToUtf8Alloc(std.debug.global_allocator, &utf16le);
const utf8 = try utf16leToUtf8Alloc(std.testing.allocator, &utf16le);
defer std.testing.allocator.free(utf8);
testing.expect(mem.eql(u8, utf8, "\xf4\x8f\xb0\x80"));
}
}

View File

@ -784,6 +784,11 @@ pub const Node = struct {
i -= 1;
}
if (self.align_expr) |align_expr| {
if (i < 1) return align_expr;
i -= 1;
}
if (self.value_expr) |value_expr| {
if (i < 1) return value_expr;
i -= 1;
@ -800,6 +805,11 @@ pub const Node = struct {
if (self.value_expr) |value_expr| {
return value_expr.lastToken();
}
if (self.align_expr) |align_expr| {
// The expression refers to what's inside the parentheses; the
// last token is the closing one
return align_expr.lastToken() + 1;
}
if (self.type_expr) |type_expr| {
return type_expr.lastToken();
}
@ -2291,7 +2301,7 @@ pub const Node = struct {
test "iterate" {
var root = Node.Root{
.base = Node{ .id = Node.Id.Root },
.decls = Node.Root.DeclList.init(std.debug.global_allocator),
.decls = Node.Root.DeclList.init(std.testing.allocator),
.eof_token = 0,
};
var base = &root.base;

View File

@ -1,3 +1,71 @@
test "zig fmt: trailing comma in container declaration" {
try testCanonical(
\\const X = struct { foo: i32 };
\\const X = struct { foo: i32, bar: i32 };
\\const X = struct { foo: i32 = 1, bar: i32 = 2 };
\\const X = struct { foo: i32 align(4), bar: i32 align(4) };
\\const X = struct { foo: i32 align(4) = 1, bar: i32 align(4) = 2 };
\\
);
try testCanonical(
\\test "" {
\\ comptime {
\\ const X = struct {
\\ x: i32
\\ };
\\ }
\\}
\\
);
try testTransform(
\\const X = struct {
\\ foo: i32, bar: i8 };
,
\\const X = struct {
\\ foo: i32, bar: i8
\\};
\\
);
}
test "zig fmt: trailing comma in fn parameter list" {
try testCanonical(
\\pub fn f(
\\ a: i32,
\\ b: i32,
\\) i32 {}
\\pub fn f(
\\ a: i32,
\\ b: i32,
\\) align(8) i32 {}
\\pub fn f(
\\ a: i32,
\\ b: i32,
\\) linksection(".text") i32 {}
\\pub fn f(
\\ a: i32,
\\ b: i32,
\\) callconv(.C) i32 {}
\\pub fn f(
\\ a: i32,
\\ b: i32,
\\) align(8) linksection(".text") i32 {}
\\pub fn f(
\\ a: i32,
\\ b: i32,
\\) align(8) callconv(.C) i32 {}
\\pub fn f(
\\ a: i32,
\\ b: i32,
\\) align(8) linksection(".text") callconv(.C) i32 {}
\\pub fn f(
\\ a: i32,
\\ b: i32,
\\) linksection(".text") callconv(.C) i32 {}
\\
);
}
// TODO: Remove condition after deprecating 'typeOf'. See https://github.com/ziglang/zig/issues/1348
test "zig fmt: change @typeOf to @TypeOf" {
try testTransform(
@ -689,10 +757,7 @@ test "zig fmt: enum decl with no trailing comma" {
try testTransform(
\\const StrLitKind = enum {Normal, C};
,
\\const StrLitKind = enum {
\\ Normal,
\\ C,
\\};
\\const StrLitKind = enum { Normal, C };
\\
);
}
@ -946,16 +1011,10 @@ test "zig fmt: empty block with only comment" {
}
test "zig fmt: no trailing comma on struct decl" {
try testTransform(
try testCanonical(
\\const RoundParam = struct {
\\ k: usize, s: u32, t: u32
\\};
,
\\const RoundParam = struct {
\\ k: usize,
\\ s: u32,
\\ t: u32,
\\};
\\
);
}
@ -2522,10 +2581,8 @@ test "zig fmt: if type expr" {
);
}
test "zig fmt: file ends with struct field" {
try testTransform(
try testCanonical(
\\a: bool
,
\\a: bool,
\\
);
}
@ -2773,7 +2830,7 @@ fn testTransform(source: []const u8, expected_source: []const u8) !void {
const needed_alloc_count = x: {
// Try it once with unlimited memory, make sure it works
var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
var failing_allocator = std.debug.FailingAllocator.init(&fixed_allocator.allocator, maxInt(usize));
var failing_allocator = std.testing.FailingAllocator.init(&fixed_allocator.allocator, maxInt(usize));
var anything_changed: bool = undefined;
const result_source = try testParse(source, &failing_allocator.allocator, &anything_changed);
if (!mem.eql(u8, result_source, expected_source)) {
@ -2797,7 +2854,7 @@ fn testTransform(source: []const u8, expected_source: []const u8) !void {
var fail_index: usize = 0;
while (fail_index < needed_alloc_count) : (fail_index += 1) {
var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]);
var failing_allocator = std.debug.FailingAllocator.init(&fixed_allocator.allocator, fail_index);
var failing_allocator = std.testing.FailingAllocator.init(&fixed_allocator.allocator, fail_index);
var anything_changed: bool = undefined;
if (testParse(source, &failing_allocator.allocator, &anything_changed)) |_| {
return error.NondeterministicMemoryUsage;

View File

@ -25,7 +25,7 @@ pub fn main() !void {
var stdout_file = std.io.getStdOut();
const stdout = &stdout_file.outStream().stream;
try stdout.print("{:.3} MiB/s, {} KiB used \n", .{mb_per_sec, memory_used / 1024});
try stdout.print("{:.3} MiB/s, {} KiB used \n", .{ mb_per_sec, memory_used / 1024 });
}
fn testOnce() usize {

View File

@ -206,6 +206,10 @@ fn renderExtraNewline(tree: *ast.Tree, stream: var, start_col: *usize, node: *as
}
fn renderTopLevelDecl(allocator: *mem.Allocator, stream: var, tree: *ast.Tree, indent: usize, start_col: *usize, decl: *ast.Node) (@TypeOf(stream).Child.Error || Error)!void {
try renderContainerDecl(allocator, stream, tree, indent, start_col, decl, .Newline);
}
fn renderContainerDecl(allocator: *mem.Allocator, stream: var, tree: *ast.Tree, indent: usize, start_col: *usize, decl: *ast.Node, space: Space) (@TypeOf(stream).Child.Error || Error)!void {
switch (decl.id) {
.FnProto => {
const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", decl);
@ -213,11 +217,11 @@ fn renderTopLevelDecl(allocator: *mem.Allocator, stream: var, tree: *ast.Tree, i
try renderDocComments(tree, stream, fn_proto, indent, start_col);
if (fn_proto.body_node) |body_node| {
try renderExpression(allocator, stream, tree, indent, start_col, decl, Space.Space);
try renderExpression(allocator, stream, tree, indent, start_col, body_node, Space.Newline);
try renderExpression(allocator, stream, tree, indent, start_col, decl, .Space);
try renderExpression(allocator, stream, tree, indent, start_col, body_node, space);
} else {
try renderExpression(allocator, stream, tree, indent, start_col, decl, Space.None);
try renderToken(tree, stream, tree.nextToken(decl.lastToken()), indent, start_col, Space.Newline);
try renderExpression(allocator, stream, tree, indent, start_col, decl, .None);
try renderToken(tree, stream, tree.nextToken(decl.lastToken()), indent, start_col, space);
}
},
@ -225,11 +229,11 @@ fn renderTopLevelDecl(allocator: *mem.Allocator, stream: var, tree: *ast.Tree, i
const use_decl = @fieldParentPtr(ast.Node.Use, "base", decl);
if (use_decl.visib_token) |visib_token| {
try renderToken(tree, stream, visib_token, indent, start_col, Space.Space); // pub
try renderToken(tree, stream, visib_token, indent, start_col, .Space); // pub
}
try renderToken(tree, stream, use_decl.use_token, indent, start_col, Space.Space); // usingnamespace
try renderExpression(allocator, stream, tree, indent, start_col, use_decl.expr, Space.None);
try renderToken(tree, stream, use_decl.semicolon_token, indent, start_col, Space.Newline); // ;
try renderToken(tree, stream, use_decl.use_token, indent, start_col, .Space); // usingnamespace
try renderExpression(allocator, stream, tree, indent, start_col, use_decl.expr, .None);
try renderToken(tree, stream, use_decl.semicolon_token, indent, start_col, space); // ;
},
.VarDecl => {
@ -243,9 +247,9 @@ fn renderTopLevelDecl(allocator: *mem.Allocator, stream: var, tree: *ast.Tree, i
const test_decl = @fieldParentPtr(ast.Node.TestDecl, "base", decl);
try renderDocComments(tree, stream, test_decl, indent, start_col);
try renderToken(tree, stream, test_decl.test_token, indent, start_col, Space.Space);
try renderExpression(allocator, stream, tree, indent, start_col, test_decl.name, Space.Space);
try renderExpression(allocator, stream, tree, indent, start_col, test_decl.body_node, Space.Newline);
try renderToken(tree, stream, test_decl.test_token, indent, start_col, .Space);
try renderExpression(allocator, stream, tree, indent, start_col, test_decl.name, .Space);
try renderExpression(allocator, stream, tree, indent, start_col, test_decl.body_node, space);
},
.ContainerField => {
@ -253,62 +257,76 @@ fn renderTopLevelDecl(allocator: *mem.Allocator, stream: var, tree: *ast.Tree, i
try renderDocComments(tree, stream, field, indent, start_col);
if (field.comptime_token) |t| {
try renderToken(tree, stream, t, indent, start_col, Space.Space); // comptime
try renderToken(tree, stream, t, indent, start_col, .Space); // comptime
}
const src_has_trailing_comma = blk: {
const maybe_comma = tree.nextToken(field.lastToken());
break :blk tree.tokens.at(maybe_comma).id == .Comma;
};
// The trailing comma is emitted at the end, but if it's not present
// we still have to respect the specified `space` parameter
const last_token_space: Space = if (src_has_trailing_comma) .None else space;
if (field.type_expr == null and field.value_expr == null) {
return renderToken(tree, stream, field.name_token, indent, start_col, Space.Comma); // name,
try renderToken(tree, stream, field.name_token, indent, start_col, last_token_space); // name
} else if (field.type_expr != null and field.value_expr == null) {
try renderToken(tree, stream, field.name_token, indent, start_col, Space.None); // name
try renderToken(tree, stream, tree.nextToken(field.name_token), indent, start_col, Space.Space); // :
try renderToken(tree, stream, field.name_token, indent, start_col, .None); // name
try renderToken(tree, stream, tree.nextToken(field.name_token), indent, start_col, .Space); // :
if (field.align_expr) |align_value_expr| {
try renderExpression(allocator, stream, tree, indent, start_col, field.type_expr.?, Space.Space); // type
try renderExpression(allocator, stream, tree, indent, start_col, field.type_expr.?, .Space); // type
const lparen_token = tree.prevToken(align_value_expr.firstToken());
const align_kw = tree.prevToken(lparen_token);
const rparen_token = tree.nextToken(align_value_expr.lastToken());
try renderToken(tree, stream, align_kw, indent, start_col, Space.None); // align
try renderToken(tree, stream, lparen_token, indent, start_col, Space.None); // (
try renderExpression(allocator, stream, tree, indent, start_col, align_value_expr, Space.None); // alignment
try renderToken(tree, stream, rparen_token, indent, start_col, Space.Comma); // ),
try renderToken(tree, stream, align_kw, indent, start_col, .None); // align
try renderToken(tree, stream, lparen_token, indent, start_col, .None); // (
try renderExpression(allocator, stream, tree, indent, start_col, align_value_expr, .None); // alignment
try renderToken(tree, stream, rparen_token, indent, start_col, last_token_space); // )
} else {
try renderExpression(allocator, stream, tree, indent, start_col, field.type_expr.?, Space.Comma); // type,
try renderExpression(allocator, stream, tree, indent, start_col, field.type_expr.?, last_token_space); // type
}
} else if (field.type_expr == null and field.value_expr != null) {
try renderToken(tree, stream, field.name_token, indent, start_col, Space.Space); // name
try renderToken(tree, stream, tree.nextToken(field.name_token), indent, start_col, Space.Space); // =
return renderExpression(allocator, stream, tree, indent, start_col, field.value_expr.?, Space.Comma); // value
try renderToken(tree, stream, field.name_token, indent, start_col, .Space); // name
try renderToken(tree, stream, tree.nextToken(field.name_token), indent, start_col, .Space); // =
try renderExpression(allocator, stream, tree, indent, start_col, field.value_expr.?, last_token_space); // value
} else {
try renderToken(tree, stream, field.name_token, indent, start_col, Space.None); // name
try renderToken(tree, stream, tree.nextToken(field.name_token), indent, start_col, Space.Space); // :
try renderToken(tree, stream, field.name_token, indent, start_col, .None); // name
try renderToken(tree, stream, tree.nextToken(field.name_token), indent, start_col, .Space); // :
if (field.align_expr) |align_value_expr| {
try renderExpression(allocator, stream, tree, indent, start_col, field.type_expr.?, Space.Space); // type
try renderExpression(allocator, stream, tree, indent, start_col, field.type_expr.?, .Space); // type
const lparen_token = tree.prevToken(align_value_expr.firstToken());
const align_kw = tree.prevToken(lparen_token);
const rparen_token = tree.nextToken(align_value_expr.lastToken());
try renderToken(tree, stream, align_kw, indent, start_col, Space.None); // align
try renderToken(tree, stream, lparen_token, indent, start_col, Space.None); // (
try renderExpression(allocator, stream, tree, indent, start_col, align_value_expr, Space.None); // alignment
try renderToken(tree, stream, rparen_token, indent, start_col, Space.Space); // )
try renderToken(tree, stream, align_kw, indent, start_col, .None); // align
try renderToken(tree, stream, lparen_token, indent, start_col, .None); // (
try renderExpression(allocator, stream, tree, indent, start_col, align_value_expr, .None); // alignment
try renderToken(tree, stream, rparen_token, indent, start_col, .Space); // )
} else {
try renderExpression(allocator, stream, tree, indent, start_col, field.type_expr.?, Space.Space); // type
try renderExpression(allocator, stream, tree, indent, start_col, field.type_expr.?, .Space); // type
}
try renderToken(tree, stream, tree.prevToken(field.value_expr.?.firstToken()), indent, start_col, Space.Space); // =
return renderExpression(allocator, stream, tree, indent, start_col, field.value_expr.?, Space.Comma); // value,
try renderToken(tree, stream, tree.prevToken(field.value_expr.?.firstToken()), indent, start_col, .Space); // =
try renderExpression(allocator, stream, tree, indent, start_col, field.value_expr.?, last_token_space); // value
}
if (src_has_trailing_comma) {
const comma = tree.nextToken(field.lastToken());
try renderToken(tree, stream, comma, indent, start_col, space);
}
},
.Comptime => {
assert(!decl.requireSemiColon());
try renderExpression(allocator, stream, tree, indent, start_col, decl, Space.Newline);
try renderExpression(allocator, stream, tree, indent, start_col, decl, space);
},
.DocComment => {
const comment = @fieldParentPtr(ast.Node.DocComment, "base", decl);
var it = comment.lines.iterator(0);
while (it.next()) |line_token_index| {
try renderToken(tree, stream, line_token_index.*, indent, start_col, Space.Newline);
try renderToken(tree, stream, line_token_index.*, indent, start_col, .Newline);
if (it.peek()) |_| {
try stream.writeByteNTimes(' ', indent);
}
@ -1150,14 +1168,42 @@ fn renderExpression(
if (container_decl.fields_and_decls.len == 0) {
try renderToken(tree, stream, container_decl.lbrace_token, indent + indent_delta, start_col, Space.None); // {
return renderToken(tree, stream, container_decl.rbrace_token, indent, start_col, space); // }
} else {
}
const src_has_trailing_comma = blk: {
var maybe_comma = tree.prevToken(container_decl.lastToken());
// Doc comments for a field may also appear after the comma, e.g.
// field_name: T, // comment attached to field_name
if (tree.tokens.at(maybe_comma).id == .DocComment)
maybe_comma = tree.prevToken(maybe_comma);
break :blk tree.tokens.at(maybe_comma).id == .Comma;
};
// Check if the first declaration and the { are on the same line
const src_has_newline = !tree.tokensOnSameLine(
container_decl.lbrace_token,
container_decl.fields_and_decls.at(0).*.firstToken(),
);
// We can only print all the elements in-line if all the
// declarations inside are fields
const src_has_only_fields = blk: {
var it = container_decl.fields_and_decls.iterator(0);
while (it.next()) |decl| {
if (decl.*.id != .ContainerField) break :blk false;
}
break :blk true;
};
if (src_has_trailing_comma or !src_has_only_fields) {
// One declaration per line
const new_indent = indent + indent_delta;
try renderToken(tree, stream, container_decl.lbrace_token, new_indent, start_col, Space.Newline); // {
try renderToken(tree, stream, container_decl.lbrace_token, new_indent, start_col, .Newline); // {
var it = container_decl.fields_and_decls.iterator(0);
while (it.next()) |decl| {
try stream.writeByteNTimes(' ', new_indent);
try renderTopLevelDecl(allocator, stream, tree, new_indent, start_col, decl.*);
try renderContainerDecl(allocator, stream, tree, new_indent, start_col, decl.*, .Newline);
if (it.peek()) |next_decl| {
try renderExtraNewline(tree, stream, start_col, next_decl.*);
@ -1165,8 +1211,32 @@ fn renderExpression(
}
try stream.writeByteNTimes(' ', indent);
return renderToken(tree, stream, container_decl.rbrace_token, indent, start_col, space); // }
} else if (src_has_newline) {
// All the declarations on the same line, but place the items on
// their own line
try renderToken(tree, stream, container_decl.lbrace_token, indent, start_col, .Newline); // {
const new_indent = indent + indent_delta;
try stream.writeByteNTimes(' ', new_indent);
var it = container_decl.fields_and_decls.iterator(0);
while (it.next()) |decl| {
const space_after_decl: Space = if (it.peek() == null) .Newline else .Space;
try renderContainerDecl(allocator, stream, tree, new_indent, start_col, decl.*, space_after_decl);
}
try stream.writeByteNTimes(' ', indent);
} else {
// All the declarations on the same line
try renderToken(tree, stream, container_decl.lbrace_token, indent, start_col, .Space); // {
var it = container_decl.fields_and_decls.iterator(0);
while (it.next()) |decl| {
try renderContainerDecl(allocator, stream, tree, indent, start_col, decl.*, .Space);
}
}
return renderToken(tree, stream, container_decl.rbrace_token, indent, start_col, space); // }
},
.ErrorSetDecl => {
@ -1344,11 +1414,22 @@ fn renderExpression(
try renderToken(tree, stream, fn_proto.fn_token, indent, start_col, Space.Space); // fn
break :blk tree.nextToken(fn_proto.fn_token);
};
assert(tree.tokens.at(lparen).id == .LParen);
const rparen = tree.prevToken(switch (fn_proto.return_type) {
ast.Node.FnProto.ReturnType.Explicit => |node| node.firstToken(),
ast.Node.FnProto.ReturnType.InferErrorSet => |node| tree.prevToken(node.firstToken()),
const rparen = tree.prevToken(
// the first token for the annotation expressions is the left
// parenthesis, hence the need for two prevToken calls
if (fn_proto.align_expr) |align_expr|
tree.prevToken(tree.prevToken(align_expr.firstToken()))
else if (fn_proto.section_expr) |section_expr|
tree.prevToken(tree.prevToken(section_expr.firstToken()))
else if (fn_proto.callconv_expr) |callconv_expr|
tree.prevToken(tree.prevToken(callconv_expr.firstToken()))
else switch (fn_proto.return_type) {
.Explicit => |node| node.firstToken(),
.InferErrorSet => |node| tree.prevToken(node.firstToken()),
});
assert(tree.tokens.at(rparen).id == .RParen);
const src_params_trailing_comma = blk: {
const maybe_comma = tree.tokens.at(rparen - 1).id;

View File

@ -866,7 +866,7 @@ fn expectTokens(tl: *TokenList, src: [*:0]const u8, expected: []CToken) void {
}
test "tokenize macro" {
var tl = TokenList.init(std.heap.page_allocator);
var tl = TokenList.init(std.testing.allocator);
defer tl.deinit();
expectTokens(&tl, "TEST(0\n", &[_]CToken{
@ -904,7 +904,7 @@ test "tokenize macro" {
}
test "tokenize macro ops" {
var tl = TokenList.init(std.heap.page_allocator);
var tl = TokenList.init(std.testing.allocator);
defer tl.deinit();
expectTokens(&tl, "ADD A + B", &[_]CToken{

View File

@ -31,7 +31,7 @@ pub async fn renderToLlvm(comp: *Compilation, fn_val: *Value.Fn, code: *ir.Code)
llvm.SetTarget(module, comp.llvm_triple.toSliceConst());
llvm.SetDataLayout(module, comp.target_layout_str);
if (util.getObjectFormat(comp.target) == .coff) {
if (comp.target.getObjectFormat() == .coff) {
llvm.AddModuleCodeViewFlag(module);
} else {
llvm.AddModuleDebugInfoFlag(module);

View File

@ -894,7 +894,7 @@ fn printSection(out: var, label: []const u8, bytes: []const u8) !void {
fn printLabel(out: var, label: []const u8, bytes: []const u8) !void {
var buf: [80]u8 = undefined;
var text = try std.fmt.bufPrint(buf[0..], "{} {} bytes ", .{label, bytes.len});
var text = try std.fmt.bufPrint(buf[0..], "{} {} bytes ", .{ label, bytes.len });
try out.write(text);
var i: usize = text.len;
const end = 79;

View File

@ -76,7 +76,7 @@ pub fn link(comp: *Compilation) !void {
std.debug.warn("\n", .{});
}
const extern_ofmt = toExternObjectFormatType(util.getObjectFormat(comp.target));
const extern_ofmt = toExternObjectFormatType(comp.target.getObjectFormat());
const args_slice = ctx.args.toSlice();
{
@ -105,7 +105,7 @@ extern fn ZigLLDLink(
context: *c_void,
) bool;
extern fn linkDiagCallback(context: *c_void, ptr: [*]const u8, len: usize) void {
fn linkDiagCallback(context: *c_void, ptr: [*]const u8, len: usize) callconv(.C) void {
const ctx = @ptrCast(*Context, @alignCast(@alignOf(Context), context));
ctx.link_err = linkDiagCallbackErrorable(ctx, ptr[0..len]);
}
@ -128,7 +128,7 @@ fn toExternObjectFormatType(ofmt: ObjectFormat) c.ZigLLVM_ObjectFormatType {
}
fn constructLinkerArgs(ctx: *Context) !void {
switch (util.getObjectFormat(ctx.comp.target)) {
switch (ctx.comp.target.getObjectFormat()) {
.unknown => unreachable,
.coff => return constructLinkerArgsCoff(ctx),
.elf => return constructLinkerArgsElf(ctx),

View File

@ -1991,6 +1991,29 @@ fn transInitListExprRecord(
return &init_node.base;
}
fn transCreateNodeArrayType(
rp: RestorePoint,
source_loc: ZigClangSourceLocation,
ty: *const ZigClangType,
len: var,
) TransError!*ast.Node {
var node = try transCreateNodePrefixOp(
rp.c,
.{
.ArrayType = .{
.len_expr = undefined,
.sentinel = null,
},
},
.LBracket,
"[",
);
node.op.ArrayType.len_expr = try transCreateNodeInt(rp.c, len);
_ = try appendToken(rp.c, .RBracket, "]");
node.rhs = try transType(rp, ty, source_loc);
return &node.base;
}
fn transInitListExprArray(
rp: RestorePoint,
scope: *Scope,
@ -2011,8 +2034,13 @@ fn transInitListExprArray(
var init_node: *ast.Node.SuffixOp = undefined;
var cat_tok: ast.TokenIndex = undefined;
if (init_count != 0) {
const dot_tok = try appendToken(rp.c, .Period, ".");
init_node = try transCreateNodeContainerInitializer(rp.c, dot_tok);
const ty_node = try transCreateNodeArrayType(
rp,
loc,
ZigClangQualType_getTypePtr(child_qt),
init_count,
);
init_node = try transCreateNodeArrayInitializer(rp.c, ty_node);
var i: c_uint = 0;
while (i < init_count) : (i += 1) {
const elem_expr = ZigClangInitListExpr_getInit(expr, i);
@ -2026,8 +2054,8 @@ fn transInitListExprArray(
cat_tok = try appendToken(rp.c, .PlusPlus, "++");
}
const dot_tok = try appendToken(rp.c, .Period, ".");
var filler_init_node = try transCreateNodeContainerInitializer(rp.c, dot_tok);
const ty_node = try transCreateNodeArrayType(rp, loc, ZigClangQualType_getTypePtr(child_qt), 1);
var filler_init_node = try transCreateNodeArrayInitializer(rp.c, ty_node);
const filler_val_expr = ZigClangInitListExpr_getArrayFiller(expr);
try filler_init_node.op.ArrayInitializer.push(try transExpr(rp, scope, filler_val_expr, .used, .r_value));
filler_init_node.rtoken = try appendToken(rp.c, .RBrace, "}");
@ -3418,31 +3446,10 @@ fn qualTypeToLog2IntRef(rp: RestorePoint, qt: ZigClangQualType, source_loc: ZigC
}
fn qualTypeChildIsFnProto(qt: ZigClangQualType) bool {
const ty = ZigClangQualType_getTypePtr(qt);
const ty = qualTypeCanon(qt);
switch (ZigClangType_getTypeClass(ty)) {
.FunctionProto, .FunctionNoProto => return true,
.Elaborated => {
const elaborated_ty = @ptrCast(*const ZigClangElaboratedType, ty);
return qualTypeChildIsFnProto(ZigClangElaboratedType_getNamedType(elaborated_ty));
},
.Typedef => {
const typedef_ty = @ptrCast(*const ZigClangTypedefType, ty);
const typedef_decl = ZigClangTypedefType_getDecl(typedef_ty);
return qualTypeChildIsFnProto(ZigClangTypedefNameDecl_getUnderlyingType(typedef_decl));
},
.Paren => {
const paren_type = @ptrCast(*const ZigClangParenType, ty);
const inner_type = ZigClangParenType_getInnerType(paren_type);
switch (ZigClangQualType_getTypeClass(inner_type)) {
.FunctionProto, .FunctionNoProto => return true,
else => return false,
}
},
.Attributed => {
const attr_type = @ptrCast(*const ZigClangAttributedType, ty);
return qualTypeChildIsFnProto(ZigClangAttributedType_getEquivalentType(attr_type));
},
else => return false,
}
}
@ -3878,11 +3885,11 @@ fn transCreateNodeBoolLiteral(c: *Context, value: bool) !*ast.Node {
return &node.base;
}
fn transCreateNodeContainerInitializer(c: *Context, dot_tok: ast.TokenIndex) !*ast.Node.SuffixOp {
fn transCreateNodeArrayInitializer(c: *Context, ty: *ast.Node) !*ast.Node.SuffixOp {
_ = try appendToken(c, .LBrace, "{");
const node = try c.a().create(ast.Node.SuffixOp);
node.* = ast.Node.SuffixOp{
.lhs = .{ .dot = dot_tok },
.lhs = .{ .node = ty },
.op = .{
.ArrayInitializer = ast.Node.SuffixOp.Op.InitList.init(c.a()),
},

View File

@ -32,23 +32,6 @@ pub fn getFloatAbi(self: Target) FloatAbi {
};
}
pub fn getObjectFormat(target: Target) Target.ObjectFormat {
switch (target) {
.Native => return @import("builtin").object_format,
.Cross => blk: {
if (target.isWindows() or target.isUefi()) {
return .coff;
} else if (target.isDarwin()) {
return .macho;
}
if (target.isWasm()) {
return .wasm;
}
return .elf;
},
}
}
pub fn getDynamicLinkerPath(self: Target) ?[]const u8 {
const env = self.getAbi();
const arch = self.getArch();

View File

@ -8846,13 +8846,13 @@ static void resolve_llvm_types_fn_type(CodeGen *g, ZigType *fn_type) {
ZigType *gen_return_type;
if (is_async) {
gen_return_type = g->builtin_types.entry_void;
param_di_types.append(get_llvm_di_type(g, gen_return_type));
param_di_types.append(nullptr);
} else if (!type_has_bits(fn_type_id->return_type)) {
gen_return_type = g->builtin_types.entry_void;
param_di_types.append(get_llvm_di_type(g, gen_return_type));
param_di_types.append(nullptr);
} else if (first_arg_return) {
gen_return_type = g->builtin_types.entry_void;
param_di_types.append(get_llvm_di_type(g, gen_return_type));
param_di_types.append(nullptr);
ZigType *gen_type = get_pointer_to_type(g, fn_type_id->return_type, false);
gen_param_types.append(get_llvm_type(g, gen_type));
param_di_types.append(get_llvm_di_type(g, gen_type));
@ -8955,7 +8955,7 @@ void resolve_llvm_types_fn(CodeGen *g, ZigFn *fn) {
ZigList<ZigLLVMDIType *> param_di_types = {};
ZigList<LLVMTypeRef> gen_param_types = {};
// first "parameter" is return value
param_di_types.append(get_llvm_di_type(g, gen_return_type));
param_di_types.append(nullptr);
ZigType *frame_type = get_fn_frame_type(g, fn);
ZigType *ptr_type = get_pointer_to_type(g, frame_type, false);

View File

@ -8,7 +8,7 @@ const ChildProcess = std.ChildProcess;
var a: *std.mem.Allocator = undefined;
pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena.deinit();
var arg_it = process.args();

View File

@ -445,7 +445,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\const std = @import("std");
\\const io = std.io;
\\const os = std.os;
\\const allocator = std.debug.global_allocator;
\\const allocator = std.testing.allocator;
\\
\\pub fn main() !void {
\\ var args_it = std.process.args();
@ -486,7 +486,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\const std = @import("std");
\\const io = std.io;
\\const os = std.os;
\\const allocator = std.debug.global_allocator;
\\const allocator = std.testing.allocator;
\\
\\pub fn main() !void {
\\ var args_it = std.process.args();

View File

@ -5765,7 +5765,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
\\
\\export fn entry() void {
\\ const a = MdNode.Header {
\\ .text = MdText.init(&std.debug.global_allocator),
\\ .text = MdText.init(&std.testing.allocator),
\\ .weight = HeaderWeight.H1,
\\ };
\\}

View File

@ -3,6 +3,22 @@ const tests = @import("tests.zig");
const nl = std.cstr.line_sep;
pub fn addCases(cases: *tests.RunTranslatedCContext) void {
cases.add("array initializer",
\\#include <stdlib.h>
\\int main(int argc, char **argv) {
\\ int a0[4] = {1};
\\ int a1[4] = {1,2,3,4};
\\ int s0 = 0, s1 = 0;
\\ for (int i = 0; i < 4; i++) {
\\ s0 += a0[i];
\\ s1 += a1[i];
\\ }
\\ if (s0 != 1) abort();
\\ if (s1 != 10) abort();
\\ return 0;
\\}
, "");
cases.add("forward declarations",
\\#include <stdlib.h>
\\int foo(int);

View File

@ -410,8 +410,8 @@ test "heap allocated async function frame" {
var x: i32 = 42;
fn doTheTest() !void {
const frame = try std.heap.page_allocator.create(@Frame(someFunc));
defer std.heap.page_allocator.destroy(frame);
const frame = try std.testing.allocator.create(@Frame(someFunc));
defer std.testing.allocator.destroy(frame);
expect(x == 42);
frame.* = async someFunc();
@ -671,7 +671,7 @@ fn testAsyncAwaitTypicalUsage(
}
fn amain() !void {
const allocator = std.heap.page_allocator; // TODO once we have the debug allocator, use that, so that this can detect leaks
const allocator = std.testing.allocator;
var download_frame = async fetchUrl(allocator, "https://example.com/");
var download_awaited = false;
errdefer if (!download_awaited) {
@ -935,12 +935,12 @@ fn recursiveAsyncFunctionTest(comptime suspending_implementation: bool) type {
_ = async amain(&result);
return result;
} else {
return fib(std.heap.page_allocator, 10) catch unreachable;
return fib(std.testing.allocator, 10) catch unreachable;
}
}
fn amain(result: *u32) void {
var x = async fib(std.heap.page_allocator, 10);
var x = async fib(std.testing.allocator, 10);
result.* = (await x) catch unreachable;
}
};

View File

@ -1,6 +1,7 @@
const std = @import("std");
const debug = std.debug;
const expect = std.testing.expect;
const testing = std.testing;
const expect = testing.expect;
var argv: [*]const [*]const u8 = undefined;
@ -22,7 +23,8 @@ fn foo(args: [][]const u8) void {
}
fn bar(argc: usize) void {
const args = debug.global_allocator.alloc([]const u8, argc) catch unreachable;
const args = testing.allocator.alloc([]const u8, argc) catch unreachable;
defer testing.allocator.free(args);
for (args) |_, i| {
const ptr = argv[i];
args[i] = ptr[0..strlen(ptr)];

View File

@ -201,7 +201,10 @@ pub fn main() !void {
}
test "invalid inputs" {
global_allocator = std.debug.global_allocator;
var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena.deinit();
global_allocator = &arena.allocator;
expectError("}ABC", error.InvalidInput);
expectError("{ABC", error.InvalidInput);
@ -222,7 +225,10 @@ fn expectError(test_input: []const u8, expected_err: anyerror) void {
}
test "valid inputs" {
global_allocator = std.debug.global_allocator;
var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena.deinit();
global_allocator = &arena.allocator;
expectExpansion("{x,y,z}", "x y z");
expectExpansion("{A,B}{x,y}", "Ax Ay Bx By");

View File

@ -4,7 +4,7 @@ const process = std.process;
const fs = std.fs;
const mem = std.mem;
const warn = std.debug.warn;
const allocator = std.debug.global_allocator;
const allocator = std.testing.allocator;
pub fn main() !void {
var args_it = process.args();

View File

@ -1,6 +1,6 @@
const std = @import("std");
pub fn main() void {
const env_map = std.process.getEnvMap(std.debug.global_allocator) catch @panic("unable to get env map");
const env_map = std.process.getEnvMap(std.testing.allocator) catch @panic("unable to get env map");
std.testing.expect(env_map.count() == 0);
}

View File

@ -1,8 +1,8 @@
const std = @import("std");
pub fn main() !void {
const args = try std.process.argsAlloc(std.debug.global_allocator);
defer std.process.argsFree(std.debug.global_allocator, args);
const args = try std.process.argsAlloc(std.testing.allocator);
defer std.process.argsFree(std.testing.allocator, args);
const dynlib_name = args[1];

View File

@ -3,6 +3,26 @@ const builtin = @import("builtin");
const Target = @import("std").Target;
pub fn addCases(cases: *tests.TranslateCContext) void {
cases.add("function prototype translated as optional",
\\typedef void (*fnptr_ty)(void);
\\typedef __attribute__((cdecl)) void (*fnptr_attr_ty)(void);
\\struct foo {
\\ __attribute__((cdecl)) void (*foo)(void);
\\ void (*bar)(void);
\\ fnptr_ty baz;
\\ fnptr_attr_ty qux;
\\};
, &[_][]const u8{
\\pub const fnptr_ty = ?fn () callconv(.C) void;
\\pub const fnptr_attr_ty = ?fn () callconv(.C) void;
\\pub const struct_foo = extern struct {
\\ foo: ?fn () callconv(.C) void,
\\ bar: ?fn () callconv(.C) void,
\\ baz: fnptr_ty,
\\ qux: fnptr_attr_ty,
\\};
});
cases.add("function prototype with parenthesis",
\\void (f0) (void *L);
\\void ((f1)) (void *L);
@ -18,7 +38,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\static const uuid_t UUID_NULL __attribute__ ((unused)) = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
, &[_][]const u8{
\\pub const uuid_t = [16]u8;
\\pub const UUID_NULL: uuid_t = .{
\\pub const UUID_NULL: uuid_t = [16]u8{
\\ @bitCast(u8, @truncate(i8, @as(c_int, 0))),
\\ @bitCast(u8, @truncate(i8, @as(c_int, 0))),
\\ @bitCast(u8, @truncate(i8, @as(c_int, 0))),
@ -87,7 +107,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ .x = @as(c_int, 1),
\\};
\\pub export var ub: union_unnamed_1 = union_unnamed_1{
\\ .c = .{
\\ .c = [4]u8{
\\ @bitCast(u8, @truncate(i8, @as(c_int, 'a'))),
\\ @bitCast(u8, @truncate(i8, @as(c_int, 'b'))),
\\ @bitCast(u8, @truncate(i8, @as(c_int, 'b'))),
@ -1118,12 +1138,12 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\}
, &[_][]const u8{
\\pub fn foo() callconv(.C) void {
\\ var arr: [10]u8 = .{
\\ var arr: [10]u8 = [1]u8{
\\ @bitCast(u8, @truncate(i8, @as(c_int, 1))),
\\ } ++ .{0} ** 9;
\\ var arr1: [10][*c]u8 = .{
\\ } ++ [1]u8{0} ** 9;
\\ var arr1: [10][*c]u8 = [1][*c]u8{
\\ null,
\\ } ++ .{null} ** 9;
\\ } ++ [1][*c]u8{null} ** 9;
\\}
});
@ -1570,7 +1590,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
cases.add("undefined array global",
\\int array[100] = {};
, &[_][]const u8{
\\pub export var array: [100]c_int = .{0} ** 100;
\\pub export var array: [100]c_int = [1]c_int{0} ** 100;
});
cases.add("restrict -> noalias",
@ -1904,7 +1924,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\ return array[index];
\\}
, &[_][]const u8{
\\pub export var array: [100]c_int = .{0} ** 100;
\\pub export var array: [100]c_int = [1]c_int{0} ** 100;
\\pub export fn foo(arg_index: c_int) c_int {
\\ var index = arg_index;
\\ return array[@intCast(c_uint, index)];